code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def images(self):
    """Yield an image for every dataset of the scene that is on the wishlist.

    Datasets not present in ``self.wishlist`` are skipped; each remaining
    dataset is converted via its ``to_image()`` method.
    """
    for dataset_id, dataset in self.datasets.items():
        if dataset_id not in self.wishlist:
            continue
        yield dataset.to_image()
def flush(self):
    """
    Sends buffered data to the target.

    Empties the internal buffer; when it held any content, the content is
    forwarded to the client as a single "chat" message.
    """
    pending = self._buffer.getvalue()
    self._buffer = StringIO()
    if not pending:
        return
    self._client.send_message(self._target, pending, mtype="chat")
def run_process(self, process):
    """Run a single action and report its outcome.

    When unstaged changes exist and are excluded, they are stashed around
    the run and restored afterwards. Files reported modified by git (and
    belonging to the processed set) are recorded on the result by mtime
    comparison, and optionally re-staged.

    :param process: callable action invoked with ``files``, ``cwd`` and
        ``fix`` keyword arguments; must return a result object.
    :return: tuple ``(result, message)`` where *message* is a colorized
        one-line status for display.
    """
    message = u'#{bright}'
    message += u'{} '.format(str(process)[:68]).ljust(69, '.')

    # Stash away unstaged changes so the action only sees the index.
    stashed = False
    if self.unstaged_changes and not self.include_unstaged_changes:
        out, err, code = self.git.stash(keep_index=True, quiet=True)
        stashed = code == 0

    # NOTE: the original wrapped this in a bare ``except: raise`` which is
    # a no-op; ``finally`` alone guarantees the stash is restored.
    try:
        result = process(files=self.files, cwd=self.cwd, fix=self.fix)

        # Check for modified files
        out, err, code = self.git.status(porcelain=True, untracked_files='no')
        for line in out.splitlines():
            file_status = Status(line)
            # Make sure the file is one of the files that was processed
            if file_status.path in self.files and file_status.is_modified:
                mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0
                if mtime > self.file_mtimes.get(file_status.path, 0):
                    self.file_mtimes[file_status.path] = mtime
                    result.add_modified_file(file_status.path)
                    if self.stage_modified_files:
                        self.git.add(file_status.path)
    finally:
        # Restore the working tree exactly as it was before the stash.
        if stashed:
            self.git.reset(hard=True, quiet=True)
            self.git.stash.pop(index=True, quiet=True)

    if result.is_success:
        message += u' #{green}[SUCCESS]'
    elif result.is_failure:
        message += u' #{red}[FAILURE]'
    elif result.is_skip:
        message += u' #{cyan}[SKIPPED]'
    elif result.is_error:
        message += u' #{red}[ERROR!!]'

    return result, message
def _add_plots_to_output(out, data):
    """Add CNVkit plots summarizing called copy number values.

    Each plot builder is invoked in turn; only truthy results are stored
    under ``out["plot"]``.
    """
    out["plot"] = {}
    builders = (
        ("diagram", _add_diagram_plot),
        ("scatter", _add_scatter_plot),
        ("scatter_global", _add_global_scatter_plot),
    )
    for plot_key, build in builders:
        plot = build(out, data)
        if plot:
            out["plot"][plot_key] = plot
    return out
def parse_tweet(raw_tweet, source, now=None):
    """
    Parse a single raw tweet line from a twtxt file into a :class:`Tweet`.

    :param str raw_tweet: a single raw tweet line
    :param Source source: the source of the given tweet
    :param Datetime now: the current datetime (defaults to UTC now)
    :returns: the parsed tweet
    :rtype: Tweet
    :raises ValueError: if the tweet is timestamped in the future
    """
    reference = now if now is not None else datetime.now(timezone.utc)
    raw_created_at, text = raw_tweet.split("\t", 1)
    created_at = parse_iso8601(raw_created_at)
    if created_at > reference:
        raise ValueError("Tweet is from the future")
    return Tweet(click.unstyle(text.strip()), created_at, source)
def get(self, fragment_info):
    """
    Get the value associated with the given key.

    :param fragment_info: the text key
    :type fragment_info: tuple of str ``(language, text)``
    :raises: KeyError if the key is not present in the cache
    """
    if self.is_cached(fragment_info):
        return self.cache[fragment_info]
    raise KeyError(u"Attempt to get text not cached")
def _parse_invited_member(self, values):
    """
    Parse the column texts of an invited row and record the invite.

    Parameters
    ----------
    values: tuple[:class:`str`]
        A list of row contents (name and invitation date).
    """
    name, date = values
    # The header row carries the literal column title instead of a date.
    if date == "Invitation Date":
        return
    self.invites.append(GuildInvite(name, date))
def draw_grid(self):
    """Draws the grid background and blits every tile at its position."""
    background = (0xbb, 0xad, 0xa0)
    self.screen.fill(background, self.origin + (self.game_width, self.game_height))
    for row_index, row in enumerate(self.grid):
        for col_index, tile_value in enumerate(row):
            location = self.get_tile_location(col_index, row_index)
            self.screen.blit(self.tiles[tile_value], location)
def calc_downsample(w, h, target=400):
    """Calculate downsampling value.

    The original branched on ``w > h`` / ``h >= w`` but both branches
    simply divide the smaller dimension by *target*, so this collapses
    to a single expression.

    :param w: image width
    :param h: image height
    :param target: desired size of the shorter dimension
    :return: downsample factor (the short side divided by *target*;
        may be < 1 when the image is smaller than *target*)
    """
    return min(w, h) / target
def _build_opsgenie_payload(self):
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message", "alias", "description", "responders",
"visibleTo", "actions", "tags", "details", "entity",
"source", "priority", "user", "note"
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload | def function[_build_opsgenie_payload, parameter[self]]:
constant[
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
]
variable[payload] assign[=] dictionary[[], []]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b0594dc0>, <ast.Constant object at 0x7da1b05952d0>, <ast.Constant object at 0x7da1b0594df0>, <ast.Constant object at 0x7da1b0594760>, <ast.Constant object at 0x7da1b0594d30>, <ast.Constant object at 0x7da20c6c6500>, <ast.Constant object at 0x7da20c6c6170>, <ast.Constant object at 0x7da20c6c6ad0>, <ast.Constant object at 0x7da20c6c59c0>, <ast.Constant object at 0x7da20c6c74c0>, <ast.Constant object at 0x7da20c6c49d0>, <ast.Constant object at 0x7da20c6c4820>, <ast.Constant object at 0x7da20c6c4220>]]] begin[:]
variable[val] assign[=] call[name[getattr], parameter[name[self], name[key]]]
if name[val] begin[:]
call[name[payload]][name[key]] assign[=] name[val]
return[name[payload]] | keyword[def] identifier[_build_opsgenie_payload] ( identifier[self] ):
literal[string]
identifier[payload] ={}
keyword[for] identifier[key] keyword[in] [
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string]
]:
identifier[val] = identifier[getattr] ( identifier[self] , identifier[key] )
keyword[if] identifier[val] :
identifier[payload] [ identifier[key] ]= identifier[val]
keyword[return] identifier[payload] | def _build_opsgenie_payload(self):
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
to a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in ['message', 'alias', 'description', 'responders', 'visibleTo', 'actions', 'tags', 'details', 'entity', 'source', 'priority', 'user', 'note']:
val = getattr(self, key)
if val:
payload[key] = val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
return payload |
def bin_range_strings(bins, fmt=':g'):
    """Turn a sequence of bin edges into strings describing each bin range.

    Parameters
    ----------
    bins : list_like
        List of anything, usually values of bin edges

    Returns
    -------
    bin_ranges : list
        List of bin ranges

    >>> bin_range_strings((0, 0.5, 1))
    ['0-0.5', '0.5-1']
    """
    # Build the "{spec}-{spec}" template once instead of per pair.
    template = '{' + fmt + '}-{' + fmt + '}'
    return [template.format(lower, upper) for lower, upper in zip(bins, bins[1:])]
def operation_recorder_stage_pywbem_args(self, method, **kwargs):
    """
    **Experimental:** Low-level method used by the operation-specific
    methods of this class.

    *New in pywbem 0.9 as experimental.*

    Forwards the operation method name and its arguments to every
    recorder registered on this connection.
    """
    for active_recorder in self._operation_recorders:
        active_recorder.stage_pywbem_args(method, **kwargs)
def _get_inference_input(self,
                         trans_inputs: List[TranslatorInput]) -> Tuple[mx.nd.NDArray,
                                                                       int,
                                                                       Optional[lexicon.TopKLexicon],
                                                                       List[
                                                                           Optional[constrained.RawConstraintList]],
                                                                       List[
                                                                           Optional[constrained.RawConstraintList]],
                                                                       mx.nd.NDArray]:
    """
    Returns NDArray of images and corresponding bucket_key and an NDArray of maximum output lengths
    for each sentence in the batch.

    :param trans_inputs: List of TranslatorInputs. The path of the image/feature is in the token
        field; per-input constraints (if any) ride along on each TranslatorInput.
    :return: NDArray of images, bucket key, a restricted lexicon (always None here),
        a list of raw constraint lists, a list of phrases to avoid,
        and an NDArray of maximum output lengths.
    """
    batch_size = len(trans_inputs)
    # [None] * n is the idiomatic (and equivalent) way to pre-size these lists.
    image_paths = [None] * batch_size  # type: List[Optional[str]]
    restrict_lexicon = None  # type: Optional[lexicon.TopKLexicon]
    raw_constraints = [None] * batch_size  # type: List[Optional[constrained.RawConstraintList]]
    raw_avoid_list = [None] * batch_size  # type: List[Optional[constrained.RawConstraintList]]

    for j, trans_input in enumerate(trans_inputs):
        # Join relative path with the configured source root, if any.
        path = trans_input.tokens[0]
        if self.source_root is not None:
            path = os.path.join(self.source_root, path)
        image_paths[j] = path
        # Preprocess constraints: map each token phrase to target-vocabulary ids.
        if trans_input.constraints is not None:
            raw_constraints[j] = [data_io.tokens2ids(phrase, self.vocab_target) for phrase in
                                  trans_input.constraints]

    # Read data and zero pad if necessary
    images = self.data_loader(image_paths)
    images = utils_image.zero_pad_features(images, self.source_image_size)

    max_input_length = 0
    max_output_lengths = [self.models[0].get_max_output_length(max_input_length)] * len(image_paths)

    return mx.nd.array(images), max_input_length, restrict_lexicon, raw_constraints, raw_avoid_list, \
        mx.nd.array(max_output_lengths, ctx=self.context, dtype='int32')
def filter_significance(diff, significance):
    """
    Return a copy of *diff* keeping only changes whose numeric deltas meet
    the given *significance* level; keys left with no significant field
    changes are dropped from the 'changed' list entirely.
    """
    pruned = []
    for delta in diff['changed']:
        significant_fields = {name: change
                              for name, change in delta['fields'].items()
                              if _is_significant(change, significance)}
        # A key counts as changed only if it retains a significant field.
        if significant_fields:
            pruned.append({'key': delta['key'], 'fields': significant_fields})
    # Shallow-copy so the caller's diff is left untouched.
    diff = diff.copy()
    diff['changed'] = pruned
    return diff
constant[
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
]
variable[changed] assign[=] call[name[diff]][constant[changed]]
variable[reduced] assign[=] <ast.ListComp object at 0x7da1b0efc3d0>
variable[filtered] assign[=] <ast.ListComp object at 0x7da1b0effcd0>
variable[diff] assign[=] call[name[diff].copy, parameter[]]
call[name[diff]][constant[changed]] assign[=] name[filtered]
return[name[diff]] | keyword[def] identifier[filter_significance] ( identifier[diff] , identifier[significance] ):
literal[string]
identifier[changed] = identifier[diff] [ literal[string] ]
identifier[reduced] =[{ literal[string] : identifier[delta] [ literal[string] ],
literal[string] :{ identifier[k] : identifier[v]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[delta] [ literal[string] ]. identifier[items] ()
keyword[if] identifier[_is_significant] ( identifier[v] , identifier[significance] )}}
keyword[for] identifier[delta] keyword[in] identifier[changed] ]
identifier[filtered] =[ identifier[delta] keyword[for] identifier[delta] keyword[in] identifier[reduced] keyword[if] identifier[delta] [ literal[string] ]]
identifier[diff] = identifier[diff] . identifier[copy] ()
identifier[diff] [ literal[string] ]= identifier[filtered]
keyword[return] identifier[diff] | def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'], 'fields': {k: v for (k, v) in delta['fields'].items() if _is_significant(v, significance)}} for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff |
def get_challenge_for_url(url):
    """ Gets the challenge for the cached URL.

    :param url: the URL the challenge is cached for.
    :raises ValueError: if url is empty or None.
    :rtype: HttpBearerChallenge """
    if not url:
        raise ValueError('URL cannot be None')
    url = parse.urlparse(url)
    # Release the lock even if the cache lookup raises; the previous bare
    # acquire()/release() pair would leak the lock on error.
    _lock.acquire()
    try:
        val = _cache.get(url.netloc)
    finally:
        _lock.release()
    return val
constant[ Gets the challenge for the cached URL.
:param url: the URL the challenge is cached for.
:rtype: HttpBearerChallenge ]
if <ast.UnaryOp object at 0x7da18c4cc910> begin[:]
<ast.Raise object at 0x7da18c4cd540>
variable[url] assign[=] call[name[parse].urlparse, parameter[name[url]]]
call[name[_lock].acquire, parameter[]]
variable[val] assign[=] call[name[_cache].get, parameter[name[url].netloc]]
call[name[_lock].release, parameter[]]
return[name[val]] | keyword[def] identifier[get_challenge_for_url] ( identifier[url] ):
literal[string]
keyword[if] keyword[not] identifier[url] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[url] = identifier[parse] . identifier[urlparse] ( identifier[url] )
identifier[_lock] . identifier[acquire] ()
identifier[val] = identifier[_cache] . identifier[get] ( identifier[url] . identifier[netloc] )
identifier[_lock] . identifier[release] ()
keyword[return] identifier[val] | def get_challenge_for_url(url):
""" Gets the challenge for the cached URL.
:param url: the URL the challenge is cached for.
:rtype: HttpBearerChallenge """
if not url:
raise ValueError('URL cannot be None') # depends on [control=['if'], data=[]]
url = parse.urlparse(url)
_lock.acquire()
val = _cache.get(url.netloc)
_lock.release()
return val |
def translate(root_list, use_bag_semantics=False):
    """
    Translate a list of relational algebra trees into SQL statements.

    :param root_list: a list of tree roots
    :param use_bag_semantics: flag for using relational algebra bag semantics
    :return: a list of SQL statements
    """
    if use_bag_semantics:
        translator = Translator()
    else:
        translator = SetTranslator()
    statements = []
    for root in root_list:
        statements.append(translator.translate(root).to_sql())
    return statements
constant[
Translate a list of relational algebra trees into SQL statements.
:param root_list: a list of tree roots
:param use_bag_semantics: flag for using relational algebra bag semantics
:return: a list of SQL statements
]
variable[translator] assign[=] <ast.IfExp object at 0x7da207f994b0>
return[<ast.ListComp object at 0x7da207f99540>] | keyword[def] identifier[translate] ( identifier[root_list] , identifier[use_bag_semantics] = keyword[False] ):
literal[string]
identifier[translator] =( identifier[Translator] () keyword[if] identifier[use_bag_semantics] keyword[else] identifier[SetTranslator] ())
keyword[return] [ identifier[translator] . identifier[translate] ( identifier[root] ). identifier[to_sql] () keyword[for] identifier[root] keyword[in] identifier[root_list] ] | def translate(root_list, use_bag_semantics=False):
"""
Translate a list of relational algebra trees into SQL statements.
:param root_list: a list of tree roots
:param use_bag_semantics: flag for using relational algebra bag semantics
:return: a list of SQL statements
"""
translator = Translator() if use_bag_semantics else SetTranslator()
return [translator.translate(root).to_sql() for root in root_list] |
def get_course_final_price(self, mode, currency='$', enterprise_catalog_uuid=None):
        """
        Get course mode's SKU discounted price after applying any entitlement available for this user.

        Returns:
            str: Discounted price of the course mode.
        """
        sku = mode['sku']
        try:
            price_details = self.client.baskets.calculate.get(
                sku=[sku],
                username=self.user.username,
                catalog=enterprise_catalog_uuid,
            )
        except (SlumberBaseException, ConnectionError, Timeout) as exc:
            # Treat an ecommerce failure as "no discount information available".
            LOGGER.exception('Failed to get price details for sku %s due to: %s', sku, str(exc))
            price_details = {}
        discounted_price = price_details.get('total_incl_tax', mode['min_price'])
        if discounted_price == mode['min_price']:
            return mode['original_price']
        return format_price(discounted_price, currency)
constant[
Get course mode's SKU discounted price after applying any entitlement available for this user.
Returns:
str: Discounted price of the course mode.
]
<ast.Try object at 0x7da1b0125660>
variable[price] assign[=] call[name[price_details].get, parameter[constant[total_incl_tax], call[name[mode]][constant[min_price]]]]
if compare[name[price] not_equal[!=] call[name[mode]][constant[min_price]]] begin[:]
return[call[name[format_price], parameter[name[price], name[currency]]]]
return[call[name[mode]][constant[original_price]]] | keyword[def] identifier[get_course_final_price] ( identifier[self] , identifier[mode] , identifier[currency] = literal[string] , identifier[enterprise_catalog_uuid] = keyword[None] ):
literal[string]
keyword[try] :
identifier[price_details] = identifier[self] . identifier[client] . identifier[baskets] . identifier[calculate] . identifier[get] (
identifier[sku] =[ identifier[mode] [ literal[string] ]],
identifier[username] = identifier[self] . identifier[user] . identifier[username] ,
identifier[catalog] = identifier[enterprise_catalog_uuid] ,
)
keyword[except] ( identifier[SlumberBaseException] , identifier[ConnectionError] , identifier[Timeout] ) keyword[as] identifier[exc] :
identifier[LOGGER] . identifier[exception] ( literal[string] , identifier[mode] [ literal[string] ], identifier[str] ( identifier[exc] ))
identifier[price_details] ={}
identifier[price] = identifier[price_details] . identifier[get] ( literal[string] , identifier[mode] [ literal[string] ])
keyword[if] identifier[price] != identifier[mode] [ literal[string] ]:
keyword[return] identifier[format_price] ( identifier[price] , identifier[currency] )
keyword[return] identifier[mode] [ literal[string] ] | def get_course_final_price(self, mode, currency='$', enterprise_catalog_uuid=None):
"""
Get course mode's SKU discounted price after applying any entitlement available for this user.
Returns:
str: Discounted price of the course mode.
"""
try:
price_details = self.client.baskets.calculate.get(sku=[mode['sku']], username=self.user.username, catalog=enterprise_catalog_uuid) # depends on [control=['try'], data=[]]
except (SlumberBaseException, ConnectionError, Timeout) as exc:
LOGGER.exception('Failed to get price details for sku %s due to: %s', mode['sku'], str(exc))
price_details = {} # depends on [control=['except'], data=['exc']]
price = price_details.get('total_incl_tax', mode['min_price'])
if price != mode['min_price']:
return format_price(price, currency) # depends on [control=['if'], data=['price']]
return mode['original_price'] |
def reverse_complement(sequence, material):
    '''Reverse complement a sequence.

    :param sequence: Sequence to reverse complement
    :type sequence: str
    :param material: dna, rna, or peptide.
    :type material: str
    '''
    # COMPLEMENTS maps each material to its base-pairing table.
    pair_for = dict(COMPLEMENTS[material])
    complemented = [pair_for[str(base)] for base in reversed(sequence)]
    return ''.join(complemented)
constant[Reverse complement a sequence.
:param sequence: Sequence to reverse complement
:type sequence: str
:param material: dna, rna, or peptide.
:type material: str
]
variable[code] assign[=] call[name[dict], parameter[call[name[COMPLEMENTS]][name[material]]]]
variable[reverse_sequence] assign[=] call[name[sequence]][<ast.Slice object at 0x7da204620ca0>]
return[call[constant[].join, parameter[<ast.ListComp object at 0x7da204621cc0>]]] | keyword[def] identifier[reverse_complement] ( identifier[sequence] , identifier[material] ):
literal[string]
identifier[code] = identifier[dict] ( identifier[COMPLEMENTS] [ identifier[material] ])
identifier[reverse_sequence] = identifier[sequence] [::- literal[int] ]
keyword[return] literal[string] . identifier[join] ([ identifier[code] [ identifier[str] ( identifier[base] )] keyword[for] identifier[base] keyword[in] identifier[reverse_sequence] ]) | def reverse_complement(sequence, material):
"""Reverse complement a sequence.
:param sequence: Sequence to reverse complement
:type sequence: str
:param material: dna, rna, or peptide.
:type material: str
"""
code = dict(COMPLEMENTS[material])
reverse_sequence = sequence[::-1]
return ''.join([code[str(base)] for base in reverse_sequence]) |
def getmodule(object):
    """Return the module an object was defined in, or None if not found.

    NOTE(review): this is Python 2 era code (it consults
    sys.modules['__builtin__'], which is renamed on Python 3); only the
    removed-in-Py3 dict.has_key() calls have been replaced with the `in`
    operator, which is semantically identical on Python 2 as well.
    """
    if ismodule(object):
        return object
    if isclass(object):
        return sys.modules.get(object.__module__)
    try:
        file = getabsfile(object)
    except TypeError:
        # Objects with no source file (e.g. builtins) have no module path.
        return None
    if file in modulesbyfile:
        return sys.modules[modulesbyfile[file]]
    # Refresh the file -> module-name cache from every loaded module.
    for module in sys.modules.values():
        if hasattr(module, '__file__'):
            modulesbyfile[getabsfile(module)] = module.__name__
    if file in modulesbyfile:
        return sys.modules[modulesbyfile[file]]
    # Fall back to matching the object by name in __main__ ...
    main = sys.modules['__main__']
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # ... and finally in the builtin module; implicitly returns None otherwise.
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
constant[Return the module an object was defined in, or None if not found.]
if call[name[ismodule], parameter[name[object]]] begin[:]
return[name[object]]
if call[name[isclass], parameter[name[object]]] begin[:]
return[call[name[sys].modules.get, parameter[name[object].__module__]]]
<ast.Try object at 0x7da1b088dd50>
if call[name[modulesbyfile].has_key, parameter[name[file]]] begin[:]
return[call[name[sys].modules][call[name[modulesbyfile]][name[file]]]]
for taget[name[module]] in starred[call[name[sys].modules.values, parameter[]]] begin[:]
if call[name[hasattr], parameter[name[module], constant[__file__]]] begin[:]
call[name[modulesbyfile]][call[name[getabsfile], parameter[name[module]]]] assign[=] name[module].__name__
if call[name[modulesbyfile].has_key, parameter[name[file]]] begin[:]
return[call[name[sys].modules][call[name[modulesbyfile]][name[file]]]]
variable[main] assign[=] call[name[sys].modules][constant[__main__]]
if call[name[hasattr], parameter[name[main], name[object].__name__]] begin[:]
variable[mainobject] assign[=] call[name[getattr], parameter[name[main], name[object].__name__]]
if compare[name[mainobject] is name[object]] begin[:]
return[name[main]]
variable[builtin] assign[=] call[name[sys].modules][constant[__builtin__]]
if call[name[hasattr], parameter[name[builtin], name[object].__name__]] begin[:]
variable[builtinobject] assign[=] call[name[getattr], parameter[name[builtin], name[object].__name__]]
if compare[name[builtinobject] is name[object]] begin[:]
return[name[builtin]] | keyword[def] identifier[getmodule] ( identifier[object] ):
literal[string]
keyword[if] identifier[ismodule] ( identifier[object] ):
keyword[return] identifier[object]
keyword[if] identifier[isclass] ( identifier[object] ):
keyword[return] identifier[sys] . identifier[modules] . identifier[get] ( identifier[object] . identifier[__module__] )
keyword[try] :
identifier[file] = identifier[getabsfile] ( identifier[object] )
keyword[except] identifier[TypeError] :
keyword[return] keyword[None]
keyword[if] identifier[modulesbyfile] . identifier[has_key] ( identifier[file] ):
keyword[return] identifier[sys] . identifier[modules] [ identifier[modulesbyfile] [ identifier[file] ]]
keyword[for] identifier[module] keyword[in] identifier[sys] . identifier[modules] . identifier[values] ():
keyword[if] identifier[hasattr] ( identifier[module] , literal[string] ):
identifier[modulesbyfile] [ identifier[getabsfile] ( identifier[module] )]= identifier[module] . identifier[__name__]
keyword[if] identifier[modulesbyfile] . identifier[has_key] ( identifier[file] ):
keyword[return] identifier[sys] . identifier[modules] [ identifier[modulesbyfile] [ identifier[file] ]]
identifier[main] = identifier[sys] . identifier[modules] [ literal[string] ]
keyword[if] identifier[hasattr] ( identifier[main] , identifier[object] . identifier[__name__] ):
identifier[mainobject] = identifier[getattr] ( identifier[main] , identifier[object] . identifier[__name__] )
keyword[if] identifier[mainobject] keyword[is] identifier[object] :
keyword[return] identifier[main]
identifier[builtin] = identifier[sys] . identifier[modules] [ literal[string] ]
keyword[if] identifier[hasattr] ( identifier[builtin] , identifier[object] . identifier[__name__] ):
identifier[builtinobject] = identifier[getattr] ( identifier[builtin] , identifier[object] . identifier[__name__] )
keyword[if] identifier[builtinobject] keyword[is] identifier[object] :
keyword[return] identifier[builtin] | def getmodule(object):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object # depends on [control=['if'], data=[]]
if isclass(object):
return sys.modules.get(object.__module__) # depends on [control=['if'], data=[]]
try:
file = getabsfile(object) # depends on [control=['try'], data=[]]
except TypeError:
return None # depends on [control=['except'], data=[]]
if modulesbyfile.has_key(file):
return sys.modules[modulesbyfile[file]] # depends on [control=['if'], data=[]]
for module in sys.modules.values():
if hasattr(module, '__file__'):
modulesbyfile[getabsfile(module)] = module.__name__ # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['module']]
if modulesbyfile.has_key(file):
return sys.modules[modulesbyfile[file]] # depends on [control=['if'], data=[]]
main = sys.modules['__main__']
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
builtin = sys.modules['__builtin__']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def _oneInteraction(self):
        """ Coordinates one interaction between each agent and its environment.

        One simulation period consists of: applying any configured branch
        outages, resetting the market, letting each agent observe and act on
        its task, clearing the market, rewarding each agent, and finally
        rescaling bus demand for the next period.
        """
        self.stepid += 1
        logger.info("Entering simulation period %d." % self.stepid)
        # Apply branches outages.
        if self.branchOutages is not None:
            self.doOutages()
        # Initialise the market.
        self.market.reset()
        # Get an action from each agent and perform it.
        # (Observation must be integrated before the agent can choose an action.)
        for task, agent in zip(self.tasks, self.agents):
            # if self.do_optimisation[agent]:
            # raise Exception("When using a black-box learning algorithm, "
            # "only full episodes can be done.")
            # if not task.isFinished():
            observation = task.getObservation()
            agent.integrateObservation(observation)
            action = agent.getAction()
            task.performAction(action)
        # Clear the market (must happen after all actions, before rewards).
        self.market.run()
        # Reward each agent appropriately.
        for task, agent in zip(self.tasks, self.agents):
            # if not task.isFinished():
            reward = task.getReward()
            agent.giveReward(reward)
        # Scale loads by the next profile coefficient.
        # NOTE(review): _pcycle.next() is the Python 2 iterator protocol; on
        # Python 3 this would need next(self._pcycle) — confirm target version.
        c = self._pcycle.next()
        for bus in self.market.case.buses:
            bus.p_demand = self.pdemand[bus] * c
logger.info("") | def function[_oneInteraction, parameter[self]]:
constant[ Coordinates one interaction between each agent and its environment.
]
<ast.AugAssign object at 0x7da1b255cdc0>
call[name[logger].info, parameter[binary_operation[constant[Entering simulation period %d.] <ast.Mod object at 0x7da2590d6920> name[self].stepid]]]
if compare[name[self].branchOutages is_not constant[None]] begin[:]
call[name[self].doOutages, parameter[]]
call[name[self].market.reset, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b255cbb0>, <ast.Name object at 0x7da1b255cbe0>]]] in starred[call[name[zip], parameter[name[self].tasks, name[self].agents]]] begin[:]
variable[observation] assign[=] call[name[task].getObservation, parameter[]]
call[name[agent].integrateObservation, parameter[name[observation]]]
variable[action] assign[=] call[name[agent].getAction, parameter[]]
call[name[task].performAction, parameter[name[action]]]
call[name[self].market.run, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b255d720>, <ast.Name object at 0x7da1b255c160>]]] in starred[call[name[zip], parameter[name[self].tasks, name[self].agents]]] begin[:]
variable[reward] assign[=] call[name[task].getReward, parameter[]]
call[name[agent].giveReward, parameter[name[reward]]]
variable[c] assign[=] call[name[self]._pcycle.next, parameter[]]
for taget[name[bus]] in starred[name[self].market.case.buses] begin[:]
name[bus].p_demand assign[=] binary_operation[call[name[self].pdemand][name[bus]] * name[c]]
call[name[logger].info, parameter[constant[]]] | keyword[def] identifier[_oneInteraction] ( identifier[self] ):
literal[string]
identifier[self] . identifier[stepid] += literal[int]
identifier[logger] . identifier[info] ( literal[string] % identifier[self] . identifier[stepid] )
keyword[if] identifier[self] . identifier[branchOutages] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[doOutages] ()
identifier[self] . identifier[market] . identifier[reset] ()
keyword[for] identifier[task] , identifier[agent] keyword[in] identifier[zip] ( identifier[self] . identifier[tasks] , identifier[self] . identifier[agents] ):
identifier[observation] = identifier[task] . identifier[getObservation] ()
identifier[agent] . identifier[integrateObservation] ( identifier[observation] )
identifier[action] = identifier[agent] . identifier[getAction] ()
identifier[task] . identifier[performAction] ( identifier[action] )
identifier[self] . identifier[market] . identifier[run] ()
keyword[for] identifier[task] , identifier[agent] keyword[in] identifier[zip] ( identifier[self] . identifier[tasks] , identifier[self] . identifier[agents] ):
identifier[reward] = identifier[task] . identifier[getReward] ()
identifier[agent] . identifier[giveReward] ( identifier[reward] )
identifier[c] = identifier[self] . identifier[_pcycle] . identifier[next] ()
keyword[for] identifier[bus] keyword[in] identifier[self] . identifier[market] . identifier[case] . identifier[buses] :
identifier[bus] . identifier[p_demand] = identifier[self] . identifier[pdemand] [ identifier[bus] ]* identifier[c]
identifier[logger] . identifier[info] ( literal[string] ) | def _oneInteraction(self):
""" Coordinates one interaction between each agent and its environment.
"""
self.stepid += 1
logger.info('Entering simulation period %d.' % self.stepid)
# Apply branches outages.
if self.branchOutages is not None:
self.doOutages() # depends on [control=['if'], data=[]]
# Initialise the market.
self.market.reset()
# Get an action from each agent and perform it.
for (task, agent) in zip(self.tasks, self.agents):
# if self.do_optimisation[agent]:
# raise Exception("When using a black-box learning algorithm, "
# "only full episodes can be done.")
# if not task.isFinished():
observation = task.getObservation()
agent.integrateObservation(observation)
action = agent.getAction()
task.performAction(action) # depends on [control=['for'], data=[]]
# Clear the market.
self.market.run()
# Reward each agent appropriately.
for (task, agent) in zip(self.tasks, self.agents):
# if not task.isFinished():
reward = task.getReward()
agent.giveReward(reward) # depends on [control=['for'], data=[]]
# Scale loads.
c = self._pcycle.next()
for bus in self.market.case.buses:
bus.p_demand = self.pdemand[bus] * c # depends on [control=['for'], data=['bus']]
logger.info('') |
def add_row(self, label, row_data, columns=""):
        """
        Add a row with data to self.df (modified in place) and return it.

        If row_data contains keys that are not yet columns, those columns are
        added (existing rows get None); columns missing from row_data are set
        to None for the new row. Note that row_data itself is mutated to hold
        the filled-in None values.

        Parameters
        ----------
        label : index label for the new row (coerced to str).
        row_data : dict mapping column name -> value for the new row.
        columns : optional column order; applied only when it is a
            permutation of the existing columns.
        """
        if len(columns):
            if sorted(self.df.columns) == sorted(columns):
                # Reorder the existing columns to the requested order.
                self.df.columns = columns
            # NOTE(review): the original code also built a merged column list
            # here when `columns` was not a permutation of self.df.columns,
            # but never applied it; that dead computation has been removed.
        # Make sure all columns have data or None.
        if sorted(row_data.keys()) != sorted(self.df.columns):
            # Add any new column names to the frame.
            for key in row_data:
                if key not in self.df.columns:
                    self.df[key] = None
            # Add missing column names into row_data.
            for col_label in self.df.columns:
                if col_label not in list(row_data.keys()):
                    row_data[col_label] = None
        # Work with string labels throughout.
        self.df.index = self.df.index.astype(str)
        label = str(label)
        # Create the row under a temporary unique label so that an existing
        # row with the same label is not silently extended, then rename the
        # temporary label into place.
        self.df.loc[label + "new"] = pd.Series(row_data)
        self.df.rename(index={label + "new": label}, inplace=True)
        return self.df
constant[
Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace
]
if call[name[len], parameter[name[columns]]] begin[:]
if compare[call[name[sorted], parameter[name[self].df.columns]] equal[==] call[name[sorted], parameter[name[columns]]]] begin[:]
name[self].df.columns assign[=] name[columns]
if compare[call[name[sorted], parameter[call[name[row_data].keys, parameter[]]]] not_equal[!=] call[name[sorted], parameter[name[self].df.columns]]] begin[:]
for taget[name[key]] in starred[name[row_data]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].df.columns] begin[:]
call[name[self].df][name[key]] assign[=] constant[None]
for taget[name[col_label]] in starred[name[self].df.columns] begin[:]
if compare[name[col_label] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[row_data].keys, parameter[]]]]] begin[:]
call[name[row_data]][name[col_label]] assign[=] constant[None]
name[self].df.index assign[=] call[name[self].df.index.astype, parameter[name[str]]]
variable[label] assign[=] call[name[str], parameter[name[label]]]
call[name[self].df.loc][binary_operation[name[label] + constant[new]]] assign[=] call[name[pd].Series, parameter[name[row_data]]]
call[name[self].df.rename, parameter[]]
return[name[self].df] | keyword[def] identifier[add_row] ( identifier[self] , identifier[label] , identifier[row_data] , identifier[columns] = literal[string] ):
literal[string]
keyword[if] identifier[len] ( identifier[columns] ):
keyword[if] identifier[sorted] ( identifier[self] . identifier[df] . identifier[columns] )== identifier[sorted] ( identifier[columns] ):
identifier[self] . identifier[df] . identifier[columns] = identifier[columns]
keyword[else] :
identifier[new_columns] =[]
identifier[new_columns] . identifier[extend] ( identifier[columns] )
keyword[for] identifier[col] keyword[in] identifier[self] . identifier[df] . identifier[columns] :
keyword[if] identifier[col] keyword[not] keyword[in] identifier[new_columns] :
identifier[new_columns] . identifier[append] ( identifier[col] )
keyword[if] identifier[sorted] ( identifier[row_data] . identifier[keys] ())!= identifier[sorted] ( identifier[self] . identifier[df] . identifier[columns] ):
keyword[for] identifier[key] keyword[in] identifier[row_data] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[df] . identifier[columns] :
identifier[self] . identifier[df] [ identifier[key] ]= keyword[None]
keyword[for] identifier[col_label] keyword[in] identifier[self] . identifier[df] . identifier[columns] :
keyword[if] identifier[col_label] keyword[not] keyword[in] identifier[list] ( identifier[row_data] . identifier[keys] ()):
identifier[row_data] [ identifier[col_label] ]= keyword[None]
identifier[self] . identifier[df] . identifier[index] = identifier[self] . identifier[df] . identifier[index] . identifier[astype] ( identifier[str] )
identifier[label] = identifier[str] ( identifier[label] )
identifier[self] . identifier[df] . identifier[loc] [ identifier[label] + literal[string] ]= identifier[pd] . identifier[Series] ( identifier[row_data] )
identifier[self] . identifier[df] . identifier[rename] ( identifier[index] ={ identifier[label] + literal[string] : identifier[label] }, identifier[inplace] = keyword[True] )
keyword[return] identifier[self] . identifier[df] | def add_row(self, label, row_data, columns=''):
"""
Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace
"""
# use provided column order, making sure you don't lose any values
# from self.df.columns
if len(columns):
if sorted(self.df.columns) == sorted(columns):
self.df.columns = columns # depends on [control=['if'], data=[]]
else:
new_columns = []
new_columns.extend(columns)
for col in self.df.columns:
if col not in new_columns:
new_columns.append(col) # depends on [control=['if'], data=['col', 'new_columns']] # depends on [control=['for'], data=['col']] # depends on [control=['if'], data=[]]
# makes sure all columns have data or None
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None # depends on [control=['if'], data=['col_label']] # depends on [control=['for'], data=['col_label']] # depends on [control=['if'], data=[]]
# (make sure you are working with strings)
self.df.index = self.df.index.astype(str)
label = str(label)
# create a new row with suffix "new"
# (this ensures that you get a unique, new row,
# instead of adding on to an existing row with the same label)
self.df.loc[label + 'new'] = pd.Series(row_data)
# rename it to be correct
self.df.rename(index={label + 'new': label}, inplace=True)
# use next line to sort index inplace
#self.df.sort_index(inplace=True)
return self.df |
def update_stack(self, name, working_bucket, wait=False, update_only=False, disable_progress=False):
    """
    Update or create the CF stack managed by Zappa.

    Renders self.cf_template to JSON, uploads it to `working_bucket` on S3,
    then either creates the CloudFormation stack or updates the existing one
    from the uploaded template URL. Finally removes the local template file
    and its S3 copy.

    name -- CloudFormation stack name; also written into the 'ZappaProject' tag
    working_bucket -- S3 bucket that receives the rendered template
    wait -- if True, poll every 3 seconds until the stack reaches a terminal
            state, driving a tqdm progress bar from stack-resource counts
    update_only -- if True and the stack does not exist yet, print a warning
                   and return instead of creating it
    disable_progress -- pass-through flag that silences progress bars

    Raises EnvironmentError if the stack lands in a rollback/delete state.
    """
    capabilities = []

    # Timestamped template key so repeated deploys never collide in S3.
    template = name + '-template-' + str(int(time.time())) + '.json'
    with open(template, 'wb') as out:
        out.write(bytes(self.cf_template.to_json(indent=None, separators=(',',':')), "utf-8"))

    self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
    # GovCloud buckets live on a region-specific S3 endpoint.
    if self.boto_session.region_name == "us-gov-west-1":
        url = 'https://s3-us-gov-west-1.amazonaws.com/{0}/{1}'.format(working_bucket, template)
    else:
        url = 'https://s3.amazonaws.com/{0}/{1}'.format(working_bucket, template)

    # Re-apply the user's tags, forcing 'ZappaProject' to this stack's name.
    tags = [{'Key': key, 'Value': self.tags[key]}
            for key in self.tags.keys()
            if key != 'ZappaProject']
    tags.append({'Key':'ZappaProject','Value':name})
    update = True

    # Probe for an existing stack: describe_stacks raises ClientError when
    # the stack is missing, which tells us to create rather than update.
    try:
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        update = False

    if update_only and not update:
        print('CloudFormation stack missing, re-deploy to enable updates')
        return

    if not update:
        self.cf_client.create_stack(StackName=name,
                                    Capabilities=capabilities,
                                    TemplateURL=url,
                                    Tags=tags)
        print('Waiting for stack {0} to create (this can take a bit)..'.format(name))
    else:
        try:
            self.cf_client.update_stack(StackName=name,
                                        Capabilities=capabilities,
                                        TemplateURL=url,
                                        Tags=tags)
            print('Waiting for stack {0} to update..'.format(name))
        except botocore.client.ClientError as e:
            # A no-op update is not an error; just skip the wait loop below.
            if e.response['Error']['Message'] == 'No updates are to be performed.':
                wait = False
            else:
                raise

    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator('list_stack_resources')
        progress = tqdm(total=total_resources, unit='res', disable=disable_progress)

        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result['Stacks']:
                continue # might need to wait a bit

            if result['Stacks'][0]['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
                break

            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result['Stacks'][0]['StackStatus'] in [
                                                        'DELETE_COMPLETE',
                                                        'DELETE_IN_PROGRESS',
                                                        'ROLLBACK_IN_PROGRESS',
                                                        'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
                                                        'UPDATE_ROLLBACK_COMPLETE'
                                                    ]:
                raise EnvironmentError("Stack creation failed. "
                                       "Please check your CloudFormation console. "
                                       "You may also need to `undeploy`.")

            # Count resources that have reached a *_COMPLETE status across
            # all pages, then advance the progress bar by the delta.
            count = 0
            for result in sr.paginate(StackName=name):
                done = (1 for x in result['StackResourceSummaries']
                        if 'COMPLETE' in x['ResourceStatus'])
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
            current_resources = count
        progress.close()

    # Best-effort cleanup of the local template file and its S3 copy.
    try:
        os.remove(template)
    except OSError:
        pass

    self.remove_from_s3(template, working_bucket)
constant[
Update or create the CF stack managed by Zappa.
]
variable[capabilities] assign[=] list[[]]
variable[template] assign[=] binary_operation[binary_operation[binary_operation[name[name] + constant[-template-]] + call[name[str], parameter[call[name[int], parameter[call[name[time].time, parameter[]]]]]]] + constant[.json]]
with call[name[open], parameter[name[template], constant[wb]]] begin[:]
call[name[out].write, parameter[call[name[bytes], parameter[call[name[self].cf_template.to_json, parameter[]], constant[utf-8]]]]]
call[name[self].upload_to_s3, parameter[name[template], name[working_bucket]]]
if compare[name[self].boto_session.region_name equal[==] constant[us-gov-west-1]] begin[:]
variable[url] assign[=] call[constant[https://s3-us-gov-west-1.amazonaws.com/{0}/{1}].format, parameter[name[working_bucket], name[template]]]
variable[tags] assign[=] <ast.ListComp object at 0x7da1b1f8d8d0>
call[name[tags].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1f8feb0>, <ast.Constant object at 0x7da1b1f8fd30>], [<ast.Constant object at 0x7da1b1f8ca90>, <ast.Name object at 0x7da1b1f8efb0>]]]]
variable[update] assign[=] constant[True]
<ast.Try object at 0x7da1b1f8c580>
if <ast.BoolOp object at 0x7da1b1f8fb80> begin[:]
call[name[print], parameter[constant[CloudFormation stack missing, re-deploy to enable updates]]]
return[None]
if <ast.UnaryOp object at 0x7da1b1f8d630> begin[:]
call[name[self].cf_client.create_stack, parameter[]]
call[name[print], parameter[call[constant[Waiting for stack {0} to create (this can take a bit)..].format, parameter[name[name]]]]]
if name[wait] begin[:]
variable[total_resources] assign[=] call[name[len], parameter[name[self].cf_template.resources]]
variable[current_resources] assign[=] constant[0]
variable[sr] assign[=] call[name[self].cf_client.get_paginator, parameter[constant[list_stack_resources]]]
variable[progress] assign[=] call[name[tqdm], parameter[]]
while constant[True] begin[:]
call[name[time].sleep, parameter[constant[3]]]
variable[result] assign[=] call[name[self].cf_client.describe_stacks, parameter[]]
if <ast.UnaryOp object at 0x7da1b1f8c040> begin[:]
continue
if compare[call[call[call[name[result]][constant[Stacks]]][constant[0]]][constant[StackStatus]] in list[[<ast.Constant object at 0x7da1b1f8d990>, <ast.Constant object at 0x7da1b1f8e2c0>]]] begin[:]
break
if compare[call[call[call[name[result]][constant[Stacks]]][constant[0]]][constant[StackStatus]] in list[[<ast.Constant object at 0x7da1b1f8fbe0>, <ast.Constant object at 0x7da1b1f8e3e0>, <ast.Constant object at 0x7da1b1f8ff40>, <ast.Constant object at 0x7da1b1f8f310>, <ast.Constant object at 0x7da1b1f8e4a0>]]] begin[:]
<ast.Raise object at 0x7da1b1f8dff0>
variable[count] assign[=] constant[0]
for taget[name[result]] in starred[call[name[sr].paginate, parameter[]]] begin[:]
variable[done] assign[=] <ast.GeneratorExp object at 0x7da1b1f8f220>
<ast.AugAssign object at 0x7da1b1f8dea0>
if name[count] begin[:]
if compare[binary_operation[name[count] - name[current_resources]] greater[>] constant[0]] begin[:]
call[name[progress].update, parameter[binary_operation[name[count] - name[current_resources]]]]
variable[current_resources] assign[=] name[count]
call[name[progress].close, parameter[]]
<ast.Try object at 0x7da1b2034310>
call[name[self].remove_from_s3, parameter[name[template], name[working_bucket]]] | keyword[def] identifier[update_stack] ( identifier[self] , identifier[name] , identifier[working_bucket] , identifier[wait] = keyword[False] , identifier[update_only] = keyword[False] , identifier[disable_progress] = keyword[False] ):
literal[string]
identifier[capabilities] =[]
identifier[template] = identifier[name] + literal[string] + identifier[str] ( identifier[int] ( identifier[time] . identifier[time] ()))+ literal[string]
keyword[with] identifier[open] ( identifier[template] , literal[string] ) keyword[as] identifier[out] :
identifier[out] . identifier[write] ( identifier[bytes] ( identifier[self] . identifier[cf_template] . identifier[to_json] ( identifier[indent] = keyword[None] , identifier[separators] =( literal[string] , literal[string] )), literal[string] ))
identifier[self] . identifier[upload_to_s3] ( identifier[template] , identifier[working_bucket] , identifier[disable_progress] = identifier[disable_progress] )
keyword[if] identifier[self] . identifier[boto_session] . identifier[region_name] == literal[string] :
identifier[url] = literal[string] . identifier[format] ( identifier[working_bucket] , identifier[template] )
keyword[else] :
identifier[url] = literal[string] . identifier[format] ( identifier[working_bucket] , identifier[template] )
identifier[tags] =[{ literal[string] : identifier[key] , literal[string] : identifier[self] . identifier[tags] [ identifier[key] ]}
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[tags] . identifier[keys] ()
keyword[if] identifier[key] != literal[string] ]
identifier[tags] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[name] })
identifier[update] = keyword[True]
keyword[try] :
identifier[self] . identifier[cf_client] . identifier[describe_stacks] ( identifier[StackName] = identifier[name] )
keyword[except] identifier[botocore] . identifier[client] . identifier[ClientError] :
identifier[update] = keyword[False]
keyword[if] identifier[update_only] keyword[and] keyword[not] identifier[update] :
identifier[print] ( literal[string] )
keyword[return]
keyword[if] keyword[not] identifier[update] :
identifier[self] . identifier[cf_client] . identifier[create_stack] ( identifier[StackName] = identifier[name] ,
identifier[Capabilities] = identifier[capabilities] ,
identifier[TemplateURL] = identifier[url] ,
identifier[Tags] = identifier[tags] )
identifier[print] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[else] :
keyword[try] :
identifier[self] . identifier[cf_client] . identifier[update_stack] ( identifier[StackName] = identifier[name] ,
identifier[Capabilities] = identifier[capabilities] ,
identifier[TemplateURL] = identifier[url] ,
identifier[Tags] = identifier[tags] )
identifier[print] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[except] identifier[botocore] . identifier[client] . identifier[ClientError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]== literal[string] :
identifier[wait] = keyword[False]
keyword[else] :
keyword[raise]
keyword[if] identifier[wait] :
identifier[total_resources] = identifier[len] ( identifier[self] . identifier[cf_template] . identifier[resources] )
identifier[current_resources] = literal[int]
identifier[sr] = identifier[self] . identifier[cf_client] . identifier[get_paginator] ( literal[string] )
identifier[progress] = identifier[tqdm] ( identifier[total] = identifier[total_resources] , identifier[unit] = literal[string] , identifier[disable] = identifier[disable_progress] )
keyword[while] keyword[True] :
identifier[time] . identifier[sleep] ( literal[int] )
identifier[result] = identifier[self] . identifier[cf_client] . identifier[describe_stacks] ( identifier[StackName] = identifier[name] )
keyword[if] keyword[not] identifier[result] [ literal[string] ]:
keyword[continue]
keyword[if] identifier[result] [ literal[string] ][ literal[int] ][ literal[string] ] keyword[in] [ literal[string] , literal[string] ]:
keyword[break]
keyword[if] identifier[result] [ literal[string] ][ literal[int] ][ literal[string] ] keyword[in] [
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
]:
keyword[raise] identifier[EnvironmentError] ( literal[string]
literal[string]
literal[string] )
identifier[count] = literal[int]
keyword[for] identifier[result] keyword[in] identifier[sr] . identifier[paginate] ( identifier[StackName] = identifier[name] ):
identifier[done] =( literal[int] keyword[for] identifier[x] keyword[in] identifier[result] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[x] [ literal[string] ])
identifier[count] += identifier[sum] ( identifier[done] )
keyword[if] identifier[count] :
keyword[if] ( identifier[count] - identifier[current_resources] )> literal[int] :
identifier[progress] . identifier[update] ( identifier[count] - identifier[current_resources] )
identifier[current_resources] = identifier[count]
identifier[progress] . identifier[close] ()
keyword[try] :
identifier[os] . identifier[remove] ( identifier[template] )
keyword[except] identifier[OSError] :
keyword[pass]
identifier[self] . identifier[remove_from_s3] ( identifier[template] , identifier[working_bucket] ) | def update_stack(self, name, working_bucket, wait=False, update_only=False, disable_progress=False):
"""
Update or create the CF stack managed by Zappa.
"""
capabilities = []
template = name + '-template-' + str(int(time.time())) + '.json'
with open(template, 'wb') as out:
out.write(bytes(self.cf_template.to_json(indent=None, separators=(',', ':')), 'utf-8')) # depends on [control=['with'], data=['out']]
self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
if self.boto_session.region_name == 'us-gov-west-1':
url = 'https://s3-us-gov-west-1.amazonaws.com/{0}/{1}'.format(working_bucket, template) # depends on [control=['if'], data=[]]
else:
url = 'https://s3.amazonaws.com/{0}/{1}'.format(working_bucket, template)
tags = [{'Key': key, 'Value': self.tags[key]} for key in self.tags.keys() if key != 'ZappaProject']
tags.append({'Key': 'ZappaProject', 'Value': name})
update = True
try:
self.cf_client.describe_stacks(StackName=name) # depends on [control=['try'], data=[]]
except botocore.client.ClientError:
update = False # depends on [control=['except'], data=[]]
if update_only and (not update):
print('CloudFormation stack missing, re-deploy to enable updates')
return # depends on [control=['if'], data=[]]
if not update:
self.cf_client.create_stack(StackName=name, Capabilities=capabilities, TemplateURL=url, Tags=tags)
print('Waiting for stack {0} to create (this can take a bit)..'.format(name)) # depends on [control=['if'], data=[]]
else:
try:
self.cf_client.update_stack(StackName=name, Capabilities=capabilities, TemplateURL=url, Tags=tags)
print('Waiting for stack {0} to update..'.format(name)) # depends on [control=['try'], data=[]]
except botocore.client.ClientError as e:
if e.response['Error']['Message'] == 'No updates are to be performed.':
wait = False # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
if wait:
total_resources = len(self.cf_template.resources)
current_resources = 0
sr = self.cf_client.get_paginator('list_stack_resources')
progress = tqdm(total=total_resources, unit='res', disable=disable_progress)
while True:
time.sleep(3)
result = self.cf_client.describe_stacks(StackName=name)
if not result['Stacks']:
continue # might need to wait a bit # depends on [control=['if'], data=[]]
if result['Stacks'][0]['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
break # depends on [control=['if'], data=[]]
# Something has gone wrong.
# Is raising enough? Should we also remove the Lambda function?
if result['Stacks'][0]['StackStatus'] in ['DELETE_COMPLETE', 'DELETE_IN_PROGRESS', 'ROLLBACK_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE']:
raise EnvironmentError('Stack creation failed. Please check your CloudFormation console. You may also need to `undeploy`.') # depends on [control=['if'], data=[]]
count = 0
for result in sr.paginate(StackName=name):
done = (1 for x in result['StackResourceSummaries'] if 'COMPLETE' in x['ResourceStatus'])
count += sum(done) # depends on [control=['for'], data=['result']]
if count:
# We can end up in a situation where we have more resources being created
# than anticipated.
if count - current_resources > 0:
progress.update(count - current_resources) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
current_resources = count # depends on [control=['while'], data=[]]
progress.close() # depends on [control=['if'], data=[]]
try:
os.remove(template) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
self.remove_from_s3(template, working_bucket) |
def main(command_line=True, **kwargs):
    """
    NAME
        iodp_srm_magic.py

    DESCRIPTION
        converts IODP LIMS and LORE SRM archive half sample format files to magic_measurements format files

    SYNTAX
        iodp_srm_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify input .csv file, default is all in directory
        -F FILE: specify output measurements file, default is magic_measurements.txt
        -Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
        -Fsa FILE: specify output er_samples.txt file, default is er_samples.txt
        -Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
        -A : don't average replicate measurements

    INPUTS
        IODP .csv file format exported from LIMS database

    RETURNS
        (True, measurements file name) on success,
        (False, error message) when no .csv files / no data are found.
    """
    # initialize defaults
    version_num = pmag.get_version()
    meas_file = 'magic_measurements.txt'
    spec_file = 'er_specimens.txt'
    samp_file = 'er_samples.txt'
    site_file = 'er_sites.txt'
    csv_file = ''
    citation = "This study"
    dir_path = '.'
    args = sys.argv
    noave = 0  # 0 means DO average replicate measurements
    # get command line args
    if command_line:
        if '-WD' in args:
            ind = args.index("-WD")
            dir_path = args[ind+1]
        if '-ID' in args:
            ind = args.index('-ID')
            input_dir_path = args[ind+1]
        else:
            input_dir_path = dir_path
        output_dir_path = dir_path
        if "-h" in args:
            print(main.__doc__)
            return False
        if "-A" in args:
            noave = 1
        if '-f' in args:
            ind = args.index("-f")
            csv_file = args[ind+1]
        if '-F' in args:
            ind = args.index("-F")
            meas_file = args[ind+1]
        if '-Fsp' in args:
            ind = args.index("-Fsp")
            spec_file = args[ind+1]
        if '-Fsi' in args:
            ind = args.index("-Fsi")
            site_file = args[ind+1]
        if '-Fsa' in args:
            ind = args.index("-Fsa")
            samp_file = args[ind+1]
    if not command_line:
        dir_path = kwargs.get('dir_path', '.')
        input_dir_path = kwargs.get('input_dir_path', dir_path)
        output_dir_path = dir_path  # rename dir_path after input_dir_path is set
        noave = kwargs.get('noave', 0)  # default (0) is DO average
        csv_file = kwargs.get('csv_file', '')
        meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
        spec_file = kwargs.get('spec_file', 'er_specimens.txt')
        samp_file = kwargs.get('samp_file', 'er_samples.txt')
        site_file = kwargs.get('site_file', 'er_sites.txt')
    # format variables: resolve all output paths and pre-read existing tables
    meas_file = os.path.join(output_dir_path, meas_file)
    spec_file = os.path.join(output_dir_path, spec_file)
    Specs, file_type = pmag.magic_read(spec_file)
    samp_file = os.path.join(output_dir_path, samp_file)
    ErSamps, file_type = pmag.magic_read(samp_file)
    site_file = os.path.join(output_dir_path, site_file)
    if csv_file == "":
        filelist = os.listdir(input_dir_path)  # read in list of files to import
    else:
        csv_file = os.path.join(input_dir_path, csv_file)
        filelist = [csv_file]
    # parsing the data
    specimens, samples, sites = [], [], []
    MagRecs, SpecRecs, SampRecs, SiteRecs = [], [], [], []
    # carry over any previously known samples so they are not duplicated
    for samp in ErSamps:
        if samp['er_sample_name'] not in samples:
            samples.append(samp['er_sample_name'])
            SampRecs.append(samp)
    file_found = False
    for f in filelist:  # parse each file
        if f[-3:].lower() == 'csv':
            file_found = True
            print('processing: ', f)
            full_file = os.path.join(input_dir_path, f)
            with open(full_file, 'r') as fin:
                file_input = fin.readlines()
            keys = file_input[0].replace('\n', '').split(',')  # header row, comma-separated
            # Sniff out which header variant this LIMS/LORE export uses.
            # NOTE(review): if an export matches none of the variants for a
            # given field, the corresponding *_key name stays unbound and a
            # NameError will surface below — assumed every export supplies
            # one recognized header per field; confirm against LIMS exports.
            if "Interval Top (cm) on SHLF" in keys:
                interval_key = "Interval Top (cm) on SHLF"
            if " Interval Bot (cm) on SECT" in keys:
                interval_key = " Interval Bot (cm) on SECT"
            if "Offset (cm)" in keys:
                interval_key = "Offset (cm)"
            if "Top Depth (m)" in keys:
                depth_key = "Top Depth (m)"
            if "CSF-A Top (m)" in keys:
                depth_key = "CSF-A Top (m)"
            if "Depth CSF-A (m)" in keys:
                depth_key = "Depth CSF-A (m)"
            if "CSF-B Top (m)" in keys:
                comp_depth_key = "CSF-B Top (m)"  # use this model if available
            elif "Depth CSF-B (m)" in keys:
                comp_depth_key = "Depth CSF-B (m)"
            else:
                comp_depth_key = ""
            if "Demag level (mT)" in keys:
                demag_key = "Demag level (mT)"
            if "Demag Level (mT)" in keys:
                demag_key = "Demag Level (mT)"
            if "Inclination (Tray- and Bkgrd-Corrected) (deg)" in keys:
                inc_key = "Inclination (Tray- and Bkgrd-Corrected) (deg)"
            if "Inclination background + tray corrected (deg)" in keys:
                inc_key = "Inclination background + tray corrected (deg)"
            if "Inclination background + tray corrected (\xc2\xb0)" in keys:
                inc_key = "Inclination background + tray corrected (\xc2\xb0)"
            if "Inclination background & tray corrected (deg)" in keys:
                inc_key = "Inclination background & tray corrected (deg)"
            if "Declination (Tray- and Bkgrd-Corrected) (deg)" in keys:
                dec_key = "Declination (Tray- and Bkgrd-Corrected) (deg)"
            if "Declination background + tray corrected (deg)" in keys:
                dec_key = "Declination background + tray corrected (deg)"
            if "Declination background + tray corrected (\xc2\xb0)" in keys:
                dec_key = "Declination background + tray corrected (\xc2\xb0)"
            if "Declination background & tray corrected (deg)" in keys:
                dec_key = "Declination background & tray corrected (deg)"
            if "Intensity (Tray- and Bkgrd-Corrected) (A/m)" in keys:
                int_key = "Intensity (Tray- and Bkgrd-Corrected) (A/m)"
            if "Intensity background + tray corrected (A/m)" in keys:
                int_key = "Intensity background + tray corrected (A/m)"
            if "Intensity background & tray corrected (A/m)" in keys:
                int_key = "Intensity background & tray corrected (A/m)"
            if "Core Type" in keys:
                core_type = "Core Type"
            else:
                core_type = "Type"
            if 'Run Number' in keys:
                run_number_key = 'Run Number'
            if 'Test No.' in keys:
                run_number_key = 'Test No.'
            if 'Test Changed On' in keys:
                date_key = 'Test Changed On'
            if "Timestamp (UTC)" in keys:
                date_key = "Timestamp (UTC)"
            if "Section" in keys:
                sect_key = "Section"
            if "Sect" in keys:
                sect_key = "Sect"
            if 'Section Half' in keys:
                half_key = 'Section Half'
            if "A/W" in keys:
                half_key = "A/W"
            if "Text ID" in keys:
                text_id = "Text ID"
            if "Text Id" in keys:
                text_id = "Text Id"
            for line in file_input[1:]:
                InRec = {}
                test = 0
                recs = line.split(',')
                # Only parse rows whose field count matches the header
                # (was: re-splitting the line once per column — O(n^2) —
                # and indexing InRec['Exp'] which KeyError'd on short rows).
                if len(recs) == len(keys):
                    InRec = dict(zip(keys, recs))
                    if InRec.get('Exp', '') != "":
                        test = 1  # get rid of pesky blank lines
                if test == 1:
                    run_number = ""
                    inst = "IODP-SRM"
                    volume = '15.59'  # set default volume to this
                    MagRec, SpecRec, SampRec, SiteRec = {}, {}, {}, {}
                    expedition = InRec['Exp']
                    location = InRec['Site'] + InRec['Hole']
                    # Maintain backward compatibility for the ever-changing LIMS format (Argh!)
                    while len(InRec['Core']) < 3:
                        InRec['Core'] = '0' + InRec['Core']
                    if ("Last Tray Measurment" in list(InRec.keys()) and "SHLF" not in InRec[text_id]) or 'dscr' in csv_file:
                        # assume discrete sample
                        specimen = expedition + '-' + location + '-' + InRec['Core'] + InRec[core_type] + "-" + InRec[sect_key] + '-' + InRec[half_key] + '-' + str(InRec[interval_key])
                    else:  # mark as continuous measurements
                        specimen = expedition + '-' + location + '-' + InRec['Core'] + InRec[core_type] + "_" + InRec[sect_key] + InRec[half_key] + '-' + str(InRec[interval_key])
                    SpecRec['er_expedition_name'] = expedition
                    SpecRec['er_location_name'] = location
                    SpecRec['er_site_name'] = specimen
                    SpecRec['er_citation_names'] = citation
                    # sample and site records start from the same core fields
                    SampRec.update(SpecRec)
                    SiteRec.update(SpecRec)
                    SampRec['sample_azimuth'] = '0'
                    SampRec['sample_dip'] = '0'
                    SampRec['sample_core_depth'] = InRec[depth_key]
                    if comp_depth_key != '':
                        SampRec['sample_composite_depth'] = InRec[comp_depth_key]
                    if "SHLF" not in InRec[text_id]:
                        SampRec['magic_method_codes'] = 'FS-C-DRILL-IODP:SP-SS-C:SO-V'
                    else:
                        SampRec['magic_method_codes'] = 'FS-C-DRILL-IODP:SO-V'
                    SpecRec['er_specimen_name'] = specimen
                    SpecRec['er_sample_name'] = specimen
                    SampRec['er_sample_name'] = specimen
                    SampRec['er_specimen_names'] = specimen
                    SiteRec['er_specimen_names'] = specimen
                    MagRec.update(SpecRec)
                    # set up measurement record - default is NRM
                    MagRec['magic_software_packages'] = version_num
                    MagRec["treatment_temp"] = '%8.3e' % (273)  # room temp in kelvin
                    MagRec["measurement_temp"] = '%8.3e' % (273)  # room temp in kelvin
                    MagRec["treatment_ac_field"] = 0
                    MagRec["treatment_dc_field"] = '0'
                    MagRec["treatment_dc_field_phi"] = '0'
                    MagRec["treatment_dc_field_theta"] = '0'
                    MagRec["measurement_flag"] = 'g'  # assume all data are "good"
                    MagRec["measurement_standard"] = 'u'  # 'u' = unknown standard
                    SpecRec['er_specimen_alternatives'] = InRec[text_id]
                    if 'Sample Area (cm?)' in list(InRec.keys()) and InRec['Sample Area (cm?)'] != "":
                        volume = InRec['Sample Area (cm?)']
                    if InRec[run_number_key] != "":
                        run_number = InRec[run_number_key]
                    datestamp = InRec[date_key].split()  # [date, time] pair from the record
                    if '/' in datestamp[0]:
                        # mm/dd/yy format: zero-pad to two characters each
                        mmddyy = datestamp[0].split('/')  # break into month day year
                        if len(mmddyy[0]) == 1:
                            mmddyy[0] = '0' + mmddyy[0]  # make 2 characters
                        if len(mmddyy[1]) == 1:
                            mmddyy[1] = '0' + mmddyy[1]  # make 2 characters
                        if len(datestamp[1]) == 1:
                            datestamp[1] = '0' + datestamp[1]  # make 2 characters
                        date = '20' + mmddyy[2] + ':' + mmddyy[0] + ":" + mmddyy[1] + ':' + datestamp[1] + ":00.00"
                    if '-' in datestamp[0]:
                        # already yyyy-mm-dd
                        mmddyy = datestamp[0].split('-')  # break into month day year
                        date = mmddyy[0] + ':' + mmddyy[1] + ":" + mmddyy[2] + ':' + datestamp[1] + ":00.00"
                    MagRec["measurement_date"] = date
                    MagRec["magic_method_codes"] = 'LT-NO'
                    if InRec[demag_key] != "0":
                        MagRec['magic_method_codes'] = 'LT-AF-Z'
                        inst = inst + ':IODP-SRM-AF'  # measured on shipboard in-line 2G AF
                        treatment_value = float(InRec[demag_key].strip('"')) * 1e-3  # convert mT => T
                        MagRec["treatment_ac_field"] = treatment_value  # AF demag in treat mT => T
                    if 'Treatment Type' in list(InRec.keys()) and InRec['Treatment Type'] != "":
                        if 'Alternating Frequency' in InRec['Treatment Type']:
                            MagRec['magic_method_codes'] = 'LT-AF-Z'
                            # measured on shipboard Dtech D2000
                            # (fixed stray backtick: was ':I`ODP-DTECH')
                            inst = inst + ':IODP-DTECH'
                            treatment_value = float(InRec['Treatment Value']) * 1e-3  # convert mT => T
                            MagRec["treatment_ac_field"] = treatment_value  # AF demag in treat mT => T
                        elif 'Thermal' in InRec['Treatment Type']:
                            MagRec['magic_method_codes'] = 'LT-T-Z'
                            inst = inst + ':IODP-TDS'  # measured on shipboard Schonstedt thermal demagnetizer
                            treatment_value = float(InRec['Treatment Value']) + 273  # convert C => K
                            MagRec["treatment_temp"] = '%8.3e' % (treatment_value)
                    MagRec["measurement_standard"] = 'u'
                    vol = float(volume) * 1e-6  # convert from cc to m^3
                    if run_number != "":
                        MagRec['external_database_ids'] = run_number
                        MagRec['external_database_names'] = 'LIMS'
                    else:
                        MagRec['external_database_ids'] = ""
                        MagRec['external_database_names'] = ''
                    MagRec['measurement_inc'] = InRec[inc_key].strip('"')
                    MagRec['measurement_dec'] = InRec[dec_key].strip('"')
                    intens = InRec[int_key].strip('"')
                    MagRec['measurement_magn_moment'] = '%8.3e' % (float(intens) * vol)  # convert intensity from A/m to Am^2 using vol
                    MagRec['magic_instrument_codes'] = inst
                    MagRec['measurement_number'] = '1'
                    MagRec['measurement_csd'] = ''
                    MagRec['measurement_positions'] = ''
                    MagRecs.append(MagRec)
                    if specimen not in specimens:
                        specimens.append(specimen)
                        SpecRecs.append(SpecRec)
                    if MagRec['er_sample_name'] not in samples:
                        samples.append(MagRec['er_sample_name'])
                        SampRecs.append(SampRec)
                    if MagRec['er_site_name'] not in sites:
                        sites.append(MagRec['er_site_name'])
                        SiteRecs.append(SiteRec)
    if not file_found:
        print("No .csv files were found")
        return False, "No .csv files were found"
    # write out the accumulated MagIC tables
    if len(SpecRecs) > 0:
        print('spec_file', spec_file)
        pmag.magic_write(spec_file, SpecRecs, 'er_specimens')
    if len(SampRecs) > 0:
        SampOut, keys = pmag.fillkeys(SampRecs)
        pmag.magic_write(samp_file, SampOut, 'er_samples')
    if len(SiteRecs) > 0:
        pmag.magic_write(site_file, SiteRecs, 'er_sites')
    # sort measurements by specimen then AF level before writing
    MagSort = pmag.sortbykeys(MagRecs, ["er_specimen_name", "treatment_ac_field"])
    MagOuts = []
    for MagRec in MagSort:
        MagRec["treatment_ac_field"] = '%8.3e' % (MagRec['treatment_ac_field'])  # convert to string
        MagOuts.append(MagRec)
    Fixed = pmag.measurements_methods(MagOuts, noave)
    if pmag.magic_write(meas_file, Fixed, 'magic_measurements'):
        print('data stored in ', meas_file)
        return True, meas_file
    else:
        print('no data found. bad magfile?')
        return False, 'no data found. bad magfile?'
constant[
NAME
iodp_srm_magic.py
DESCRIPTION
converts IODP LIMS and LORE SRM archive half sample format files to magic_measurements format files
SYNTAX
iodp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-A : don't average replicate measurements
INPUTS
IODP .csv file format exported from LIMS database
]
variable[version_num] assign[=] call[name[pmag].get_version, parameter[]]
variable[meas_file] assign[=] constant[magic_measurements.txt]
variable[spec_file] assign[=] constant[er_specimens.txt]
variable[samp_file] assign[=] constant[er_samples.txt]
variable[site_file] assign[=] constant[er_sites.txt]
variable[csv_file] assign[=] constant[]
<ast.Tuple object at 0x7da1b040c310> assign[=] tuple[[<ast.List object at 0x7da1b040c1f0>, <ast.List object at 0x7da1b040c460>, <ast.List object at 0x7da1b040c100>, <ast.List object at 0x7da1b040c130>, <ast.List object at 0x7da1b040c160>]]
variable[MagRecs] assign[=] list[[]]
variable[citation] assign[=] constant[This study]
<ast.Tuple object at 0x7da1b040c610> assign[=] tuple[[<ast.Constant object at 0x7da1b040c730>, <ast.Constant object at 0x7da1b040d030>]]
variable[args] assign[=] name[sys].argv
variable[noave] assign[=] constant[0]
variable[depth_method] assign[=] constant[a]
if name[command_line] begin[:]
if compare[constant[-WD] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-WD]]]
variable[dir_path] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-ID] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-ID]]]
variable[input_dir_path] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
variable[output_dir_path] assign[=] name[dir_path]
if compare[constant[-h] in name[args]] begin[:]
call[name[print], parameter[name[main].__doc__]]
return[constant[False]]
if compare[constant[-A] in name[args]] begin[:]
variable[noave] assign[=] constant[1]
if compare[constant[-f] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-f]]]
variable[csv_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-F] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-F]]]
variable[meas_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-Fsp] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-Fsp]]]
variable[spec_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-Fsi] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-Fsi]]]
variable[site_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if compare[constant[-Fsa] in name[args]] begin[:]
variable[ind] assign[=] call[name[args].index, parameter[constant[-Fsa]]]
variable[samp_file] assign[=] call[name[args]][binary_operation[name[ind] + constant[1]]]
if <ast.UnaryOp object at 0x7da1b04552a0> begin[:]
variable[dir_path] assign[=] call[name[kwargs].get, parameter[constant[dir_path], constant[.]]]
variable[input_dir_path] assign[=] call[name[kwargs].get, parameter[constant[input_dir_path], name[dir_path]]]
variable[output_dir_path] assign[=] name[dir_path]
variable[noave] assign[=] call[name[kwargs].get, parameter[constant[noave], constant[0]]]
variable[csv_file] assign[=] call[name[kwargs].get, parameter[constant[csv_file], constant[]]]
variable[meas_file] assign[=] call[name[kwargs].get, parameter[constant[meas_file], constant[magic_measurements.txt]]]
variable[spec_file] assign[=] call[name[kwargs].get, parameter[constant[spec_file], constant[er_specimens.txt]]]
variable[samp_file] assign[=] call[name[kwargs].get, parameter[constant[samp_file], constant[er_samples.txt]]]
variable[site_file] assign[=] call[name[kwargs].get, parameter[constant[site_file], constant[er_sites.txt]]]
variable[meas_file] assign[=] call[name[os].path.join, parameter[name[output_dir_path], name[meas_file]]]
variable[spec_file] assign[=] call[name[os].path.join, parameter[name[output_dir_path], name[spec_file]]]
<ast.Tuple object at 0x7da1b0454400> assign[=] call[name[pmag].magic_read, parameter[name[spec_file]]]
variable[samp_file] assign[=] call[name[os].path.join, parameter[name[output_dir_path], name[samp_file]]]
<ast.Tuple object at 0x7da1b0454100> assign[=] call[name[pmag].magic_read, parameter[name[samp_file]]]
variable[site_file] assign[=] call[name[os].path.join, parameter[name[output_dir_path], name[site_file]]]
if compare[name[csv_file] equal[==] constant[]] begin[:]
variable[filelist] assign[=] call[name[os].listdir, parameter[name[input_dir_path]]]
<ast.Tuple object at 0x7da1b0432b00> assign[=] tuple[[<ast.List object at 0x7da1b0432020>, <ast.List object at 0x7da1b0432c20>, <ast.List object at 0x7da1b0432590>]]
<ast.Tuple object at 0x7da1b04307f0> assign[=] tuple[[<ast.List object at 0x7da1b0430c70>, <ast.List object at 0x7da1b0432d40>, <ast.List object at 0x7da1b0430100>, <ast.List object at 0x7da1b0430760>]]
for taget[name[samp]] in starred[name[ErSamps]] begin[:]
if compare[call[name[samp]][constant[er_sample_name]] <ast.NotIn object at 0x7da2590d7190> name[samples]] begin[:]
call[name[samples].append, parameter[call[name[samp]][constant[er_sample_name]]]]
call[name[SampRecs].append, parameter[name[samp]]]
variable[file_found] assign[=] constant[False]
for taget[name[f]] in starred[name[filelist]] begin[:]
if compare[call[call[name[f]][<ast.Slice object at 0x7da1b0431ae0>].lower, parameter[]] equal[==] constant[csv]] begin[:]
variable[file_found] assign[=] constant[True]
call[name[print], parameter[constant[processing: ], name[f]]]
variable[full_file] assign[=] call[name[os].path.join, parameter[name[input_dir_path], name[f]]]
with call[name[open], parameter[name[full_file], constant[r]]] begin[:]
variable[file_input] assign[=] call[name[fin].readlines, parameter[]]
variable[keys] assign[=] call[call[call[name[file_input]][constant[0]].replace, parameter[constant[
], constant[]]].split, parameter[constant[,]]]
if compare[constant[Interval Top (cm) on SHLF] in name[keys]] begin[:]
variable[interval_key] assign[=] constant[Interval Top (cm) on SHLF]
if compare[constant[ Interval Bot (cm) on SECT] in name[keys]] begin[:]
variable[interval_key] assign[=] constant[ Interval Bot (cm) on SECT]
if compare[constant[Offset (cm)] in name[keys]] begin[:]
variable[interval_key] assign[=] constant[Offset (cm)]
if compare[constant[Top Depth (m)] in name[keys]] begin[:]
variable[depth_key] assign[=] constant[Top Depth (m)]
if compare[constant[CSF-A Top (m)] in name[keys]] begin[:]
variable[depth_key] assign[=] constant[CSF-A Top (m)]
if compare[constant[Depth CSF-A (m)] in name[keys]] begin[:]
variable[depth_key] assign[=] constant[Depth CSF-A (m)]
if compare[constant[CSF-B Top (m)] in name[keys]] begin[:]
variable[comp_depth_key] assign[=] constant[CSF-B Top (m)]
if compare[constant[Demag level (mT)] in name[keys]] begin[:]
variable[demag_key] assign[=] constant[Demag level (mT)]
if compare[constant[Demag Level (mT)] in name[keys]] begin[:]
variable[demag_key] assign[=] constant[Demag Level (mT)]
if compare[constant[Inclination (Tray- and Bkgrd-Corrected) (deg)] in name[keys]] begin[:]
variable[inc_key] assign[=] constant[Inclination (Tray- and Bkgrd-Corrected) (deg)]
if compare[constant[Inclination background + tray corrected (deg)] in name[keys]] begin[:]
variable[inc_key] assign[=] constant[Inclination background + tray corrected (deg)]
if compare[constant[Inclination background + tray corrected (°)] in name[keys]] begin[:]
variable[inc_key] assign[=] constant[Inclination background + tray corrected (°)]
if compare[constant[Inclination background & tray corrected (deg)] in name[keys]] begin[:]
variable[inc_key] assign[=] constant[Inclination background & tray corrected (deg)]
if compare[constant[Declination (Tray- and Bkgrd-Corrected) (deg)] in name[keys]] begin[:]
variable[dec_key] assign[=] constant[Declination (Tray- and Bkgrd-Corrected) (deg)]
if compare[constant[Declination background + tray corrected (deg)] in name[keys]] begin[:]
variable[dec_key] assign[=] constant[Declination background + tray corrected (deg)]
if compare[constant[Declination background + tray corrected (°)] in name[keys]] begin[:]
variable[dec_key] assign[=] constant[Declination background + tray corrected (°)]
if compare[constant[Declination background & tray corrected (deg)] in name[keys]] begin[:]
variable[dec_key] assign[=] constant[Declination background & tray corrected (deg)]
if compare[constant[Intensity (Tray- and Bkgrd-Corrected) (A/m)] in name[keys]] begin[:]
variable[int_key] assign[=] constant[Intensity (Tray- and Bkgrd-Corrected) (A/m)]
if compare[constant[Intensity background + tray corrected (A/m)] in name[keys]] begin[:]
variable[int_key] assign[=] constant[Intensity background + tray corrected (A/m)]
if compare[constant[Intensity background & tray corrected (A/m)] in name[keys]] begin[:]
variable[int_key] assign[=] constant[Intensity background & tray corrected (A/m)]
if compare[constant[Core Type] in name[keys]] begin[:]
variable[core_type] assign[=] constant[Core Type]
if compare[constant[Run Number] in name[keys]] begin[:]
variable[run_number_key] assign[=] constant[Run Number]
if compare[constant[Test No.] in name[keys]] begin[:]
variable[run_number_key] assign[=] constant[Test No.]
if compare[constant[Test Changed On] in name[keys]] begin[:]
variable[date_key] assign[=] constant[Test Changed On]
if compare[constant[Timestamp (UTC)] in name[keys]] begin[:]
variable[date_key] assign[=] constant[Timestamp (UTC)]
if compare[constant[Section] in name[keys]] begin[:]
variable[sect_key] assign[=] constant[Section]
if compare[constant[Sect] in name[keys]] begin[:]
variable[sect_key] assign[=] constant[Sect]
if compare[constant[Section Half] in name[keys]] begin[:]
variable[half_key] assign[=] constant[Section Half]
if compare[constant[A/W] in name[keys]] begin[:]
variable[half_key] assign[=] constant[A/W]
if compare[constant[Text ID] in name[keys]] begin[:]
variable[text_id] assign[=] constant[Text ID]
if compare[constant[Text Id] in name[keys]] begin[:]
variable[text_id] assign[=] constant[Text Id]
for taget[name[line]] in starred[call[name[file_input]][<ast.Slice object at 0x7da1b04adba0>]] begin[:]
variable[InRec] assign[=] dictionary[[], []]
variable[test] assign[=] constant[0]
variable[recs] assign[=] call[name[line].split, parameter[constant[,]]]
for taget[name[k]] in starred[call[name[range], parameter[call[name[len], parameter[name[keys]]]]]] begin[:]
if compare[call[name[len], parameter[name[recs]]] equal[==] call[name[len], parameter[name[keys]]]] begin[:]
call[name[InRec]][call[name[keys]][name[k]]] assign[=] call[call[name[line].split, parameter[constant[,]]]][name[k]]
if compare[call[name[InRec]][constant[Exp]] not_equal[!=] constant[]] begin[:]
variable[test] assign[=] constant[1]
if compare[name[test] equal[==] constant[1]] begin[:]
variable[run_number] assign[=] constant[]
variable[inst] assign[=] constant[IODP-SRM]
variable[volume] assign[=] constant[15.59]
<ast.Tuple object at 0x7da1b042f400> assign[=] tuple[[<ast.Dict object at 0x7da1b042fa60>, <ast.Dict object at 0x7da1b042faf0>, <ast.Dict object at 0x7da1b042fb20>, <ast.Dict object at 0x7da1b042fac0>]]
variable[expedition] assign[=] call[name[InRec]][constant[Exp]]
variable[location] assign[=] binary_operation[call[name[InRec]][constant[Site]] + call[name[InRec]][constant[Hole]]]
while compare[call[name[len], parameter[call[name[InRec]][constant[Core]]]] less[<] constant[3]] begin[:]
call[name[InRec]][constant[Core]] assign[=] binary_operation[constant[0] + call[name[InRec]][constant[Core]]]
if <ast.BoolOp object at 0x7da1b0449510> begin[:]
variable[specimen] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[expedition] + constant[-]] + name[location]] + constant[-]] + call[name[InRec]][constant[Core]]] + call[name[InRec]][name[core_type]]] + constant[-]] + call[name[InRec]][name[sect_key]]] + constant[-]] + call[name[InRec]][name[half_key]]] + constant[-]] + call[name[str], parameter[call[name[InRec]][name[interval_key]]]]]
call[name[SpecRec]][constant[er_expedition_name]] assign[=] name[expedition]
call[name[SpecRec]][constant[er_location_name]] assign[=] name[location]
call[name[SpecRec]][constant[er_site_name]] assign[=] name[specimen]
call[name[SpecRec]][constant[er_citation_names]] assign[=] name[citation]
for taget[name[key]] in starred[call[name[list], parameter[call[name[SpecRec].keys, parameter[]]]]] begin[:]
call[name[SampRec]][name[key]] assign[=] call[name[SpecRec]][name[key]]
for taget[name[key]] in starred[call[name[list], parameter[call[name[SpecRec].keys, parameter[]]]]] begin[:]
call[name[SiteRec]][name[key]] assign[=] call[name[SpecRec]][name[key]]
call[name[SampRec]][constant[sample_azimuth]] assign[=] constant[0]
call[name[SampRec]][constant[sample_dip]] assign[=] constant[0]
call[name[SampRec]][constant[sample_core_depth]] assign[=] call[name[InRec]][name[depth_key]]
if compare[name[comp_depth_key] not_equal[!=] constant[]] begin[:]
call[name[SampRec]][constant[sample_composite_depth]] assign[=] call[name[InRec]][name[comp_depth_key]]
if compare[constant[SHLF] <ast.NotIn object at 0x7da2590d7190> call[name[InRec]][name[text_id]]] begin[:]
call[name[SampRec]][constant[magic_method_codes]] assign[=] constant[FS-C-DRILL-IODP:SP-SS-C:SO-V]
call[name[SpecRec]][constant[er_specimen_name]] assign[=] name[specimen]
call[name[SpecRec]][constant[er_sample_name]] assign[=] name[specimen]
call[name[SampRec]][constant[er_sample_name]] assign[=] name[specimen]
call[name[SampRec]][constant[er_specimen_names]] assign[=] name[specimen]
call[name[SiteRec]][constant[er_specimen_names]] assign[=] name[specimen]
for taget[name[key]] in starred[call[name[list], parameter[call[name[SpecRec].keys, parameter[]]]]] begin[:]
call[name[MagRec]][name[key]] assign[=] call[name[SpecRec]][name[key]]
call[name[MagRec]][constant[magic_software_packages]] assign[=] name[version_num]
call[name[MagRec]][constant[treatment_temp]] assign[=] binary_operation[constant[%8.3e] <ast.Mod object at 0x7da2590d6920> constant[273]]
call[name[MagRec]][constant[measurement_temp]] assign[=] binary_operation[constant[%8.3e] <ast.Mod object at 0x7da2590d6920> constant[273]]
call[name[MagRec]][constant[treatment_ac_field]] assign[=] constant[0]
call[name[MagRec]][constant[treatment_dc_field]] assign[=] constant[0]
call[name[MagRec]][constant[treatment_dc_field_phi]] assign[=] constant[0]
call[name[MagRec]][constant[treatment_dc_field_theta]] assign[=] constant[0]
call[name[MagRec]][constant[measurement_flag]] assign[=] constant[g]
call[name[MagRec]][constant[measurement_standard]] assign[=] constant[u]
call[name[SpecRec]][constant[er_specimen_alternatives]] assign[=] call[name[InRec]][name[text_id]]
if <ast.BoolOp object at 0x7da1b04d44f0> begin[:]
variable[volume] assign[=] call[name[InRec]][constant[Sample Area (cm?)]]
if compare[call[name[InRec]][name[run_number_key]] not_equal[!=] constant[]] begin[:]
variable[run_number] assign[=] call[name[InRec]][name[run_number_key]]
variable[datestamp] assign[=] call[call[name[InRec]][name[date_key]].split, parameter[]]
if compare[constant[/] in call[name[datestamp]][constant[0]]] begin[:]
variable[mmddyy] assign[=] call[call[name[datestamp]][constant[0]].split, parameter[constant[/]]]
if compare[call[name[len], parameter[call[name[mmddyy]][constant[0]]]] equal[==] constant[1]] begin[:]
call[name[mmddyy]][constant[0]] assign[=] binary_operation[constant[0] + call[name[mmddyy]][constant[0]]]
if compare[call[name[len], parameter[call[name[mmddyy]][constant[1]]]] equal[==] constant[1]] begin[:]
call[name[mmddyy]][constant[1]] assign[=] binary_operation[constant[0] + call[name[mmddyy]][constant[1]]]
if compare[call[name[len], parameter[call[name[datestamp]][constant[1]]]] equal[==] constant[1]] begin[:]
call[name[datestamp]][constant[1]] assign[=] binary_operation[constant[0] + call[name[datestamp]][constant[1]]]
variable[date] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[20] + call[name[mmddyy]][constant[2]]] + constant[:]] + call[name[mmddyy]][constant[0]]] + constant[:]] + call[name[mmddyy]][constant[1]]] + constant[:]] + call[name[datestamp]][constant[1]]] + constant[:00.00]]
if compare[constant[-] in call[name[datestamp]][constant[0]]] begin[:]
variable[mmddyy] assign[=] call[call[name[datestamp]][constant[0]].split, parameter[constant[-]]]
variable[date] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[mmddyy]][constant[0]] + constant[:]] + call[name[mmddyy]][constant[1]]] + constant[:]] + call[name[mmddyy]][constant[2]]] + constant[:]] + call[name[datestamp]][constant[1]]] + constant[:00.00]]
call[name[MagRec]][constant[measurement_date]] assign[=] name[date]
call[name[MagRec]][constant[magic_method_codes]] assign[=] constant[LT-NO]
if compare[call[name[InRec]][name[demag_key]] not_equal[!=] constant[0]] begin[:]
call[name[MagRec]][constant[magic_method_codes]] assign[=] constant[LT-AF-Z]
variable[inst] assign[=] binary_operation[name[inst] + constant[:IODP-SRM-AF]]
variable[treatment_value] assign[=] binary_operation[call[name[float], parameter[call[call[name[InRec]][name[demag_key]].strip, parameter[constant["]]]]] * constant[0.001]]
call[name[MagRec]][constant[treatment_ac_field]] assign[=] name[treatment_value]
if <ast.BoolOp object at 0x7da1b0451de0> begin[:]
if compare[constant[Alternating Frequency] in call[name[InRec]][constant[Treatment Type]]] begin[:]
call[name[MagRec]][constant[magic_method_codes]] assign[=] constant[LT-AF-Z]
variable[inst] assign[=] binary_operation[name[inst] + constant[:I`ODP-DTECH]]
variable[treatment_value] assign[=] binary_operation[call[name[float], parameter[call[name[InRec]][constant[Treatment Value]]]] * constant[0.001]]
call[name[MagRec]][constant[treatment_ac_field]] assign[=] name[treatment_value]
call[name[MagRec]][constant[measurement_standard]] assign[=] constant[u]
variable[vol] assign[=] binary_operation[call[name[float], parameter[name[volume]]] * constant[1e-06]]
if compare[name[run_number] not_equal[!=] constant[]] begin[:]
call[name[MagRec]][constant[external_database_ids]] assign[=] name[run_number]
call[name[MagRec]][constant[external_database_names]] assign[=] constant[LIMS]
call[name[MagRec]][constant[measurement_inc]] assign[=] call[call[name[InRec]][name[inc_key]].strip, parameter[constant["]]]
call[name[MagRec]][constant[measurement_dec]] assign[=] call[call[name[InRec]][name[dec_key]].strip, parameter[constant["]]]
variable[intens] assign[=] call[call[name[InRec]][name[int_key]].strip, parameter[constant["]]]
call[name[MagRec]][constant[measurement_magn_moment]] assign[=] binary_operation[constant[%8.3e] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[float], parameter[name[intens]]] * name[vol]]]
call[name[MagRec]][constant[magic_instrument_codes]] assign[=] name[inst]
call[name[MagRec]][constant[measurement_number]] assign[=] constant[1]
call[name[MagRec]][constant[measurement_csd]] assign[=] constant[]
call[name[MagRec]][constant[measurement_positions]] assign[=] constant[]
call[name[MagRecs].append, parameter[name[MagRec]]]
if compare[name[specimen] <ast.NotIn object at 0x7da2590d7190> name[specimens]] begin[:]
call[name[specimens].append, parameter[name[specimen]]]
call[name[SpecRecs].append, parameter[name[SpecRec]]]
if compare[call[name[MagRec]][constant[er_sample_name]] <ast.NotIn object at 0x7da2590d7190> name[samples]] begin[:]
call[name[samples].append, parameter[call[name[MagRec]][constant[er_sample_name]]]]
call[name[SampRecs].append, parameter[name[SampRec]]]
if compare[call[name[MagRec]][constant[er_site_name]] <ast.NotIn object at 0x7da2590d7190> name[sites]] begin[:]
call[name[sites].append, parameter[call[name[MagRec]][constant[er_site_name]]]]
call[name[SiteRecs].append, parameter[name[SiteRec]]]
if <ast.UnaryOp object at 0x7da20c76cb50> begin[:]
call[name[print], parameter[constant[No .csv files were found]]]
return[tuple[[<ast.Constant object at 0x7da1b0479a50>, <ast.Constant object at 0x7da1b04790f0>]]]
if compare[call[name[len], parameter[name[SpecRecs]]] greater[>] constant[0]] begin[:]
call[name[print], parameter[constant[spec_file], name[spec_file]]]
call[name[pmag].magic_write, parameter[name[spec_file], name[SpecRecs], constant[er_specimens]]]
if compare[call[name[len], parameter[name[SampRecs]]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da1b0479b10> assign[=] call[name[pmag].fillkeys, parameter[name[SampRecs]]]
call[name[pmag].magic_write, parameter[name[samp_file], name[SampOut], constant[er_samples]]]
if compare[call[name[len], parameter[name[SiteRecs]]] greater[>] constant[0]] begin[:]
call[name[pmag].magic_write, parameter[name[site_file], name[SiteRecs], constant[er_sites]]]
variable[MagSort] assign[=] call[name[pmag].sortbykeys, parameter[name[MagRecs], list[[<ast.Constant object at 0x7da1b0479c30>, <ast.Constant object at 0x7da1b047a920>]]]]
variable[MagOuts] assign[=] list[[]]
for taget[name[MagRec]] in starred[name[MagSort]] begin[:]
call[name[MagRec]][constant[treatment_ac_field]] assign[=] binary_operation[constant[%8.3e] <ast.Mod object at 0x7da2590d6920> call[name[MagRec]][constant[treatment_ac_field]]]
call[name[MagOuts].append, parameter[name[MagRec]]]
variable[Fixed] assign[=] call[name[pmag].measurements_methods, parameter[name[MagOuts], name[noave]]]
if call[name[pmag].magic_write, parameter[name[meas_file], name[Fixed], constant[magic_measurements]]] begin[:]
call[name[print], parameter[constant[data stored in ], name[meas_file]]]
return[tuple[[<ast.Constant object at 0x7da1b047ba00>, <ast.Name object at 0x7da1b047b400>]]] | keyword[def] identifier[main] ( identifier[command_line] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[version_num] = identifier[pmag] . identifier[get_version] ()
identifier[meas_file] = literal[string]
identifier[spec_file] = literal[string]
identifier[samp_file] = literal[string]
identifier[site_file] = literal[string]
identifier[csv_file] = literal[string]
identifier[ErSpecs] , identifier[ErSamps] , identifier[ErSites] , identifier[ErLocs] , identifier[ErCits] =[],[],[],[],[]
identifier[MagRecs] =[]
identifier[citation] = literal[string]
identifier[dir_path] , identifier[demag] = literal[string] , literal[string]
identifier[args] = identifier[sys] . identifier[argv]
identifier[noave] = literal[int]
identifier[depth_method] = literal[string]
keyword[if] identifier[command_line] :
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[dir_path] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[input_dir_path] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[else] :
identifier[input_dir_path] = identifier[dir_path]
identifier[output_dir_path] = identifier[dir_path]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[print] ( identifier[main] . identifier[__doc__] )
keyword[return] keyword[False]
keyword[if] literal[string] keyword[in] identifier[args] : identifier[noave] = literal[int]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[csv_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[meas_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[spec_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[site_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] literal[string] keyword[in] identifier[args] :
identifier[ind] = identifier[args] . identifier[index] ( literal[string] )
identifier[samp_file] = identifier[args] [ identifier[ind] + literal[int] ]
keyword[if] keyword[not] identifier[command_line] :
identifier[dir_path] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[input_dir_path] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[dir_path] )
identifier[output_dir_path] = identifier[dir_path]
identifier[noave] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[csv_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[meas_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[spec_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[samp_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[site_file] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[meas_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir_path] , identifier[meas_file] )
identifier[spec_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir_path] , identifier[spec_file] )
identifier[Specs] , identifier[file_type] = identifier[pmag] . identifier[magic_read] ( identifier[spec_file] )
identifier[samp_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir_path] , identifier[samp_file] )
identifier[ErSamps] , identifier[file_type] = identifier[pmag] . identifier[magic_read] ( identifier[samp_file] )
identifier[site_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir_path] , identifier[site_file] )
keyword[if] identifier[csv_file] == literal[string] :
identifier[filelist] = identifier[os] . identifier[listdir] ( identifier[input_dir_path] )
keyword[else] :
identifier[csv_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir_path] , identifier[csv_file] )
identifier[filelist] =[ identifier[csv_file] ]
identifier[specimens] , identifier[samples] , identifier[sites] =[],[],[]
identifier[MagRecs] , identifier[SpecRecs] , identifier[SampRecs] , identifier[SiteRecs] =[],[],[],[]
keyword[for] identifier[samp] keyword[in] identifier[ErSamps] :
keyword[if] identifier[samp] [ literal[string] ] keyword[not] keyword[in] identifier[samples] :
identifier[samples] . identifier[append] ( identifier[samp] [ literal[string] ])
identifier[SampRecs] . identifier[append] ( identifier[samp] )
identifier[file_found] = keyword[False]
keyword[for] identifier[f] keyword[in] identifier[filelist] :
keyword[if] identifier[f] [- literal[int] :]. identifier[lower] ()== literal[string] :
identifier[file_found] = keyword[True]
identifier[print] ( literal[string] , identifier[f] )
identifier[full_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir_path] , identifier[f] )
keyword[with] identifier[open] ( identifier[full_file] , literal[string] ) keyword[as] identifier[fin] :
identifier[file_input] = identifier[fin] . identifier[readlines] ()
identifier[keys] = identifier[file_input] [ literal[int] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[interval_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[interval_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[interval_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[depth_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[depth_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[depth_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] :
identifier[comp_depth_key] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[keys] :
identifier[comp_depth_key] = literal[string]
keyword[else] :
identifier[comp_depth_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[demag_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[demag_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[inc_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[inc_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[inc_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[inc_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[dec_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[dec_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[dec_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[dec_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[int_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[int_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[int_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] :
identifier[core_type] = literal[string]
keyword[else] : identifier[core_type] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[run_number_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[run_number_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[date_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[date_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[sect_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[sect_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[half_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[half_key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[text_id] = literal[string]
keyword[if] literal[string] keyword[in] identifier[keys] : identifier[text_id] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[file_input] [ literal[int] :]:
identifier[InRec] ={}
identifier[test] = literal[int]
identifier[recs] = identifier[line] . identifier[split] ( literal[string] )
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[len] ( identifier[keys] )):
keyword[if] identifier[len] ( identifier[recs] )== identifier[len] ( identifier[keys] ):
identifier[InRec] [ identifier[keys] [ identifier[k] ]]= identifier[line] . identifier[split] ( literal[string] )[ identifier[k] ]
keyword[if] identifier[InRec] [ literal[string] ]!= literal[string] : identifier[test] = literal[int]
keyword[if] identifier[test] == literal[int] :
identifier[run_number] = literal[string]
identifier[inst] = literal[string]
identifier[volume] = literal[string]
identifier[MagRec] , identifier[SpecRec] , identifier[SampRec] , identifier[SiteRec] ={},{},{},{}
identifier[expedition] = identifier[InRec] [ literal[string] ]
identifier[location] = identifier[InRec] [ literal[string] ]+ identifier[InRec] [ literal[string] ]
keyword[while] identifier[len] ( identifier[InRec] [ literal[string] ])< literal[int] :
identifier[InRec] [ literal[string] ]= literal[string] + identifier[InRec] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[InRec] . identifier[keys] ()) keyword[and] literal[string] keyword[not] keyword[in] identifier[InRec] [ identifier[text_id] ] keyword[or] literal[string] keyword[in] identifier[csv_file] :
identifier[specimen] = identifier[expedition] + literal[string] + identifier[location] + literal[string] + identifier[InRec] [ literal[string] ]+ identifier[InRec] [ identifier[core_type] ]+ literal[string] + identifier[InRec] [ identifier[sect_key] ]+ literal[string] + identifier[InRec] [ identifier[half_key] ]+ literal[string] + identifier[str] ( identifier[InRec] [ identifier[interval_key] ])
keyword[else] :
identifier[specimen] = identifier[expedition] + literal[string] + identifier[location] + literal[string] + identifier[InRec] [ literal[string] ]+ identifier[InRec] [ identifier[core_type] ]+ literal[string] + identifier[InRec] [ identifier[sect_key] ]+ identifier[InRec] [ identifier[half_key] ]+ literal[string] + identifier[str] ( identifier[InRec] [ identifier[interval_key] ])
identifier[SpecRec] [ literal[string] ]= identifier[expedition]
identifier[SpecRec] [ literal[string] ]= identifier[location]
identifier[SpecRec] [ literal[string] ]= identifier[specimen]
identifier[SpecRec] [ literal[string] ]= identifier[citation]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[SpecRec] . identifier[keys] ()): identifier[SampRec] [ identifier[key] ]= identifier[SpecRec] [ identifier[key] ]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[SpecRec] . identifier[keys] ()): identifier[SiteRec] [ identifier[key] ]= identifier[SpecRec] [ identifier[key] ]
identifier[SampRec] [ literal[string] ]= literal[string]
identifier[SampRec] [ literal[string] ]= literal[string]
identifier[SampRec] [ literal[string] ]= identifier[InRec] [ identifier[depth_key] ]
keyword[if] identifier[comp_depth_key] != literal[string] :
identifier[SampRec] [ literal[string] ]= identifier[InRec] [ identifier[comp_depth_key] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[InRec] [ identifier[text_id] ]:
identifier[SampRec] [ literal[string] ]= literal[string]
keyword[else] :
identifier[SampRec] [ literal[string] ]= literal[string]
identifier[SpecRec] [ literal[string] ]= identifier[specimen]
identifier[SpecRec] [ literal[string] ]= identifier[specimen]
identifier[SampRec] [ literal[string] ]= identifier[specimen]
identifier[SampRec] [ literal[string] ]= identifier[specimen]
identifier[SiteRec] [ literal[string] ]= identifier[specimen]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[SpecRec] . identifier[keys] ()): identifier[MagRec] [ identifier[key] ]= identifier[SpecRec] [ identifier[key] ]
identifier[MagRec] [ literal[string] ]= identifier[version_num]
identifier[MagRec] [ literal[string] ]= literal[string] %( literal[int] )
identifier[MagRec] [ literal[string] ]= literal[string] %( literal[int] )
identifier[MagRec] [ literal[string] ]= literal[int]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[SpecRec] [ literal[string] ]= identifier[InRec] [ identifier[text_id] ]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[InRec] . identifier[keys] ()) keyword[and] identifier[InRec] [ literal[string] ]!= literal[string] : identifier[volume] = identifier[InRec] [ literal[string] ]
keyword[if] identifier[InRec] [ identifier[run_number_key] ]!= literal[string] : identifier[run_number] = identifier[InRec] [ identifier[run_number_key] ]
identifier[datestamp] = identifier[InRec] [ identifier[date_key] ]. identifier[split] ()
keyword[if] literal[string] keyword[in] identifier[datestamp] [ literal[int] ]:
identifier[mmddyy] = identifier[datestamp] [ literal[int] ]. identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[mmddyy] [ literal[int] ])== literal[int] : identifier[mmddyy] [ literal[int] ]= literal[string] + identifier[mmddyy] [ literal[int] ]
keyword[if] identifier[len] ( identifier[mmddyy] [ literal[int] ])== literal[int] : identifier[mmddyy] [ literal[int] ]= literal[string] + identifier[mmddyy] [ literal[int] ]
keyword[if] identifier[len] ( identifier[datestamp] [ literal[int] ])== literal[int] : identifier[datestamp] [ literal[int] ]= literal[string] + identifier[datestamp] [ literal[int] ]
identifier[date] = literal[string] + identifier[mmddyy] [ literal[int] ]+ literal[string] + identifier[mmddyy] [ literal[int] ]+ literal[string] + identifier[mmddyy] [ literal[int] ]+ literal[string] + identifier[datestamp] [ literal[int] ]+ literal[string]
keyword[if] literal[string] keyword[in] identifier[datestamp] [ literal[int] ]:
identifier[mmddyy] = identifier[datestamp] [ literal[int] ]. identifier[split] ( literal[string] )
identifier[date] = identifier[mmddyy] [ literal[int] ]+ literal[string] + identifier[mmddyy] [ literal[int] ]+ literal[string] + identifier[mmddyy] [ literal[int] ]+ literal[string] + identifier[datestamp] [ literal[int] ]+ literal[string]
identifier[MagRec] [ literal[string] ]= identifier[date]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[if] identifier[InRec] [ identifier[demag_key] ]!= literal[string] :
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[inst] = identifier[inst] + literal[string]
identifier[treatment_value] = identifier[float] ( identifier[InRec] [ identifier[demag_key] ]. identifier[strip] ( literal[string] ))* literal[int]
identifier[MagRec] [ literal[string] ]= identifier[treatment_value]
keyword[if] literal[string] keyword[in] identifier[list] ( identifier[InRec] . identifier[keys] ()) keyword[and] identifier[InRec] [ literal[string] ]!= literal[string] :
keyword[if] literal[string] keyword[in] identifier[InRec] [ literal[string] ]:
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[inst] = identifier[inst] + literal[string]
identifier[treatment_value] = identifier[float] ( identifier[InRec] [ literal[string] ])* literal[int]
identifier[MagRec] [ literal[string] ]= identifier[treatment_value]
keyword[elif] literal[string] keyword[in] identifier[InRec] [ literal[string] ]:
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[inst] = identifier[inst] + literal[string]
identifier[treatment_value] = identifier[float] ( identifier[InRec] [ literal[string] ])+ literal[int]
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[treatment_value] )
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[vol] = identifier[float] ( identifier[volume] )* literal[int]
keyword[if] identifier[run_number] != literal[string] :
identifier[MagRec] [ literal[string] ]= identifier[run_number]
identifier[MagRec] [ literal[string] ]= literal[string]
keyword[else] :
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= identifier[InRec] [ identifier[inc_key] ]. identifier[strip] ( literal[string] )
identifier[MagRec] [ literal[string] ]= identifier[InRec] [ identifier[dec_key] ]. identifier[strip] ( literal[string] )
identifier[intens] = identifier[InRec] [ identifier[int_key] ]. identifier[strip] ( literal[string] )
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[float] ( identifier[intens] )* identifier[vol] )
identifier[MagRec] [ literal[string] ]= identifier[inst]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRec] [ literal[string] ]= literal[string]
identifier[MagRecs] . identifier[append] ( identifier[MagRec] )
keyword[if] identifier[specimen] keyword[not] keyword[in] identifier[specimens] :
identifier[specimens] . identifier[append] ( identifier[specimen] )
identifier[SpecRecs] . identifier[append] ( identifier[SpecRec] )
keyword[if] identifier[MagRec] [ literal[string] ] keyword[not] keyword[in] identifier[samples] :
identifier[samples] . identifier[append] ( identifier[MagRec] [ literal[string] ])
identifier[SampRecs] . identifier[append] ( identifier[SampRec] )
keyword[if] identifier[MagRec] [ literal[string] ] keyword[not] keyword[in] identifier[sites] :
identifier[sites] . identifier[append] ( identifier[MagRec] [ literal[string] ])
identifier[SiteRecs] . identifier[append] ( identifier[SiteRec] )
keyword[if] keyword[not] identifier[file_found] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string]
keyword[if] identifier[len] ( identifier[SpecRecs] )> literal[int] :
identifier[print] ( literal[string] , identifier[spec_file] )
identifier[pmag] . identifier[magic_write] ( identifier[spec_file] , identifier[SpecRecs] , literal[string] )
keyword[if] identifier[len] ( identifier[SampRecs] )> literal[int] :
identifier[SampOut] , identifier[keys] = identifier[pmag] . identifier[fillkeys] ( identifier[SampRecs] )
identifier[pmag] . identifier[magic_write] ( identifier[samp_file] , identifier[SampOut] , literal[string] )
keyword[if] identifier[len] ( identifier[SiteRecs] )> literal[int] :
identifier[pmag] . identifier[magic_write] ( identifier[site_file] , identifier[SiteRecs] , literal[string] )
identifier[MagSort] = identifier[pmag] . identifier[sortbykeys] ( identifier[MagRecs] ,[ literal[string] , literal[string] ])
identifier[MagOuts] =[]
keyword[for] identifier[MagRec] keyword[in] identifier[MagSort] :
identifier[MagRec] [ literal[string] ]= literal[string] %( identifier[MagRec] [ literal[string] ])
identifier[MagOuts] . identifier[append] ( identifier[MagRec] )
identifier[Fixed] = identifier[pmag] . identifier[measurements_methods] ( identifier[MagOuts] , identifier[noave] )
keyword[if] identifier[pmag] . identifier[magic_write] ( identifier[meas_file] , identifier[Fixed] , literal[string] ):
identifier[print] ( literal[string] , identifier[meas_file] )
keyword[return] keyword[True] , identifier[meas_file]
keyword[else] :
identifier[print] ( literal[string] )
keyword[return] keyword[False] , literal[string] | def main(command_line=True, **kwargs):
"""
NAME
iodp_srm_magic.py
DESCRIPTION
converts IODP LIMS and LORE SRM archive half sample format files to magic_measurements format files
SYNTAX
iodp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-A : don't average replicate measurements
INPUTS
IODP .csv file format exported from LIMS database
"""
#
# initialize defaults
version_num = pmag.get_version()
meas_file = 'magic_measurements.txt'
spec_file = 'er_specimens.txt'
samp_file = 'er_samples.txt'
site_file = 'er_sites.txt'
csv_file = ''
(ErSpecs, ErSamps, ErSites, ErLocs, ErCits) = ([], [], [], [], [])
MagRecs = []
citation = 'This study'
(dir_path, demag) = ('.', 'NRM')
args = sys.argv
noave = 0
depth_method = 'a'
# get command line args
if command_line:
if '-WD' in args:
ind = args.index('-WD')
dir_path = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-ID' in args:
ind = args.index('-ID')
input_dir_path = args[ind + 1] # depends on [control=['if'], data=['args']]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if '-h' in args:
print(main.__doc__)
return False # depends on [control=['if'], data=[]]
if '-A' in args:
noave = 1 # depends on [control=['if'], data=[]]
if '-f' in args:
ind = args.index('-f')
csv_file = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-F' in args:
ind = args.index('-F')
meas_file = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-Fsp' in args:
ind = args.index('-Fsp')
spec_file = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-Fsi' in args:
ind = args.index('-Fsi')
site_file = args[ind + 1] # depends on [control=['if'], data=['args']]
if '-Fsa' in args:
ind = args.index('-Fsa')
samp_file = args[ind + 1] # depends on [control=['if'], data=['args']] # depends on [control=['if'], data=[]]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path # rename dir_path after input_dir_path is set
noave = kwargs.get('noave', 0) # default (0) is DO average
csv_file = kwargs.get('csv_file', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
spec_file = kwargs.get('spec_file', 'er_specimens.txt')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt') # depends on [control=['if'], data=[]]
# format variables
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
(Specs, file_type) = pmag.magic_read(spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
(ErSamps, file_type) = pmag.magic_read(samp_file)
site_file = os.path.join(output_dir_path, site_file)
if csv_file == '':
filelist = os.listdir(input_dir_path) # read in list of files to import # depends on [control=['if'], data=[]]
else:
csv_file = os.path.join(input_dir_path, csv_file)
filelist = [csv_file]
# parsing the data
(specimens, samples, sites) = ([], [], [])
(MagRecs, SpecRecs, SampRecs, SiteRecs) = ([], [], [], [])
for samp in ErSamps:
if samp['er_sample_name'] not in samples:
samples.append(samp['er_sample_name'])
SampRecs.append(samp) # depends on [control=['if'], data=['samples']] # depends on [control=['for'], data=['samp']]
file_found = False
for f in filelist: # parse each file
if f[-3:].lower() == 'csv':
file_found = True
print('processing: ', f)
full_file = os.path.join(input_dir_path, f)
with open(full_file, 'r') as fin:
file_input = fin.readlines() # depends on [control=['with'], data=['fin']]
keys = file_input[0].replace('\n', '').split(',') # splits on underscores
if 'Interval Top (cm) on SHLF' in keys:
interval_key = 'Interval Top (cm) on SHLF' # depends on [control=['if'], data=[]]
if ' Interval Bot (cm) on SECT' in keys:
interval_key = ' Interval Bot (cm) on SECT' # depends on [control=['if'], data=[]]
if 'Offset (cm)' in keys:
interval_key = 'Offset (cm)' # depends on [control=['if'], data=[]]
if 'Top Depth (m)' in keys:
depth_key = 'Top Depth (m)' # depends on [control=['if'], data=[]]
if 'CSF-A Top (m)' in keys:
depth_key = 'CSF-A Top (m)' # depends on [control=['if'], data=[]]
if 'Depth CSF-A (m)' in keys:
depth_key = 'Depth CSF-A (m)' # depends on [control=['if'], data=[]]
if 'CSF-B Top (m)' in keys:
comp_depth_key = 'CSF-B Top (m)' # use this model if available # depends on [control=['if'], data=[]]
elif 'Depth CSF-B (m)' in keys:
comp_depth_key = 'Depth CSF-B (m)' # depends on [control=['if'], data=[]]
else:
comp_depth_key = ''
if 'Demag level (mT)' in keys:
demag_key = 'Demag level (mT)' # depends on [control=['if'], data=[]]
if 'Demag Level (mT)' in keys:
demag_key = 'Demag Level (mT)' # depends on [control=['if'], data=[]]
if 'Inclination (Tray- and Bkgrd-Corrected) (deg)' in keys:
inc_key = 'Inclination (Tray- and Bkgrd-Corrected) (deg)' # depends on [control=['if'], data=[]]
if 'Inclination background + tray corrected (deg)' in keys:
inc_key = 'Inclination background + tray corrected (deg)' # depends on [control=['if'], data=[]]
if 'Inclination background + tray corrected (°)' in keys:
inc_key = 'Inclination background + tray corrected (°)' # depends on [control=['if'], data=[]]
if 'Inclination background & tray corrected (deg)' in keys:
inc_key = 'Inclination background & tray corrected (deg)' # depends on [control=['if'], data=[]]
if 'Declination (Tray- and Bkgrd-Corrected) (deg)' in keys:
dec_key = 'Declination (Tray- and Bkgrd-Corrected) (deg)' # depends on [control=['if'], data=[]]
if 'Declination background + tray corrected (deg)' in keys:
dec_key = 'Declination background + tray corrected (deg)' # depends on [control=['if'], data=[]]
if 'Declination background + tray corrected (°)' in keys:
dec_key = 'Declination background + tray corrected (°)' # depends on [control=['if'], data=[]]
if 'Declination background & tray corrected (deg)' in keys:
dec_key = 'Declination background & tray corrected (deg)' # depends on [control=['if'], data=[]]
if 'Intensity (Tray- and Bkgrd-Corrected) (A/m)' in keys:
int_key = 'Intensity (Tray- and Bkgrd-Corrected) (A/m)' # depends on [control=['if'], data=[]]
if 'Intensity background + tray corrected (A/m)' in keys:
int_key = 'Intensity background + tray corrected (A/m)' # depends on [control=['if'], data=[]]
if 'Intensity background & tray corrected (A/m)' in keys:
int_key = 'Intensity background & tray corrected (A/m)' # depends on [control=['if'], data=[]]
if 'Core Type' in keys:
core_type = 'Core Type' # depends on [control=['if'], data=[]]
else:
core_type = 'Type'
if 'Run Number' in keys:
run_number_key = 'Run Number' # depends on [control=['if'], data=[]]
if 'Test No.' in keys:
run_number_key = 'Test No.' # depends on [control=['if'], data=[]]
if 'Test Changed On' in keys:
date_key = 'Test Changed On' # depends on [control=['if'], data=[]]
if 'Timestamp (UTC)' in keys:
date_key = 'Timestamp (UTC)' # depends on [control=['if'], data=[]]
if 'Section' in keys:
sect_key = 'Section' # depends on [control=['if'], data=[]]
if 'Sect' in keys:
sect_key = 'Sect' # depends on [control=['if'], data=[]]
if 'Section Half' in keys:
half_key = 'Section Half' # depends on [control=['if'], data=[]]
if 'A/W' in keys:
half_key = 'A/W' # depends on [control=['if'], data=[]]
if 'Text ID' in keys:
text_id = 'Text ID' # depends on [control=['if'], data=[]]
if 'Text Id' in keys:
text_id = 'Text Id' # depends on [control=['if'], data=[]]
for line in file_input[1:]:
InRec = {}
test = 0
recs = line.split(',')
for k in range(len(keys)):
if len(recs) == len(keys):
InRec[keys[k]] = line.split(',')[k] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
if InRec['Exp'] != '':
test = 1 # get rid of pesky blank lines # depends on [control=['if'], data=[]]
if test == 1:
run_number = ''
inst = 'IODP-SRM'
volume = '15.59' # set default volume to this
(MagRec, SpecRec, SampRec, SiteRec) = ({}, {}, {}, {})
expedition = InRec['Exp']
location = InRec['Site'] + InRec['Hole']
# Maintain backward compatibility for the ever-changing LIMS format (Argh!)
while len(InRec['Core']) < 3:
InRec['Core'] = '0' + InRec['Core'] # depends on [control=['while'], data=[]]
if 'Last Tray Measurment' in list(InRec.keys()) and 'SHLF' not in InRec[text_id] or 'dscr' in csv_file: # assume discrete sample
specimen = expedition + '-' + location + '-' + InRec['Core'] + InRec[core_type] + '-' + InRec[sect_key] + '-' + InRec[half_key] + '-' + str(InRec[interval_key]) # depends on [control=['if'], data=[]]
else: # mark as continuous measurements
specimen = expedition + '-' + location + '-' + InRec['Core'] + InRec[core_type] + '_' + InRec[sect_key] + InRec[half_key] + '-' + str(InRec[interval_key])
SpecRec['er_expedition_name'] = expedition
SpecRec['er_location_name'] = location
SpecRec['er_site_name'] = specimen
SpecRec['er_citation_names'] = citation
for key in list(SpecRec.keys()):
SampRec[key] = SpecRec[key] # depends on [control=['for'], data=['key']]
for key in list(SpecRec.keys()):
SiteRec[key] = SpecRec[key] # depends on [control=['for'], data=['key']]
SampRec['sample_azimuth'] = '0'
SampRec['sample_dip'] = '0'
SampRec['sample_core_depth'] = InRec[depth_key]
if comp_depth_key != '':
SampRec['sample_composite_depth'] = InRec[comp_depth_key] # depends on [control=['if'], data=['comp_depth_key']]
if 'SHLF' not in InRec[text_id]:
SampRec['magic_method_codes'] = 'FS-C-DRILL-IODP:SP-SS-C:SO-V' # depends on [control=['if'], data=[]]
else:
SampRec['magic_method_codes'] = 'FS-C-DRILL-IODP:SO-V'
SpecRec['er_specimen_name'] = specimen
SpecRec['er_sample_name'] = specimen
SampRec['er_sample_name'] = specimen
SampRec['er_specimen_names'] = specimen
SiteRec['er_specimen_names'] = specimen
for key in list(SpecRec.keys()):
MagRec[key] = SpecRec[key] # depends on [control=['for'], data=['key']]
# set up measurement record - default is NRM
#MagRec['er_analyst_mail_names']=InRec['Test Entered By']
MagRec['magic_software_packages'] = version_num
MagRec['treatment_temp'] = '%8.3e' % 273 # room temp in kelvin
MagRec['measurement_temp'] = '%8.3e' % 273 # room temp in kelvin
MagRec['treatment_ac_field'] = 0
MagRec['treatment_dc_field'] = '0'
MagRec['treatment_dc_field_phi'] = '0'
MagRec['treatment_dc_field_theta'] = '0'
MagRec['measurement_flag'] = 'g' # assume all data are "good"
MagRec['measurement_standard'] = 'u' # assume all data are "good"
SpecRec['er_specimen_alternatives'] = InRec[text_id]
if 'Sample Area (cm?)' in list(InRec.keys()) and InRec['Sample Area (cm?)'] != '':
volume = InRec['Sample Area (cm?)'] # depends on [control=['if'], data=[]]
if InRec[run_number_key] != '':
run_number = InRec[run_number_key] # depends on [control=['if'], data=[]]
datestamp = InRec[date_key].split() # date time is second line of file
if '/' in datestamp[0]:
mmddyy = datestamp[0].split('/') # break into month day year
if len(mmddyy[0]) == 1:
mmddyy[0] = '0' + mmddyy[0] # make 2 characters # depends on [control=['if'], data=[]]
if len(mmddyy[1]) == 1:
mmddyy[1] = '0' + mmddyy[1] # make 2 characters # depends on [control=['if'], data=[]]
if len(datestamp[1]) == 1:
datestamp[1] = '0' + datestamp[1] # make 2 characters # depends on [control=['if'], data=[]]
date = '20' + mmddyy[2] + ':' + mmddyy[0] + ':' + mmddyy[1] + ':' + datestamp[1] + ':00.00' # depends on [control=['if'], data=[]]
if '-' in datestamp[0]:
mmddyy = datestamp[0].split('-') # break into month day year
date = mmddyy[0] + ':' + mmddyy[1] + ':' + mmddyy[2] + ':' + datestamp[1] + ':00.00' # depends on [control=['if'], data=[]]
MagRec['measurement_date'] = date
MagRec['magic_method_codes'] = 'LT-NO'
if InRec[demag_key] != '0':
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst = inst + ':IODP-SRM-AF' # measured on shipboard in-line 2G AF
treatment_value = float(InRec[demag_key].strip('"')) * 0.001 # convert mT => T
MagRec['treatment_ac_field'] = treatment_value # AF demag in treat mT => T # depends on [control=['if'], data=[]]
if 'Treatment Type' in list(InRec.keys()) and InRec['Treatment Type'] != '':
if 'Alternating Frequency' in InRec['Treatment Type']:
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst = inst + ':I`ODP-DTECH' # measured on shipboard Dtech D2000
treatment_value = float(InRec['Treatment Value']) * 0.001 # convert mT => T
MagRec['treatment_ac_field'] = treatment_value # AF demag in treat mT => T # depends on [control=['if'], data=[]]
elif 'Thermal' in InRec['Treatment Type']:
MagRec['magic_method_codes'] = 'LT-T-Z'
inst = inst + ':IODP-TDS' # measured on shipboard Schonstedt thermal demagnetizer
treatment_value = float(InRec['Treatment Value']) + 273 # convert C => K
MagRec['treatment_temp'] = '%8.3e' % treatment_value # # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
MagRec['measurement_standard'] = 'u' # assume all data are "good"
vol = float(volume) * 1e-06 # convert from cc to m^3
if run_number != '':
MagRec['external_database_ids'] = run_number
MagRec['external_database_names'] = 'LIMS' # depends on [control=['if'], data=['run_number']]
else:
MagRec['external_database_ids'] = ''
MagRec['external_database_names'] = ''
MagRec['measurement_inc'] = InRec[inc_key].strip('"')
MagRec['measurement_dec'] = InRec[dec_key].strip('"')
intens = InRec[int_key].strip('"')
MagRec['measurement_magn_moment'] = '%8.3e' % (float(intens) * vol) # convert intensity from A/m to Am^2 using vol
MagRec['magic_instrument_codes'] = inst
MagRec['measurement_number'] = '1'
MagRec['measurement_csd'] = ''
MagRec['measurement_positions'] = ''
MagRecs.append(MagRec)
if specimen not in specimens:
specimens.append(specimen)
SpecRecs.append(SpecRec) # depends on [control=['if'], data=['specimen', 'specimens']]
if MagRec['er_sample_name'] not in samples:
samples.append(MagRec['er_sample_name'])
SampRecs.append(SampRec) # depends on [control=['if'], data=['samples']]
if MagRec['er_site_name'] not in sites:
sites.append(MagRec['er_site_name'])
SiteRecs.append(SiteRec) # depends on [control=['if'], data=['sites']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
#except:
# print 'Boo-boo somewhere - no idea where'
if not file_found:
print('No .csv files were found')
return (False, 'No .csv files were found') # depends on [control=['if'], data=[]]
if len(SpecRecs) > 0:
print('spec_file', spec_file)
pmag.magic_write(spec_file, SpecRecs, 'er_specimens') # depends on [control=['if'], data=[]]
#print 'specimens stored in ',spec_file
if len(SampRecs) > 0:
(SampOut, keys) = pmag.fillkeys(SampRecs)
pmag.magic_write(samp_file, SampOut, 'er_samples') # depends on [control=['if'], data=[]]
#print 'samples stored in ',samp_file
if len(SiteRecs) > 0:
pmag.magic_write(site_file, SiteRecs, 'er_sites') # depends on [control=['if'], data=[]]
#print 'sites stored in ',site_file
MagSort = pmag.sortbykeys(MagRecs, ['er_specimen_name', 'treatment_ac_field'])
MagOuts = []
for MagRec in MagSort:
MagRec['treatment_ac_field'] = '%8.3e' % MagRec['treatment_ac_field'] # convert to string
MagOuts.append(MagRec) # depends on [control=['for'], data=['MagRec']]
Fixed = pmag.measurements_methods(MagOuts, noave)
if pmag.magic_write(meas_file, Fixed, 'magic_measurements'):
print('data stored in ', meas_file)
return (True, meas_file) # depends on [control=['if'], data=[]]
else:
print('no data found. bad magfile?')
return (False, 'no data found. bad magfile?') |
def delete_all_secrets(cls, user, client_id):
    """Remove every stored credential belonging to *client_id*.

    Yield-based coroutine: first confirms that *user* is allowed to
    delete the client's secrets, then marks every matching document
    as deleted via a bulk save.

    Raises exceptions.Unauthorized when the permission check fails.
    """
    allowed = yield cls(client_id=client_id).can_delete(user)
    if not allowed:
        raise exceptions.Unauthorized('User may not delete {} secrets'
                                      .format(client_id))
    found = yield cls.view.get(key=client_id, include_docs=True)
    if not found['rows']:
        return
    db = cls.db_client()
    # Bulk-delete by writing tombstone stubs (_deleted=True) for each doc.
    tombstones = []
    for row in found['rows']:
        doc = row['doc']
        tombstones.append({
            '_rev': doc['_rev'],
            '_id': doc['_id'],
            '_deleted': True
        })
    yield db.save_docs(tombstones)
constant[Delete all of the client's credentials]
variable[can_delete] assign[=] <ast.Yield object at 0x7da2041d8a60>
if <ast.UnaryOp object at 0x7da2041d88e0> begin[:]
<ast.Raise object at 0x7da2041dad40>
variable[results] assign[=] <ast.Yield object at 0x7da2041db340>
if call[name[results]][constant[rows]] begin[:]
variable[db] assign[=] call[name[cls].db_client, parameter[]]
variable[docs] assign[=] <ast.ListComp object at 0x7da2041d81f0>
<ast.Yield object at 0x7da2041da0e0> | keyword[def] identifier[delete_all_secrets] ( identifier[cls] , identifier[user] , identifier[client_id] ):
literal[string]
identifier[can_delete] = keyword[yield] identifier[cls] ( identifier[client_id] = identifier[client_id] ). identifier[can_delete] ( identifier[user] )
keyword[if] keyword[not] identifier[can_delete] :
keyword[raise] identifier[exceptions] . identifier[Unauthorized] ( literal[string]
. identifier[format] ( identifier[client_id] ))
identifier[results] = keyword[yield] identifier[cls] . identifier[view] . identifier[get] ( identifier[key] = identifier[client_id] , identifier[include_docs] = keyword[True] )
keyword[if] identifier[results] [ literal[string] ]:
identifier[db] = identifier[cls] . identifier[db_client] ()
identifier[docs] =[{
literal[string] : identifier[doc] [ literal[string] ][ literal[string] ],
literal[string] : identifier[doc] [ literal[string] ][ literal[string] ],
literal[string] : keyword[True]
} keyword[for] identifier[doc] keyword[in] identifier[results] [ literal[string] ]]
keyword[yield] identifier[db] . identifier[save_docs] ( identifier[docs] ) | def delete_all_secrets(cls, user, client_id):
"""Delete all of the client's credentials"""
can_delete = (yield cls(client_id=client_id).can_delete(user))
if not can_delete:
raise exceptions.Unauthorized('User may not delete {} secrets'.format(client_id)) # depends on [control=['if'], data=[]]
results = (yield cls.view.get(key=client_id, include_docs=True))
if results['rows']:
db = cls.db_client()
docs = [{'_rev': doc['doc']['_rev'], '_id': doc['doc']['_id'], '_deleted': True} for doc in results['rows']]
yield db.save_docs(docs) # depends on [control=['if'], data=[]] |
def _get_a2_value(bbar, dbar, slip, beta, mmax):
"""
Returns the A2 value defined in II.8 (Table 3)
"""
return ((dbar - bbar) / bbar) * (slip / beta) *\
np.exp(-(dbar / 2.) * mmax) | def function[_get_a2_value, parameter[bbar, dbar, slip, beta, mmax]]:
constant[
Returns the A2 value defined in II.8 (Table 3)
]
return[binary_operation[binary_operation[binary_operation[binary_operation[name[dbar] - name[bbar]] / name[bbar]] * binary_operation[name[slip] / name[beta]]] * call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da18bcc9ed0> * name[mmax]]]]]] | keyword[def] identifier[_get_a2_value] ( identifier[bbar] , identifier[dbar] , identifier[slip] , identifier[beta] , identifier[mmax] ):
literal[string]
keyword[return] (( identifier[dbar] - identifier[bbar] )/ identifier[bbar] )*( identifier[slip] / identifier[beta] )* identifier[np] . identifier[exp] (-( identifier[dbar] / literal[int] )* identifier[mmax] ) | def _get_a2_value(bbar, dbar, slip, beta, mmax):
"""
Returns the A2 value defined in II.8 (Table 3)
"""
return (dbar - bbar) / bbar * (slip / beta) * np.exp(-(dbar / 2.0) * mmax) |
def support_autoupload_param_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
autoupload_param = ET.SubElement(support, "autoupload-param")
username = ET.SubElement(autoupload_param, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[support_autoupload_param_username, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[support] assign[=] call[name[ET].SubElement, parameter[name[config], constant[support]]]
variable[autoupload_param] assign[=] call[name[ET].SubElement, parameter[name[support], constant[autoupload-param]]]
variable[username] assign[=] call[name[ET].SubElement, parameter[name[autoupload_param], constant[username]]]
name[username].text assign[=] call[name[kwargs].pop, parameter[constant[username]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[support_autoupload_param_username] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[support] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[autoupload_param] = identifier[ET] . identifier[SubElement] ( identifier[support] , literal[string] )
identifier[username] = identifier[ET] . identifier[SubElement] ( identifier[autoupload_param] , literal[string] )
identifier[username] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def support_autoupload_param_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
support = ET.SubElement(config, 'support', xmlns='urn:brocade.com:mgmt:brocade-ras')
autoupload_param = ET.SubElement(support, 'autoupload-param')
username = ET.SubElement(autoupload_param, 'username')
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def uniform(self, a: float, b: float, precision: int = 15) -> float:
"""Get a random number in the range [a, b) or [a, b] depending on rounding.
:param a: Minimum value.
:param b: Maximum value.
:param precision: Round a number to a given
precision in decimal digits, default is 15.
"""
return round(a + (b - a) * self.random(), precision) | def function[uniform, parameter[self, a, b, precision]]:
constant[Get a random number in the range [a, b) or [a, b] depending on rounding.
:param a: Minimum value.
:param b: Maximum value.
:param precision: Round a number to a given
precision in decimal digits, default is 15.
]
return[call[name[round], parameter[binary_operation[name[a] + binary_operation[binary_operation[name[b] - name[a]] * call[name[self].random, parameter[]]]], name[precision]]]] | keyword[def] identifier[uniform] ( identifier[self] , identifier[a] : identifier[float] , identifier[b] : identifier[float] , identifier[precision] : identifier[int] = literal[int] )-> identifier[float] :
literal[string]
keyword[return] identifier[round] ( identifier[a] +( identifier[b] - identifier[a] )* identifier[self] . identifier[random] (), identifier[precision] ) | def uniform(self, a: float, b: float, precision: int=15) -> float:
"""Get a random number in the range [a, b) or [a, b] depending on rounding.
:param a: Minimum value.
:param b: Maximum value.
:param precision: Round a number to a given
precision in decimal digits, default is 15.
"""
return round(a + (b - a) * self.random(), precision) |
def _recurse(self, matrix, m_list, indices, output_m_list=[]):
"""
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
"""
# check to see if we've found all the solutions that we need
if self._finished:
return
# if we're done with the current manipulation, pop it off.
while m_list[-1][1] == 0:
m_list = copy(m_list)
m_list.pop()
# if there are no more manipulations left to do check the value
if not m_list:
matrix_sum = np.sum(matrix)
if matrix_sum < self._current_minimum:
self.add_m_list(matrix_sum, output_m_list)
return
# if we wont have enough indices left, return
if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
return
if len(m_list) == 1 or m_list[-1][1] > 1:
if self.best_case(matrix, m_list, indices) > self._current_minimum:
return
index = self.get_next_index(matrix, m_list[-1], indices)
m_list[-1][2].remove(index)
# Make the matrix and new m_list where we do the manipulation to the
# index that we just got
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[-1][0]
matrix2[:, index] *= m_list[-1][0]
output_m_list2.append([index, m_list[-1][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[-1][1] -= 1
# recurse through both the modified and unmodified matrices
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list) | def function[_recurse, parameter[self, matrix, m_list, indices, output_m_list]]:
constant[
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
]
if name[self]._finished begin[:]
return[None]
while compare[call[call[name[m_list]][<ast.UnaryOp object at 0x7da20c7cb8b0>]][constant[1]] equal[==] constant[0]] begin[:]
variable[m_list] assign[=] call[name[copy], parameter[name[m_list]]]
call[name[m_list].pop, parameter[]]
if <ast.UnaryOp object at 0x7da20c7caa10> begin[:]
variable[matrix_sum] assign[=] call[name[np].sum, parameter[name[matrix]]]
if compare[name[matrix_sum] less[<] name[self]._current_minimum] begin[:]
call[name[self].add_m_list, parameter[name[matrix_sum], name[output_m_list]]]
return[None]
if compare[call[call[name[m_list]][<ast.UnaryOp object at 0x7da20c7cb400>]][constant[1]] greater[>] call[name[len], parameter[call[name[indices].intersection, parameter[call[call[name[m_list]][<ast.UnaryOp object at 0x7da20c7c8970>]][constant[2]]]]]]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da20c7c97e0> begin[:]
if compare[call[name[self].best_case, parameter[name[matrix], name[m_list], name[indices]]] greater[>] name[self]._current_minimum] begin[:]
return[None]
variable[index] assign[=] call[name[self].get_next_index, parameter[name[matrix], call[name[m_list]][<ast.UnaryOp object at 0x7da20c7cbee0>], name[indices]]]
call[call[call[name[m_list]][<ast.UnaryOp object at 0x7da20c7cb6d0>]][constant[2]].remove, parameter[name[index]]]
variable[matrix2] assign[=] call[name[np].copy, parameter[name[matrix]]]
variable[m_list2] assign[=] call[name[deepcopy], parameter[name[m_list]]]
variable[output_m_list2] assign[=] call[name[copy], parameter[name[output_m_list]]]
<ast.AugAssign object at 0x7da20c7cb220>
<ast.AugAssign object at 0x7da20c7cb5e0>
call[name[output_m_list2].append, parameter[list[[<ast.Name object at 0x7da20c7c9240>, <ast.Subscript object at 0x7da20c7c9d20>]]]]
variable[indices2] assign[=] call[name[copy], parameter[name[indices]]]
call[name[indices2].remove, parameter[name[index]]]
<ast.AugAssign object at 0x7da20c7c85b0>
call[name[self]._recurse, parameter[name[matrix2], name[m_list2], name[indices2], name[output_m_list2]]]
call[name[self]._recurse, parameter[name[matrix], name[m_list], name[indices], name[output_m_list]]] | keyword[def] identifier[_recurse] ( identifier[self] , identifier[matrix] , identifier[m_list] , identifier[indices] , identifier[output_m_list] =[]):
literal[string]
keyword[if] identifier[self] . identifier[_finished] :
keyword[return]
keyword[while] identifier[m_list] [- literal[int] ][ literal[int] ]== literal[int] :
identifier[m_list] = identifier[copy] ( identifier[m_list] )
identifier[m_list] . identifier[pop] ()
keyword[if] keyword[not] identifier[m_list] :
identifier[matrix_sum] = identifier[np] . identifier[sum] ( identifier[matrix] )
keyword[if] identifier[matrix_sum] < identifier[self] . identifier[_current_minimum] :
identifier[self] . identifier[add_m_list] ( identifier[matrix_sum] , identifier[output_m_list] )
keyword[return]
keyword[if] identifier[m_list] [- literal[int] ][ literal[int] ]> identifier[len] ( identifier[indices] . identifier[intersection] ( identifier[m_list] [- literal[int] ][ literal[int] ])):
keyword[return]
keyword[if] identifier[len] ( identifier[m_list] )== literal[int] keyword[or] identifier[m_list] [- literal[int] ][ literal[int] ]> literal[int] :
keyword[if] identifier[self] . identifier[best_case] ( identifier[matrix] , identifier[m_list] , identifier[indices] )> identifier[self] . identifier[_current_minimum] :
keyword[return]
identifier[index] = identifier[self] . identifier[get_next_index] ( identifier[matrix] , identifier[m_list] [- literal[int] ], identifier[indices] )
identifier[m_list] [- literal[int] ][ literal[int] ]. identifier[remove] ( identifier[index] )
identifier[matrix2] = identifier[np] . identifier[copy] ( identifier[matrix] )
identifier[m_list2] = identifier[deepcopy] ( identifier[m_list] )
identifier[output_m_list2] = identifier[copy] ( identifier[output_m_list] )
identifier[matrix2] [ identifier[index] ,:]*= identifier[m_list] [- literal[int] ][ literal[int] ]
identifier[matrix2] [:, identifier[index] ]*= identifier[m_list] [- literal[int] ][ literal[int] ]
identifier[output_m_list2] . identifier[append] ([ identifier[index] , identifier[m_list] [- literal[int] ][ literal[int] ]])
identifier[indices2] = identifier[copy] ( identifier[indices] )
identifier[indices2] . identifier[remove] ( identifier[index] )
identifier[m_list2] [- literal[int] ][ literal[int] ]-= literal[int]
identifier[self] . identifier[_recurse] ( identifier[matrix2] , identifier[m_list2] , identifier[indices2] , identifier[output_m_list2] )
identifier[self] . identifier[_recurse] ( identifier[matrix] , identifier[m_list] , identifier[indices] , identifier[output_m_list] ) | def _recurse(self, matrix, m_list, indices, output_m_list=[]):
"""
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
"""
# check to see if we've found all the solutions that we need
if self._finished:
return # depends on [control=['if'], data=[]]
# if we're done with the current manipulation, pop it off.
while m_list[-1][1] == 0:
m_list = copy(m_list)
m_list.pop()
# if there are no more manipulations left to do check the value
if not m_list:
matrix_sum = np.sum(matrix)
if matrix_sum < self._current_minimum:
self.add_m_list(matrix_sum, output_m_list) # depends on [control=['if'], data=['matrix_sum']]
return # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# if we wont have enough indices left, return
if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
return # depends on [control=['if'], data=[]]
if len(m_list) == 1 or m_list[-1][1] > 1:
if self.best_case(matrix, m_list, indices) > self._current_minimum:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
index = self.get_next_index(matrix, m_list[-1], indices)
m_list[-1][2].remove(index)
# Make the matrix and new m_list where we do the manipulation to the
# index that we just got
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[-1][0]
matrix2[:, index] *= m_list[-1][0]
output_m_list2.append([index, m_list[-1][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[-1][1] -= 1
# recurse through both the modified and unmodified matrices
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list) |
def adapters(self, adapters):
"""
Sets the number of Ethernet adapters for this QEMU VM.
:param adapters: number of adapters
"""
self._ethernet_adapters.clear()
for adapter_number in range(0, adapters):
self._ethernet_adapters.append(EthernetAdapter())
log.info('QEMU VM "{name}" [{id}]: number of Ethernet adapters changed to {adapters}'.format(name=self._name,
id=self._id,
adapters=adapters)) | def function[adapters, parameter[self, adapters]]:
constant[
Sets the number of Ethernet adapters for this QEMU VM.
:param adapters: number of adapters
]
call[name[self]._ethernet_adapters.clear, parameter[]]
for taget[name[adapter_number]] in starred[call[name[range], parameter[constant[0], name[adapters]]]] begin[:]
call[name[self]._ethernet_adapters.append, parameter[call[name[EthernetAdapter], parameter[]]]]
call[name[log].info, parameter[call[constant[QEMU VM "{name}" [{id}]: number of Ethernet adapters changed to {adapters}].format, parameter[]]]] | keyword[def] identifier[adapters] ( identifier[self] , identifier[adapters] ):
literal[string]
identifier[self] . identifier[_ethernet_adapters] . identifier[clear] ()
keyword[for] identifier[adapter_number] keyword[in] identifier[range] ( literal[int] , identifier[adapters] ):
identifier[self] . identifier[_ethernet_adapters] . identifier[append] ( identifier[EthernetAdapter] ())
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ,
identifier[id] = identifier[self] . identifier[_id] ,
identifier[adapters] = identifier[adapters] )) | def adapters(self, adapters):
"""
Sets the number of Ethernet adapters for this QEMU VM.
:param adapters: number of adapters
"""
self._ethernet_adapters.clear()
for adapter_number in range(0, adapters):
self._ethernet_adapters.append(EthernetAdapter()) # depends on [control=['for'], data=[]]
log.info('QEMU VM "{name}" [{id}]: number of Ethernet adapters changed to {adapters}'.format(name=self._name, id=self._id, adapters=adapters)) |
def parse_inline(self, text):
"""Parses text into inline elements.
RawText is not considered in parsing but created as a wrapper of holes
that don't match any other elements.
:param text: the text to be parsed.
:returns: a list of inline elements.
"""
element_list = self._build_inline_element_list()
return inline_parser.parse(
text, element_list, fallback=self.inline_elements['RawText']
) | def function[parse_inline, parameter[self, text]]:
constant[Parses text into inline elements.
RawText is not considered in parsing but created as a wrapper of holes
that don't match any other elements.
:param text: the text to be parsed.
:returns: a list of inline elements.
]
variable[element_list] assign[=] call[name[self]._build_inline_element_list, parameter[]]
return[call[name[inline_parser].parse, parameter[name[text], name[element_list]]]] | keyword[def] identifier[parse_inline] ( identifier[self] , identifier[text] ):
literal[string]
identifier[element_list] = identifier[self] . identifier[_build_inline_element_list] ()
keyword[return] identifier[inline_parser] . identifier[parse] (
identifier[text] , identifier[element_list] , identifier[fallback] = identifier[self] . identifier[inline_elements] [ literal[string] ]
) | def parse_inline(self, text):
"""Parses text into inline elements.
RawText is not considered in parsing but created as a wrapper of holes
that don't match any other elements.
:param text: the text to be parsed.
:returns: a list of inline elements.
"""
element_list = self._build_inline_element_list()
return inline_parser.parse(text, element_list, fallback=self.inline_elements['RawText']) |
def strings(self):
'''
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
'''
# Many FSMs have "dead states". Once you reach a dead state, you can no
# longer reach a final state. Since many strings may end up here, it's
# advantageous to constrain our search to live states only.
livestates = set(state for state in self.states if self.islive(state))
# We store a list of tuples. Each tuple consists of an input string and the
# state that this input string leads to. This means we don't have to run the
# state machine from the very beginning every time we want to check a new
# string.
strings = []
# Initial entry (or possibly not, in which case this is a short one)
cstate = self.initial
cstring = []
if cstate in livestates:
if cstate in self.finals:
yield cstring
strings.append((cstring, cstate))
# Fixed point calculation
i = 0
while i < len(strings):
(cstring, cstate) = strings[i]
if cstate in self.map:
for symbol in sorted(self.map[cstate], key=key):
nstate = self.map[cstate][symbol]
nstring = cstring + [symbol]
if nstate in livestates:
if nstate in self.finals:
yield nstring
strings.append((nstring, nstate))
i += 1 | def function[strings, parameter[self]]:
constant[
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
]
variable[livestates] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b047a1d0>]]
variable[strings] assign[=] list[[]]
variable[cstate] assign[=] name[self].initial
variable[cstring] assign[=] list[[]]
if compare[name[cstate] in name[livestates]] begin[:]
if compare[name[cstate] in name[self].finals] begin[:]
<ast.Yield object at 0x7da1b047a230>
call[name[strings].append, parameter[tuple[[<ast.Name object at 0x7da1b0478760>, <ast.Name object at 0x7da1b04796c0>]]]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] call[name[len], parameter[name[strings]]]] begin[:]
<ast.Tuple object at 0x7da1b047b7f0> assign[=] call[name[strings]][name[i]]
if compare[name[cstate] in name[self].map] begin[:]
for taget[name[symbol]] in starred[call[name[sorted], parameter[call[name[self].map][name[cstate]]]]] begin[:]
variable[nstate] assign[=] call[call[name[self].map][name[cstate]]][name[symbol]]
variable[nstring] assign[=] binary_operation[name[cstring] + list[[<ast.Name object at 0x7da1b0478070>]]]
if compare[name[nstate] in name[livestates]] begin[:]
if compare[name[nstate] in name[self].finals] begin[:]
<ast.Yield object at 0x7da1b047a2c0>
call[name[strings].append, parameter[tuple[[<ast.Name object at 0x7da1b0478d90>, <ast.Name object at 0x7da1b0479690>]]]]
<ast.AugAssign object at 0x7da1b047a9b0> | keyword[def] identifier[strings] ( identifier[self] ):
literal[string]
identifier[livestates] = identifier[set] ( identifier[state] keyword[for] identifier[state] keyword[in] identifier[self] . identifier[states] keyword[if] identifier[self] . identifier[islive] ( identifier[state] ))
identifier[strings] =[]
identifier[cstate] = identifier[self] . identifier[initial]
identifier[cstring] =[]
keyword[if] identifier[cstate] keyword[in] identifier[livestates] :
keyword[if] identifier[cstate] keyword[in] identifier[self] . identifier[finals] :
keyword[yield] identifier[cstring]
identifier[strings] . identifier[append] (( identifier[cstring] , identifier[cstate] ))
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[strings] ):
( identifier[cstring] , identifier[cstate] )= identifier[strings] [ identifier[i] ]
keyword[if] identifier[cstate] keyword[in] identifier[self] . identifier[map] :
keyword[for] identifier[symbol] keyword[in] identifier[sorted] ( identifier[self] . identifier[map] [ identifier[cstate] ], identifier[key] = identifier[key] ):
identifier[nstate] = identifier[self] . identifier[map] [ identifier[cstate] ][ identifier[symbol] ]
identifier[nstring] = identifier[cstring] +[ identifier[symbol] ]
keyword[if] identifier[nstate] keyword[in] identifier[livestates] :
keyword[if] identifier[nstate] keyword[in] identifier[self] . identifier[finals] :
keyword[yield] identifier[nstring]
identifier[strings] . identifier[append] (( identifier[nstring] , identifier[nstate] ))
identifier[i] += literal[int] | def strings(self):
"""
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
""" # Many FSMs have "dead states". Once you reach a dead state, you can no
# longer reach a final state. Since many strings may end up here, it's
# advantageous to constrain our search to live states only.
livestates = set((state for state in self.states if self.islive(state))) # We store a list of tuples. Each tuple consists of an input string and the
# state that this input string leads to. This means we don't have to run the
# state machine from the very beginning every time we want to check a new
# string.
strings = [] # Initial entry (or possibly not, in which case this is a short one)
cstate = self.initial
cstring = []
if cstate in livestates:
if cstate in self.finals:
yield cstring # depends on [control=['if'], data=[]]
strings.append((cstring, cstate)) # depends on [control=['if'], data=['cstate']] # Fixed point calculation
i = 0
while i < len(strings):
(cstring, cstate) = strings[i]
if cstate in self.map:
for symbol in sorted(self.map[cstate], key=key):
nstate = self.map[cstate][symbol]
nstring = cstring + [symbol]
if nstate in livestates:
if nstate in self.finals:
yield nstring # depends on [control=['if'], data=[]]
strings.append((nstring, nstate)) # depends on [control=['if'], data=['nstate']] # depends on [control=['for'], data=['symbol']] # depends on [control=['if'], data=['cstate']]
i += 1 # depends on [control=['while'], data=['i']] |
def search(self, search_phrase, limit=None):
"""Search for datasets, and expand to database records"""
from ambry.identity import ObjectNumber
from ambry.orm.exc import NotFoundError
from ambry.library.search_backends.base import SearchTermParser
results = []
stp = SearchTermParser()
# Because of the split between searching for partitions and bundles, some terms don't behave right.
# The source term should be a limit on everything, but it isn't part of the partition doc,
# so we check for it here.
parsed_terms = stp.parse(search_phrase)
for r in self.search_datasets(search_phrase, limit):
vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset
r.vid = vid
try:
r.bundle = self.library.bundle(r.vid)
if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source:
results.append(r)
except NotFoundError:
pass
return sorted(results, key=lambda r : r.score, reverse=True) | def function[search, parameter[self, search_phrase, limit]]:
constant[Search for datasets, and expand to database records]
from relative_module[ambry.identity] import module[ObjectNumber]
from relative_module[ambry.orm.exc] import module[NotFoundError]
from relative_module[ambry.library.search_backends.base] import module[SearchTermParser]
variable[results] assign[=] list[[]]
variable[stp] assign[=] call[name[SearchTermParser], parameter[]]
variable[parsed_terms] assign[=] call[name[stp].parse, parameter[name[search_phrase]]]
for taget[name[r]] in starred[call[name[self].search_datasets, parameter[name[search_phrase], name[limit]]]] begin[:]
variable[vid] assign[=] <ast.BoolOp object at 0x7da1b0ebf760>
name[r].vid assign[=] name[vid]
<ast.Try object at 0x7da1b0ebfc40>
return[call[name[sorted], parameter[name[results]]]] | keyword[def] identifier[search] ( identifier[self] , identifier[search_phrase] , identifier[limit] = keyword[None] ):
literal[string]
keyword[from] identifier[ambry] . identifier[identity] keyword[import] identifier[ObjectNumber]
keyword[from] identifier[ambry] . identifier[orm] . identifier[exc] keyword[import] identifier[NotFoundError]
keyword[from] identifier[ambry] . identifier[library] . identifier[search_backends] . identifier[base] keyword[import] identifier[SearchTermParser]
identifier[results] =[]
identifier[stp] = identifier[SearchTermParser] ()
identifier[parsed_terms] = identifier[stp] . identifier[parse] ( identifier[search_phrase] )
keyword[for] identifier[r] keyword[in] identifier[self] . identifier[search_datasets] ( identifier[search_phrase] , identifier[limit] ):
identifier[vid] = identifier[r] . identifier[vid] keyword[or] identifier[ObjectNumber] . identifier[parse] ( identifier[next] ( identifier[iter] ( identifier[r] . identifier[partitions] ))). identifier[as_dataset]
identifier[r] . identifier[vid] = identifier[vid]
keyword[try] :
identifier[r] . identifier[bundle] = identifier[self] . identifier[library] . identifier[bundle] ( identifier[r] . identifier[vid] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[parsed_terms] keyword[or] identifier[parsed_terms] [ literal[string] ] keyword[in] identifier[r] . identifier[bundle] . identifier[dataset] . identifier[source] :
identifier[results] . identifier[append] ( identifier[r] )
keyword[except] identifier[NotFoundError] :
keyword[pass]
keyword[return] identifier[sorted] ( identifier[results] , identifier[key] = keyword[lambda] identifier[r] : identifier[r] . identifier[score] , identifier[reverse] = keyword[True] ) | def search(self, search_phrase, limit=None):
"""Search for datasets, and expand to database records"""
from ambry.identity import ObjectNumber
from ambry.orm.exc import NotFoundError
from ambry.library.search_backends.base import SearchTermParser
results = []
stp = SearchTermParser()
# Because of the split between searching for partitions and bundles, some terms don't behave right.
# The source term should be a limit on everything, but it isn't part of the partition doc,
# so we check for it here.
parsed_terms = stp.parse(search_phrase)
for r in self.search_datasets(search_phrase, limit):
vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset
r.vid = vid
try:
r.bundle = self.library.bundle(r.vid)
if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source:
results.append(r) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except NotFoundError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['r']]
return sorted(results, key=lambda r: r.score, reverse=True) |
def ccbox(msg="Shall I continue?", title=" ",
choices=("C[o]ntinue", "C[a]ncel"), image=None,
default_choice='Continue', cancel_choice='Cancel'):
"""
Display a msgbox with choices of Continue and Cancel.
The returned value is calculated this way::
if the first choice ("Continue") is chosen,
or if the dialog is cancelled:
return True
else:
return False
If invoked without a msg argument, displays a generic
request for a confirmation
that the user wishes to continue. So it can be used this way::
if ccbox():
pass # continue
else:
sys.exit(0) # exit the program
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: Filename of image to display
:param str default_choice: The choice you want highlighted
when the gui appears
:param str cancel_choice: If the user presses the 'X' close,
which button should be pressed
:return: True if 'Continue' or dialog is cancelled, False if 'Cancel'
"""
return boolbox(msg=msg,
title=title,
choices=choices,
image=image,
default_choice=default_choice,
cancel_choice=cancel_choice) | def function[ccbox, parameter[msg, title, choices, image, default_choice, cancel_choice]]:
constant[
Display a msgbox with choices of Continue and Cancel.
The returned value is calculated this way::
if the first choice ("Continue") is chosen,
or if the dialog is cancelled:
return True
else:
return False
If invoked without a msg argument, displays a generic
request for a confirmation
that the user wishes to continue. So it can be used this way::
if ccbox():
pass # continue
else:
sys.exit(0) # exit the program
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: Filename of image to display
:param str default_choice: The choice you want highlighted
when the gui appears
:param str cancel_choice: If the user presses the 'X' close,
which button should be pressed
:return: True if 'Continue' or dialog is cancelled, False if 'Cancel'
]
return[call[name[boolbox], parameter[]]] | keyword[def] identifier[ccbox] ( identifier[msg] = literal[string] , identifier[title] = literal[string] ,
identifier[choices] =( literal[string] , literal[string] ), identifier[image] = keyword[None] ,
identifier[default_choice] = literal[string] , identifier[cancel_choice] = literal[string] ):
literal[string]
keyword[return] identifier[boolbox] ( identifier[msg] = identifier[msg] ,
identifier[title] = identifier[title] ,
identifier[choices] = identifier[choices] ,
identifier[image] = identifier[image] ,
identifier[default_choice] = identifier[default_choice] ,
identifier[cancel_choice] = identifier[cancel_choice] ) | def ccbox(msg='Shall I continue?', title=' ', choices=('C[o]ntinue', 'C[a]ncel'), image=None, default_choice='Continue', cancel_choice='Cancel'):
"""
Display a msgbox with choices of Continue and Cancel.
The returned value is calculated this way::
if the first choice ("Continue") is chosen,
or if the dialog is cancelled:
return True
else:
return False
If invoked without a msg argument, displays a generic
request for a confirmation
that the user wishes to continue. So it can be used this way::
if ccbox():
pass # continue
else:
sys.exit(0) # exit the program
:param str msg: the msg to be displayed
:param str title: the window title
:param list choices: a list or tuple of the choices to be displayed
:param str image: Filename of image to display
:param str default_choice: The choice you want highlighted
when the gui appears
:param str cancel_choice: If the user presses the 'X' close,
which button should be pressed
:return: True if 'Continue' or dialog is cancelled, False if 'Cancel'
"""
return boolbox(msg=msg, title=title, choices=choices, image=image, default_choice=default_choice, cancel_choice=cancel_choice) |
def isin(elems, line):
"""Check if an element from a list is in a string.
:type elems: list
:type line: str
"""
found = False
for e in elems:
if e in line.lower():
found = True
break
return found | def function[isin, parameter[elems, line]]:
constant[Check if an element from a list is in a string.
:type elems: list
:type line: str
]
variable[found] assign[=] constant[False]
for taget[name[e]] in starred[name[elems]] begin[:]
if compare[name[e] in call[name[line].lower, parameter[]]] begin[:]
variable[found] assign[=] constant[True]
break
return[name[found]] | keyword[def] identifier[isin] ( identifier[elems] , identifier[line] ):
literal[string]
identifier[found] = keyword[False]
keyword[for] identifier[e] keyword[in] identifier[elems] :
keyword[if] identifier[e] keyword[in] identifier[line] . identifier[lower] ():
identifier[found] = keyword[True]
keyword[break]
keyword[return] identifier[found] | def isin(elems, line):
"""Check if an element from a list is in a string.
:type elems: list
:type line: str
"""
found = False
for e in elems:
if e in line.lower():
found = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']]
return found |
def getTransfer(self, iso_packets=0):
    """
    Create and register an USBTransfer instance for asynchronous use.
    iso_packets: the number of isochronous transfer descriptors to
      allocate.
    """
    transfer = USBTransfer(
        self.__handle,
        iso_packets,
        self.__inflight_add,
        self.__inflight_remove,
    )
    # Track the transfer so the device handle can manage its lifetime.
    self.__transfer_set.add(transfer)
    return transfer
constant[
Get an USBTransfer instance for asynchronous use.
iso_packets: the number of isochronous transfer descriptors to
allocate.
]
variable[result] assign[=] call[name[USBTransfer], parameter[name[self].__handle, name[iso_packets], name[self].__inflight_add, name[self].__inflight_remove]]
call[name[self].__transfer_set.add, parameter[name[result]]]
return[name[result]] | keyword[def] identifier[getTransfer] ( identifier[self] , identifier[iso_packets] = literal[int] ):
literal[string]
identifier[result] = identifier[USBTransfer] (
identifier[self] . identifier[__handle] , identifier[iso_packets] ,
identifier[self] . identifier[__inflight_add] , identifier[self] . identifier[__inflight_remove] ,
)
identifier[self] . identifier[__transfer_set] . identifier[add] ( identifier[result] )
keyword[return] identifier[result] | def getTransfer(self, iso_packets=0):
"""
Get an USBTransfer instance for asynchronous use.
iso_packets: the number of isochronous transfer descriptors to
allocate.
"""
result = USBTransfer(self.__handle, iso_packets, self.__inflight_add, self.__inflight_remove)
self.__transfer_set.add(result)
return result |
def _t(unistr, charset_from, charset_to):
"""
This is a unexposed function, is responsibility for translation internal.
"""
# if type(unistr) is str:
# try:
# unistr = unistr.decode('utf-8')
# # Python 3 returns AttributeError when .decode() is called on a str
# # This means it is already unicode.
# except AttributeError:
# pass
# try:
# if type(unistr) is not unicode:
# return unistr
# # Python 3 returns NameError because unicode is not a type.
# except NameError:
# pass
chars = []
for c in unistr:
idx = charset_from.find(c)
chars.append(charset_to[idx] if idx!=-1 else c)
return u''.join(chars) | def function[_t, parameter[unistr, charset_from, charset_to]]:
constant[
This is a unexposed function, is responsibility for translation internal.
]
variable[chars] assign[=] list[[]]
for taget[name[c]] in starred[name[unistr]] begin[:]
variable[idx] assign[=] call[name[charset_from].find, parameter[name[c]]]
call[name[chars].append, parameter[<ast.IfExp object at 0x7da1b10d7cd0>]]
return[call[constant[].join, parameter[name[chars]]]] | keyword[def] identifier[_t] ( identifier[unistr] , identifier[charset_from] , identifier[charset_to] ):
literal[string]
identifier[chars] =[]
keyword[for] identifier[c] keyword[in] identifier[unistr] :
identifier[idx] = identifier[charset_from] . identifier[find] ( identifier[c] )
identifier[chars] . identifier[append] ( identifier[charset_to] [ identifier[idx] ] keyword[if] identifier[idx] !=- literal[int] keyword[else] identifier[c] )
keyword[return] literal[string] . identifier[join] ( identifier[chars] ) | def _t(unistr, charset_from, charset_to):
"""
This is a unexposed function, is responsibility for translation internal.
"""
# if type(unistr) is str:
# try:
# unistr = unistr.decode('utf-8')
# # Python 3 returns AttributeError when .decode() is called on a str
# # This means it is already unicode.
# except AttributeError:
# pass
# try:
# if type(unistr) is not unicode:
# return unistr
# # Python 3 returns NameError because unicode is not a type.
# except NameError:
# pass
chars = []
for c in unistr:
idx = charset_from.find(c)
chars.append(charset_to[idx] if idx != -1 else c) # depends on [control=['for'], data=['c']]
return u''.join(chars) |
def _valid_dimensions(self, dimensions):
"""Validates key dimension input
Returns kdims if no dimensions are specified"""
if dimensions is None:
dimensions = self.kdims
elif not isinstance(dimensions, list):
dimensions = [dimensions]
valid_dimensions = []
for dim in dimensions:
if isinstance(dim, Dimension): dim = dim.name
if dim not in self.kdims:
raise Exception("Supplied dimensions %s not found." % dim)
valid_dimensions.append(dim)
return valid_dimensions | def function[_valid_dimensions, parameter[self, dimensions]]:
constant[Validates key dimension input
Returns kdims if no dimensions are specified]
if compare[name[dimensions] is constant[None]] begin[:]
variable[dimensions] assign[=] name[self].kdims
variable[valid_dimensions] assign[=] list[[]]
for taget[name[dim]] in starred[name[dimensions]] begin[:]
if call[name[isinstance], parameter[name[dim], name[Dimension]]] begin[:]
variable[dim] assign[=] name[dim].name
if compare[name[dim] <ast.NotIn object at 0x7da2590d7190> name[self].kdims] begin[:]
<ast.Raise object at 0x7da2054a4c40>
call[name[valid_dimensions].append, parameter[name[dim]]]
return[name[valid_dimensions]] | keyword[def] identifier[_valid_dimensions] ( identifier[self] , identifier[dimensions] ):
literal[string]
keyword[if] identifier[dimensions] keyword[is] keyword[None] :
identifier[dimensions] = identifier[self] . identifier[kdims]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[dimensions] , identifier[list] ):
identifier[dimensions] =[ identifier[dimensions] ]
identifier[valid_dimensions] =[]
keyword[for] identifier[dim] keyword[in] identifier[dimensions] :
keyword[if] identifier[isinstance] ( identifier[dim] , identifier[Dimension] ): identifier[dim] = identifier[dim] . identifier[name]
keyword[if] identifier[dim] keyword[not] keyword[in] identifier[self] . identifier[kdims] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[dim] )
identifier[valid_dimensions] . identifier[append] ( identifier[dim] )
keyword[return] identifier[valid_dimensions] | def _valid_dimensions(self, dimensions):
"""Validates key dimension input
Returns kdims if no dimensions are specified"""
if dimensions is None:
dimensions = self.kdims # depends on [control=['if'], data=['dimensions']]
elif not isinstance(dimensions, list):
dimensions = [dimensions] # depends on [control=['if'], data=[]]
valid_dimensions = []
for dim in dimensions:
if isinstance(dim, Dimension):
dim = dim.name # depends on [control=['if'], data=[]]
if dim not in self.kdims:
raise Exception('Supplied dimensions %s not found.' % dim) # depends on [control=['if'], data=['dim']]
valid_dimensions.append(dim) # depends on [control=['for'], data=['dim']]
return valid_dimensions |
def defer(
        self,
        func: typing.Callable[[], typing.Any],
        until: typing.Union[int, float]=-1,
) -> typing.Any:
    """Schedule *func* to run once the clock reaches *until*.

    Args:
        func (typing.Callable[[], typing.Any]): Zero-argument callable;
            its return value is discarded.
        until (typing.Union[int, float]): Clock value at which the
            callback becomes eligible to run.  Values earlier than the
            current time mean "run at the next opportunity".

    Returns:
        typing.Any: An opaque identifier, unique within the processor,
        that can be used later to reschedule or cancel the callback.

    Note:
        The time is not absolute: it only marks when the callback
        *becomes available* to execute.  Depending on the
        implementation, the actual execution may happen much later.
    """
    raise NotImplementedError()
constant[Defer the execution of a function until some clock value.
Args:
func (typing.Callable[[], typing.Any]): A callable that accepts no
arguments. All return values are ignored.
until (typing.Union[int, float]): A numeric value that represents
the clock time when the callback becomes available for
execution. Values that are less than the current time result in
the function being called at the next opportunity.
Returns:
typing.Any: An opaque identifier that represents the callback
uniquely within the processor. This identifier is used to
modify the callback scheduling.
Note:
The time given should not be considered absolute. It represents
the time when the callback becomes available to execute. It may
be much later than the given time value when the function actually
executes depending on the implementation.
]
<ast.Raise object at 0x7da20e961f60> | keyword[def] identifier[defer] (
identifier[self] ,
identifier[func] : identifier[typing] . identifier[Callable] [[], identifier[typing] . identifier[Any] ],
identifier[until] : identifier[typing] . identifier[Union] [ identifier[int] , identifier[float] ]=- literal[int] ,
)-> identifier[typing] . identifier[Any] :
literal[string]
keyword[raise] identifier[NotImplementedError] () | def defer(self, func: typing.Callable[[], typing.Any], until: typing.Union[int, float]=-1) -> typing.Any:
"""Defer the execution of a function until some clock value.
Args:
func (typing.Callable[[], typing.Any]): A callable that accepts no
arguments. All return values are ignored.
until (typing.Union[int, float]): A numeric value that represents
the clock time when the callback becomes available for
execution. Values that are less than the current time result in
the function being called at the next opportunity.
Returns:
typing.Any: An opaque identifier that represents the callback
uniquely within the processor. This identifier is used to
modify the callback scheduling.
Note:
The time given should not be considered absolute. It represents
the time when the callback becomes available to execute. It may
be much later than the given time value when the function actually
executes depending on the implementation.
"""
raise NotImplementedError() |
def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None):
    '''
    Use drill to lookup addresses
    :param name: Name of record to search
    :param rdtype: DNS record type
    :param timeout: command return timeout
    :param servers: [] of servers to use
    :param secure: request DNSSEC records and require validation
    :return: [] of records or False if error
    '''
    cmd = 'drill '
    if secure:
        # -D requests DNSSEC records, '-o ad' requires the AD
        # (authenticated data) flag in the response.
        cmd += '-D -o ad '
    cmd += '{0} {1} '.format(rdtype, name)
    if servers:
        cmd += ''.join(['@{0} '.format(srv) for srv in servers])
    cmd = __salt__['cmd.run_all'](
        cmd, timeout=timeout,
        python_shell=False, output_loglevel='quiet')
    if cmd['retcode'] != 0:
        log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr'])
        return False
    lookup_res = iter(cmd['stdout'].splitlines())
    validated = False
    res = []
    try:
        line = ''
        # Skip everything up to the ANSWER SECTION header.
        while 'ANSWER SECTION' not in line:
            line = next(lookup_res)
        while True:
            line = next(lookup_res)
            line = line.strip()
            if not line or line.startswith(';;'):
                break
            # Last two whitespace-separated fields are the record type
            # and the record data.
            l_type, l_rec = line.split(None, 4)[-2:]
            if l_type == 'CNAME' and rdtype != 'CNAME':
                # Intermediate CNAME records are followed, not returned.
                continue
            elif l_type == 'RRSIG':
                # A signature record marks the answer as validated.
                validated = True
                continue
            elif l_type != rdtype:
                # Report the unexpected record type we actually got,
                # not the type that was queried.
                raise ValueError('Invalid DNS type {}'.format(l_type))
            res.append(_data_clean(l_rec))
    except StopIteration:
        pass
    if res and secure and not validated:
        # Secure lookup without an RRSIG record: treat as failure.
        return False
    else:
        return res
constant[
Use drill to lookup addresses
:param name: Name of record to search
:param rdtype: DNS record type
:param timeout: command return timeout
:param servers: [] of servers to use
:return: [] of records or False if error
]
variable[cmd] assign[=] constant[drill ]
if name[secure] begin[:]
<ast.AugAssign object at 0x7da2044c2fb0>
<ast.AugAssign object at 0x7da2044c0850>
if name[servers] begin[:]
<ast.AugAssign object at 0x7da2044c1c60>
variable[cmd] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[cmd]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
call[name[log].warning, parameter[constant[drill returned (%s): %s], call[name[cmd]][constant[retcode]], call[name[cmd]][constant[stderr]]]]
return[constant[False]]
variable[lookup_res] assign[=] call[name[iter], parameter[call[call[name[cmd]][constant[stdout]].splitlines, parameter[]]]]
variable[validated] assign[=] constant[False]
variable[res] assign[=] list[[]]
<ast.Try object at 0x7da1b208a980>
if <ast.BoolOp object at 0x7da1b2034850> begin[:]
return[constant[False]] | keyword[def] identifier[_lookup_drill] ( identifier[name] , identifier[rdtype] , identifier[timeout] = keyword[None] , identifier[servers] = keyword[None] , identifier[secure] = keyword[None] ):
literal[string]
identifier[cmd] = literal[string]
keyword[if] identifier[secure] :
identifier[cmd] += literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[rdtype] , identifier[name] )
keyword[if] identifier[servers] :
identifier[cmd] += literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[srv] ) keyword[for] identifier[srv] keyword[in] identifier[servers] ])
identifier[cmd] = identifier[__salt__] [ literal[string] ](
identifier[cmd] , identifier[timeout] = identifier[timeout] ,
identifier[python_shell] = keyword[False] , identifier[output_loglevel] = literal[string] )
keyword[if] identifier[cmd] [ literal[string] ]!= literal[int] :
identifier[log] . identifier[warning] ( literal[string] , identifier[cmd] [ literal[string] ], identifier[cmd] [ literal[string] ])
keyword[return] keyword[False]
identifier[lookup_res] = identifier[iter] ( identifier[cmd] [ literal[string] ]. identifier[splitlines] ())
identifier[validated] = keyword[False]
identifier[res] =[]
keyword[try] :
identifier[line] = literal[string]
keyword[while] literal[string] keyword[not] keyword[in] identifier[line] :
identifier[line] = identifier[next] ( identifier[lookup_res] )
keyword[while] keyword[True] :
identifier[line] = identifier[next] ( identifier[lookup_res] )
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] keyword[not] identifier[line] keyword[or] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[break]
identifier[l_type] , identifier[l_rec] = identifier[line] . identifier[split] ( keyword[None] , literal[int] )[- literal[int] :]
keyword[if] identifier[l_type] == literal[string] keyword[and] identifier[rdtype] != literal[string] :
keyword[continue]
keyword[elif] identifier[l_type] == literal[string] :
identifier[validated] = keyword[True]
keyword[continue]
keyword[elif] identifier[l_type] != identifier[rdtype] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rdtype] ))
identifier[res] . identifier[append] ( identifier[_data_clean] ( identifier[l_rec] ))
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[if] identifier[res] keyword[and] identifier[secure] keyword[and] keyword[not] identifier[validated] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[res] | def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None):
"""
Use drill to lookup addresses
:param name: Name of record to search
:param rdtype: DNS record type
:param timeout: command return timeout
:param servers: [] of servers to use
:return: [] of records or False if error
"""
cmd = 'drill '
if secure:
cmd += '-D -o ad ' # depends on [control=['if'], data=[]]
cmd += '{0} {1} '.format(rdtype, name)
if servers:
cmd += ''.join(['@{0} '.format(srv) for srv in servers]) # depends on [control=['if'], data=[]]
cmd = __salt__['cmd.run_all'](cmd, timeout=timeout, python_shell=False, output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr'])
return False # depends on [control=['if'], data=[]]
lookup_res = iter(cmd['stdout'].splitlines())
validated = False
res = []
try:
line = ''
while 'ANSWER SECTION' not in line:
line = next(lookup_res) # depends on [control=['while'], data=['line']]
while True:
line = next(lookup_res)
line = line.strip()
if not line or line.startswith(';;'):
break # depends on [control=['if'], data=[]]
(l_type, l_rec) = line.split(None, 4)[-2:]
if l_type == 'CNAME' and rdtype != 'CNAME':
continue # depends on [control=['if'], data=[]]
elif l_type == 'RRSIG':
validated = True
continue # depends on [control=['if'], data=[]]
elif l_type != rdtype:
raise ValueError('Invalid DNS type {}'.format(rdtype)) # depends on [control=['if'], data=['rdtype']]
res.append(_data_clean(l_rec)) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]]
if res and secure and (not validated):
return False # depends on [control=['if'], data=[]]
else:
return res |
def parse(rmk: str) -> RemarksData:
    """
    Finds temperature and dewpoint decimal values from the remarks
    """
    data = {}
    for group in rmk.split(' '):
        # A decimal T-group is 'T' followed by 4 or 8 digits,
        # e.g. 'T02330206'. Later groups overwrite earlier ones.
        if len(group) not in (5, 9):
            continue
        if group[0] != 'T' or not group[1:].isdigit():
            continue
        data['temperature_decimal'] = core.make_number(_tdec(group[1:5], None))  # type: ignore
        data['dewpoint_decimal'] = core.make_number(_tdec(group[5:], None))  # type: ignore
    return RemarksData(**data)
constant[
Finds temperature and dewpoint decimal values from the remarks
]
variable[rmkdata] assign[=] dictionary[[], []]
for taget[name[item]] in starred[call[name[rmk].split, parameter[constant[ ]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1596920> begin[:]
call[name[rmkdata]][constant[temperature_decimal]] assign[=] call[name[core].make_number, parameter[call[name[_tdec], parameter[call[name[item]][<ast.Slice object at 0x7da1b1597a90>], constant[None]]]]]
call[name[rmkdata]][constant[dewpoint_decimal]] assign[=] call[name[core].make_number, parameter[call[name[_tdec], parameter[call[name[item]][<ast.Slice object at 0x7da1b1596800>], constant[None]]]]]
return[call[name[RemarksData], parameter[]]] | keyword[def] identifier[parse] ( identifier[rmk] : identifier[str] )-> identifier[RemarksData] :
literal[string]
identifier[rmkdata] ={}
keyword[for] identifier[item] keyword[in] identifier[rmk] . identifier[split] ( literal[string] ):
keyword[if] identifier[len] ( identifier[item] ) keyword[in] [ literal[int] , literal[int] ] keyword[and] identifier[item] [ literal[int] ]== literal[string] keyword[and] identifier[item] [ literal[int] :]. identifier[isdigit] ():
identifier[rmkdata] [ literal[string] ]= identifier[core] . identifier[make_number] ( identifier[_tdec] ( identifier[item] [ literal[int] : literal[int] ], keyword[None] ))
identifier[rmkdata] [ literal[string] ]= identifier[core] . identifier[make_number] ( identifier[_tdec] ( identifier[item] [ literal[int] :], keyword[None] ))
keyword[return] identifier[RemarksData] (** identifier[rmkdata] ) | def parse(rmk: str) -> RemarksData:
"""
Finds temperature and dewpoint decimal values from the remarks
"""
rmkdata = {}
for item in rmk.split(' '):
if len(item) in [5, 9] and item[0] == 'T' and item[1:].isdigit():
rmkdata['temperature_decimal'] = core.make_number(_tdec(item[1:5], None)) # type: ignore
rmkdata['dewpoint_decimal'] = core.make_number(_tdec(item[5:], None)) # type: ignore # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return RemarksData(**rmkdata) |
def field_type(field):
    """
    Template filter that returns field class name (in lower case).
    E.g. if field is CharField then {{ field|field_type }} will
    return 'charfield'.
    """
    inner = getattr(field, 'field', None)
    # Empty string when there is no (truthy) wrapped field object.
    return type(inner).__name__.lower() if inner else ''
constant[
Template filter that returns field class name (in lower case).
E.g. if field is CharField then {{ field|field_type }} will
return 'charfield'.
]
if <ast.BoolOp object at 0x7da18dc9b700> begin[:]
return[call[name[field].field.__class__.__name__.lower, parameter[]]]
return[constant[]] | keyword[def] identifier[field_type] ( identifier[field] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[field] , literal[string] ) keyword[and] identifier[field] . identifier[field] :
keyword[return] identifier[field] . identifier[field] . identifier[__class__] . identifier[__name__] . identifier[lower] ()
keyword[return] literal[string] | def field_type(field):
"""
Template filter that returns field class name (in lower case).
E.g. if field is CharField then {{ field|field_type }} will
return 'charfield'.
"""
if hasattr(field, 'field') and field.field:
return field.field.__class__.__name__.lower() # depends on [control=['if'], data=[]]
return '' |
def run(self, asynchronous=False):
    '''
    Load and start all available api modules
    '''
    log.debug('Process Manager starting!')
    appendproctitle(self.name)

    # Install our child-killing handlers only where no handler exists,
    # so we never clobber one installed by the embedding application.
    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        signal.signal(signal.SIGTERM, self.kill_children)
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        signal.signal(signal.SIGINT, self.kill_children)

    while True:
        log.trace('Process manager iteration')
        try:
            # Restart anything that died while we were sleeping.
            self.check_children()
            # Event-based subprocess management (os.wait) was removed
            # here because it conflicts with the bookkeeping done by the
            # `multiprocessing` package. See #35480 for details.
            if asynchronous:
                yield gen.sleep(10)
            else:
                time.sleep(10)
            if not self._process_map:
                break
        except OSError:
            # Raised when a signal handler (SIGTERM) fires during os.wait.
            break
        except IOError as err:
            # time.sleep() on Windows may raise IOError with EINTR (4);
            # anything else is a real error.
            if err.errno != errno.EINTR:
                raise
            break
constant[
Load and start all available api modules
]
call[name[log].debug, parameter[constant[Process Manager starting!]]]
call[name[appendproctitle], parameter[name[self].name]]
if compare[call[name[signal].getsignal, parameter[name[signal].SIGTERM]] is name[signal].SIG_DFL] begin[:]
call[name[signal].signal, parameter[name[signal].SIGTERM, name[self].kill_children]]
if compare[call[name[signal].getsignal, parameter[name[signal].SIGINT]] is name[signal].SIG_DFL] begin[:]
call[name[signal].signal, parameter[name[signal].SIGINT, name[self].kill_children]]
while constant[True] begin[:]
call[name[log].trace, parameter[constant[Process manager iteration]]]
<ast.Try object at 0x7da18dc05de0> | keyword[def] identifier[run] ( identifier[self] , identifier[asynchronous] = keyword[False] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[appendproctitle] ( identifier[self] . identifier[name] )
keyword[if] identifier[signal] . identifier[getsignal] ( identifier[signal] . identifier[SIGTERM] ) keyword[is] identifier[signal] . identifier[SIG_DFL] :
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGTERM] , identifier[self] . identifier[kill_children] )
keyword[if] identifier[signal] . identifier[getsignal] ( identifier[signal] . identifier[SIGINT] ) keyword[is] identifier[signal] . identifier[SIG_DFL] :
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[self] . identifier[kill_children] )
keyword[while] keyword[True] :
identifier[log] . identifier[trace] ( literal[string] )
keyword[try] :
identifier[self] . identifier[check_children] ()
keyword[if] identifier[asynchronous] :
keyword[yield] identifier[gen] . identifier[sleep] ( literal[int] )
keyword[else] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] keyword[not] identifier[self] . identifier[_process_map] :
keyword[break]
keyword[except] identifier[OSError] :
keyword[break]
keyword[except] identifier[IOError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] != identifier[errno] . identifier[EINTR] :
keyword[raise]
keyword[break] | def run(self, asynchronous=False):
"""
Load and start all available api modules
"""
log.debug('Process Manager starting!')
appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There are no SIGTERM handlers installed, install ours
signal.signal(signal.SIGTERM, self.kill_children) # depends on [control=['if'], data=[]]
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There are no SIGINT handlers installed, install ours
signal.signal(signal.SIGINT, self.kill_children) # depends on [control=['if'], data=[]]
while True:
log.trace('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
# The event-based subprocesses management code was removed from here
# because os.wait() conflicts with the subprocesses management logic
# implemented in `multiprocessing` package. See #35480 for details.
if asynchronous:
yield gen.sleep(10) # depends on [control=['if'], data=[]]
else:
time.sleep(10)
if not self._process_map:
break # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break # depends on [control=['except'], data=[]]
except IOError as exc:
# IOError with errno of EINTR (4) may be raised
# when using time.sleep() on Windows.
if exc.errno != errno.EINTR:
raise # depends on [control=['if'], data=[]]
break # depends on [control=['except'], data=['exc']] # depends on [control=['while'], data=[]] |
def to_dataframe(cls, network=None, phases=None, join=False, delim=' | '):
    r"""
    Convert the Network (and optionally Phase) data to Pandas DataFrames.
    Parameters
    ----------
    network: OpenPNM Network Object
        The network containing the data to be stored
    phases : list of OpenPNM Phase Objects
        The data on each supplied phase will be added to DataFrame
    join : boolean
        If ``False`` (default), two DataFrames are returned with *pore*
        data in one, and *throat* data in the other.  If ``True`` the pore
        and throat data are combined into a single DataFrame.  This can be
        problematic as it will put NaNs into all the *pore* columns which
        are shorter than the *throat* columns.
    delim : str
        Delimiter used when flattening nested property names into
        column names.
    Returns
    -------
    Pandas ``DataFrame`` object containing property and label data in each
    column.  If ``join`` was False (default) the two DataFrames are
    returned in a named tuple, or else a single DataFrame with pore and
    throat data in the same file, despite the column length being
    different.
    """
    # Avoid the mutable-default-argument pitfall; [] preserves the old
    # behavior for callers that pass nothing.
    if phases is None:
        phases = []
    project, network, phases = cls._parse_args(network=network,
                                               phases=phases)

    def _expand_multicolumn(data, length):
        # Replace any non-1d array with one column per component, named
        # 'prop[i]', so every DataFrame column ends up 1d.
        for key in list(data.keys()):
            if sp.shape(data[key]) != (length,):
                arr = data.pop(key)
                cols = sp.split(arr, arr.shape[1], axis=1)
                data.update({key + '[' + str(i) + ']': col.squeeze()
                             for i, col in enumerate(cols)})

    # Initialize pore and throat data dictionary using Dict class
    pdata = Dict.to_dict(network=network, phases=phases, element='pore',
                         interleave=True, flatten=True,
                         categorize_by=['object'])
    tdata = Dict.to_dict(network=network, phases=phases, element='throat',
                         interleave=True, flatten=True,
                         categorize_by=['object'])
    pdata = FlatDict(pdata, delimiter=delim)
    tdata = FlatDict(tdata, delimiter=delim)

    # Scan data and convert non-1d arrays to multiple columns
    _expand_multicolumn(pdata, network[0].Np)
    _expand_multicolumn(tdata, network[0].Nt)

    # Convert sanitized dictionaries to DataFrames
    pdata = pd.DataFrame(sanitize_dict(pdata))
    tdata = pd.DataFrame(sanitize_dict(tdata))

    # Prepare DataFrames to be returned
    if join:
        data = tdata.join(other=pdata, how='left')
    else:
        nt = namedtuple('dataframes', ('pore', 'throat'))
        data = nt(pore=pdata, throat=tdata)
    return data
constant[
Convert the Network (and optionally Phase) data to Pandas DataFrames.
Parameters
----------
network: OpenPNM Network Object
The network containing the data to be stored
phases : list of OpenPNM Phase Objects
The data on each supplied phase will be added to DataFrame
join : boolean
If ``False`` (default), two DataFrames are returned with *pore*
data in one, and *throat* data in the other. If ``True`` the pore
and throat data are combined into a single DataFrame. This can be
problematic as it will put NaNs into all the *pore* columns which
are shorter than the *throat* columns.
Returns
-------
Pandas ``DataFrame`` object containing property and label data in each
column. If ``join`` was False (default) the two DataFrames are
returned i a named tuple, or else a single DataFrame with pore and
throat data in the same file, despite the column length being
different.
]
<ast.Tuple object at 0x7da20cabd480> assign[=] call[name[cls]._parse_args, parameter[]]
variable[pdata] assign[=] call[name[Dict].to_dict, parameter[]]
variable[tdata] assign[=] call[name[Dict].to_dict, parameter[]]
variable[pdata] assign[=] call[name[FlatDict], parameter[name[pdata]]]
variable[tdata] assign[=] call[name[FlatDict], parameter[name[tdata]]]
for taget[name[key]] in starred[call[name[list], parameter[call[name[pdata].keys, parameter[]]]]] begin[:]
if compare[call[name[sp].shape, parameter[call[name[pdata]][name[key]]]] not_equal[!=] tuple[[<ast.Attribute object at 0x7da2054a6ad0>]]] begin[:]
variable[arr] assign[=] call[name[pdata].pop, parameter[name[key]]]
variable[tmp] assign[=] call[name[sp].split, parameter[name[arr], call[name[arr].shape][constant[1]]]]
variable[cols] assign[=] call[name[range], parameter[call[name[len], parameter[name[tmp]]]]]
call[name[pdata].update, parameter[<ast.DictComp object at 0x7da2054a7fd0>]]
for taget[name[key]] in starred[call[name[list], parameter[call[name[tdata].keys, parameter[]]]]] begin[:]
if compare[call[name[sp].shape, parameter[call[name[tdata]][name[key]]]] not_equal[!=] tuple[[<ast.Attribute object at 0x7da2054a7370>]]] begin[:]
variable[arr] assign[=] call[name[tdata].pop, parameter[name[key]]]
variable[tmp] assign[=] call[name[sp].split, parameter[name[arr], call[name[arr].shape][constant[1]]]]
variable[cols] assign[=] call[name[range], parameter[call[name[len], parameter[name[tmp]]]]]
call[name[tdata].update, parameter[<ast.DictComp object at 0x7da2054a4be0>]]
variable[pdata] assign[=] call[name[pd].DataFrame, parameter[call[name[sanitize_dict], parameter[name[pdata]]]]]
variable[tdata] assign[=] call[name[pd].DataFrame, parameter[call[name[sanitize_dict], parameter[name[tdata]]]]]
if name[join] begin[:]
variable[data] assign[=] call[name[tdata].join, parameter[]]
return[name[data]] | keyword[def] identifier[to_dataframe] ( identifier[cls] , identifier[network] = keyword[None] , identifier[phases] =[], identifier[join] = keyword[False] , identifier[delim] = literal[string] ):
literal[string]
identifier[project] , identifier[network] , identifier[phases] = identifier[cls] . identifier[_parse_args] ( identifier[network] = identifier[network] ,
identifier[phases] = identifier[phases] )
identifier[pdata] = identifier[Dict] . identifier[to_dict] ( identifier[network] = identifier[network] , identifier[phases] = identifier[phases] , identifier[element] = literal[string] ,
identifier[interleave] = keyword[True] , identifier[flatten] = keyword[True] ,
identifier[categorize_by] =[ literal[string] ])
identifier[tdata] = identifier[Dict] . identifier[to_dict] ( identifier[network] = identifier[network] , identifier[phases] = identifier[phases] , identifier[element] = literal[string] ,
identifier[interleave] = keyword[True] , identifier[flatten] = keyword[True] ,
identifier[categorize_by] =[ literal[string] ])
identifier[pdata] = identifier[FlatDict] ( identifier[pdata] , identifier[delimiter] = identifier[delim] )
identifier[tdata] = identifier[FlatDict] ( identifier[tdata] , identifier[delimiter] = identifier[delim] )
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[pdata] . identifier[keys] ()):
keyword[if] identifier[sp] . identifier[shape] ( identifier[pdata] [ identifier[key] ])!=( identifier[network] [ literal[int] ]. identifier[Np] ,):
identifier[arr] = identifier[pdata] . identifier[pop] ( identifier[key] )
identifier[tmp] = identifier[sp] . identifier[split] ( identifier[arr] , identifier[arr] . identifier[shape] [ literal[int] ], identifier[axis] = literal[int] )
identifier[cols] = identifier[range] ( identifier[len] ( identifier[tmp] ))
identifier[pdata] . identifier[update] ({ identifier[key] + literal[string] + identifier[str] ( identifier[i] )+ literal[string] : identifier[tmp] [ identifier[i] ]. identifier[squeeze] ()
keyword[for] identifier[i] keyword[in] identifier[cols] })
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[tdata] . identifier[keys] ()):
keyword[if] identifier[sp] . identifier[shape] ( identifier[tdata] [ identifier[key] ])!=( identifier[network] [ literal[int] ]. identifier[Nt] ,):
identifier[arr] = identifier[tdata] . identifier[pop] ( identifier[key] )
identifier[tmp] = identifier[sp] . identifier[split] ( identifier[arr] , identifier[arr] . identifier[shape] [ literal[int] ], identifier[axis] = literal[int] )
identifier[cols] = identifier[range] ( identifier[len] ( identifier[tmp] ))
identifier[tdata] . identifier[update] ({ identifier[key] + literal[string] + identifier[str] ( identifier[i] )+ literal[string] : identifier[tmp] [ identifier[i] ]. identifier[squeeze] ()
keyword[for] identifier[i] keyword[in] identifier[cols] })
identifier[pdata] = identifier[pd] . identifier[DataFrame] ( identifier[sanitize_dict] ( identifier[pdata] ))
identifier[tdata] = identifier[pd] . identifier[DataFrame] ( identifier[sanitize_dict] ( identifier[tdata] ))
keyword[if] identifier[join] :
identifier[data] = identifier[tdata] . identifier[join] ( identifier[other] = identifier[pdata] , identifier[how] = literal[string] )
keyword[else] :
identifier[nt] = identifier[namedtuple] ( literal[string] ,( literal[string] , literal[string] ))
identifier[data] = identifier[nt] ( identifier[pore] = identifier[pdata] , identifier[throat] = identifier[tdata] )
keyword[return] identifier[data] | def to_dataframe(cls, network=None, phases=[], join=False, delim=' | '):
"""
Convert the Network (and optionally Phase) data to Pandas DataFrames.
Parameters
----------
network: OpenPNM Network Object
The network containing the data to be stored
phases : list of OpenPNM Phase Objects
The data on each supplied phase will be added to DataFrame
join : boolean
If ``False`` (default), two DataFrames are returned with *pore*
data in one, and *throat* data in the other. If ``True`` the pore
and throat data are combined into a single DataFrame. This can be
problematic as it will put NaNs into all the *pore* columns which
are shorter than the *throat* columns.
Returns
-------
Pandas ``DataFrame`` object containing property and label data in each
column. If ``join`` was False (default) the two DataFrames are
returned i a named tuple, or else a single DataFrame with pore and
throat data in the same file, despite the column length being
different.
"""
(project, network, phases) = cls._parse_args(network=network, phases=phases)
# Initialize pore and throat data dictionary using Dict class
pdata = Dict.to_dict(network=network, phases=phases, element='pore', interleave=True, flatten=True, categorize_by=['object'])
tdata = Dict.to_dict(network=network, phases=phases, element='throat', interleave=True, flatten=True, categorize_by=['object'])
pdata = FlatDict(pdata, delimiter=delim)
tdata = FlatDict(tdata, delimiter=delim)
# Scan data and convert non-1d arrays to multiple columns
for key in list(pdata.keys()):
if sp.shape(pdata[key]) != (network[0].Np,):
arr = pdata.pop(key)
tmp = sp.split(arr, arr.shape[1], axis=1)
cols = range(len(tmp))
pdata.update({key + '[' + str(i) + ']': tmp[i].squeeze() for i in cols}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
for key in list(tdata.keys()):
if sp.shape(tdata[key]) != (network[0].Nt,):
arr = tdata.pop(key)
tmp = sp.split(arr, arr.shape[1], axis=1)
cols = range(len(tmp))
tdata.update({key + '[' + str(i) + ']': tmp[i].squeeze() for i in cols}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# Convert sanitized dictionaries to DataFrames
pdata = pd.DataFrame(sanitize_dict(pdata))
tdata = pd.DataFrame(sanitize_dict(tdata))
# Prepare DataFrames to be returned
if join:
data = tdata.join(other=pdata, how='left') # depends on [control=['if'], data=[]]
else:
nt = namedtuple('dataframes', ('pore', 'throat'))
data = nt(pore=pdata, throat=tdata)
return data |
def fetch_source(self) -> None:
    """Download and unpack the tar archive containing the library's source code.

    The archive at ``self.src_tar_gz_url`` is streamed into a temporary
    file and extracted under ``_DEPS_PATH``.

    Raises:
        requests.HTTPError: if the download returns an HTTP error status.
    """
    import requests  # Do not import at the top so that this file can be imported by setup.py
    with TemporaryFile() as temp_file:
        # Download the source archive; fail loudly on an HTTP error instead
        # of silently writing an error page and trying to untar it.
        request = requests.get(self.src_tar_gz_url)
        request.raise_for_status()
        temp_file.write(request.content)
        # Rewind the file so tarfile reads from the beginning
        temp_file.seek(0)
        # Extract the content of the archive; close the tar handle when done.
        # NOTE(review): extractall() on a downloaded archive is vulnerable to
        # path traversal if the URL is untrusted — acceptable only because
        # src_tar_gz_url is expected to point at a trusted release archive.
        with tarfile.open(fileobj=temp_file) as tar_file:
            tar_file.extractall(path=_DEPS_PATH)
constant[Download the tar archive that contains the source code for the library.
]
import module[requests]
with call[name[TemporaryFile], parameter[]] begin[:]
variable[request] assign[=] call[name[requests].get, parameter[name[self].src_tar_gz_url]]
call[name[temp_file].write, parameter[name[request].content]]
call[name[temp_file].seek, parameter[constant[0]]]
variable[tar_file] assign[=] call[name[tarfile].open, parameter[]]
call[name[tar_file].extractall, parameter[]] | keyword[def] identifier[fetch_source] ( identifier[self] )-> keyword[None] :
literal[string]
keyword[import] identifier[requests]
keyword[with] identifier[TemporaryFile] () keyword[as] identifier[temp_file] :
identifier[request] = identifier[requests] . identifier[get] ( identifier[self] . identifier[src_tar_gz_url] )
identifier[temp_file] . identifier[write] ( identifier[request] . identifier[content] )
identifier[temp_file] . identifier[seek] ( literal[int] )
identifier[tar_file] = identifier[tarfile] . identifier[open] ( identifier[fileobj] = identifier[temp_file] )
identifier[tar_file] . identifier[extractall] ( identifier[path] = identifier[_DEPS_PATH] ) | def fetch_source(self) -> None:
"""Download the tar archive that contains the source code for the library.
"""
import requests # Do not import at the top that this file can be imported by setup.py
with TemporaryFile() as temp_file:
# Download the source archive
request = requests.get(self.src_tar_gz_url)
temp_file.write(request.content)
# Rewind the file
temp_file.seek(0)
# Extract the content of the archive
tar_file = tarfile.open(fileobj=temp_file)
tar_file.extractall(path=_DEPS_PATH) # depends on [control=['with'], data=['temp_file']] |
def decline_weak_masculine_noun(ns: str, gs: str, np: str):
    """
    Print the eight case forms of a weak masculine noun.

    Order printed: nominative, accusative, dative, genitive singular,
    then nominative, accusative, dative, genitive plural.

    >>> decline_weak_masculine_noun("goði", "goða", "goðar")
    goði
    goða
    goða
    goða
    goðar
    goða
    goðum
    goða
    >>> decline_weak_masculine_noun("hluti", "hluta", "hlutar")
    hluti
    hluta
    hluta
    hluta
    hlutar
    hluta
    hlutum
    hluta
    >>> decline_weak_masculine_noun("arfi", "arfa", "arfar")
    arfi
    arfa
    arfa
    arfa
    arfar
    arfa
    örfum
    arfa
    >>> decline_weak_masculine_noun("bryti", "bryta", "brytjar")
    bryti
    bryta
    bryta
    bryta
    brytjar
    brytja
    brytjum
    brytja
    >>> decline_weak_masculine_noun("vöðvi", "vöðva", "vöðvar")
    vöðvi
    vöðva
    vöðva
    vöðva
    vöðvar
    vöðva
    vöðum
    vöðva

    :param ns: nominative singular
    :param gs: genitive singular
    :param np: nominative plural
    :return:
    """
    # Singular: the nominative keeps its own form; accusative, dative and
    # genitive all share the genitive-singular form.
    for singular_form in (ns, gs, gs, gs):
        print(singular_form)
    # Nominative plural is given directly.
    print(np)
    # Accusative plural drops the final -r of the nominative plural.
    print(np[:-1])
    # Dative plural: strip the plural ending (a stem-final -v- also
    # disappears before -um), apply u-umlaut, then append -um.
    if len(np) > 3 and np[-3] == "v":
        dative_stem = np[:-3]
    else:
        dative_stem = np[:-2]
    print(apply_u_umlaut(dative_stem) + "um")
    # Genitive plural is identical to the accusative plural.
    print(np[:-1])
constant[
Gives the full declension of weak masculine nouns.
>>> decline_weak_masculine_noun("goði", "goða", "goðar")
goði
goða
goða
goða
goðar
goða
goðum
goða
>>> decline_weak_masculine_noun("hluti", "hluta", "hlutar")
hluti
hluta
hluta
hluta
hlutar
hluta
hlutum
hluta
>>> decline_weak_masculine_noun("arfi", "arfa", "arfar")
arfi
arfa
arfa
arfa
arfar
arfa
örfum
arfa
>>> decline_weak_masculine_noun("bryti", "bryta", "brytjar")
bryti
bryta
bryta
bryta
brytjar
brytja
brytjum
brytja
>>> decline_weak_masculine_noun("vöðvi", "vöðva", "vöðvar")
vöðvi
vöðva
vöðva
vöðva
vöðvar
vöðva
vöðum
vöðva
The main pattern is:
:param ns: nominative singular
:param gs: genitive singular
:param np: nominative plural
:return:
]
call[name[print], parameter[name[ns]]]
call[name[print], parameter[name[gs]]]
call[name[print], parameter[name[gs]]]
call[name[print], parameter[name[gs]]]
call[name[print], parameter[name[np]]]
call[name[print], parameter[call[name[np]][<ast.Slice object at 0x7da20c6c5330>]]]
if <ast.BoolOp object at 0x7da20c6c6cb0> begin[:]
call[name[print], parameter[binary_operation[call[name[apply_u_umlaut], parameter[call[name[np]][<ast.Slice object at 0x7da20c6c59f0>]]] + constant[um]]]]
call[name[print], parameter[call[name[np]][<ast.Slice object at 0x7da2045650f0>]]] | keyword[def] identifier[decline_weak_masculine_noun] ( identifier[ns] : identifier[str] , identifier[gs] : identifier[str] , identifier[np] : identifier[str] ):
literal[string]
identifier[print] ( identifier[ns] )
identifier[print] ( identifier[gs] )
identifier[print] ( identifier[gs] )
identifier[print] ( identifier[gs] )
identifier[print] ( identifier[np] )
identifier[print] ( identifier[np] [:- literal[int] ])
keyword[if] identifier[len] ( identifier[np] )> literal[int] keyword[and] identifier[np] [- literal[int] ]== literal[string] :
identifier[print] ( identifier[apply_u_umlaut] ( identifier[np] [:- literal[int] ])+ literal[string] )
keyword[else] :
identifier[print] ( identifier[apply_u_umlaut] ( identifier[np] [:- literal[int] ])+ literal[string] )
identifier[print] ( identifier[np] [:- literal[int] ]) | def decline_weak_masculine_noun(ns: str, gs: str, np: str):
"""
Gives the full declension of weak masculine nouns.
>>> decline_weak_masculine_noun("goði", "goða", "goðar")
goði
goða
goða
goða
goðar
goða
goðum
goða
>>> decline_weak_masculine_noun("hluti", "hluta", "hlutar")
hluti
hluta
hluta
hluta
hlutar
hluta
hlutum
hluta
>>> decline_weak_masculine_noun("arfi", "arfa", "arfar")
arfi
arfa
arfa
arfa
arfar
arfa
örfum
arfa
>>> decline_weak_masculine_noun("bryti", "bryta", "brytjar")
bryti
bryta
bryta
bryta
brytjar
brytja
brytjum
brytja
>>> decline_weak_masculine_noun("vöðvi", "vöðva", "vöðvar")
vöðvi
vöðva
vöðva
vöðva
vöðvar
vöðva
vöðum
vöðva
The main pattern is:
:param ns: nominative singular
:param gs: genitive singular
:param np: nominative plural
:return:
"""
# nominative singular
print(ns)
# accusative singular
print(gs)
# dative singular
print(gs)
# genitive singular
print(gs)
# nominative plural
print(np)
# accusative plural
print(np[:-1])
# dative plural
if len(np) > 3 and np[-3] == 'v':
print(apply_u_umlaut(np[:-3]) + 'um') # depends on [control=['if'], data=[]]
else:
print(apply_u_umlaut(np[:-2]) + 'um')
# genitive plural
print(np[:-1]) |
def getTraitCovarStdErrors(self,term_i):
    """
    Returns standard errors on trait covariances from term_i
    (for the covariance estimate \see getTraitCovar).

    Uses the Laplace approximation of the parameter covariance and the
    delta method to propagate parameter uncertainty to the trait
    covariance entries.

    Args:
        term_i: index of the variance-component term of interest
    Returns:
        scalar (P==1) or PxP array of standard errors
    """
    assert self.init, 'GP not initialised'
    assert self.fast==False, 'Not supported for fast implementation'
    if self.P==1:
        # Single-trait case: variance is scale**2, so by the delta method
        # Var(scale**2) ~= (2*scale)**2 * Var(scale).
        out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i]
    else:
        C = self.vd.getTerm(term_i).getTraitCovar()
        n_params = C.getNumberParams()
        # Locate the slice of the Laplace covariance matrix that holds
        # the parameters belonging to term_i.
        par_index = 0
        # NOTE(review): this offset computation looks suspicious — it
        # iterates range(term_i-1) (skipping one preceding term) and
        # always queries getTerm(term_i) instead of getTerm(term).
        # Verify against the intended parameter layout before relying
        # on multi-term results.
        for term in range(term_i-1):
            par_index += self.vd.getTerm(term_i).getNumberScales()
        Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)]
        out = sp.zeros((self.P,self.P))
        # Delta method: accumulate squared gradients weighted by the
        # parameter (co)variances, including cross terms.
        for param_i in range(n_params):
            out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i]
            for param_j in range(param_i):
                out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j]
        out = sp.sqrt(out)
    return out
constant[
Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar)
Args:
term_i: index of the term we are interested in
]
assert[name[self].init]
assert[compare[name[self].fast equal[==] constant[False]]]
if compare[name[self].P equal[==] constant[1]] begin[:]
variable[out] assign[=] binary_operation[binary_operation[binary_operation[constant[2] * call[call[name[self].getScales, parameter[]]][name[term_i]]] ** constant[2]] * call[call[name[self]._getLaplaceCovar, parameter[]]][tuple[[<ast.Name object at 0x7da1b0ae1ed0>, <ast.Name object at 0x7da1b0ae09a0>]]]]
variable[out] assign[=] call[name[sp].sqrt, parameter[name[out]]]
return[name[out]] | keyword[def] identifier[getTraitCovarStdErrors] ( identifier[self] , identifier[term_i] ):
literal[string]
keyword[assert] identifier[self] . identifier[init] , literal[string]
keyword[assert] identifier[self] . identifier[fast] == keyword[False] , literal[string]
keyword[if] identifier[self] . identifier[P] == literal[int] :
identifier[out] =( literal[int] * identifier[self] . identifier[getScales] ()[ identifier[term_i] ])** literal[int] * identifier[self] . identifier[_getLaplaceCovar] ()[ identifier[term_i] , identifier[term_i] ]
keyword[else] :
identifier[C] = identifier[self] . identifier[vd] . identifier[getTerm] ( identifier[term_i] ). identifier[getTraitCovar] ()
identifier[n_params] = identifier[C] . identifier[getNumberParams] ()
identifier[par_index] = literal[int]
keyword[for] identifier[term] keyword[in] identifier[range] ( identifier[term_i] - literal[int] ):
identifier[par_index] += identifier[self] . identifier[vd] . identifier[getTerm] ( identifier[term_i] ). identifier[getNumberScales] ()
identifier[Sigma1] = identifier[self] . identifier[_getLaplaceCovar] ()[ identifier[par_index] :( identifier[par_index] + identifier[n_params] ),:][:, identifier[par_index] :( identifier[par_index] + identifier[n_params] )]
identifier[out] = identifier[sp] . identifier[zeros] (( identifier[self] . identifier[P] , identifier[self] . identifier[P] ))
keyword[for] identifier[param_i] keyword[in] identifier[range] ( identifier[n_params] ):
identifier[out] += identifier[C] . identifier[Kgrad_param] ( identifier[param_i] )** literal[int] * identifier[Sigma1] [ identifier[param_i] , identifier[param_i] ]
keyword[for] identifier[param_j] keyword[in] identifier[range] ( identifier[param_i] ):
identifier[out] += literal[int] * identifier[abs] ( identifier[C] . identifier[Kgrad_param] ( identifier[param_i] )* identifier[C] . identifier[Kgrad_param] ( identifier[param_j] ))* identifier[Sigma1] [ identifier[param_i] , identifier[param_j] ]
identifier[out] = identifier[sp] . identifier[sqrt] ( identifier[out] )
keyword[return] identifier[out] | def getTraitCovarStdErrors(self, term_i):
"""
Returns standard errors on trait covariances from term_i (for the covariance estimate \\see getTraitCovar)
Args:
term_i: index of the term we are interested in
"""
assert self.init, 'GP not initialised'
assert self.fast == False, 'Not supported for fast implementation'
if self.P == 1:
out = (2 * self.getScales()[term_i]) ** 2 * self._getLaplaceCovar()[term_i, term_i] # depends on [control=['if'], data=[]]
else:
C = self.vd.getTerm(term_i).getTraitCovar()
n_params = C.getNumberParams()
par_index = 0
for term in range(term_i - 1):
par_index += self.vd.getTerm(term_i).getNumberScales() # depends on [control=['for'], data=[]]
Sigma1 = self._getLaplaceCovar()[par_index:par_index + n_params, :][:, par_index:par_index + n_params]
out = sp.zeros((self.P, self.P))
for param_i in range(n_params):
out += C.Kgrad_param(param_i) ** 2 * Sigma1[param_i, param_i]
for param_j in range(param_i):
out += 2 * abs(C.Kgrad_param(param_i) * C.Kgrad_param(param_j)) * Sigma1[param_i, param_j] # depends on [control=['for'], data=['param_j']] # depends on [control=['for'], data=['param_i']]
out = sp.sqrt(out)
return out |
def _view_removed(self):
    """Show the packages matched for removal and return the final list.

    With "--checklist" in the extra options a dialog lets the user
    deselect packages; otherwise each match is printed with its size
    and a removal summary is shown.
    """
    print("\nPackages with name matching [ {0}{1}{2} ]\n".format(
        self.meta.color["CYAN"], ", ".join(self.binary),
        self.meta.color["ENDC"]))
    removed, packages = self._get_removed()
    if packages and "--checklist" in self.extra:
        # Interactive mode: start from an empty selection and keep only
        # what the user leaves checked in the dialog.
        removed = []
        text = "Press 'spacebar' to unchoose packages from the remove"
        backtitle = "{0} {1}".format(self.meta.__all__,
                                     self.meta.__version__)
        chosen = DialogUtil(packages, text, " Remove ", backtitle,
                            True).checklist()
        if chosen:
            removed = [split_package(pkg_name)[0] for pkg_name in chosen]
            self.meta.default_answer = "y"
    else:
        # Non-interactive mode: report each package and its size.
        for _removed_name, pkg in zip(removed, packages):
            print("[ {0}delete{1} ] --> {2}".format(
                self.meta.color["RED"], self.meta.color["ENDC"], pkg))
            self._sizes(pkg)
        self._calc_sizes()
        self._remove_summary()
    return removed
constant[View packages before removed
]
call[name[print], parameter[call[constant[
Packages with name matching [ {0}{1}{2} ]
].format, parameter[call[name[self].meta.color][constant[CYAN]], call[constant[, ].join, parameter[name[self].binary]], call[name[self].meta.color][constant[ENDC]]]]]]
<ast.Tuple object at 0x7da18f723820> assign[=] call[name[self]._get_removed, parameter[]]
if <ast.BoolOp object at 0x7da18f7224a0> begin[:]
variable[removed] assign[=] list[[]]
variable[text] assign[=] constant[Press 'spacebar' to unchoose packages from the remove]
variable[backtitle] assign[=] call[constant[{0} {1}].format, parameter[name[self].meta.__all__, name[self].meta.__version__]]
variable[status] assign[=] constant[True]
variable[pkgs] assign[=] call[call[name[DialogUtil], parameter[name[packages], name[text], constant[ Remove ], name[backtitle], name[status]]].checklist, parameter[]]
if name[pkgs] begin[:]
for taget[name[rmv]] in starred[name[pkgs]] begin[:]
call[name[removed].append, parameter[call[call[name[split_package], parameter[name[rmv]]]][constant[0]]]]
name[self].meta.default_answer assign[=] constant[y]
return[name[removed]] | keyword[def] identifier[_view_removed] ( identifier[self] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] (
identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], literal[string] . identifier[join] ( identifier[self] . identifier[binary] ),
identifier[self] . identifier[meta] . identifier[color] [ literal[string] ]))
identifier[removed] , identifier[packages] = identifier[self] . identifier[_get_removed] ()
keyword[if] identifier[packages] keyword[and] literal[string] keyword[in] identifier[self] . identifier[extra] :
identifier[removed] =[]
identifier[text] = literal[string]
identifier[backtitle] = literal[string] . identifier[format] ( identifier[self] . identifier[meta] . identifier[__all__] ,
identifier[self] . identifier[meta] . identifier[__version__] )
identifier[status] = keyword[True]
identifier[pkgs] = identifier[DialogUtil] ( identifier[packages] , identifier[text] , literal[string] , identifier[backtitle] ,
identifier[status] ). identifier[checklist] ()
keyword[if] identifier[pkgs] :
keyword[for] identifier[rmv] keyword[in] identifier[pkgs] :
identifier[removed] . identifier[append] ( identifier[split_package] ( identifier[rmv] )[ literal[int] ])
identifier[self] . identifier[meta] . identifier[default_answer] = literal[string]
keyword[else] :
keyword[for] identifier[rmv] , identifier[pkg] keyword[in] identifier[zip] ( identifier[removed] , identifier[packages] ):
identifier[print] ( literal[string] . identifier[format] (
identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[pkg] ))
identifier[self] . identifier[_sizes] ( identifier[pkg] )
identifier[self] . identifier[_calc_sizes] ()
identifier[self] . identifier[_remove_summary] ()
keyword[return] identifier[removed] | def _view_removed(self):
"""View packages before removed
"""
print('\nPackages with name matching [ {0}{1}{2} ]\n'.format(self.meta.color['CYAN'], ', '.join(self.binary), self.meta.color['ENDC']))
(removed, packages) = self._get_removed()
if packages and '--checklist' in self.extra:
removed = []
text = "Press 'spacebar' to unchoose packages from the remove"
backtitle = '{0} {1}'.format(self.meta.__all__, self.meta.__version__)
status = True
pkgs = DialogUtil(packages, text, ' Remove ', backtitle, status).checklist()
if pkgs:
for rmv in pkgs:
removed.append(split_package(rmv)[0]) # depends on [control=['for'], data=['rmv']]
self.meta.default_answer = 'y' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
for (rmv, pkg) in zip(removed, packages):
print('[ {0}delete{1} ] --> {2}'.format(self.meta.color['RED'], self.meta.color['ENDC'], pkg))
self._sizes(pkg) # depends on [control=['for'], data=[]]
self._calc_sizes()
self._remove_summary()
return removed |
def _bam_coverage(name, bam_input, data):
    """Run bamCoverage from deeptools to create a bigWig coverage track.

    Args:
        name: sample name, used for the output file name and log message.
        bam_input: path to the input BAM file.
        data: bcbio sample dictionary (provides reference file, core count
            and per-program resource configuration).

    Returns:
        Path to ``<name>.bw`` next to ``bam_input``, or ``None`` when the
        bamCoverage executable is not available.
    """
    cmd = ("{bam_coverage} -b {bam_input} -o {bw_output} "
           "--binSize 20 --effectiveGenomeSize {size} "
           "--smoothLength 60 --extendReads 150 --centerReads -p {cores}")
    size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
    cores = dd.get_num_cores(data)
    try:
        bam_coverage = config_utils.get_program("bamCoverage", data)
    except config_utils.CmdNotFound:
        logger.info("No bamCoverage found, skipping bamCoverage.")
        return None
    # Append any user-configured extra options for bamCoverage.
    resources = config_utils.get_resources("bamCoverage", data["config"])
    if resources:
        options = resources.get("options")
        if options:
            cmd += " %s" % " ".join([str(x) for x in options])
    bw_output = os.path.join(os.path.dirname(bam_input), "%s.bw" % name)
    if utils.file_exists(bw_output):
        return bw_output
    with file_transaction(bw_output) as out_tx:
        # Write to the transactional path so an interrupted run does not
        # leave a truncated bw_output behind (the original formatted the
        # command with bw_output directly, bypassing the transaction).
        run_cmd = cmd.format(**dict(locals(), bw_output=out_tx))
        do.run(run_cmd, "Run bamCoverage in %s" % name)
    return bw_output
constant[Run bamCoverage from deeptools]
variable[cmd] assign[=] constant[{bam_coverage} -b {bam_input} -o {bw_output} --binSize 20 --effectiveGenomeSize {size} --smoothLength 60 --extendReads 150 --centerReads -p {cores}]
variable[size] assign[=] call[name[bam].fasta.total_sequence_length, parameter[call[name[dd].get_ref_file, parameter[name[data]]]]]
variable[cores] assign[=] call[name[dd].get_num_cores, parameter[name[data]]]
<ast.Try object at 0x7da20c6ab220>
variable[resources] assign[=] call[name[config_utils].get_resources, parameter[constant[bamCoverage], call[name[data]][constant[config]]]]
if name[resources] begin[:]
variable[options] assign[=] call[name[resources].get, parameter[constant[options]]]
if name[options] begin[:]
<ast.AugAssign object at 0x7da1b1987b80>
variable[bw_output] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[bam_input]]], binary_operation[constant[%s.bw] <ast.Mod object at 0x7da2590d6920> name[name]]]]
if call[name[utils].file_exists, parameter[name[bw_output]]] begin[:]
return[name[bw_output]]
with call[name[file_transaction], parameter[name[bw_output]]] begin[:]
call[name[do].run, parameter[call[name[cmd].format, parameter[]], binary_operation[constant[Run bamCoverage in %s] <ast.Mod object at 0x7da2590d6920> name[name]]]]
return[name[bw_output]] | keyword[def] identifier[_bam_coverage] ( identifier[name] , identifier[bam_input] , identifier[data] ):
literal[string]
identifier[cmd] =( literal[string]
literal[string]
literal[string] )
identifier[size] = identifier[bam] . identifier[fasta] . identifier[total_sequence_length] ( identifier[dd] . identifier[get_ref_file] ( identifier[data] ))
identifier[cores] = identifier[dd] . identifier[get_num_cores] ( identifier[data] )
keyword[try] :
identifier[bam_coverage] = identifier[config_utils] . identifier[get_program] ( literal[string] , identifier[data] )
keyword[except] identifier[config_utils] . identifier[CmdNotFound] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] keyword[None]
identifier[resources] = identifier[config_utils] . identifier[get_resources] ( literal[string] , identifier[data] [ literal[string] ])
keyword[if] identifier[resources] :
identifier[options] = identifier[resources] . identifier[get] ( literal[string] )
keyword[if] identifier[options] :
identifier[cmd] += literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[options] ])
identifier[bw_output] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[bam_input] ), literal[string] % identifier[name] )
keyword[if] identifier[utils] . identifier[file_exists] ( identifier[bw_output] ):
keyword[return] identifier[bw_output]
keyword[with] identifier[file_transaction] ( identifier[bw_output] ) keyword[as] identifier[out_tx] :
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] % identifier[name] )
keyword[return] identifier[bw_output] | def _bam_coverage(name, bam_input, data):
"""Run bamCoverage from deeptools"""
cmd = '{bam_coverage} -b {bam_input} -o {bw_output} --binSize 20 --effectiveGenomeSize {size} --smoothLength 60 --extendReads 150 --centerReads -p {cores}'
size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
cores = dd.get_num_cores(data)
try:
bam_coverage = config_utils.get_program('bamCoverage', data) # depends on [control=['try'], data=[]]
except config_utils.CmdNotFound:
logger.info('No bamCoverage found, skipping bamCoverage.')
return None # depends on [control=['except'], data=[]]
resources = config_utils.get_resources('bamCoverage', data['config'])
if resources:
options = resources.get('options')
if options:
cmd += ' %s' % ' '.join([str(x) for x in options]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
bw_output = os.path.join(os.path.dirname(bam_input), '%s.bw' % name)
if utils.file_exists(bw_output):
return bw_output # depends on [control=['if'], data=[]]
with file_transaction(bw_output) as out_tx:
do.run(cmd.format(**locals()), 'Run bamCoverage in %s' % name) # depends on [control=['with'], data=[]]
return bw_output |
def _escape(s):
"""Escape commas, tabs, newlines and dashes in a string
Commas are encoded as tabs
"""
assert isinstance(s, str), \
"expected %s but got %s; value=%s" % (type(str), type(s), s)
s = s.replace("\\", "\\\\")
s = s.replace("\n", "\\n")
s = s.replace("\t", "\\t")
s = s.replace(",", "\t")
return s | def function[_escape, parameter[s]]:
constant[Escape commas, tabs, newlines and dashes in a string
Commas are encoded as tabs
]
assert[call[name[isinstance], parameter[name[s], name[str]]]]
variable[s] assign[=] call[name[s].replace, parameter[constant[\], constant[\\]]]
variable[s] assign[=] call[name[s].replace, parameter[constant[
], constant[\n]]]
variable[s] assign[=] call[name[s].replace, parameter[constant[ ], constant[\t]]]
variable[s] assign[=] call[name[s].replace, parameter[constant[,], constant[ ]]]
return[name[s]] | keyword[def] identifier[_escape] ( identifier[s] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[s] , identifier[str] ), literal[string] %( identifier[type] ( identifier[str] ), identifier[type] ( identifier[s] ), identifier[s] )
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[s] | def _escape(s):
"""Escape commas, tabs, newlines and dashes in a string
Commas are encoded as tabs
"""
assert isinstance(s, str), 'expected %s but got %s; value=%s' % (type(str), type(s), s)
s = s.replace('\\', '\\\\')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
s = s.replace(',', '\t')
return s |
def _fileobj_lookup(self, fileobj):
    """Return a file descriptor from a file object.

    This wraps _fileobj_to_fd() to do an exhaustive search in case
    the object is invalid but we still have it in our map. This
    is used by unregister() so we can unregister an object that
    was previously registered even if it is closed. It is also
    used by _SelectorMapping.
    """
    try:
        return _fileobj_to_fd(fileobj)
    except ValueError:
        # Fallback: identity-scan the registered keys so a closed but
        # still-registered object can be resolved to its descriptor.
        found_fd = next((key.fd for key in self._fd_to_key.values()
                         if key.fileobj is fileobj), None)
        if found_fd is not None:
            return found_fd
        # No match — propagate the original ValueError.
        raise
constant[Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
]
<ast.Try object at 0x7da1b1cb1300> | keyword[def] identifier[_fileobj_lookup] ( identifier[self] , identifier[fileobj] ):
literal[string]
keyword[try] :
keyword[return] identifier[_fileobj_to_fd] ( identifier[fileobj] )
keyword[except] identifier[ValueError] :
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_fd_to_key] . identifier[values] ():
keyword[if] identifier[key] . identifier[fileobj] keyword[is] identifier[fileobj] :
keyword[return] identifier[key] . identifier[fd]
keyword[raise] | def _fileobj_lookup(self, fileobj):
"""Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
"""
try:
return _fileobj_to_fd(fileobj) # depends on [control=['try'], data=[]]
except ValueError:
# Do an exhaustive search.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# Raise ValueError after all.
raise # depends on [control=['except'], data=[]] |
def get_metadata(self):
    """Return the account metadata provided after OAuth2 authentication.

    Returns:
        dict: parsed JSON body from the MailChimp metadata endpoint.

    Raises:
        requests.exceptions.RequestException: on a connection failure,
            a non-2xx HTTP status, or an ``error`` field in the payload.
    """
    # The original wrapped this call in `except RequestException as e:
    # raise e`, which only re-raised the same exception while mangling
    # the traceback; letting it propagate is equivalent and cleaner.
    r = requests.get('https://login.mailchimp.com/oauth2/metadata', auth=self)
    r.raise_for_status()
    output = r.json()
    if 'error' in output:
        # The endpoint may answer 200 with an error payload; surface it
        # as the same exception type callers already handle.
        raise requests.exceptions.RequestException(output['error'])
    return output
constant[
Get the metadata returned after authentication
]
<ast.Try object at 0x7da1b01ff0a0> | keyword[def] identifier[get_metadata] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[r] = identifier[requests] . identifier[get] ( literal[string] , identifier[auth] = identifier[self] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[e] :
keyword[raise] identifier[e]
keyword[else] :
identifier[r] . identifier[raise_for_status] ()
identifier[output] = identifier[r] . identifier[json] ()
keyword[if] literal[string] keyword[in] identifier[output] :
keyword[raise] identifier[requests] . identifier[exceptions] . identifier[RequestException] ( identifier[output] [ literal[string] ])
keyword[return] identifier[output] | def get_metadata(self):
"""
Get the metadata returned after authentication
"""
try:
r = requests.get('https://login.mailchimp.com/oauth2/metadata', auth=self) # depends on [control=['try'], data=[]]
except requests.exceptions.RequestException as e:
raise e # depends on [control=['except'], data=['e']]
else:
r.raise_for_status()
output = r.json()
if 'error' in output:
raise requests.exceptions.RequestException(output['error']) # depends on [control=['if'], data=['output']]
return output |
def handle_submit(self):
"""Handle form submission
"""
form = self.request.form
# Selected AR UIDs
uids = form.get("uids")
container_mapping = self.get_container_mapping()
for uid in uids:
src_pos = container_mapping[uid]
self.context.addDuplicateAnalyses(src_pos)
redirect_url = "{}/{}".format(
api.get_url(self.context), "manage_results")
self.request.response.redirect(redirect_url) | def function[handle_submit, parameter[self]]:
constant[Handle form submission
]
variable[form] assign[=] name[self].request.form
variable[uids] assign[=] call[name[form].get, parameter[constant[uids]]]
variable[container_mapping] assign[=] call[name[self].get_container_mapping, parameter[]]
for taget[name[uid]] in starred[name[uids]] begin[:]
variable[src_pos] assign[=] call[name[container_mapping]][name[uid]]
call[name[self].context.addDuplicateAnalyses, parameter[name[src_pos]]]
variable[redirect_url] assign[=] call[constant[{}/{}].format, parameter[call[name[api].get_url, parameter[name[self].context]], constant[manage_results]]]
call[name[self].request.response.redirect, parameter[name[redirect_url]]] | keyword[def] identifier[handle_submit] ( identifier[self] ):
literal[string]
identifier[form] = identifier[self] . identifier[request] . identifier[form]
identifier[uids] = identifier[form] . identifier[get] ( literal[string] )
identifier[container_mapping] = identifier[self] . identifier[get_container_mapping] ()
keyword[for] identifier[uid] keyword[in] identifier[uids] :
identifier[src_pos] = identifier[container_mapping] [ identifier[uid] ]
identifier[self] . identifier[context] . identifier[addDuplicateAnalyses] ( identifier[src_pos] )
identifier[redirect_url] = literal[string] . identifier[format] (
identifier[api] . identifier[get_url] ( identifier[self] . identifier[context] ), literal[string] )
identifier[self] . identifier[request] . identifier[response] . identifier[redirect] ( identifier[redirect_url] ) | def handle_submit(self):
"""Handle form submission
"""
form = self.request.form
# Selected AR UIDs
uids = form.get('uids')
container_mapping = self.get_container_mapping()
for uid in uids:
src_pos = container_mapping[uid]
self.context.addDuplicateAnalyses(src_pos) # depends on [control=['for'], data=['uid']]
redirect_url = '{}/{}'.format(api.get_url(self.context), 'manage_results')
self.request.response.redirect(redirect_url) |
def register_for_json(*args, **kwargs) -> Any:
"""
Class decorator to register classes with our JSON system.
- If method is ``'provides_init_args_kwargs'``, the class provides a
function
.. code-block:: python
def init_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]]
that returns an ``(args, kwargs)`` tuple, suitable for passing to its
``__init__()`` function as ``__init__(*args, **kwargs)``.
- If method is ``'provides_init_kwargs'``, the class provides a function
.. code-block:: python
def init_kwargs(self) -> Dict
that returns a dictionary ``kwargs`` suitable for passing to its
``__init__()`` function as ``__init__(**kwargs)``.
- Otherwise, the method argument is as for ``register_class_for_json()``.
Usage looks like:
.. code-block:: python
@register_for_json(method=METHOD_STRIP_UNDERSCORE)
class TableId(object):
def __init__(self, db: str = '', schema: str = '',
table: str = '') -> None:
self._db = db
self._schema = schema
self._table = table
"""
if DEBUG:
print("register_for_json: args = {}".format(repr(args)))
print("register_for_json: kwargs = {}".format(repr(kwargs)))
# http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet # noqa
# In brief,
# @decorator
# x
#
# means
# x = decorator(x)
#
# so
# @decorator(args)
# x
#
# means
# x = decorator(args)(x)
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
if DEBUG:
print("... called as @register_for_json")
# called as @decorator
# ... the single argument is the class itself, e.g. Thing in:
# @decorator
# class Thing(object):
# # ...
# ... e.g.:
# args = (<class '__main__.unit_tests.<locals>.SimpleThing'>,)
# kwargs = {}
cls = args[0] # type: ClassType
register_class_for_json(cls, method=METHOD_SIMPLE)
return cls
# Otherwise:
if DEBUG:
print("... called as @register_for_json(*args, **kwargs)")
# called as @decorator(*args, **kwargs)
# ... e.g.:
# args = ()
# kwargs = {'method': 'provides_to_init_args_kwargs_dict'}
method = kwargs.pop('method', METHOD_SIMPLE) # type: str
obj_to_dict_fn = kwargs.pop('obj_to_dict_fn', None) # type: InstanceToDictFnType # noqa
dict_to_obj_fn = kwargs.pop('dict_to_obj_fn', initdict_to_instance) # type: DictToInstanceFnType # noqa
default_factory = kwargs.pop('default_factory', None) # type: DefaultFactoryFnType # noqa
check_result = kwargs.pop('check_results', True) # type: bool
def register_json_class(cls_: ClassType) -> ClassType:
odf = obj_to_dict_fn
dof = dict_to_obj_fn
if method == METHOD_PROVIDES_INIT_ARGS_KWARGS:
if hasattr(cls_, INIT_ARGS_KWARGS_FN_NAME):
odf = wrap_args_kwargs_to_initdict(
getattr(cls_, INIT_ARGS_KWARGS_FN_NAME),
typename=cls_.__qualname__,
check_result=check_result
)
else:
raise ValueError(
"Class type {} does not provide function {}".format(
cls_, INIT_ARGS_KWARGS_FN_NAME))
elif method == METHOD_PROVIDES_INIT_KWARGS:
if hasattr(cls_, INIT_KWARGS_FN_NAME):
odf = wrap_kwargs_to_initdict(
getattr(cls_, INIT_KWARGS_FN_NAME),
typename=cls_.__qualname__,
check_result=check_result
)
else:
raise ValueError(
"Class type {} does not provide function {}".format(
cls_, INIT_KWARGS_FN_NAME))
elif method == METHOD_NO_ARGS:
odf = obj_with_no_args_to_init_dict
register_class_for_json(cls_,
method=method,
obj_to_dict_fn=odf,
dict_to_obj_fn=dof,
default_factory=default_factory)
return cls_
return register_json_class | def function[register_for_json, parameter[]]:
constant[
Class decorator to register classes with our JSON system.
- If method is ``'provides_init_args_kwargs'``, the class provides a
function
.. code-block:: python
def init_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]]
that returns an ``(args, kwargs)`` tuple, suitable for passing to its
``__init__()`` function as ``__init__(*args, **kwargs)``.
- If method is ``'provides_init_kwargs'``, the class provides a function
.. code-block:: python
def init_kwargs(self) -> Dict
that returns a dictionary ``kwargs`` suitable for passing to its
``__init__()`` function as ``__init__(**kwargs)``.
- Otherwise, the method argument is as for ``register_class_for_json()``.
Usage looks like:
.. code-block:: python
@register_for_json(method=METHOD_STRIP_UNDERSCORE)
class TableId(object):
def __init__(self, db: str = '', schema: str = '',
table: str = '') -> None:
self._db = db
self._schema = schema
self._table = table
]
if name[DEBUG] begin[:]
call[name[print], parameter[call[constant[register_for_json: args = {}].format, parameter[call[name[repr], parameter[name[args]]]]]]]
call[name[print], parameter[call[constant[register_for_json: kwargs = {}].format, parameter[call[name[repr], parameter[name[kwargs]]]]]]]
if <ast.BoolOp object at 0x7da1b18370a0> begin[:]
if name[DEBUG] begin[:]
call[name[print], parameter[constant[... called as @register_for_json]]]
variable[cls] assign[=] call[name[args]][constant[0]]
call[name[register_class_for_json], parameter[name[cls]]]
return[name[cls]]
if name[DEBUG] begin[:]
call[name[print], parameter[constant[... called as @register_for_json(*args, **kwargs)]]]
variable[method] assign[=] call[name[kwargs].pop, parameter[constant[method], name[METHOD_SIMPLE]]]
variable[obj_to_dict_fn] assign[=] call[name[kwargs].pop, parameter[constant[obj_to_dict_fn], constant[None]]]
variable[dict_to_obj_fn] assign[=] call[name[kwargs].pop, parameter[constant[dict_to_obj_fn], name[initdict_to_instance]]]
variable[default_factory] assign[=] call[name[kwargs].pop, parameter[constant[default_factory], constant[None]]]
variable[check_result] assign[=] call[name[kwargs].pop, parameter[constant[check_results], constant[True]]]
def function[register_json_class, parameter[cls_]]:
variable[odf] assign[=] name[obj_to_dict_fn]
variable[dof] assign[=] name[dict_to_obj_fn]
if compare[name[method] equal[==] name[METHOD_PROVIDES_INIT_ARGS_KWARGS]] begin[:]
if call[name[hasattr], parameter[name[cls_], name[INIT_ARGS_KWARGS_FN_NAME]]] begin[:]
variable[odf] assign[=] call[name[wrap_args_kwargs_to_initdict], parameter[call[name[getattr], parameter[name[cls_], name[INIT_ARGS_KWARGS_FN_NAME]]]]]
call[name[register_class_for_json], parameter[name[cls_]]]
return[name[cls_]]
return[name[register_json_class]] | keyword[def] identifier[register_for_json] (* identifier[args] ,** identifier[kwargs] )-> identifier[Any] :
literal[string]
keyword[if] identifier[DEBUG] :
identifier[print] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[args] )))
identifier[print] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[kwargs] )))
keyword[if] identifier[len] ( identifier[args] )== literal[int] keyword[and] identifier[len] ( identifier[kwargs] )== literal[int] keyword[and] identifier[callable] ( identifier[args] [ literal[int] ]):
keyword[if] identifier[DEBUG] :
identifier[print] ( literal[string] )
identifier[cls] = identifier[args] [ literal[int] ]
identifier[register_class_for_json] ( identifier[cls] , identifier[method] = identifier[METHOD_SIMPLE] )
keyword[return] identifier[cls]
keyword[if] identifier[DEBUG] :
identifier[print] ( literal[string] )
identifier[method] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[METHOD_SIMPLE] )
identifier[obj_to_dict_fn] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[dict_to_obj_fn] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[initdict_to_instance] )
identifier[default_factory] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[check_result] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
keyword[def] identifier[register_json_class] ( identifier[cls_] : identifier[ClassType] )-> identifier[ClassType] :
identifier[odf] = identifier[obj_to_dict_fn]
identifier[dof] = identifier[dict_to_obj_fn]
keyword[if] identifier[method] == identifier[METHOD_PROVIDES_INIT_ARGS_KWARGS] :
keyword[if] identifier[hasattr] ( identifier[cls_] , identifier[INIT_ARGS_KWARGS_FN_NAME] ):
identifier[odf] = identifier[wrap_args_kwargs_to_initdict] (
identifier[getattr] ( identifier[cls_] , identifier[INIT_ARGS_KWARGS_FN_NAME] ),
identifier[typename] = identifier[cls_] . identifier[__qualname__] ,
identifier[check_result] = identifier[check_result]
)
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[cls_] , identifier[INIT_ARGS_KWARGS_FN_NAME] ))
keyword[elif] identifier[method] == identifier[METHOD_PROVIDES_INIT_KWARGS] :
keyword[if] identifier[hasattr] ( identifier[cls_] , identifier[INIT_KWARGS_FN_NAME] ):
identifier[odf] = identifier[wrap_kwargs_to_initdict] (
identifier[getattr] ( identifier[cls_] , identifier[INIT_KWARGS_FN_NAME] ),
identifier[typename] = identifier[cls_] . identifier[__qualname__] ,
identifier[check_result] = identifier[check_result]
)
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] (
identifier[cls_] , identifier[INIT_KWARGS_FN_NAME] ))
keyword[elif] identifier[method] == identifier[METHOD_NO_ARGS] :
identifier[odf] = identifier[obj_with_no_args_to_init_dict]
identifier[register_class_for_json] ( identifier[cls_] ,
identifier[method] = identifier[method] ,
identifier[obj_to_dict_fn] = identifier[odf] ,
identifier[dict_to_obj_fn] = identifier[dof] ,
identifier[default_factory] = identifier[default_factory] )
keyword[return] identifier[cls_]
keyword[return] identifier[register_json_class] | def register_for_json(*args, **kwargs) -> Any:
"""
Class decorator to register classes with our JSON system.
- If method is ``'provides_init_args_kwargs'``, the class provides a
function
.. code-block:: python
def init_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]]
that returns an ``(args, kwargs)`` tuple, suitable for passing to its
``__init__()`` function as ``__init__(*args, **kwargs)``.
- If method is ``'provides_init_kwargs'``, the class provides a function
.. code-block:: python
def init_kwargs(self) -> Dict
that returns a dictionary ``kwargs`` suitable for passing to its
``__init__()`` function as ``__init__(**kwargs)``.
- Otherwise, the method argument is as for ``register_class_for_json()``.
Usage looks like:
.. code-block:: python
@register_for_json(method=METHOD_STRIP_UNDERSCORE)
class TableId(object):
def __init__(self, db: str = '', schema: str = '',
table: str = '') -> None:
self._db = db
self._schema = schema
self._table = table
"""
if DEBUG:
print('register_for_json: args = {}'.format(repr(args)))
print('register_for_json: kwargs = {}'.format(repr(kwargs))) # depends on [control=['if'], data=[]]
# http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet # noqa
# In brief,
# @decorator
# x
#
# means
# x = decorator(x)
#
# so
# @decorator(args)
# x
#
# means
# x = decorator(args)(x)
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
if DEBUG:
print('... called as @register_for_json') # depends on [control=['if'], data=[]]
# called as @decorator
# ... the single argument is the class itself, e.g. Thing in:
# @decorator
# class Thing(object):
# # ...
# ... e.g.:
# args = (<class '__main__.unit_tests.<locals>.SimpleThing'>,)
# kwargs = {}
cls = args[0] # type: ClassType
register_class_for_json(cls, method=METHOD_SIMPLE)
return cls # depends on [control=['if'], data=[]]
# Otherwise:
if DEBUG:
print('... called as @register_for_json(*args, **kwargs)') # depends on [control=['if'], data=[]]
# called as @decorator(*args, **kwargs)
# ... e.g.:
# args = ()
# kwargs = {'method': 'provides_to_init_args_kwargs_dict'}
method = kwargs.pop('method', METHOD_SIMPLE) # type: str
obj_to_dict_fn = kwargs.pop('obj_to_dict_fn', None) # type: InstanceToDictFnType # noqa
dict_to_obj_fn = kwargs.pop('dict_to_obj_fn', initdict_to_instance) # type: DictToInstanceFnType # noqa
default_factory = kwargs.pop('default_factory', None) # type: DefaultFactoryFnType # noqa
check_result = kwargs.pop('check_results', True) # type: bool
def register_json_class(cls_: ClassType) -> ClassType:
odf = obj_to_dict_fn
dof = dict_to_obj_fn
if method == METHOD_PROVIDES_INIT_ARGS_KWARGS:
if hasattr(cls_, INIT_ARGS_KWARGS_FN_NAME):
odf = wrap_args_kwargs_to_initdict(getattr(cls_, INIT_ARGS_KWARGS_FN_NAME), typename=cls_.__qualname__, check_result=check_result) # depends on [control=['if'], data=[]]
else:
raise ValueError('Class type {} does not provide function {}'.format(cls_, INIT_ARGS_KWARGS_FN_NAME)) # depends on [control=['if'], data=[]]
elif method == METHOD_PROVIDES_INIT_KWARGS:
if hasattr(cls_, INIT_KWARGS_FN_NAME):
odf = wrap_kwargs_to_initdict(getattr(cls_, INIT_KWARGS_FN_NAME), typename=cls_.__qualname__, check_result=check_result) # depends on [control=['if'], data=[]]
else:
raise ValueError('Class type {} does not provide function {}'.format(cls_, INIT_KWARGS_FN_NAME)) # depends on [control=['if'], data=[]]
elif method == METHOD_NO_ARGS:
odf = obj_with_no_args_to_init_dict # depends on [control=['if'], data=[]]
register_class_for_json(cls_, method=method, obj_to_dict_fn=odf, dict_to_obj_fn=dof, default_factory=default_factory)
return cls_
return register_json_class |
def publish(self, user_id, wifi_fingerprint, action='track', location_id='', port=1883):
'''
a method to publish wifi fingerprint data to a mosquitto server
:param user_id: string with id of user
:param wifi_fingerprint: list of dictionaries with wifi fields mac and rssi
:param action: string with type of action to perform with data (track or learn)
:param location_id: [optional] string with classifier to add to learning data
:param port: [optional] integer with port to connect to
:return: True
'''
title = '%s.publish' % self.__class__.__name__
# validate inputs
input_fields = {
'user_id': user_id,
'wifi_fingerprint': wifi_fingerprint,
'action': action,
'location_id': location_id,
'port': port
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# compose message
fingerprint_string = ''
for signal in wifi_fingerprint:
fingerprint_string += signal['mac'].replace(':','')
rssi_string = str(signal['rssi']).replace('-','')
if len(rssi_string) > 2:
fingerprint_string += ' '
fingerprint_string += rssi_string
# compose channel
topic_string = '%s/track/%s' % (self.group_name, user_id)
if action == 'learn':
topic_string = '%s/learn/%s/%s' % (self.group_name, user_id, location_id)
# send a single message to server
import paho.mqtt.publish as mqtt_publish
mqtt_publish.single(
topic=topic_string,
payload=fingerprint_string,
auth={ 'username': self.group_name, 'password': self.password },
hostname=self.server_url,
port=port
)
return True | def function[publish, parameter[self, user_id, wifi_fingerprint, action, location_id, port]]:
constant[
a method to publish wifi fingerprint data to a mosquitto server
:param user_id: string with id of user
:param wifi_fingerprint: list of dictionaries with wifi fields mac and rssi
:param action: string with type of action to perform with data (track or learn)
:param location_id: [optional] string with classifier to add to learning data
:param port: [optional] integer with port to connect to
:return: True
]
variable[title] assign[=] binary_operation[constant[%s.publish] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]
variable[input_fields] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cd360>, <ast.Constant object at 0x7da18c4cf550>, <ast.Constant object at 0x7da18c4cee30>, <ast.Constant object at 0x7da18c4cee00>, <ast.Constant object at 0x7da18c4ce860>], [<ast.Name object at 0x7da18c4cd900>, <ast.Name object at 0x7da18c4cc070>, <ast.Name object at 0x7da18c4ce620>, <ast.Name object at 0x7da18c4cd150>, <ast.Name object at 0x7da18c4cc850>]]
for taget[tuple[[<ast.Name object at 0x7da18c4ccb50>, <ast.Name object at 0x7da18c4cd4e0>]]] in starred[call[name[input_fields].items, parameter[]]] begin[:]
variable[object_title] assign[=] binary_operation[constant[%s(%s=%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18c4cef20>, <ast.Name object at 0x7da18c4cc8e0>, <ast.Call object at 0x7da18c4ce5f0>]]]
call[name[self].fields.validate, parameter[name[value], binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> name[key]], name[object_title]]]
variable[fingerprint_string] assign[=] constant[]
for taget[name[signal]] in starred[name[wifi_fingerprint]] begin[:]
<ast.AugAssign object at 0x7da18c4cc670>
variable[rssi_string] assign[=] call[call[name[str], parameter[call[name[signal]][constant[rssi]]]].replace, parameter[constant[-], constant[]]]
if compare[call[name[len], parameter[name[rssi_string]]] greater[>] constant[2]] begin[:]
<ast.AugAssign object at 0x7da18c4cf640>
<ast.AugAssign object at 0x7da18c4cea40>
variable[topic_string] assign[=] binary_operation[constant[%s/track/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4cc7c0>, <ast.Name object at 0x7da18c4cc160>]]]
if compare[name[action] equal[==] constant[learn]] begin[:]
variable[topic_string] assign[=] binary_operation[constant[%s/learn/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4ce650>, <ast.Name object at 0x7da18c4ceec0>, <ast.Name object at 0x7da18c4cd600>]]]
import module[paho.mqtt.publish] as alias[mqtt_publish]
call[name[mqtt_publish].single, parameter[]]
return[constant[True]] | keyword[def] identifier[publish] ( identifier[self] , identifier[user_id] , identifier[wifi_fingerprint] , identifier[action] = literal[string] , identifier[location_id] = literal[string] , identifier[port] = literal[int] ):
literal[string]
identifier[title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__]
identifier[input_fields] ={
literal[string] : identifier[user_id] ,
literal[string] : identifier[wifi_fingerprint] ,
literal[string] : identifier[action] ,
literal[string] : identifier[location_id] ,
literal[string] : identifier[port]
}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[input_fields] . identifier[items] ():
identifier[object_title] = literal[string] %( identifier[title] , identifier[key] , identifier[str] ( identifier[value] ))
identifier[self] . identifier[fields] . identifier[validate] ( identifier[value] , literal[string] % identifier[key] , identifier[object_title] )
identifier[fingerprint_string] = literal[string]
keyword[for] identifier[signal] keyword[in] identifier[wifi_fingerprint] :
identifier[fingerprint_string] += identifier[signal] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[rssi_string] = identifier[str] ( identifier[signal] [ literal[string] ]). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[len] ( identifier[rssi_string] )> literal[int] :
identifier[fingerprint_string] += literal[string]
identifier[fingerprint_string] += identifier[rssi_string]
identifier[topic_string] = literal[string] %( identifier[self] . identifier[group_name] , identifier[user_id] )
keyword[if] identifier[action] == literal[string] :
identifier[topic_string] = literal[string] %( identifier[self] . identifier[group_name] , identifier[user_id] , identifier[location_id] )
keyword[import] identifier[paho] . identifier[mqtt] . identifier[publish] keyword[as] identifier[mqtt_publish]
identifier[mqtt_publish] . identifier[single] (
identifier[topic] = identifier[topic_string] ,
identifier[payload] = identifier[fingerprint_string] ,
identifier[auth] ={ literal[string] : identifier[self] . identifier[group_name] , literal[string] : identifier[self] . identifier[password] },
identifier[hostname] = identifier[self] . identifier[server_url] ,
identifier[port] = identifier[port]
)
keyword[return] keyword[True] | def publish(self, user_id, wifi_fingerprint, action='track', location_id='', port=1883):
"""
a method to publish wifi fingerprint data to a mosquitto server
:param user_id: string with id of user
:param wifi_fingerprint: list of dictionaries with wifi fields mac and rssi
:param action: string with type of action to perform with data (track or learn)
:param location_id: [optional] string with classifier to add to learning data
:param port: [optional] integer with port to connect to
:return: True
"""
title = '%s.publish' % self.__class__.__name__
# validate inputs
input_fields = {'user_id': user_id, 'wifi_fingerprint': wifi_fingerprint, 'action': action, 'location_id': location_id, 'port': port}
for (key, value) in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title) # depends on [control=['for'], data=[]]
# compose message
fingerprint_string = ''
for signal in wifi_fingerprint:
fingerprint_string += signal['mac'].replace(':', '')
rssi_string = str(signal['rssi']).replace('-', '')
if len(rssi_string) > 2:
fingerprint_string += ' ' # depends on [control=['if'], data=[]]
fingerprint_string += rssi_string # depends on [control=['for'], data=['signal']]
# compose channel
topic_string = '%s/track/%s' % (self.group_name, user_id)
if action == 'learn':
topic_string = '%s/learn/%s/%s' % (self.group_name, user_id, location_id) # depends on [control=['if'], data=[]]
# send a single message to server
import paho.mqtt.publish as mqtt_publish
mqtt_publish.single(topic=topic_string, payload=fingerprint_string, auth={'username': self.group_name, 'password': self.password}, hostname=self.server_url, port=port)
return True |
def stop(self):
"""Stops all the snippet clients under management."""
for client in self._snippet_clients.values():
if client.is_alive:
self._device.log.debug('Stopping SnippetClient<%s>.',
client.package)
client.stop_app()
else:
self._device.log.debug(
'Not stopping SnippetClient<%s> because it is not alive.',
client.package) | def function[stop, parameter[self]]:
constant[Stops all the snippet clients under management.]
for taget[name[client]] in starred[call[name[self]._snippet_clients.values, parameter[]]] begin[:]
if name[client].is_alive begin[:]
call[name[self]._device.log.debug, parameter[constant[Stopping SnippetClient<%s>.], name[client].package]]
call[name[client].stop_app, parameter[]] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[for] identifier[client] keyword[in] identifier[self] . identifier[_snippet_clients] . identifier[values] ():
keyword[if] identifier[client] . identifier[is_alive] :
identifier[self] . identifier[_device] . identifier[log] . identifier[debug] ( literal[string] ,
identifier[client] . identifier[package] )
identifier[client] . identifier[stop_app] ()
keyword[else] :
identifier[self] . identifier[_device] . identifier[log] . identifier[debug] (
literal[string] ,
identifier[client] . identifier[package] ) | def stop(self):
"""Stops all the snippet clients under management."""
for client in self._snippet_clients.values():
if client.is_alive:
self._device.log.debug('Stopping SnippetClient<%s>.', client.package)
client.stop_app() # depends on [control=['if'], data=[]]
else:
self._device.log.debug('Not stopping SnippetClient<%s> because it is not alive.', client.package) # depends on [control=['for'], data=['client']] |
def sasml(self) -> 'SASml':
"""
This methods creates a SASML object which you can use to run various analytics. See the sasml.py module.
:return: sasml object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASml(self) | def function[sasml, parameter[self]]:
constant[
This methods creates a SASML object which you can use to run various analytics. See the sasml.py module.
:return: sasml object
]
if <ast.UnaryOp object at 0x7da2044c14e0> begin[:]
call[name[self]._loadmacros, parameter[]]
name[self]._loaded_macros assign[=] constant[True]
return[call[name[SASml], parameter[name[self]]]] | keyword[def] identifier[sasml] ( identifier[self] )-> literal[string] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_loaded_macros] :
identifier[self] . identifier[_loadmacros] ()
identifier[self] . identifier[_loaded_macros] = keyword[True]
keyword[return] identifier[SASml] ( identifier[self] ) | def sasml(self) -> 'SASml':
"""
This methods creates a SASML object which you can use to run various analytics. See the sasml.py module.
:return: sasml object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True # depends on [control=['if'], data=[]]
return SASml(self) |
def cleanup(test_data, udfs, tmp_data, tmp_db):
"""Cleanup Ibis test data and UDFs"""
con = make_ibis_client(ENV)
if udfs:
# this comes before test_data bc the latter clobbers this too
con.hdfs.rmdir(os.path.join(ENV.test_data_dir, 'udf'))
if test_data:
con.drop_database(ENV.test_data_db, force=True)
con.hdfs.rmdir(ENV.test_data_dir)
if tmp_data:
con.hdfs.rmdir(ENV.tmp_dir)
if tmp_db:
con.drop_database(ENV.tmp_db, force=True) | def function[cleanup, parameter[test_data, udfs, tmp_data, tmp_db]]:
constant[Cleanup Ibis test data and UDFs]
variable[con] assign[=] call[name[make_ibis_client], parameter[name[ENV]]]
if name[udfs] begin[:]
call[name[con].hdfs.rmdir, parameter[call[name[os].path.join, parameter[name[ENV].test_data_dir, constant[udf]]]]]
if name[test_data] begin[:]
call[name[con].drop_database, parameter[name[ENV].test_data_db]]
call[name[con].hdfs.rmdir, parameter[name[ENV].test_data_dir]]
if name[tmp_data] begin[:]
call[name[con].hdfs.rmdir, parameter[name[ENV].tmp_dir]]
if name[tmp_db] begin[:]
call[name[con].drop_database, parameter[name[ENV].tmp_db]] | keyword[def] identifier[cleanup] ( identifier[test_data] , identifier[udfs] , identifier[tmp_data] , identifier[tmp_db] ):
literal[string]
identifier[con] = identifier[make_ibis_client] ( identifier[ENV] )
keyword[if] identifier[udfs] :
identifier[con] . identifier[hdfs] . identifier[rmdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[ENV] . identifier[test_data_dir] , literal[string] ))
keyword[if] identifier[test_data] :
identifier[con] . identifier[drop_database] ( identifier[ENV] . identifier[test_data_db] , identifier[force] = keyword[True] )
identifier[con] . identifier[hdfs] . identifier[rmdir] ( identifier[ENV] . identifier[test_data_dir] )
keyword[if] identifier[tmp_data] :
identifier[con] . identifier[hdfs] . identifier[rmdir] ( identifier[ENV] . identifier[tmp_dir] )
keyword[if] identifier[tmp_db] :
identifier[con] . identifier[drop_database] ( identifier[ENV] . identifier[tmp_db] , identifier[force] = keyword[True] ) | def cleanup(test_data, udfs, tmp_data, tmp_db):
"""Cleanup Ibis test data and UDFs"""
con = make_ibis_client(ENV)
if udfs:
# this comes before test_data bc the latter clobbers this too
con.hdfs.rmdir(os.path.join(ENV.test_data_dir, 'udf')) # depends on [control=['if'], data=[]]
if test_data:
con.drop_database(ENV.test_data_db, force=True)
con.hdfs.rmdir(ENV.test_data_dir) # depends on [control=['if'], data=[]]
if tmp_data:
con.hdfs.rmdir(ENV.tmp_dir) # depends on [control=['if'], data=[]]
if tmp_db:
con.drop_database(ENV.tmp_db, force=True) # depends on [control=['if'], data=[]] |
def autoencoder_range(rhp):
"""Tuning grid of the main autoencoder params."""
rhp.set_float("dropout", 0.01, 0.3)
rhp.set_float("gan_loss_factor", 0.01, 0.1)
rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE)
rhp.set_discrete("bottleneck_warmup_steps", [200, 2000])
rhp.set_float("gumbel_temperature", 0, 1)
rhp.set_float("gumbel_noise_factor", 0, 0.5) | def function[autoencoder_range, parameter[rhp]]:
constant[Tuning grid of the main autoencoder params.]
call[name[rhp].set_float, parameter[constant[dropout], constant[0.01], constant[0.3]]]
call[name[rhp].set_float, parameter[constant[gan_loss_factor], constant[0.01], constant[0.1]]]
call[name[rhp].set_float, parameter[constant[bottleneck_l2_factor], constant[0.001], constant[0.1]]]
call[name[rhp].set_discrete, parameter[constant[bottleneck_warmup_steps], list[[<ast.Constant object at 0x7da2045675e0>, <ast.Constant object at 0x7da204565510>]]]]
call[name[rhp].set_float, parameter[constant[gumbel_temperature], constant[0], constant[1]]]
call[name[rhp].set_float, parameter[constant[gumbel_noise_factor], constant[0], constant[0.5]]] | keyword[def] identifier[autoencoder_range] ( identifier[rhp] ):
literal[string]
identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] )
identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] )
identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] , identifier[scale] = identifier[rhp] . identifier[LOG_SCALE] )
identifier[rhp] . identifier[set_discrete] ( literal[string] ,[ literal[int] , literal[int] ])
identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] )
identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] ) | def autoencoder_range(rhp):
"""Tuning grid of the main autoencoder params."""
rhp.set_float('dropout', 0.01, 0.3)
rhp.set_float('gan_loss_factor', 0.01, 0.1)
rhp.set_float('bottleneck_l2_factor', 0.001, 0.1, scale=rhp.LOG_SCALE)
rhp.set_discrete('bottleneck_warmup_steps', [200, 2000])
rhp.set_float('gumbel_temperature', 0, 1)
rhp.set_float('gumbel_noise_factor', 0, 0.5) |
def reset_env(exclude=None):
    """Remove WANDB_* environment variables, used in Jupyter notebooks.

    Args:
        exclude: optional iterable of variable names that should be kept
            even though they start with ``WANDB_``.

    Returns:
        True if wandb had been initialized (``env.INITED`` is set in the
        environment) and the variables were removed, False otherwise.
    """
    # Previous signature used a mutable default (`exclude=[]`); a None
    # sentinel avoids the shared-mutable-default pitfall.
    if exclude is None:
        exclude = ()
    if os.getenv(env.INITED):
        # Snapshot matching keys first: deleting while iterating
        # os.environ directly would mutate the mapping mid-iteration.
        wandb_keys = [key for key in os.environ.keys()
                      if key.startswith('WANDB_') and key not in exclude]
        for key in wandb_keys:
            del os.environ[key]
        return True
    else:
        return False
constant[Remove environment variables, used in Jupyter notebooks]
if call[name[os].getenv, parameter[name[env].INITED]] begin[:]
variable[wandb_keys] assign[=] <ast.ListComp object at 0x7da1b07baa10>
for taget[name[key]] in starred[name[wandb_keys]] begin[:]
<ast.Delete object at 0x7da1b07bb220>
return[constant[True]] | keyword[def] identifier[reset_env] ( identifier[exclude] =[]):
literal[string]
keyword[if] identifier[os] . identifier[getenv] ( identifier[env] . identifier[INITED] ):
identifier[wandb_keys] =[ identifier[key] keyword[for] identifier[key] keyword[in] identifier[os] . identifier[environ] . identifier[keys] () keyword[if] identifier[key] . identifier[startswith] (
literal[string] ) keyword[and] identifier[key] keyword[not] keyword[in] identifier[exclude] ]
keyword[for] identifier[key] keyword[in] identifier[wandb_keys] :
keyword[del] identifier[os] . identifier[environ] [ identifier[key] ]
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def reset_env(exclude=[]):
"""Remove environment variables, used in Jupyter notebooks"""
if os.getenv(env.INITED):
wandb_keys = [key for key in os.environ.keys() if key.startswith('WANDB_') and key not in exclude]
for key in wandb_keys:
del os.environ[key] # depends on [control=['for'], data=['key']]
return True # depends on [control=['if'], data=[]]
else:
return False |
def uniqueName(self, name):
    """UIParser.uniqueName(string) -> string
    Create a unique name from a string by appending an increasing
    numeric suffix on repeated requests for the same base name.
    >>> p = UIParser(QtCore, QtGui, QtWidgets)
    >>> p.uniqueName("foo")
    'foo'
    >>> p.uniqueName("foo")
    'foo1'
    """
    if name not in self.name_suffixes:
        # First occurrence: remember it and hand the name back untouched.
        self.name_suffixes[name] = 0
        return name
    next_suffix = self.name_suffixes[name] + 1
    self.name_suffixes[name] = next_suffix
    return "%s%i" % (name, next_suffix)
constant[UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
]
<ast.Try object at 0x7da1b07cfee0>
<ast.AugAssign object at 0x7da1b07cd630>
call[name[self].name_suffixes][name[name]] assign[=] name[suffix]
return[binary_operation[constant[%s%i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b07cef20>, <ast.Name object at 0x7da1b07cead0>]]]] | keyword[def] identifier[uniqueName] ( identifier[self] , identifier[name] ):
literal[string]
keyword[try] :
identifier[suffix] = identifier[self] . identifier[name_suffixes] [ identifier[name] ]
keyword[except] identifier[KeyError] :
identifier[self] . identifier[name_suffixes] [ identifier[name] ]= literal[int]
keyword[return] identifier[name]
identifier[suffix] += literal[int]
identifier[self] . identifier[name_suffixes] [ identifier[name] ]= identifier[suffix]
keyword[return] literal[string] %( identifier[name] , identifier[suffix] ) | def uniqueName(self, name):
"""UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
"""
try:
suffix = self.name_suffixes[name] # depends on [control=['try'], data=[]]
except KeyError:
self.name_suffixes[name] = 0
return name # depends on [control=['except'], data=[]]
suffix += 1
self.name_suffixes[name] = suffix
return '%s%i' % (name, suffix) |
def currentRecord( self ):
    """
    Returns the record found at the current index for this combo box,
    resolving and caching it lazily when the widget is required.
    
    :return     <orb.Table> || None
    """
    record = self._currentRecord
    if record is None and self.isRequired():
        # Lazily resolve and cache the record for the current index.
        record = self.recordAt(self.currentIndex())
        self._currentRecord = record
    return record
constant[
Returns the record found at the current index for this combo box.
:rerturn <orb.Table> || None
]
if <ast.BoolOp object at 0x7da18c4cff40> begin[:]
name[self]._currentRecord assign[=] call[name[self].recordAt, parameter[call[name[self].currentIndex, parameter[]]]]
return[name[self]._currentRecord] | keyword[def] identifier[currentRecord] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_currentRecord] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[isRequired] ():
identifier[self] . identifier[_currentRecord] = identifier[self] . identifier[recordAt] ( identifier[self] . identifier[currentIndex] ())
keyword[return] identifier[self] . identifier[_currentRecord] | def currentRecord(self):
"""
Returns the record found at the current index for this combo box.
:rerturn <orb.Table> || None
"""
if self._currentRecord is None and self.isRequired():
self._currentRecord = self.recordAt(self.currentIndex()) # depends on [control=['if'], data=[]]
return self._currentRecord |
async def facebook_request(
    self,
    path: str,
    access_token: str = None,
    post_args: Dict[str, Any] = None,
    **args: Any
) -> Any:
    """Fetch the given relative Facebook Graph API path, e.g. ``/btaylor/picture``.

    If the request is a POST, ``post_args`` should be provided; query
    string arguments are given as keyword arguments.  Most endpoints
    require an OAuth access token, obtainable via
    `~OAuth2Mixin.authorize_redirect` and `get_authenticated_user` (the
    returned user dict carries an ``access_token`` entry).

    The path is resolved against ``self._FACEBOOK_BASE_URL`` (by default
    "https://graph.facebook.com").  This is a thin wrapper around
    `OAuth2Mixin.oauth2_request` — the only difference is that this
    method takes a relative path while ``oauth2_request`` takes a
    complete URL.

    An introduction to the Facebook Graph API is available at
    http://developers.facebook.com/docs/api

    .. versionchanged:: 3.1
       Added the ability to override ``self._FACEBOOK_BASE_URL``.
    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned awaitable
       object instead.
    """
    # ``oauth2_request`` expects an absolute URL; only the base is added here.
    return await self.oauth2_request(
        self._FACEBOOK_BASE_URL + path,
        access_token=access_token,
        post_args=post_args,
        **args,
    )
identifier[self] ,
identifier[path] : identifier[str] ,
identifier[access_token] : identifier[str] = keyword[None] ,
identifier[post_args] : identifier[Dict] [ identifier[str] , identifier[Any] ]= keyword[None] ,
** identifier[args] : identifier[Any]
)-> identifier[Any] :
literal[string]
identifier[url] = identifier[self] . identifier[_FACEBOOK_BASE_URL] + identifier[path]
keyword[return] keyword[await] identifier[self] . identifier[oauth2_request] (
identifier[url] , identifier[access_token] = identifier[access_token] , identifier[post_args] = identifier[post_args] ,** identifier[args]
) | async def facebook_request(self, path: str, access_token: str=None, post_args: Dict[str, Any]=None, **args: Any) -> Any:
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage:
.. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
async def get(self):
new_entry = await self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
This method is a wrapper around `OAuth2Mixin.oauth2_request`;
the only difference is that this method takes a relative path,
while ``oauth2_request`` takes a complete url.
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned awaitable object instead.
"""
url = self._FACEBOOK_BASE_URL + path
return await self.oauth2_request(url, access_token=access_token, post_args=post_args, **args) |
def add_auth_to_method(self, path, method_name, auth, api):
    """
    Adds auth settings for this path/method. Auth settings currently consist solely of Authorizers
    but this method will eventually include setting other auth settings such as API Key,
    Resource Policy, etc.
    :param string path: Path name
    :param string method_name: Method name
    :param dict auth: Auth configuration such as Authorizers, ApiKey, ResourcePolicy (only Authorizers supported
                      currently)
    :param dict api: Reference to the related Api's properties as defined in the template.
    """
    # Guard clauses: nothing to do without an explicit method authorizer.
    if not auth:
        return
    method_authorizer = auth.get('Authorizer')
    if not method_authorizer:
        return
    # Pull the API-level authorizer configuration, tolerating a missing
    # or falsy 'Auth' section exactly like the original `and`-chaining.
    api_auth = api.get('Auth')
    api_authorizers = api_auth.get('Authorizers') if api_auth else api_auth
    default_authorizer = api_auth.get('DefaultAuthorizer') if api_auth else api_auth
    self.set_method_authorizer(path, method_name, method_authorizer,
                               api_authorizers, default_authorizer)
constant[
Adds auth settings for this path/method. Auth settings currently consist solely of Authorizers
but this method will eventually include setting other auth settings such as API Key,
Resource Policy, etc.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers, ApiKey, ResourcePolicy (only Authorizers supported
currently)
:param dict api: Reference to the related Api's properties as defined in the template.
]
variable[method_authorizer] assign[=] <ast.BoolOp object at 0x7da2049630a0>
if name[method_authorizer] begin[:]
variable[api_auth] assign[=] call[name[api].get, parameter[constant[Auth]]]
variable[api_authorizers] assign[=] <ast.BoolOp object at 0x7da2049607c0>
variable[default_authorizer] assign[=] <ast.BoolOp object at 0x7da204962a40>
call[name[self].set_method_authorizer, parameter[name[path], name[method_name], name[method_authorizer], name[api_authorizers], name[default_authorizer]]] | keyword[def] identifier[add_auth_to_method] ( identifier[self] , identifier[path] , identifier[method_name] , identifier[auth] , identifier[api] ):
literal[string]
identifier[method_authorizer] = identifier[auth] keyword[and] identifier[auth] . identifier[get] ( literal[string] )
keyword[if] identifier[method_authorizer] :
identifier[api_auth] = identifier[api] . identifier[get] ( literal[string] )
identifier[api_authorizers] = identifier[api_auth] keyword[and] identifier[api_auth] . identifier[get] ( literal[string] )
identifier[default_authorizer] = identifier[api_auth] keyword[and] identifier[api_auth] . identifier[get] ( literal[string] )
identifier[self] . identifier[set_method_authorizer] ( identifier[path] , identifier[method_name] , identifier[method_authorizer] , identifier[api_authorizers] , identifier[default_authorizer] ) | def add_auth_to_method(self, path, method_name, auth, api):
"""
Adds auth settings for this path/method. Auth settings currently consist solely of Authorizers
but this method will eventually include setting other auth settings such as API Key,
Resource Policy, etc.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers, ApiKey, ResourcePolicy (only Authorizers supported
currently)
:param dict api: Reference to the related Api's properties as defined in the template.
"""
method_authorizer = auth and auth.get('Authorizer')
if method_authorizer:
api_auth = api.get('Auth')
api_authorizers = api_auth and api_auth.get('Authorizers')
default_authorizer = api_auth and api_auth.get('DefaultAuthorizer')
self.set_method_authorizer(path, method_name, method_authorizer, api_authorizers, default_authorizer) # depends on [control=['if'], data=[]] |
def validate_instance(instance):
    """Return False when the model instance matches an exclusion rule
    from settings.AUTOMATED_LOGGING, True when it should be logged."""
    exclusion_rules = settings.AUTOMATED_LOGGING['exclude']['model']
    # Hoist the loop-invariant lowercase identifiers out of the loop.
    app_label = instance._meta.app_label.lower()
    class_name = instance.__class__.__name__.lower()
    module_name = instance.__module__.lower()
    for rule in exclusion_rules:
        if rule in (app_label, class_name) or module_name.startswith(rule):
            return False
    return True
constant[Validating if the instance should be logged, or is excluded]
variable[excludes] assign[=] call[call[name[settings].AUTOMATED_LOGGING][constant[exclude]]][constant[model]]
for taget[name[excluded]] in starred[name[excludes]] begin[:]
if <ast.BoolOp object at 0x7da1b23808b0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[validate_instance] ( identifier[instance] ):
literal[string]
identifier[excludes] = identifier[settings] . identifier[AUTOMATED_LOGGING] [ literal[string] ][ literal[string] ]
keyword[for] identifier[excluded] keyword[in] identifier[excludes] :
keyword[if] ( identifier[excluded] keyword[in] [ identifier[instance] . identifier[_meta] . identifier[app_label] . identifier[lower] (),
identifier[instance] . identifier[__class__] . identifier[__name__] . identifier[lower] ()] keyword[or]
identifier[instance] . identifier[__module__] . identifier[lower] (). identifier[startswith] ( identifier[excluded] )):
keyword[return] keyword[False]
keyword[return] keyword[True] | def validate_instance(instance):
"""Validating if the instance should be logged, or is excluded"""
excludes = settings.AUTOMATED_LOGGING['exclude']['model']
for excluded in excludes:
if excluded in [instance._meta.app_label.lower(), instance.__class__.__name__.lower()] or instance.__module__.lower().startswith(excluded):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['excluded']]
return True |
def add(self, ngram):
    """Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)"""
    # Update the joint count first, then the conditional distribution.
    CountingProbDist.add(self, ngram)
    context, word = ngram[:-1], ngram[-1]
    self.cond_prob[context].add(word)
constant[Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)]
call[name[CountingProbDist].add, parameter[name[self], name[ngram]]]
call[call[name[self].cond_prob][call[name[ngram]][<ast.Slice object at 0x7da18bcc90c0>]].add, parameter[call[name[ngram]][<ast.UnaryOp object at 0x7da18bcca1a0>]]] | keyword[def] identifier[add] ( identifier[self] , identifier[ngram] ):
literal[string]
identifier[CountingProbDist] . identifier[add] ( identifier[self] , identifier[ngram] )
identifier[self] . identifier[cond_prob] [ identifier[ngram] [:- literal[int] ]]. identifier[add] ( identifier[ngram] [- literal[int] ]) | def add(self, ngram):
"""Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)"""
CountingProbDist.add(self, ngram)
self.cond_prob[ngram[:-1]].add(ngram[-1]) |
def zip_dict(*dicts):
    """Iterate over items of dictionaries grouped by their keys."""
    merged_keys = set().union(*dicts)  # union of every dict's key set
    for key in merged_keys:
        # Raises KeyError when any dict lacks `key`, matching the
        # original itertools.chain-based implementation.
        yield key, tuple(d[key] for d in dicts)
constant[Iterate over items of dictionaries grouped by their keys.]
for taget[name[key]] in starred[call[name[set], parameter[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da1b2011270>]]]]] begin[:]
<ast.Yield object at 0x7da1b1e19b40> | keyword[def] identifier[zip_dict] (* identifier[dicts] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[set] ( identifier[itertools] . identifier[chain] (* identifier[dicts] )):
keyword[yield] identifier[key] , identifier[tuple] ( identifier[d] [ identifier[key] ] keyword[for] identifier[d] keyword[in] identifier[dicts] ) | def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)): # set merge all keys
# Will raise KeyError if the dict don't have the same keys
yield (key, tuple((d[key] for d in dicts))) # depends on [control=['for'], data=['key']] |
def set_register(self, addr, data):
    """Sets an arbitrary register at @addr and subsequent registers depending
    on how much data you decide to write. It will automatically fill extra
    bytes with zeros. You cannot write more than 14 bytes at a time.
    @addr should be a static constant from the Addr class, e.g. Addr.Speed"""
    assert len(data) <= 14, "Cannot write more than 14 bytes at a time"
    # 2-byte header: register address, then payload length with bit 7 set.
    header = chr(addr) + chr(len(data) | 0x80)
    payload = ''.join(chr(cast_to_byte(byte)) for byte in data)
    # Zero-pad the frame out to a fixed 16 characters before sending.
    frame = (header + payload).ljust(16, chr(0))
    self._dev.send_bytes(frame)
constant[Sets an arbitrary register at @addr and subsequent registers depending
on how much data you decide to write. It will automatically fill extra
bytes with zeros. You cannot write more than 14 bytes at a time.
@addr should be a static constant from the Addr class, e.g. Addr.Speed]
assert[compare[call[name[len], parameter[name[data]]] less_or_equal[<=] constant[14]]]
variable[cmd] assign[=] binary_operation[call[name[chr], parameter[name[addr]]] + call[name[chr], parameter[binary_operation[call[name[len], parameter[name[data]]] <ast.BitOr object at 0x7da2590d6aa0> constant[128]]]]]
for taget[name[byte]] in starred[name[data]] begin[:]
<ast.AugAssign object at 0x7da18eb562f0>
<ast.AugAssign object at 0x7da18eb55000>
call[name[self]._dev.send_bytes, parameter[name[cmd]]] | keyword[def] identifier[set_register] ( identifier[self] , identifier[addr] , identifier[data] ):
literal[string]
keyword[assert] identifier[len] ( identifier[data] )<= literal[int] , literal[string]
identifier[cmd] = identifier[chr] ( identifier[addr] )+ identifier[chr] ( identifier[len] ( identifier[data] )| literal[int] )
keyword[for] identifier[byte] keyword[in] identifier[data] :
identifier[cmd] += identifier[chr] ( identifier[cast_to_byte] ( identifier[byte] ))
identifier[cmd] +=( literal[int] - identifier[len] ( identifier[cmd] ))* identifier[chr] ( literal[int] )
identifier[self] . identifier[_dev] . identifier[send_bytes] ( identifier[cmd] ) | def set_register(self, addr, data):
"""Sets an arbitrary register at @addr and subsequent registers depending
on how much data you decide to write. It will automatically fill extra
bytes with zeros. You cannot write more than 14 bytes at a time.
@addr should be a static constant from the Addr class, e.g. Addr.Speed"""
assert len(data) <= 14, 'Cannot write more than 14 bytes at a time'
cmd = chr(addr) + chr(len(data) | 128)
for byte in data:
cmd += chr(cast_to_byte(byte)) # depends on [control=['for'], data=['byte']]
cmd += (16 - len(cmd)) * chr(0)
self._dev.send_bytes(cmd) |
def show_time_as_short_string(self, seconds):
    """
    Converts seconds to a short human-readable duration string
    (seconds -> minutes -> hours -> days -> years) to show the
    complexity of an algorithm.

    Args:
        seconds: duration in seconds (number).

    Returns:
        str such as '30 seconds', '1.5 minutes', '2.0 hours', ...
        Values of a year or more also print a WARNING to stdout.
    """
    MINUTE, HOUR, DAY, YEAR = 60, 3600, 3600 * 24, 3600 * 24 * 365
    if seconds < MINUTE:
        return str(seconds) + ' seconds'
    elif seconds < HOUR:
        return str(round(seconds / MINUTE, 1)) + ' minutes'
    elif seconds < DAY:
        # BUG FIX: hours were previously computed as seconds/(60*24)
        # (i.e. divided by 1440) instead of seconds/3600.
        return str(round(seconds / HOUR, 1)) + ' hours'
    elif seconds < YEAR:
        return str(round(seconds / DAY, 1)) + ' days'
    else:
        # BUG FIX: years were previously divided by 60*24*365 instead of
        # 3600*24*365, both in the warning and the returned string.
        print('WARNING - this will take ' + str(seconds / YEAR) + ' YEARS to run')
        return str(round(seconds / YEAR, 1)) + ' years'
constant[
converts seconds to a string in terms of
seconds -> years to show complexity of algorithm
]
if compare[name[seconds] less[<] constant[60]] begin[:]
return[binary_operation[call[name[str], parameter[name[seconds]]] + constant[ seconds]]] | keyword[def] identifier[show_time_as_short_string] ( identifier[self] , identifier[seconds] ):
literal[string]
keyword[if] identifier[seconds] < literal[int] :
keyword[return] identifier[str] ( identifier[seconds] )+ literal[string]
keyword[elif] identifier[seconds] < literal[int] :
keyword[return] identifier[str] ( identifier[round] ( identifier[seconds] / literal[int] , literal[int] ))+ literal[string]
keyword[elif] identifier[seconds] < literal[int] * literal[int] :
keyword[return] identifier[str] ( identifier[round] ( identifier[seconds] /( literal[int] * literal[int] ), literal[int] ))+ literal[string]
keyword[elif] identifier[seconds] < literal[int] * literal[int] * literal[int] :
keyword[return] identifier[str] ( identifier[round] ( identifier[seconds] /( literal[int] * literal[int] ), literal[int] ))+ literal[string]
keyword[else] :
identifier[print] ( literal[string] + identifier[str] ( identifier[seconds] /( literal[int] * literal[int] * literal[int] ))+ literal[string] )
keyword[return] identifier[str] ( identifier[round] ( identifier[seconds] /( literal[int] * literal[int] * literal[int] ), literal[int] ))+ literal[string] | def show_time_as_short_string(self, seconds):
"""
converts seconds to a string in terms of
seconds -> years to show complexity of algorithm
"""
if seconds < 60:
return str(seconds) + ' seconds' # depends on [control=['if'], data=['seconds']]
elif seconds < 3600:
return str(round(seconds / 60, 1)) + ' minutes' # depends on [control=['if'], data=['seconds']]
elif seconds < 3600 * 24:
return str(round(seconds / (60 * 24), 1)) + ' hours' # depends on [control=['if'], data=['seconds']]
elif seconds < 3600 * 24 * 365:
return str(round(seconds / (3600 * 24), 1)) + ' days' # depends on [control=['if'], data=['seconds']]
else:
print('WARNING - this will take ' + str(seconds / (60 * 24 * 365)) + ' YEARS to run')
return str(round(seconds / (60 * 24 * 365), 1)) + ' years' |
def run(self):
    """Load all paintings into the database
    """
    paintings = PaintingsInputData().load()
    # Harmonise the column naming with the entity schema.
    paintings.rename(columns={'paintingLabel': 'name'}, inplace=True)
    # Resolve each painting's creator to the stored artist entity id,
    # joining on the artist's wiki id.
    artists = models.Entity.query_with_attributes('artist', self.client)
    wiki_to_id = artists.set_index('wiki_id')['id']
    paintings['artist_id'] = paintings['creator_wiki_id'].map(wiki_to_id)
    # Persist entities together with the selected attribute columns.
    self.store(paintings, ['name', 'wiki_id', 'area', 'decade', 'artist_id'])
    self.done()
constant[Load all paintings into the database
]
variable[df] assign[=] call[call[name[PaintingsInputData], parameter[]].load, parameter[]]
call[name[df].rename, parameter[]]
variable[artists] assign[=] call[name[models].Entity.query_with_attributes, parameter[constant[artist], name[self].client]]
call[name[df]][constant[artist_id]] assign[=] call[call[name[df]][constant[creator_wiki_id]].map, parameter[call[call[name[artists].set_index, parameter[constant[wiki_id]]]][constant[id]]]]
variable[attribute_columns] assign[=] list[[<ast.Constant object at 0x7da1b242bc10>, <ast.Constant object at 0x7da1b242b9a0>, <ast.Constant object at 0x7da1b242a500>, <ast.Constant object at 0x7da1b242abf0>, <ast.Constant object at 0x7da1b242a5c0>]]
call[name[self].store, parameter[name[df], name[attribute_columns]]]
call[name[self].done, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[df] = identifier[PaintingsInputData] (). identifier[load] ()
identifier[df] . identifier[rename] ( identifier[columns] ={ literal[string] : literal[string] }, identifier[inplace] = keyword[True] )
identifier[artists] = identifier[models] . identifier[Entity] . identifier[query_with_attributes] ( literal[string] , identifier[self] . identifier[client] )
identifier[df] [ literal[string] ]= identifier[df] [ literal[string] ]. identifier[map] ( identifier[artists] . identifier[set_index] ( literal[string] )[ literal[string] ])
identifier[attribute_columns] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[self] . identifier[store] ( identifier[df] , identifier[attribute_columns] )
identifier[self] . identifier[done] () | def run(self):
"""Load all paintings into the database
"""
df = PaintingsInputData().load()
# rename columns
df.rename(columns={'paintingLabel': 'name'}, inplace=True)
# get artist IDs, map via artist wiki ID
artists = models.Entity.query_with_attributes('artist', self.client)
df['artist_id'] = df['creator_wiki_id'].map(artists.set_index('wiki_id')['id'])
# define attributes to create
attribute_columns = ['name', 'wiki_id', 'area', 'decade', 'artist_id']
# store entities and attributes
self.store(df, attribute_columns)
self.done() |
def plotConnectivity(ax):
    '''make an imshow of the intranetwork connectivity'''
    # Heat map of the connectivity matrix params.C_YX on the given axes.
    im = ax.pcolor(params.C_YX, cmap='hot')
    ax.axis(ax.axis('tight'))
    # Flip the y axis so row 0 ends up at the top, reading like a matrix.
    ax.invert_yaxis()
    ax.xaxis.set_ticks_position('top')
    # Offset ticks by 0.5 so the labels sit at cell centers; assumes
    # C_YX has 8 rows (Y populations) x 9 columns (X populations) — TODO confirm.
    ax.set_xticks(np.arange(9)+0.5)
    ax.set_yticks(np.arange(8)+0.5)
    ax.set_xticklabels(params.X, rotation=270)
    ax.set_yticklabels(params.Y, )
    #ax.set_ylabel(r'to ($Y$)', ha='center')
    #ax.set_xlabel(r'from ($X$)', va='center')
    ax.xaxis.set_label_position('top')
    # Build a slim colorbar axis just right of the main axes, in figure
    # coordinates: (left, bottom, width, height).
    rect = np.array(ax.get_position().bounds)
    rect[0] += rect[2] + 0.01  # shift past the axes' right edge
    rect[2] = 0.01  # narrow colorbar width
    fig = plt.gcf()
    cax = fig.add_axes(rect)
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label('connectivity', ha='center')
constant[make an imshow of the intranetwork connectivity]
variable[im] assign[=] call[name[ax].pcolor, parameter[name[params].C_YX]]
call[name[ax].axis, parameter[call[name[ax].axis, parameter[constant[tight]]]]]
call[name[ax].invert_yaxis, parameter[]]
call[name[ax].xaxis.set_ticks_position, parameter[constant[top]]]
call[name[ax].set_xticks, parameter[binary_operation[call[name[np].arange, parameter[constant[9]]] + constant[0.5]]]]
call[name[ax].set_yticks, parameter[binary_operation[call[name[np].arange, parameter[constant[8]]] + constant[0.5]]]]
call[name[ax].set_xticklabels, parameter[name[params].X]]
call[name[ax].set_yticklabels, parameter[name[params].Y]]
call[name[ax].xaxis.set_label_position, parameter[constant[top]]]
variable[rect] assign[=] call[name[np].array, parameter[call[name[ax].get_position, parameter[]].bounds]]
<ast.AugAssign object at 0x7da1b0be9030>
call[name[rect]][constant[2]] assign[=] constant[0.01]
variable[fig] assign[=] call[name[plt].gcf, parameter[]]
variable[cax] assign[=] call[name[fig].add_axes, parameter[name[rect]]]
variable[cbar] assign[=] call[name[plt].colorbar, parameter[name[im]]]
call[name[cbar].set_label, parameter[constant[connectivity]]] | keyword[def] identifier[plotConnectivity] ( identifier[ax] ):
literal[string]
identifier[im] = identifier[ax] . identifier[pcolor] ( identifier[params] . identifier[C_YX] , identifier[cmap] = literal[string] )
identifier[ax] . identifier[axis] ( identifier[ax] . identifier[axis] ( literal[string] ))
identifier[ax] . identifier[invert_yaxis] ()
identifier[ax] . identifier[xaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[ax] . identifier[set_xticks] ( identifier[np] . identifier[arange] ( literal[int] )+ literal[int] )
identifier[ax] . identifier[set_yticks] ( identifier[np] . identifier[arange] ( literal[int] )+ literal[int] )
identifier[ax] . identifier[set_xticklabels] ( identifier[params] . identifier[X] , identifier[rotation] = literal[int] )
identifier[ax] . identifier[set_yticklabels] ( identifier[params] . identifier[Y] ,)
identifier[ax] . identifier[xaxis] . identifier[set_label_position] ( literal[string] )
identifier[rect] = identifier[np] . identifier[array] ( identifier[ax] . identifier[get_position] (). identifier[bounds] )
identifier[rect] [ literal[int] ]+= identifier[rect] [ literal[int] ]+ literal[int]
identifier[rect] [ literal[int] ]= literal[int]
identifier[fig] = identifier[plt] . identifier[gcf] ()
identifier[cax] = identifier[fig] . identifier[add_axes] ( identifier[rect] )
identifier[cbar] = identifier[plt] . identifier[colorbar] ( identifier[im] , identifier[cax] = identifier[cax] )
identifier[cbar] . identifier[set_label] ( literal[string] , identifier[ha] = literal[string] ) | def plotConnectivity(ax):
"""make an imshow of the intranetwork connectivity"""
im = ax.pcolor(params.C_YX, cmap='hot')
ax.axis(ax.axis('tight'))
ax.invert_yaxis()
ax.xaxis.set_ticks_position('top')
ax.set_xticks(np.arange(9) + 0.5)
ax.set_yticks(np.arange(8) + 0.5)
ax.set_xticklabels(params.X, rotation=270)
ax.set_yticklabels(params.Y)
#ax.set_ylabel(r'to ($Y$)', ha='center')
#ax.set_xlabel(r'from ($X$)', va='center')
ax.xaxis.set_label_position('top')
rect = np.array(ax.get_position().bounds)
rect[0] += rect[2] + 0.01
rect[2] = 0.01
fig = plt.gcf()
cax = fig.add_axes(rect)
cbar = plt.colorbar(im, cax=cax)
cbar.set_label('connectivity', ha='center') |
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]:
    """
    Sort equations by dependence
    """
    jac = ca.jacobian(f, x)
    # Block-triangular form of the Jacobian's sparsity pattern.
    btf_parts = jac.sparsity().btf()
    result = {'J': jac}
    result.update(zip(
        ('nblock', 'rowperm', 'colperm', 'rowblock',
         'colblock', 'coarserow', 'coarsecol'),
        btf_parts))
    return result
constant[
Sort equations by dependence
]
variable[J] assign[=] call[name[ca].jacobian, parameter[name[f], name[x]]]
<ast.Tuple object at 0x7da18ede73d0> assign[=] call[call[name[J].sparsity, parameter[]].btf, parameter[]]
return[dictionary[[<ast.Constant object at 0x7da18ede4640>, <ast.Constant object at 0x7da18ede6d40>, <ast.Constant object at 0x7da18ede49a0>, <ast.Constant object at 0x7da18ede7e80>, <ast.Constant object at 0x7da18ede77c0>, <ast.Constant object at 0x7da18ede6bc0>, <ast.Constant object at 0x7da18ede74c0>, <ast.Constant object at 0x7da18ede5660>], [<ast.Name object at 0x7da18ede7af0>, <ast.Name object at 0x7da18ede5d80>, <ast.Name object at 0x7da18ede6ec0>, <ast.Name object at 0x7da18ede4610>, <ast.Name object at 0x7da18ede6860>, <ast.Name object at 0x7da18ede7b50>, <ast.Name object at 0x7da18ede6aa0>, <ast.Name object at 0x7da18ede7520>]]] | keyword[def] identifier[blt] ( identifier[f] : identifier[List] [ identifier[SYM] ], identifier[x] : identifier[List] [ identifier[SYM] ])-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[J] = identifier[ca] . identifier[jacobian] ( identifier[f] , identifier[x] )
identifier[nblock] , identifier[rowperm] , identifier[colperm] , identifier[rowblock] , identifier[colblock] , identifier[coarserow] , identifier[coarsecol] = identifier[J] . identifier[sparsity] (). identifier[btf] ()
keyword[return] {
literal[string] : identifier[J] ,
literal[string] : identifier[nblock] ,
literal[string] : identifier[rowperm] ,
literal[string] : identifier[colperm] ,
literal[string] : identifier[rowblock] ,
literal[string] : identifier[colblock] ,
literal[string] : identifier[coarserow] ,
literal[string] : identifier[coarsecol]
} | def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]:
"""
Sort equations by dependence
"""
J = ca.jacobian(f, x)
(nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol) = J.sparsity().btf()
return {'J': J, 'nblock': nblock, 'rowperm': rowperm, 'colperm': colperm, 'rowblock': rowblock, 'colblock': colblock, 'coarserow': coarserow, 'coarsecol': coarsecol} |
def json_to_labels(segments_json):
    """Extract the segment labels from a JSON annotation file.

    The file must contain a top-level ``"segments"`` list whose entries
    each carry a ``"label"`` key.  String labels are mapped to integer
    ids in order of first appearance: the first distinct label becomes
    0, the next 1, and so on.

    Parameters
    ----------
    segments_json : str
        Path to the JSON file.

    Returns
    -------
    np.ndarray
        Integer label id for every segment, in file order.
    """
    # Context manager guarantees the handle is closed even if json.load
    # raises; the original called close() outside any finally and leaked
    # the file descriptor on a parse error.
    with open(segments_json) as f:
        segments = json.load(f)["segments"]
    labels = []
    str_labels = []
    for segment in segments:
        label = segment["label"]
        if label not in str_labels:
            str_labels.append(label)
        # list.index returns the id assigned at first appearance; this
        # replaces the original O(n) np.where scan over an array rebuilt
        # on every duplicate label.
        labels.append(str_labels.index(label))
    return np.asarray(labels)
constant[Extracts the labels from a json file and puts them into
an np array.]
variable[f] assign[=] call[name[open], parameter[name[segments_json]]]
variable[segments] assign[=] call[call[name[json].load, parameter[name[f]]]][constant[segments]]
variable[labels] assign[=] list[[]]
variable[str_labels] assign[=] list[[]]
for taget[name[segment]] in starred[name[segments]] begin[:]
if <ast.UnaryOp object at 0x7da1b02434c0> begin[:]
call[name[str_labels].append, parameter[call[name[segment]][constant[label]]]]
call[name[labels].append, parameter[binary_operation[call[name[len], parameter[name[str_labels]]] - constant[1]]]]
call[name[f].close, parameter[]]
return[call[name[np].asarray, parameter[name[labels]]]] | keyword[def] identifier[json_to_labels] ( identifier[segments_json] ):
literal[string]
identifier[f] = identifier[open] ( identifier[segments_json] )
identifier[segments] = identifier[json] . identifier[load] ( identifier[f] )[ literal[string] ]
identifier[labels] =[]
identifier[str_labels] =[]
keyword[for] identifier[segment] keyword[in] identifier[segments] :
keyword[if] keyword[not] identifier[segment] [ literal[string] ] keyword[in] identifier[str_labels] :
identifier[str_labels] . identifier[append] ( identifier[segment] [ literal[string] ])
identifier[labels] . identifier[append] ( identifier[len] ( identifier[str_labels] )- literal[int] )
keyword[else] :
identifier[label_idx] = identifier[np] . identifier[where] ( identifier[np] . identifier[asarray] ( identifier[str_labels] )== identifier[segment] [ literal[string] ])[ literal[int] ][ literal[int] ]
identifier[labels] . identifier[append] ( identifier[label_idx] )
identifier[f] . identifier[close] ()
keyword[return] identifier[np] . identifier[asarray] ( identifier[labels] ) | def json_to_labels(segments_json):
"""Extracts the labels from a json file and puts them into
an np array."""
f = open(segments_json)
segments = json.load(f)['segments']
labels = []
str_labels = []
for segment in segments:
if not segment['label'] in str_labels:
str_labels.append(segment['label'])
labels.append(len(str_labels) - 1) # depends on [control=['if'], data=[]]
else:
label_idx = np.where(np.asarray(str_labels) == segment['label'])[0][0]
labels.append(label_idx) # depends on [control=['for'], data=['segment']]
f.close()
return np.asarray(labels) |
def raise_for_redefined_annotation(self, line: str, position: int, annotation: str) -> None:
    """Raise an exception if the given annotation is already defined.

    Does nothing unless redefinition is disallowed on this parser AND the
    annotation has already been seen.

    :raises: RedefinedAnnotationError
    """
    # Guard clauses: bail out early on either permitted condition.
    if not self.disallow_redefinition:
        return
    if not self.has_annotation(annotation):
        return
    raise RedefinedAnnotationError(self.get_line_number(), line, position, annotation)
constant[Raise an exception if the given annotation is already defined.
:raises: RedefinedAnnotationError
]
if <ast.BoolOp object at 0x7da1b0c42320> begin[:]
<ast.Raise object at 0x7da1b0c40ca0> | keyword[def] identifier[raise_for_redefined_annotation] ( identifier[self] , identifier[line] : identifier[str] , identifier[position] : identifier[int] , identifier[annotation] : identifier[str] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[disallow_redefinition] keyword[and] identifier[self] . identifier[has_annotation] ( identifier[annotation] ):
keyword[raise] identifier[RedefinedAnnotationError] ( identifier[self] . identifier[get_line_number] (), identifier[line] , identifier[position] , identifier[annotation] ) | def raise_for_redefined_annotation(self, line: str, position: int, annotation: str) -> None:
"""Raise an exception if the given annotation is already defined.
:raises: RedefinedAnnotationError
"""
if self.disallow_redefinition and self.has_annotation(annotation):
raise RedefinedAnnotationError(self.get_line_number(), line, position, annotation) # depends on [control=['if'], data=[]] |
def impulse_deltav_plummerstream(v, y, b, w, GSigma, rs, tmin=None, tmax=None):
    """
    NAME:
       impulse_deltav_plummerstream
    PURPOSE:
       calculate the delta velocity due to an encounter with a
       Plummer-softened stream in the impulse approximation; allows for
       arbitrary velocity vectors, but y is input as the position along
       the stream
    INPUT:
       v - velocity of the stream (nstar,3)
       y - position along the stream (nstar)
       b - impact parameter
       w - velocity of the Plummer sphere (3)
       GSigma - surface density of the Plummer-softened stream (in
                natural units); should be a function of time
       rs - size of the Plummer sphere
       tmin, tmax= (None) minimum and maximum time to consider for
                   GSigma (both need to be set)
    OUTPUT:
       deltav (nstar,3)
    HISTORY:
       2015-11-14 - Written - Bovy (UofT)
    """
    if len(v.shape) == 1:
        v = numpy.reshape(v, (1, 3))
        y = numpy.reshape(y, (1, 1))
    # BUG FIX: the original tested ``tmax is None or tmax is None`` and
    # therefore silently accepted a missing tmin; check both bounds.
    if tmin is None or tmax is None:
        raise ValueError("tmin= and tmax= need to be set")
    nv = v.shape[0]
    vmag = numpy.sqrt(numpy.sum(v**2., axis=1))
    # Build the rotation matrices and their inverse
    rot = _rotation_vy(v)
    rotinv = _rotation_vy(v, inv=True)
    # Rotate the perturbing stream's velocity to the stream frames
    tilew = numpy.sum(rot*numpy.tile(w, (nv, 3, 1)), axis=-1)
    # Decompose the relative velocity into components perpendicular and
    # parallel to the stream (use similar expressions to Denis')
    wperp = numpy.sqrt(tilew[:, 0]**2.+tilew[:, 2]**2.)
    wpar = numpy.sqrt(numpy.sum(v**2., axis=1))-tilew[:, 1]
    wmag2 = wpar**2.+wperp2 if False else wpar**2.+wperp**2.
    wmag = numpy.sqrt(wmag2)
    b2 = b**2.
    rs2 = rs**2.
    wperp2 = wperp**2.
    # Time-integrate the impulse for each star and each component in the
    # rotated (stream-aligned) frame
    out = numpy.empty_like(v)
    out[:, 0] = [1./wmag[ii]
                 * integrate.quad(_astream_integrand_x,
                                  tmin, tmax,
                                  args=(y[ii], vmag[ii], b, tilew[ii],
                                        b2, wmag2[ii], wperp[ii],
                                        wperp2[ii], wpar[ii],
                                        GSigma, rs2))[0]
                 for ii in range(len(y))]
    out[:, 1] = [-wperp2[ii]/wmag[ii]
                 * integrate.quad(_astream_integrand_y,
                                  tmin, tmax,
                                  args=(y[ii], vmag[ii],
                                        b2, wmag2[ii], wperp2[ii],
                                        GSigma, rs2))[0]
                 for ii in range(len(y))]
    out[:, 2] = [1./wmag[ii]
                 * integrate.quad(_astream_integrand_z,
                                  tmin, tmax,
                                  args=(y[ii], vmag[ii], b, tilew[ii],
                                        b2, wmag2[ii], wperp[ii],
                                        wperp2[ii], wpar[ii],
                                        GSigma, rs2))[0]
                 for ii in range(len(y))]
    # Rotate back to the original frame
    return 2.0*numpy.sum(
        rotinv*numpy.swapaxes(numpy.tile(out.T, (3, 1, 1)).T, 1, 2), axis=-1)
constant[
NAME:
impulse_deltav_plummerstream
PURPOSE:
calculate the delta velocity to due an encounter with a Plummer-softened stream in the impulse approximation; allows for arbitrary velocity vectors, but y is input as the position along the stream
INPUT:
v - velocity of the stream (nstar,3)
y - position along the stream (nstar)
b - impact parameter
w - velocity of the Plummer sphere (3)
GSigma - surface density of the Plummer-softened stream (in natural units); should be a function of time
rs - size of the Plummer sphere
tmin, tmax= (None) minimum and maximum time to consider for GSigma (need to be set)
OUTPUT:
deltav (nstar,3)
HISTORY:
2015-11-14 - Written - Bovy (UofT)
]
if compare[call[name[len], parameter[name[v].shape]] equal[==] constant[1]] begin[:]
variable[v] assign[=] call[name[numpy].reshape, parameter[name[v], tuple[[<ast.Constant object at 0x7da1b0ea5a50>, <ast.Constant object at 0x7da1b0ea5a80>]]]]
variable[y] assign[=] call[name[numpy].reshape, parameter[name[y], tuple[[<ast.Constant object at 0x7da1b0ea5390>, <ast.Constant object at 0x7da1b0ea5420>]]]]
if <ast.BoolOp object at 0x7da1b0ea5720> begin[:]
<ast.Raise object at 0x7da1b0ea7460>
variable[nv] assign[=] call[name[v].shape][constant[0]]
variable[vmag] assign[=] call[name[numpy].sqrt, parameter[call[name[numpy].sum, parameter[binary_operation[name[v] ** constant[2.0]]]]]]
variable[rot] assign[=] call[name[_rotation_vy], parameter[name[v]]]
variable[rotinv] assign[=] call[name[_rotation_vy], parameter[name[v]]]
variable[tilew] assign[=] call[name[numpy].sum, parameter[binary_operation[name[rot] * call[name[numpy].tile, parameter[name[w], tuple[[<ast.Name object at 0x7da1b0ea7100>, <ast.Constant object at 0x7da1b0ea71f0>, <ast.Constant object at 0x7da1b0ea7220>]]]]]]]
variable[wperp] assign[=] call[name[numpy].sqrt, parameter[binary_operation[binary_operation[call[name[tilew]][tuple[[<ast.Slice object at 0x7da1b0ea6710>, <ast.Constant object at 0x7da1b0ea6e00>]]] ** constant[2.0]] + binary_operation[call[name[tilew]][tuple[[<ast.Slice object at 0x7da1b0ea7b20>, <ast.Constant object at 0x7da1b0ea7ac0>]]] ** constant[2.0]]]]]
variable[wpar] assign[=] binary_operation[call[name[numpy].sqrt, parameter[call[name[numpy].sum, parameter[binary_operation[name[v] ** constant[2.0]]]]]] - call[name[tilew]][tuple[[<ast.Slice object at 0x7da1b0ea6e30>, <ast.Constant object at 0x7da1b0ea6ec0>]]]]
variable[wmag2] assign[=] binary_operation[binary_operation[name[wpar] ** constant[2.0]] + binary_operation[name[wperp] ** constant[2.0]]]
variable[wmag] assign[=] call[name[numpy].sqrt, parameter[name[wmag2]]]
variable[b2] assign[=] binary_operation[name[b] ** constant[2.0]]
variable[rs2] assign[=] binary_operation[name[rs] ** constant[2.0]]
variable[wperp2] assign[=] binary_operation[name[wperp] ** constant[2.0]]
variable[out] assign[=] call[name[numpy].empty_like, parameter[name[v]]]
call[name[out]][tuple[[<ast.Slice object at 0x7da1b0ea4190>, <ast.Constant object at 0x7da1b0ea42e0>]]] assign[=] <ast.ListComp object at 0x7da1b0ea4280>
call[name[out]][tuple[[<ast.Slice object at 0x7da1b0ea62c0>, <ast.Constant object at 0x7da1b0ea5870>]]] assign[=] <ast.ListComp object at 0x7da1b0ea4550>
call[name[out]][tuple[[<ast.Slice object at 0x7da1b0ea7b80>, <ast.Constant object at 0x7da1b0ea7bb0>]]] assign[=] <ast.ListComp object at 0x7da1b0ea7ca0>
return[binary_operation[constant[2.0] * call[name[numpy].sum, parameter[binary_operation[name[rotinv] * call[name[numpy].swapaxes, parameter[call[name[numpy].tile, parameter[name[out].T, tuple[[<ast.Constant object at 0x7da18fe938b0>, <ast.Constant object at 0x7da18fe92b90>, <ast.Constant object at 0x7da18fe909d0>]]]].T, constant[1], constant[2]]]]]]]] | keyword[def] identifier[impulse_deltav_plummerstream] ( identifier[v] , identifier[y] , identifier[b] , identifier[w] , identifier[GSigma] , identifier[rs] , identifier[tmin] = keyword[None] , identifier[tmax] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[v] . identifier[shape] )== literal[int] :
identifier[v] = identifier[numpy] . identifier[reshape] ( identifier[v] ,( literal[int] , literal[int] ))
identifier[y] = identifier[numpy] . identifier[reshape] ( identifier[y] ,( literal[int] , literal[int] ))
keyword[if] identifier[tmax] keyword[is] keyword[None] keyword[or] identifier[tmax] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[nv] = identifier[v] . identifier[shape] [ literal[int] ]
identifier[vmag] = identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[sum] ( identifier[v] ** literal[int] , identifier[axis] = literal[int] ))
identifier[rot] = identifier[_rotation_vy] ( identifier[v] )
identifier[rotinv] = identifier[_rotation_vy] ( identifier[v] , identifier[inv] = keyword[True] )
identifier[tilew] = identifier[numpy] . identifier[sum] ( identifier[rot] * identifier[numpy] . identifier[tile] ( identifier[w] ,( identifier[nv] , literal[int] , literal[int] )), identifier[axis] =- literal[int] )
identifier[wperp] = identifier[numpy] . identifier[sqrt] ( identifier[tilew] [:, literal[int] ]** literal[int] + identifier[tilew] [:, literal[int] ]** literal[int] )
identifier[wpar] = identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[sum] ( identifier[v] ** literal[int] , identifier[axis] = literal[int] ))- identifier[tilew] [:, literal[int] ]
identifier[wmag2] = identifier[wpar] ** literal[int] + identifier[wperp] ** literal[int]
identifier[wmag] = identifier[numpy] . identifier[sqrt] ( identifier[wmag2] )
identifier[b2] = identifier[b] ** literal[int]
identifier[rs2] = identifier[rs] ** literal[int]
identifier[wperp2] = identifier[wperp] ** literal[int]
identifier[out] = identifier[numpy] . identifier[empty_like] ( identifier[v] )
identifier[out] [:, literal[int] ]=[ literal[int] / identifier[wmag] [ identifier[ii] ]* identifier[integrate] . identifier[quad] ( identifier[_astream_integrand_x] ,
identifier[tmin] , identifier[tmax] , identifier[args] =( identifier[y] [ identifier[ii] ],
identifier[vmag] [ identifier[ii] ], identifier[b] , identifier[tilew] [ identifier[ii] ],
identifier[b2] , identifier[wmag2] [ identifier[ii] ],
identifier[wperp] [ identifier[ii] ], identifier[wperp2] [ identifier[ii] ],
identifier[wpar] [ identifier[ii] ],
identifier[GSigma] , identifier[rs2] ))[ literal[int] ]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[len] ( identifier[y] ))]
identifier[out] [:, literal[int] ]=[- identifier[wperp2] [ identifier[ii] ]/ identifier[wmag] [ identifier[ii] ]* identifier[integrate] . identifier[quad] ( identifier[_astream_integrand_y] ,
identifier[tmin] , identifier[tmax] , identifier[args] =( identifier[y] [ identifier[ii] ],
identifier[vmag] [ identifier[ii] ],
identifier[b2] , identifier[wmag2] [ identifier[ii] ],
identifier[wperp2] [ identifier[ii] ],
identifier[GSigma] , identifier[rs2] ))[ literal[int] ]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[len] ( identifier[y] ))]
identifier[out] [:, literal[int] ]=[ literal[int] / identifier[wmag] [ identifier[ii] ]* identifier[integrate] . identifier[quad] ( identifier[_astream_integrand_z] ,
identifier[tmin] , identifier[tmax] , identifier[args] =( identifier[y] [ identifier[ii] ],
identifier[vmag] [ identifier[ii] ], identifier[b] , identifier[tilew] [ identifier[ii] ],
identifier[b2] , identifier[wmag2] [ identifier[ii] ],
identifier[wperp] [ identifier[ii] ], identifier[wperp2] [ identifier[ii] ],
identifier[wpar] [ identifier[ii] ],
identifier[GSigma] , identifier[rs2] ))[ literal[int] ]
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[len] ( identifier[y] ))]
keyword[return] literal[int] * identifier[numpy] . identifier[sum] ( identifier[rotinv] * identifier[numpy] . identifier[swapaxes] ( identifier[numpy] . identifier[tile] ( identifier[out] . identifier[T] ,( literal[int] , literal[int] , literal[int] )). identifier[T] , literal[int] , literal[int] ), identifier[axis] =- literal[int] ) | def impulse_deltav_plummerstream(v, y, b, w, GSigma, rs, tmin=None, tmax=None):
"""
NAME:
impulse_deltav_plummerstream
PURPOSE:
calculate the delta velocity to due an encounter with a Plummer-softened stream in the impulse approximation; allows for arbitrary velocity vectors, but y is input as the position along the stream
INPUT:
v - velocity of the stream (nstar,3)
y - position along the stream (nstar)
b - impact parameter
w - velocity of the Plummer sphere (3)
GSigma - surface density of the Plummer-softened stream (in natural units); should be a function of time
rs - size of the Plummer sphere
tmin, tmax= (None) minimum and maximum time to consider for GSigma (need to be set)
OUTPUT:
deltav (nstar,3)
HISTORY:
2015-11-14 - Written - Bovy (UofT)
"""
if len(v.shape) == 1:
v = numpy.reshape(v, (1, 3))
y = numpy.reshape(y, (1, 1)) # depends on [control=['if'], data=[]]
if tmax is None or tmax is None:
raise ValueError('tmin= and tmax= need to be set') # depends on [control=['if'], data=[]]
nv = v.shape[0]
vmag = numpy.sqrt(numpy.sum(v ** 2.0, axis=1))
# Build the rotation matrices and their inverse
rot = _rotation_vy(v)
rotinv = _rotation_vy(v, inv=True)
# Rotate the perturbing stream's velocity to the stream frames
tilew = numpy.sum(rot * numpy.tile(w, (nv, 3, 1)), axis=-1)
# Use similar expressions to Denis'
wperp = numpy.sqrt(tilew[:, 0] ** 2.0 + tilew[:, 2] ** 2.0)
wpar = numpy.sqrt(numpy.sum(v ** 2.0, axis=1)) - tilew[:, 1]
wmag2 = wpar ** 2.0 + wperp ** 2.0
wmag = numpy.sqrt(wmag2)
b2 = b ** 2.0
rs2 = rs ** 2.0
wperp2 = wperp ** 2.0
out = numpy.empty_like(v)
out[:, 0] = [1.0 / wmag[ii] * integrate.quad(_astream_integrand_x, tmin, tmax, args=(y[ii], vmag[ii], b, tilew[ii], b2, wmag2[ii], wperp[ii], wperp2[ii], wpar[ii], GSigma, rs2))[0] for ii in range(len(y))]
out[:, 1] = [-wperp2[ii] / wmag[ii] * integrate.quad(_astream_integrand_y, tmin, tmax, args=(y[ii], vmag[ii], b2, wmag2[ii], wperp2[ii], GSigma, rs2))[0] for ii in range(len(y))]
out[:, 2] = [1.0 / wmag[ii] * integrate.quad(_astream_integrand_z, tmin, tmax, args=(y[ii], vmag[ii], b, tilew[ii], b2, wmag2[ii], wperp[ii], wperp2[ii], wpar[ii], GSigma, rs2))[0] for ii in range(len(y))]
# Rotate back to the original frame
return 2.0 * numpy.sum(rotinv * numpy.swapaxes(numpy.tile(out.T, (3, 1, 1)).T, 1, 2), axis=-1) |
def createCluster(self, hzVersion, xmlconfig):
        """
        Thrift-generated RPC client stub: request creation of a cluster
        with the given version identifier from the given XML
        configuration, blocking until the server replies.
        (Presumably a Hazelcast cluster, given ``hzVersion`` -- confirm
        against the service IDL.)

        Parameters:
         - hzVersion
         - xmlconfig
        """
        # Standard generated request/response pair: serialize and send
        # the call, then block on the matching reply.
        self.send_createCluster(hzVersion, xmlconfig)
        return self.recv_createCluster()
constant[
Parameters:
- hzVersion
- xmlconfig
]
call[name[self].send_createCluster, parameter[name[hzVersion], name[xmlconfig]]]
return[call[name[self].recv_createCluster, parameter[]]] | keyword[def] identifier[createCluster] ( identifier[self] , identifier[hzVersion] , identifier[xmlconfig] ):
literal[string]
identifier[self] . identifier[send_createCluster] ( identifier[hzVersion] , identifier[xmlconfig] )
keyword[return] identifier[self] . identifier[recv_createCluster] () | def createCluster(self, hzVersion, xmlconfig):
"""
Parameters:
- hzVersion
- xmlconfig
"""
self.send_createCluster(hzVersion, xmlconfig)
return self.recv_createCluster() |
def parse_verbose(self, line):
    """
    Parse one line of verbose ping output.

    A line looks like ``host : 1.2 - 3.4`` where each field after the
    colon is either a round-trip time or ``-`` for a lost probe.

    Returns a dict with keys ``host``, ``cnt`` (number of probes),
    ``loss`` (lost fraction) and ``data`` (list of float latencies),
    plus ``min``/``max``/``avg``/``last`` when at least one probe got a
    reply.  On any parse failure the error is logged and None is
    returned implicitly (callers must tolerate None).
    """
    try:
        logging.debug(line)
        (host, pings) = line.split(' : ')
        pings = pings.strip().split(' ')
        cnt = len(pings)
        # Keep only the probes that actually came back ('-' marks loss).
        # The original initialized cnt/lost to 0 and immediately
        # overwrote them, and built this list with a manual append loop.
        times = [float(latency) for latency in pings if latency != '-']
        lost = cnt - len(times)
        loss = lost / float(cnt) if lost else 0.0
        rv = {
            'host': host.strip(),
            'cnt': cnt,
            'loss': loss,
            'data': times,
        }
        if times:
            rv['min'] = min(times)
            rv['max'] = max(times)
            rv['avg'] = sum(times) / len(times)
            rv['last'] = times[-1]
        return rv
    except Exception as e:
        # Best-effort parser: one malformed line must not abort the run,
        # so swallow and log (behavior preserved from the original).
        logging.error("failed to get data: {}".format(e))
constant[
parse output from verbose format
]
<ast.Try object at 0x7da18c4cfc40> | keyword[def] identifier[parse_verbose] ( identifier[self] , identifier[line] ):
literal[string]
keyword[try] :
identifier[logging] . identifier[debug] ( identifier[line] )
( identifier[host] , identifier[pings] )= identifier[line] . identifier[split] ( literal[string] )
identifier[cnt] = literal[int]
identifier[lost] = literal[int]
identifier[times] =[]
identifier[pings] = identifier[pings] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[cnt] = identifier[len] ( identifier[pings] )
keyword[for] identifier[latency] keyword[in] identifier[pings] :
keyword[if] identifier[latency] == literal[string] :
keyword[continue]
identifier[times] . identifier[append] ( identifier[float] ( identifier[latency] ))
identifier[lost] = identifier[cnt] - identifier[len] ( identifier[times] )
keyword[if] identifier[lost] :
identifier[loss] = identifier[lost] / identifier[float] ( identifier[cnt] )
keyword[else] :
identifier[loss] = literal[int]
identifier[rv] ={
literal[string] : identifier[host] . identifier[strip] (),
literal[string] : identifier[cnt] ,
literal[string] : identifier[loss] ,
literal[string] : identifier[times] ,
}
keyword[if] identifier[times] :
identifier[rv] [ literal[string] ]= identifier[min] ( identifier[times] )
identifier[rv] [ literal[string] ]= identifier[max] ( identifier[times] )
identifier[rv] [ literal[string] ]= identifier[sum] ( identifier[times] )/ identifier[len] ( identifier[times] )
identifier[rv] [ literal[string] ]= identifier[times] [- literal[int] ]
keyword[return] identifier[rv]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] )) | def parse_verbose(self, line):
"""
parse output from verbose format
"""
try:
logging.debug(line)
(host, pings) = line.split(' : ')
cnt = 0
lost = 0
times = []
pings = pings.strip().split(' ')
cnt = len(pings)
for latency in pings:
if latency == '-':
continue # depends on [control=['if'], data=[]]
times.append(float(latency)) # depends on [control=['for'], data=['latency']]
lost = cnt - len(times)
if lost:
loss = lost / float(cnt) # depends on [control=['if'], data=[]]
else:
loss = 0.0
rv = {'host': host.strip(), 'cnt': cnt, 'loss': loss, 'data': times}
if times:
rv['min'] = min(times)
rv['max'] = max(times)
rv['avg'] = sum(times) / len(times)
rv['last'] = times[-1] # depends on [control=['if'], data=[]]
return rv # depends on [control=['try'], data=[]]
except Exception as e:
logging.error('failed to get data: {}'.format(e)) # depends on [control=['except'], data=['e']] |
def unassigned(data, as_json=False):
    """ https://sendgrid.com/docs/API_Reference/api_v3.html#ip-addresses
    The /ips rest endpoint returns information about the IP addresses
    and the usernames assigned to an IP.
    Report the allocated IP addresses that currently have zero subusers
    assigned.
    data (response.body from sg.client.ips.get())
    as_json False -> get list of dicts
            True -> get json object
    Anything that is not a list yields an empty result.
    example:
        sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
        params = {
            'subuser': 'test_string',
            'ip': 'test_string',
            'limit': 1,
            'exclude_whitelabels':
            'true', 'offset': 1
        }
        response = sg.client.ips.get(query_params=params)
        if response.status_code == 201:
            data = response.body
            unused = unassigned(data)
    """
    # Non-list input (e.g. an error payload) maps to an empty result.
    if not isinstance(data, list):
        return format_ret(set(), as_json=as_json)
    # One pass: collect every IP whose subuser list is empty.
    idle_ips = {entry["ip"] for entry in data if len(entry["subusers"]) == 0}
    return format_ret(idle_ips, as_json=as_json)
constant[ https://sendgrid.com/docs/API_Reference/api_v3.html#ip-addresses
The /ips rest endpoint returns information about the IP addresses
and the usernames assigned to an IP
unassigned returns a listing of the IP addresses that are allocated
but have 0 users assigned
data (response.body from sg.client.ips.get())
as_json False -> get list of dicts
True -> get json object
example:
sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
params = {
'subuser': 'test_string',
'ip': 'test_string',
'limit': 1,
'exclude_whitelabels':
'true', 'offset': 1
}
response = sg.client.ips.get(query_params=params)
if response.status_code == 201:
data = response.body
unused = unassigned(data)
]
variable[no_subusers] assign[=] call[name[set], parameter[]]
if <ast.UnaryOp object at 0x7da18f00cb20> begin[:]
return[call[name[format_ret], parameter[name[no_subusers]]]]
for taget[name[current]] in starred[name[data]] begin[:]
variable[num_subusers] assign[=] call[name[len], parameter[call[name[current]][constant[subusers]]]]
if compare[name[num_subusers] equal[==] constant[0]] begin[:]
variable[current_ip] assign[=] call[name[current]][constant[ip]]
call[name[no_subusers].add, parameter[name[current_ip]]]
variable[ret_val] assign[=] call[name[format_ret], parameter[name[no_subusers]]]
return[name[ret_val]] | keyword[def] identifier[unassigned] ( identifier[data] , identifier[as_json] = keyword[False] ):
literal[string]
identifier[no_subusers] = identifier[set] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[list] ):
keyword[return] identifier[format_ret] ( identifier[no_subusers] , identifier[as_json] = identifier[as_json] )
keyword[for] identifier[current] keyword[in] identifier[data] :
identifier[num_subusers] = identifier[len] ( identifier[current] [ literal[string] ])
keyword[if] identifier[num_subusers] == literal[int] :
identifier[current_ip] = identifier[current] [ literal[string] ]
identifier[no_subusers] . identifier[add] ( identifier[current_ip] )
identifier[ret_val] = identifier[format_ret] ( identifier[no_subusers] , identifier[as_json] = identifier[as_json] )
keyword[return] identifier[ret_val] | def unassigned(data, as_json=False):
""" https://sendgrid.com/docs/API_Reference/api_v3.html#ip-addresses
The /ips rest endpoint returns information about the IP addresses
and the usernames assigned to an IP
unassigned returns a listing of the IP addresses that are allocated
but have 0 users assigned
data (response.body from sg.client.ips.get())
as_json False -> get list of dicts
True -> get json object
example:
sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
params = {
'subuser': 'test_string',
'ip': 'test_string',
'limit': 1,
'exclude_whitelabels':
'true', 'offset': 1
}
response = sg.client.ips.get(query_params=params)
if response.status_code == 201:
data = response.body
unused = unassigned(data)
"""
no_subusers = set()
if not isinstance(data, list):
return format_ret(no_subusers, as_json=as_json) # depends on [control=['if'], data=[]]
for current in data:
num_subusers = len(current['subusers'])
if num_subusers == 0:
current_ip = current['ip']
no_subusers.add(current_ip) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['current']]
ret_val = format_ret(no_subusers, as_json=as_json)
return ret_val |
def process(self, quoted=False):
    """Parse ``self.raw`` as a URL and cache its parts on the instance.

    Sets ``p`` (the full ParseResult), ``scheme``, ``netloc``, ``opath``
    (the path string, percent-quoted when ``quoted`` is True), ``path``
    (non-empty path segments), ``params``, ``query`` (parsed with blank
    values kept) and ``fragment``.
    """
    parsed = urlparse(self.raw)
    self.p = parsed
    self.scheme = parsed.scheme
    self.netloc = parsed.netloc
    # Optionally percent-encode the path before splitting it.
    path_str = quote(parsed.path) if quoted else parsed.path
    self.opath = path_str
    self.path = [segment for segment in path_str.split('/') if segment]
    self.params = parsed.params
    self.query = parse_qs(parsed.query, keep_blank_values=True)
    self.fragment = parsed.fragment
constant[ Parse an URL ]
name[self].p assign[=] call[name[urlparse], parameter[name[self].raw]]
name[self].scheme assign[=] name[self].p.scheme
name[self].netloc assign[=] name[self].p.netloc
name[self].opath assign[=] <ast.IfExp object at 0x7da1b1038610>
name[self].path assign[=] <ast.ListComp object at 0x7da1b1038bb0>
name[self].params assign[=] name[self].p.params
name[self].query assign[=] call[name[parse_qs], parameter[name[self].p.query]]
name[self].fragment assign[=] name[self].p.fragment | keyword[def] identifier[process] ( identifier[self] , identifier[quoted] = keyword[False] ):
literal[string]
identifier[self] . identifier[p] = identifier[urlparse] ( identifier[self] . identifier[raw] )
identifier[self] . identifier[scheme] = identifier[self] . identifier[p] . identifier[scheme]
identifier[self] . identifier[netloc] = identifier[self] . identifier[p] . identifier[netloc]
identifier[self] . identifier[opath] = identifier[self] . identifier[p] . identifier[path] keyword[if] keyword[not] identifier[quoted] keyword[else] identifier[quote] ( identifier[self] . identifier[p] . identifier[path] )
identifier[self] . identifier[path] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[opath] . identifier[split] ( literal[string] ) keyword[if] identifier[x] ]
identifier[self] . identifier[params] = identifier[self] . identifier[p] . identifier[params]
identifier[self] . identifier[query] = identifier[parse_qs] ( identifier[self] . identifier[p] . identifier[query] , identifier[keep_blank_values] = keyword[True] )
identifier[self] . identifier[fragment] = identifier[self] . identifier[p] . identifier[fragment] | def process(self, quoted=False):
""" Parse an URL """
self.p = urlparse(self.raw)
self.scheme = self.p.scheme
self.netloc = self.p.netloc
self.opath = self.p.path if not quoted else quote(self.p.path)
self.path = [x for x in self.opath.split('/') if x]
self.params = self.p.params
self.query = parse_qs(self.p.query, keep_blank_values=True)
self.fragment = self.p.fragment |
def remove_root_gradebook(self, gradebook_id):
    """Removes a root gradebook.

    arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
    raise: NotFound - ``gradebook_id`` is not a root
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors osid.resource.BinHierarchyDesignSession.remove_root_bin:
    # when no catalog session is configured, fall back to the hierarchy
    # session; otherwise delegate to the catalog session.
    if self._catalog_session is None:
        return self._hierarchy_session.remove_root(id_=gradebook_id)
    return self._catalog_session.remove_root_catalog(catalog_id=gradebook_id)
constant[Removes a root gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
raise: NotFound - ``gradebook_id`` is not a root
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.remove_root_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.remove_root, parameter[]]] | keyword[def] identifier[remove_root_gradebook] ( identifier[self] , identifier[gradebook_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[remove_root_catalog] ( identifier[catalog_id] = identifier[gradebook_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[remove_root] ( identifier[id_] = identifier[gradebook_id] ) | def remove_root_gradebook(self, gradebook_id):
"""Removes a root gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
raise: NotFound - ``gradebook_id`` is not a root
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=gradebook_id) # depends on [control=['if'], data=[]]
return self._hierarchy_session.remove_root(id_=gradebook_id) |
def choose(self, context_ms=None):
    """Return the next point to explore, chosen by the interest model.

    context_ms -- optional context vector whose interpretation depends on
        self.context_mode (None, mode 'mdmsds' or mode 'mcs').

    Falls back to a uniformly random point within self.conf.bounds when
    the interest model is not bootstrapped yet (ExplautoBootstrapError).

    NOTE(review): if context_mode is set but its 'mode' is neither
    'mdmsds' nor 'mcs', x is never assigned and the final return raises
    UnboundLocalError -- confirm callers never pass other modes.
    """
    try:
        if self.context_mode is None:
            # Context-free sampling over the exploration dimensions.
            x = self.interest_model.sample()
        else:
            if self.context_mode["mode"] == 'mdmsds':
                if self.expl_dims == self.conf.s_dims:
                    # Exploring in sensory space: condition the sample on
                    # the second half of context_ms (assumes context_ms
                    # stacks two halves of m_ndims entries -- TODO confirm).
                    x = np.hstack((context_ms[self.conf.m_ndims//2:], self.interest_model.sample_given_context(context_ms[self.conf.m_ndims//2:], range(self.conf.s_ndims//2))))
                else:
                    if self.context_mode['choose_m']:
                        # Motor point chosen freely, ignoring the context.
                        x = self.interest_model.sample()
                    else:
                        # Condition the motor sample on the first half of
                        # context_ms.
                        x = np.hstack((context_ms[:self.conf.m_ndims//2], self.interest_model.sample_given_context(context_ms[:self.conf.m_ndims//2], range(self.conf.m_ndims//2))))
            elif self.context_mode["mode"] == 'mcs':
                # 'mcs': the whole context vector conditions the sample.
                x = np.hstack((context_ms, self.interest_model.sample_given_context(context_ms, range(self.context_mode["context_n_dims"]))))
    except ExplautoBootstrapError:
        logger.warning('Interest model not bootstrapped yet')
        # Uniform random fallback over the exploration bounds.
        x = rand_bounds(self.conf.bounds[:, self.expl_dims]).flatten()
        if self.context_mode is not None:
            # Strip the context dimensions so only controllable dims remain.
            x = x[list(set(self.expl_dims) - set(self.context_mode['context_dims']))]
    return x
constant[ Returns a point chosen by the interest model
]
<ast.Try object at 0x7da1b0c9da80>
return[name[x]] | keyword[def] identifier[choose] ( identifier[self] , identifier[context_ms] = keyword[None] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[context_mode] keyword[is] keyword[None] :
identifier[x] = identifier[self] . identifier[interest_model] . identifier[sample] ()
keyword[else] :
keyword[if] identifier[self] . identifier[context_mode] [ literal[string] ]== literal[string] :
keyword[if] identifier[self] . identifier[expl_dims] == identifier[self] . identifier[conf] . identifier[s_dims] :
identifier[x] = identifier[np] . identifier[hstack] (( identifier[context_ms] [ identifier[self] . identifier[conf] . identifier[m_ndims] // literal[int] :], identifier[self] . identifier[interest_model] . identifier[sample_given_context] ( identifier[context_ms] [ identifier[self] . identifier[conf] . identifier[m_ndims] // literal[int] :], identifier[range] ( identifier[self] . identifier[conf] . identifier[s_ndims] // literal[int] ))))
keyword[else] :
keyword[if] identifier[self] . identifier[context_mode] [ literal[string] ]:
identifier[x] = identifier[self] . identifier[interest_model] . identifier[sample] ()
keyword[else] :
identifier[x] = identifier[np] . identifier[hstack] (( identifier[context_ms] [: identifier[self] . identifier[conf] . identifier[m_ndims] // literal[int] ], identifier[self] . identifier[interest_model] . identifier[sample_given_context] ( identifier[context_ms] [: identifier[self] . identifier[conf] . identifier[m_ndims] // literal[int] ], identifier[range] ( identifier[self] . identifier[conf] . identifier[m_ndims] // literal[int] ))))
keyword[elif] identifier[self] . identifier[context_mode] [ literal[string] ]== literal[string] :
identifier[x] = identifier[np] . identifier[hstack] (( identifier[context_ms] , identifier[self] . identifier[interest_model] . identifier[sample_given_context] ( identifier[context_ms] , identifier[range] ( identifier[self] . identifier[context_mode] [ literal[string] ]))))
keyword[except] identifier[ExplautoBootstrapError] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[x] = identifier[rand_bounds] ( identifier[self] . identifier[conf] . identifier[bounds] [:, identifier[self] . identifier[expl_dims] ]). identifier[flatten] ()
keyword[if] identifier[self] . identifier[context_mode] keyword[is] keyword[not] keyword[None] :
identifier[x] = identifier[x] [ identifier[list] ( identifier[set] ( identifier[self] . identifier[expl_dims] )- identifier[set] ( identifier[self] . identifier[context_mode] [ literal[string] ]))]
keyword[return] identifier[x] | def choose(self, context_ms=None):
""" Returns a point chosen by the interest model
"""
try:
if self.context_mode is None:
x = self.interest_model.sample() # depends on [control=['if'], data=[]]
elif self.context_mode['mode'] == 'mdmsds':
if self.expl_dims == self.conf.s_dims:
x = np.hstack((context_ms[self.conf.m_ndims // 2:], self.interest_model.sample_given_context(context_ms[self.conf.m_ndims // 2:], range(self.conf.s_ndims // 2)))) # depends on [control=['if'], data=[]]
elif self.context_mode['choose_m']:
x = self.interest_model.sample() # depends on [control=['if'], data=[]]
else:
x = np.hstack((context_ms[:self.conf.m_ndims // 2], self.interest_model.sample_given_context(context_ms[:self.conf.m_ndims // 2], range(self.conf.m_ndims // 2)))) # depends on [control=['if'], data=[]]
elif self.context_mode['mode'] == 'mcs':
x = np.hstack((context_ms, self.interest_model.sample_given_context(context_ms, range(self.context_mode['context_n_dims'])))) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ExplautoBootstrapError:
logger.warning('Interest model not bootstrapped yet')
x = rand_bounds(self.conf.bounds[:, self.expl_dims]).flatten()
if self.context_mode is not None:
x = x[list(set(self.expl_dims) - set(self.context_mode['context_dims']))] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
return x |
def _read_msg(self, method_frame, with_consumer_tag=False,
              with_message_count=False):
    '''
    Build a Message from the frames belonging to ``method_frame``.

    Returns a Message, or re-queues the current frames and raises a
    FrameUnderflow (via ``_reap_msg_frames``). The optional flags select
    whether a consumer tag (deliver) and/or a message count (get-ok) are
    present in the argument stream.
    '''
    header_frame, body = self._reap_msg_frames(method_frame)
    args = method_frame.args
    # Field reads are order-sensitive: each call consumes the AMQP
    # argument stream exactly as the broker encoded it.
    consumer_tag = args.read_shortstr() if with_consumer_tag else None
    delivery_info = {
        'channel': self.channel,
        'delivery_tag': args.read_longlong(),
        'redelivered': args.read_bit(),
        'exchange': args.read_shortstr(),
        'routing_key': args.read_shortstr(),
    }
    if with_consumer_tag:
        delivery_info['consumer_tag'] = consumer_tag
    if with_message_count:
        delivery_info['message_count'] = args.read_long()
    return Message(body=body, delivery_info=delivery_info,
                   **header_frame.properties)
constant[
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
]
<ast.Tuple object at 0x7da1b0627e20> assign[=] call[name[self]._reap_msg_frames, parameter[name[method_frame]]]
if name[with_consumer_tag] begin[:]
variable[consumer_tag] assign[=] call[name[method_frame].args.read_shortstr, parameter[]]
variable[delivery_tag] assign[=] call[name[method_frame].args.read_longlong, parameter[]]
variable[redelivered] assign[=] call[name[method_frame].args.read_bit, parameter[]]
variable[exchange] assign[=] call[name[method_frame].args.read_shortstr, parameter[]]
variable[routing_key] assign[=] call[name[method_frame].args.read_shortstr, parameter[]]
if name[with_message_count] begin[:]
variable[message_count] assign[=] call[name[method_frame].args.read_long, parameter[]]
variable[delivery_info] assign[=] dictionary[[<ast.Constant object at 0x7da18f09efe0>, <ast.Constant object at 0x7da18f09fe50>, <ast.Constant object at 0x7da18f09fc10>, <ast.Constant object at 0x7da18f09d750>, <ast.Constant object at 0x7da18f09cfd0>], [<ast.Attribute object at 0x7da18f09f3d0>, <ast.Name object at 0x7da18f09d1b0>, <ast.Name object at 0x7da18f09cf40>, <ast.Name object at 0x7da18f09c820>, <ast.Name object at 0x7da18f09d420>]]
if name[with_consumer_tag] begin[:]
call[name[delivery_info]][constant[consumer_tag]] assign[=] name[consumer_tag]
if name[with_message_count] begin[:]
call[name[delivery_info]][constant[message_count]] assign[=] name[message_count]
return[call[name[Message], parameter[]]] | keyword[def] identifier[_read_msg] ( identifier[self] , identifier[method_frame] , identifier[with_consumer_tag] = keyword[False] ,
identifier[with_message_count] = keyword[False] ):
literal[string]
identifier[header_frame] , identifier[body] = identifier[self] . identifier[_reap_msg_frames] ( identifier[method_frame] )
keyword[if] identifier[with_consumer_tag] :
identifier[consumer_tag] = identifier[method_frame] . identifier[args] . identifier[read_shortstr] ()
identifier[delivery_tag] = identifier[method_frame] . identifier[args] . identifier[read_longlong] ()
identifier[redelivered] = identifier[method_frame] . identifier[args] . identifier[read_bit] ()
identifier[exchange] = identifier[method_frame] . identifier[args] . identifier[read_shortstr] ()
identifier[routing_key] = identifier[method_frame] . identifier[args] . identifier[read_shortstr] ()
keyword[if] identifier[with_message_count] :
identifier[message_count] = identifier[method_frame] . identifier[args] . identifier[read_long] ()
identifier[delivery_info] ={
literal[string] : identifier[self] . identifier[channel] ,
literal[string] : identifier[delivery_tag] ,
literal[string] : identifier[redelivered] ,
literal[string] : identifier[exchange] ,
literal[string] : identifier[routing_key] ,
}
keyword[if] identifier[with_consumer_tag] :
identifier[delivery_info] [ literal[string] ]= identifier[consumer_tag]
keyword[if] identifier[with_message_count] :
identifier[delivery_info] [ literal[string] ]= identifier[message_count]
keyword[return] identifier[Message] ( identifier[body] = identifier[body] , identifier[delivery_info] = identifier[delivery_info] ,
** identifier[header_frame] . identifier[properties] ) | def _read_msg(self, method_frame, with_consumer_tag=False, with_message_count=False):
"""
Support method to read a Message from the current frame buffer.
Will return a Message, or re-queue current frames and raise a
FrameUnderflow. Takes an optional argument on whether to read the
consumer tag so it can be used for both deliver and get-ok.
"""
(header_frame, body) = self._reap_msg_frames(method_frame)
if with_consumer_tag:
consumer_tag = method_frame.args.read_shortstr() # depends on [control=['if'], data=[]]
delivery_tag = method_frame.args.read_longlong()
redelivered = method_frame.args.read_bit()
exchange = method_frame.args.read_shortstr()
routing_key = method_frame.args.read_shortstr()
if with_message_count:
message_count = method_frame.args.read_long() # depends on [control=['if'], data=[]]
delivery_info = {'channel': self.channel, 'delivery_tag': delivery_tag, 'redelivered': redelivered, 'exchange': exchange, 'routing_key': routing_key}
if with_consumer_tag:
delivery_info['consumer_tag'] = consumer_tag # depends on [control=['if'], data=[]]
if with_message_count:
delivery_info['message_count'] = message_count # depends on [control=['if'], data=[]]
return Message(body=body, delivery_info=delivery_info, **header_frame.properties) |
def run(self):
    """
    Compile all message catalog .po files into .mo files.

    Walks every language directory under webant/translations and invokes
    ``msgfmt.make`` for each .po file. A file is skipped when its .mo
    already exists and is at least as new as the source, unless
    ``self.force`` is set.
    """
    # thanks to deluge guys ;)
    po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
    print('Compiling po files from "{}"...'.format(po_dir))
    for lang in os.listdir(po_dir):
        sys.stdout.write("\tCompiling {}... ".format(lang))
        sys.stdout.flush()
        curr_lang_path = os.path.join(po_dir, lang)
        for path, dirs, filenames in os.walk(curr_lang_path):
            for f in filenames:
                if not f.endswith('.po'):
                    continue
                src = os.path.join(path, f)
                dst = os.path.join(path, f[:-3] + ".mo")
                # Rebuild when forced, when the .mo is missing, or when
                # the .po was modified after the existing .mo.
                # os.path.getmtime keeps sub-second precision, unlike the
                # original os.stat(...)[8] tuple access which truncates
                # st_mtime to whole seconds.
                if (self.force or not os.path.exists(dst)
                        or os.path.getmtime(src) > os.path.getmtime(dst)):
                    msgfmt.make(src, dst)
                    print("ok.")
                else:
                    print("already up to date.")
    print('Finished compiling translation files.')
constant[
Compile all message catalogs .po files into .mo files.
Skips not changed file based on source mtime.
]
variable[po_dir] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[webant], constant[translations]]]
call[name[print], parameter[call[constant[Compiling po files from "{}"...].format, parameter[name[po_dir]]]]]
for taget[name[lang]] in starred[call[name[os].listdir, parameter[name[po_dir]]]] begin[:]
call[name[sys].stdout.write, parameter[call[constant[ Compiling {}... ].format, parameter[name[lang]]]]]
call[name[sys].stdout.flush, parameter[]]
variable[curr_lang_path] assign[=] call[name[os].path.join, parameter[name[po_dir], name[lang]]]
for taget[tuple[[<ast.Name object at 0x7da1b26a22c0>, <ast.Name object at 0x7da1b26a0310>, <ast.Name object at 0x7da1b26a3850>]]] in starred[call[name[os].walk, parameter[name[curr_lang_path]]]] begin[:]
for taget[name[f]] in starred[name[filenames]] begin[:]
if call[name[f].endswith, parameter[constant[.po]]] begin[:]
variable[src] assign[=] call[name[os].path.join, parameter[name[path], name[f]]]
variable[dst] assign[=] call[name[os].path.join, parameter[name[path], binary_operation[call[name[f]][<ast.Slice object at 0x7da1b26a3430>] + constant[.mo]]]]
if <ast.BoolOp object at 0x7da1b26a1150> begin[:]
call[name[msgfmt].make, parameter[name[src], name[dst]]]
call[name[print], parameter[constant[ok.]]]
call[name[print], parameter[constant[Finished compiling translation files.]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[po_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] , literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[po_dir] ))
keyword[for] identifier[lang] keyword[in] identifier[os] . identifier[listdir] ( identifier[po_dir] ):
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[lang] ))
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[curr_lang_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[po_dir] , identifier[lang] )
keyword[for] identifier[path] , identifier[dirs] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[curr_lang_path] ):
keyword[for] identifier[f] keyword[in] identifier[filenames] :
keyword[if] identifier[f] . identifier[endswith] ( literal[string] ):
identifier[src] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[f] )
identifier[dst] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[f] [:- literal[int] ]+ literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dst] ) keyword[or] identifier[self] . identifier[force] :
identifier[msgfmt] . identifier[make] ( identifier[src] , identifier[dst] )
identifier[print] ( literal[string] )
keyword[else] :
identifier[src_mtime] = identifier[os] . identifier[stat] ( identifier[src] )[ literal[int] ]
identifier[dst_mtime] = identifier[os] . identifier[stat] ( identifier[dst] )[ literal[int] ]
keyword[if] identifier[src_mtime] > identifier[dst_mtime] :
identifier[msgfmt] . identifier[make] ( identifier[src] , identifier[dst] )
identifier[print] ( literal[string] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] ) | def run(self):
"""
Compile all message catalogs .po files into .mo files.
Skips not changed file based on source mtime.
"""
# thanks to deluge guys ;)
po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
print('Compiling po files from "{}"...'.format(po_dir))
for lang in os.listdir(po_dir):
sys.stdout.write('\tCompiling {}... '.format(lang))
sys.stdout.flush()
curr_lang_path = os.path.join(po_dir, lang)
for (path, dirs, filenames) in os.walk(curr_lang_path):
for f in filenames:
if f.endswith('.po'):
src = os.path.join(path, f)
dst = os.path.join(path, f[:-3] + '.mo')
if not os.path.exists(dst) or self.force:
msgfmt.make(src, dst)
print('ok.') # depends on [control=['if'], data=[]]
else:
src_mtime = os.stat(src)[8]
dst_mtime = os.stat(dst)[8]
if src_mtime > dst_mtime:
msgfmt.make(src, dst)
print('ok.') # depends on [control=['if'], data=[]]
else:
print('already up to date.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['lang']]
print('Finished compiling translation files.') |
def persistant_success(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
    """
    Add a persistent message with the ``SUCCESS`` level.

    Thin wrapper around ``add_message`` using the ``SUCCESS_PERSISTENT``
    level; extra positional and keyword arguments are forwarded verbatim.
    """
    add_message(request, SUCCESS_PERSISTENT, message, *args,
                extra_tags=extra_tags, fail_silently=fail_silently,
                **kwargs)
constant[
Adds a persistant message with the ``SUCCESS`` level.
]
call[name[add_message], parameter[name[request], name[SUCCESS_PERSISTENT], name[message], <ast.Starred object at 0x7da18f721c30>]] | keyword[def] identifier[persistant_success] ( identifier[request] , identifier[message] , identifier[extra_tags] = literal[string] , identifier[fail_silently] = keyword[False] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[add_message] ( identifier[request] , identifier[SUCCESS_PERSISTENT] , identifier[message] , identifier[extra_tags] = identifier[extra_tags] ,
identifier[fail_silently] = identifier[fail_silently] ,* identifier[args] ,** identifier[kwargs] ) | def persistant_success(request, message, extra_tags='', fail_silently=False, *args, **kwargs):
"""
Adds a persistant message with the ``SUCCESS`` level.
"""
add_message(request, SUCCESS_PERSISTENT, message, *args, extra_tags=extra_tags, fail_silently=fail_silently, **kwargs) |
def beta(a, b):
    """Evaluate the beta function B(a, b) = gamma(a)*gamma(b)/gamma(a+b).

    Works in log space (math.lgamma) so that large intermediate gamma
    values cannot overflow before the final ratio is taken.
    """
    log_beta = math.lgamma(a) + math.lgamma(b) - math.lgamma(a + b)
    return math.exp(log_beta)
constant[use gamma function or inbuilt math.gamma() to compute vals of beta func]
variable[beta] assign[=] call[name[math].exp, parameter[binary_operation[binary_operation[call[name[math].lgamma, parameter[name[a]]] + call[name[math].lgamma, parameter[name[b]]]] - call[name[math].lgamma, parameter[binary_operation[name[a] + name[b]]]]]]]
return[name[beta]] | keyword[def] identifier[beta] ( identifier[a] , identifier[b] ):
literal[string]
identifier[beta] = identifier[math] . identifier[exp] ( identifier[math] . identifier[lgamma] ( identifier[a] )+ identifier[math] . identifier[lgamma] ( identifier[b] )- identifier[math] . identifier[lgamma] ( identifier[a] + identifier[b] ))
keyword[return] identifier[beta] | def beta(a, b):
"""use gamma function or inbuilt math.gamma() to compute vals of beta func"""
beta = math.exp(math.lgamma(a) + math.lgamma(b) - math.lgamma(a + b))
return beta |
def node_slider(self, seed=None):
    """
    Return a toytree copy with node heights modified while retaining
    the same topology but not necessarily node branching order.

    Node heights are moved up or down uniformly between their parent
    and highest child node heights in 'levelorder' from root to tips.
    The total tree height is retained, only relative edge lengths change.

    seed -- optional seed for reproducible jitter. Uses a local
        random.Random instance so the module-level RNG state is not
        perturbed (the previous implementation reseeded the global RNG
        as a hidden side effect); for a given seed the drawn values are
        identical to the old behavior.
    """
    # Proportion (<1) keeping slid nodes strictly between their parent
    # and children so no edge collapses to zero length. Not exposed to
    # callers on purpose.
    prop = 0.999
    rng = random.Random(seed)
    ctree = self._ttree.copy()
    for node in ctree.treenode.traverse():
        # Only internal, non-root nodes are slid.
        if node.up and node.children:
            # Lower bound: cannot slide below the tallest child edge.
            minjit = max(child.dist for child in node.children) * prop
            # Upper bound: cannot slide above (a proportion of) the parent.
            maxjit = (node.up.height * prop) - node.height
            newheight = rng.uniform(-minjit, maxjit)
            # Slide the children up/down, then compensate on this node's
            # own edge so the total height above stays unchanged.
            for child in node.children:
                child.dist += newheight
            node.dist -= newheight
    ctree._coords.update()
    return ctree
constant[
Returns a toytree copy with node heights modified while retaining
the same topology but not necessarily node branching order.
Node heights are moved up or down uniformly between their parent
and highest child node heights in 'levelorder' from root to tips.
The total tree height is retained at 1.0, only relative edge
lengths change.
]
variable[prop] assign[=] constant[0.999]
assert[call[name[isinstance], parameter[name[prop], name[float]]]]
assert[compare[name[prop] less[<] constant[1]]]
call[name[random].seed, parameter[name[seed]]]
variable[ctree] assign[=] call[name[self]._ttree.copy, parameter[]]
for taget[name[node]] in starred[call[name[ctree].treenode.traverse, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18c4cd2a0> begin[:]
variable[minjit] assign[=] binary_operation[call[name[max], parameter[<ast.ListComp object at 0x7da1b2345870>]] * name[prop]]
variable[maxjit] assign[=] binary_operation[binary_operation[name[node].up.height * name[prop]] - name[node].height]
variable[newheight] assign[=] call[name[random].uniform, parameter[<ast.UnaryOp object at 0x7da1b10c77c0>, name[maxjit]]]
for taget[name[child]] in starred[name[node].children] begin[:]
<ast.AugAssign object at 0x7da1b10c7370>
<ast.AugAssign object at 0x7da1b10c6830>
call[name[ctree]._coords.update, parameter[]]
return[name[ctree]] | keyword[def] identifier[node_slider] ( identifier[self] , identifier[seed] = keyword[None] ):
literal[string]
identifier[prop] = literal[int]
keyword[assert] identifier[isinstance] ( identifier[prop] , identifier[float] ), literal[string]
keyword[assert] identifier[prop] < literal[int] , literal[string]
identifier[random] . identifier[seed] ( identifier[seed] )
identifier[ctree] = identifier[self] . identifier[_ttree] . identifier[copy] ()
keyword[for] identifier[node] keyword[in] identifier[ctree] . identifier[treenode] . identifier[traverse] ():
keyword[if] identifier[node] . identifier[up] keyword[and] identifier[node] . identifier[children] :
identifier[minjit] = identifier[max] ([ identifier[i] . identifier[dist] keyword[for] identifier[i] keyword[in] identifier[node] . identifier[children] ])* identifier[prop]
identifier[maxjit] =( identifier[node] . identifier[up] . identifier[height] * identifier[prop] )- identifier[node] . identifier[height]
identifier[newheight] = identifier[random] . identifier[uniform] (- identifier[minjit] , identifier[maxjit] )
keyword[for] identifier[child] keyword[in] identifier[node] . identifier[children] :
identifier[child] . identifier[dist] += identifier[newheight]
identifier[node] . identifier[dist] -= identifier[newheight]
identifier[ctree] . identifier[_coords] . identifier[update] ()
keyword[return] identifier[ctree] | def node_slider(self, seed=None):
"""
Returns a toytree copy with node heights modified while retaining
the same topology but not necessarily node branching order.
Node heights are moved up or down uniformly between their parent
and highest child node heights in 'levelorder' from root to tips.
The total tree height is retained at 1.0, only relative edge
lengths change.
"""
# I don't think user's should need to access prop
prop = 0.999
assert isinstance(prop, float), 'prop must be a float'
assert prop < 1, 'prop must be a proportion >0 and < 1.'
random.seed(seed)
ctree = self._ttree.copy()
for node in ctree.treenode.traverse(): ## slide internal nodes
if node.up and node.children:
## get min and max slides
minjit = max([i.dist for i in node.children]) * prop
maxjit = node.up.height * prop - node.height
newheight = random.uniform(-minjit, maxjit)
## slide children
for child in node.children:
child.dist += newheight # depends on [control=['for'], data=['child']]
## slide self to match
node.dist -= newheight # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
ctree._coords.update()
return ctree |
def _parse_simple_model(topology, parent_scope, model, inputs, outputs):
    '''
    Parse a model containing only one operator (aka simple model).
    Steps:
        1. Create local scope for allocating local variables and operators
        2. Create operator and then feed the model's inputs and outputs to the operator
        3. Connect local variables and their corresponding parent variables
    Note:
        1. Notice that a CoreML operator can contain no input and output, so we directly use model's inputs (outputs).
        2. Input and output names can be identical in CoreML, but they must be different for ONNX.
    '''
    # Create local scope for the considered model
    scope = topology.declare_scope('single', [parent_scope] + parent_scope.parent_scopes)
    # Create operator for the considered model
    this_operator = scope.declare_local_operator(model.WhichOneof('Type'), model)

    def _bridge(parent_variable, duplicate_index, into_scope):
        # Connect one parent-scope variable with its local duplicate via an
        # identity operator. Assumptions:
        # 1. For each parent variable's raw name, at least one local
        #    duplicate exists in this scope.
        # 2. When several local variables share the raw name (e.g. 'A' and
        #    'A1'), the FIRST one is wired to a parent input and the LAST
        #    one to a parent output, hence duplicate_index is 0 or -1.
        raw_name = parent_variable.raw_name
        local = scope.variables[scope.variable_name_mapping[raw_name][duplicate_index]]
        operator = scope.declare_local_operator('identity')
        if into_scope:
            operator.inputs.append(parent_variable)
            operator.outputs.append(local)
        else:
            operator.inputs.append(local)
            operator.outputs.append(parent_variable)

    # Allocate inputs for the operator and then connect them with inputs
    # from outside. We assume no duplicated raw name exists. prepend=True
    # because model inputs should not hide any intermediate variables.
    for var in model.description.input:
        variable = scope.declare_local_variable(
            var.name, _parse_coreml_feature(var, topology.target_opset, topology.default_batch_size),
            prepend=True)
        this_operator.inputs.append(variable)
    for parent_variable in inputs:
        _bridge(parent_variable, 0, into_scope=True)

    # Allocate outputs for the operator and then connect them with outputs
    # from outside. We assume no duplicated output raw name exists.
    for var in model.description.output:
        variable = scope.declare_local_variable(
            var.name, _parse_coreml_feature(var, topology.target_opset, topology.default_batch_size))
        this_operator.outputs.append(variable)
    for parent_variable in outputs:
        _bridge(parent_variable, -1, into_scope=False)
constant[
Parse a model containing only one operator (aka simple model).
Steps:
1. Create local scope for allocating local variables and operators
2. Create operator and then feed the model's inputs and outputs to the operator
3. Connect local variables and their corresponding parent variables
Note:
1. Notice that a CoreML operator can contain no input and output, so we directly use model's inputs (outputs).
2. Input and output names can be identical in CoreML, but they must be different for ONNX.
]
variable[scope] assign[=] call[name[topology].declare_scope, parameter[constant[single], binary_operation[list[[<ast.Name object at 0x7da20c6e4e20>]] + name[parent_scope].parent_scopes]]]
variable[this_operator] assign[=] call[name[scope].declare_local_operator, parameter[call[name[model].WhichOneof, parameter[constant[Type]]], name[model]]]
for taget[name[var]] in starred[name[model].description.input] begin[:]
variable[variable] assign[=] call[name[scope].declare_local_variable, parameter[name[var].name, call[name[_parse_coreml_feature], parameter[name[var], name[topology].target_opset, name[topology].default_batch_size]]]]
call[name[this_operator].inputs.append, parameter[name[variable]]]
for taget[name[parent_variable]] in starred[name[inputs]] begin[:]
variable[raw_name] assign[=] name[parent_variable].raw_name
variable[child_variable] assign[=] call[name[scope].variables][call[call[name[scope].variable_name_mapping][name[raw_name]]][constant[0]]]
variable[operator] assign[=] call[name[scope].declare_local_operator, parameter[constant[identity]]]
call[name[operator].inputs.append, parameter[name[parent_variable]]]
call[name[operator].outputs.append, parameter[name[child_variable]]]
for taget[name[var]] in starred[name[model].description.output] begin[:]
variable[variable] assign[=] call[name[scope].declare_local_variable, parameter[name[var].name, call[name[_parse_coreml_feature], parameter[name[var], name[topology].target_opset, name[topology].default_batch_size]]]]
call[name[this_operator].outputs.append, parameter[name[variable]]]
for taget[name[parent_variable]] in starred[name[outputs]] begin[:]
variable[raw_name] assign[=] name[parent_variable].raw_name
variable[child_variable] assign[=] call[name[scope].variables][call[call[name[scope].variable_name_mapping][name[raw_name]]][<ast.UnaryOp object at 0x7da1b1d62c50>]]
variable[operator] assign[=] call[name[scope].declare_local_operator, parameter[constant[identity]]]
call[name[operator].inputs.append, parameter[name[child_variable]]]
call[name[operator].outputs.append, parameter[name[parent_variable]]] | keyword[def] identifier[_parse_simple_model] ( identifier[topology] , identifier[parent_scope] , identifier[model] , identifier[inputs] , identifier[outputs] ):
literal[string]
identifier[scope] = identifier[topology] . identifier[declare_scope] ( literal[string] ,[ identifier[parent_scope] ]+ identifier[parent_scope] . identifier[parent_scopes] )
identifier[this_operator] = identifier[scope] . identifier[declare_local_operator] ( identifier[model] . identifier[WhichOneof] ( literal[string] ), identifier[model] )
keyword[for] identifier[var] keyword[in] identifier[model] . identifier[description] . identifier[input] :
identifier[variable] = identifier[scope] . identifier[declare_local_variable] (
identifier[var] . identifier[name] , identifier[_parse_coreml_feature] ( identifier[var] , identifier[topology] . identifier[target_opset] , identifier[topology] . identifier[default_batch_size] ),
identifier[prepend] = keyword[True] )
identifier[this_operator] . identifier[inputs] . identifier[append] ( identifier[variable] )
keyword[for] identifier[parent_variable] keyword[in] identifier[inputs] :
identifier[raw_name] = identifier[parent_variable] . identifier[raw_name]
identifier[child_variable] = identifier[scope] . identifier[variables] [ identifier[scope] . identifier[variable_name_mapping] [ identifier[raw_name] ][ literal[int] ]]
identifier[operator] = identifier[scope] . identifier[declare_local_operator] ( literal[string] )
identifier[operator] . identifier[inputs] . identifier[append] ( identifier[parent_variable] )
identifier[operator] . identifier[outputs] . identifier[append] ( identifier[child_variable] )
keyword[for] identifier[var] keyword[in] identifier[model] . identifier[description] . identifier[output] :
identifier[variable] = identifier[scope] . identifier[declare_local_variable] (
identifier[var] . identifier[name] , identifier[_parse_coreml_feature] ( identifier[var] , identifier[topology] . identifier[target_opset] , identifier[topology] . identifier[default_batch_size] ))
identifier[this_operator] . identifier[outputs] . identifier[append] ( identifier[variable] )
keyword[for] identifier[parent_variable] keyword[in] identifier[outputs] :
identifier[raw_name] = identifier[parent_variable] . identifier[raw_name]
identifier[child_variable] = identifier[scope] . identifier[variables] [ identifier[scope] . identifier[variable_name_mapping] [ identifier[raw_name] ][- literal[int] ]]
identifier[operator] = identifier[scope] . identifier[declare_local_operator] ( literal[string] )
identifier[operator] . identifier[inputs] . identifier[append] ( identifier[child_variable] )
identifier[operator] . identifier[outputs] . identifier[append] ( identifier[parent_variable] ) | def _parse_simple_model(topology, parent_scope, model, inputs, outputs):
"""
Parse a model containing only one operator (aka simple model).
Steps:
1. Create local scope for allocating local variables and operators
2. Create operator and then feed the model's inputs and outputs to the operator
3. Connect local variables and their corresponding parent variables
Note:
1. Notice that a CoreML operator can contain no input and output, so we directly use model's inputs (outputs).
2. Input and output names can be identical in CoreML, but they must be different for ONNX.
"""
# Create local scope for the considered model
scope = topology.declare_scope('single', [parent_scope] + parent_scope.parent_scopes)
# Create operator for the considered model
this_operator = scope.declare_local_operator(model.WhichOneof('Type'), model)
# Allocate inputs for the operator and then connect them with inputs from outside
for var in model.description.input:
# We assume that no duplicated raw name exists. Note that we set prepend=True because model inputs should
# not hide any intermediate variables.
variable = scope.declare_local_variable(var.name, _parse_coreml_feature(var, topology.target_opset, topology.default_batch_size), prepend=True)
this_operator.inputs.append(variable) # depends on [control=['for'], data=['var']]
# Connect local variables and variables passed into this scope. Our assumptions are described below.
# 1. Assume a variable with 'A' as its CoreML name is passed in. There must be at least one local variable gets a
# raw name 'A'. That is, for each parent variable, at least one local duplicate is available.
# 2. It's possible to find multiple local variables associated with the same raw name. For example, raw name 'A' can
# be associated with 'A' and 'A1' in ONNX. In this case, we connect the first one to parent input.
for parent_variable in inputs:
raw_name = parent_variable.raw_name
child_variable = scope.variables[scope.variable_name_mapping[raw_name][0]]
operator = scope.declare_local_operator('identity')
operator.inputs.append(parent_variable)
operator.outputs.append(child_variable) # depends on [control=['for'], data=['parent_variable']]
# Allocate outputs for the operator and then connect them with outputs from outside
for var in model.description.output:
# We assume that no duplicated output raw name exists.
variable = scope.declare_local_variable(var.name, _parse_coreml_feature(var, topology.target_opset, topology.default_batch_size))
this_operator.outputs.append(variable) # depends on [control=['for'], data=['var']]
# Connect local variables and variables passed into this scope. Our assumptions are described below.
# 1. Assume a variable with 'A' as its CoreML name is passed in. There must be at least one local variable gets a
# raw name 'A'. That is, for each parent variable, at least one local duplicate is available.
# 2. It's possible to find multiple local variables associated with the same raw name. For example, raw name 'A' can
# be associated with 'A' and 'A1' in ONNX. In this case, we connect the last one to parent output.
for parent_variable in outputs:
raw_name = parent_variable.raw_name
child_variable = scope.variables[scope.variable_name_mapping[raw_name][-1]]
operator = scope.declare_local_operator('identity')
operator.inputs.append(child_variable)
operator.outputs.append(parent_variable) # depends on [control=['for'], data=['parent_variable']] |
def get_member_brief(self, member_id=0):
    ''' retrieve brief member profile info from the meetup api
    :param member_id: [optional] integer with member id from member profile
    :return: dictionary with member profile inside [json] key
    member_profile = self.objects.profile_brief.schema
    '''
    # https://www.meetup.com/meetup_api/docs/members/:member_id/#get
    title = '%s.get_member_brief' % self.__class__.__name__

    # validate inputs
    input_fields = {'member_id': member_id}
    for field_name, field_value in input_fields.items():
        if field_value:
            object_title = '%s(%s=%s)' % (title, field_name, str(field_value))
            self.fields.validate(field_value, '.%s' % field_name, object_title)

    # construct request fields; default to the authenticated member ('self')
    url = '%s/members/' % self.endpoint
    params = {'member_id': member_id if member_id else 'self'}

    # send request
    response_details = self._get_request(url, params=params)

    # copy every response field except the raw json payload into the output
    profile_details = {'json': {}}
    for field_name, field_value in response_details.items():
        if field_name != 'json':
            profile_details[field_name] = field_value

    # parse response: keep the first result, excluding the 'topics' field
    payload = response_details['json']
    if payload and 'results' in payload and payload['results']:
        for field_name, field_value in payload['results'][0].items():
            if field_name != 'topics':
                profile_details['json'][field_name] = field_value
    profile_details['json'] = self.objects.profile_brief.ingest(**profile_details['json'])

    return profile_details
constant[ a method to retrieve member profile info
:param member_id: [optional] integer with member id from member profile
:return: dictionary with member profile inside [json] key
member_profile = self.objects.profile_brief.schema
]
variable[title] assign[=] binary_operation[constant[%s.get_member_brief] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__]
variable[input_fields] assign[=] dictionary[[<ast.Constant object at 0x7da1b15956c0>], [<ast.Name object at 0x7da1b15946d0>]]
for taget[tuple[[<ast.Name object at 0x7da1b15958a0>, <ast.Name object at 0x7da1b1597fa0>]]] in starred[call[name[input_fields].items, parameter[]]] begin[:]
if name[value] begin[:]
variable[object_title] assign[=] binary_operation[constant[%s(%s=%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1596b90>, <ast.Name object at 0x7da1b1597970>, <ast.Call object at 0x7da1b15949d0>]]]
call[name[self].fields.validate, parameter[name[value], binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> name[key]], name[object_title]]]
variable[url] assign[=] binary_operation[constant[%s/members/] <ast.Mod object at 0x7da2590d6920> name[self].endpoint]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1594730>], [<ast.Constant object at 0x7da1b15943d0>]]
if name[member_id] begin[:]
call[name[params]][constant[member_id]] assign[=] name[member_id]
variable[response_details] assign[=] call[name[self]._get_request, parameter[name[url]]]
variable[profile_details] assign[=] dictionary[[<ast.Constant object at 0x7da1b1595870>], [<ast.Dict object at 0x7da1b15947f0>]]
for taget[tuple[[<ast.Name object at 0x7da1b1597cd0>, <ast.Name object at 0x7da1b15953c0>]]] in starred[call[name[response_details].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1595e40> begin[:]
call[name[profile_details]][name[key]] assign[=] name[value]
if call[name[response_details]][constant[json]] begin[:]
if compare[constant[results] in call[call[name[response_details]][constant[json]].keys, parameter[]]] begin[:]
if call[call[name[response_details]][constant[json]]][constant[results]] begin[:]
variable[details] assign[=] call[call[call[name[response_details]][constant[json]]][constant[results]]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da1b1595e70>, <ast.Name object at 0x7da1b1595420>]]] in starred[call[name[details].items, parameter[]]] begin[:]
if compare[name[key] not_equal[!=] constant[topics]] begin[:]
call[call[name[profile_details]][constant[json]]][name[key]] assign[=] name[value]
call[name[profile_details]][constant[json]] assign[=] call[name[self].objects.profile_brief.ingest, parameter[]]
return[name[profile_details]] | keyword[def] identifier[get_member_brief] ( identifier[self] , identifier[member_id] = literal[int] ):
literal[string]
identifier[title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__]
identifier[input_fields] ={
literal[string] : identifier[member_id]
}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[input_fields] . identifier[items] ():
keyword[if] identifier[value] :
identifier[object_title] = literal[string] %( identifier[title] , identifier[key] , identifier[str] ( identifier[value] ))
identifier[self] . identifier[fields] . identifier[validate] ( identifier[value] , literal[string] % identifier[key] , identifier[object_title] )
identifier[url] = literal[string] % identifier[self] . identifier[endpoint]
identifier[params] ={
literal[string] : literal[string]
}
keyword[if] identifier[member_id] :
identifier[params] [ literal[string] ]= identifier[member_id]
identifier[response_details] = identifier[self] . identifier[_get_request] ( identifier[url] , identifier[params] = identifier[params] )
identifier[profile_details] ={
literal[string] :{}
}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[response_details] . identifier[items] ():
keyword[if] keyword[not] identifier[key] == literal[string] :
identifier[profile_details] [ identifier[key] ]= identifier[value]
keyword[if] identifier[response_details] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[response_details] [ literal[string] ]. identifier[keys] ():
keyword[if] identifier[response_details] [ literal[string] ][ literal[string] ]:
identifier[details] = identifier[response_details] [ literal[string] ][ literal[string] ][ literal[int] ]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[details] . identifier[items] ():
keyword[if] identifier[key] != literal[string] :
identifier[profile_details] [ literal[string] ][ identifier[key] ]= identifier[value]
identifier[profile_details] [ literal[string] ]= identifier[self] . identifier[objects] . identifier[profile_brief] . identifier[ingest] (** identifier[profile_details] [ literal[string] ])
keyword[return] identifier[profile_details] | def get_member_brief(self, member_id=0):
""" a method to retrieve member profile info
:param member_id: [optional] integer with member id from member profile
:return: dictionary with member profile inside [json] key
member_profile = self.objects.profile_brief.schema
""" # https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.get_member_brief' % self.__class__.__name__ # validate inputs
input_fields = {'member_id': member_id}
for (key, value) in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # construct request fields
url = '%s/members/' % self.endpoint
params = {'member_id': 'self'}
if member_id:
params['member_id'] = member_id # depends on [control=['if'], data=[]] # send request
response_details = self._get_request(url, params=params) # construct method output dictionary
profile_details = {'json': {}}
for (key, value) in response_details.items():
if not key == 'json':
profile_details[key] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # parse response
if response_details['json']:
if 'results' in response_details['json'].keys():
if response_details['json']['results']:
details = response_details['json']['results'][0]
for (key, value) in details.items():
if key != 'topics':
profile_details['json'][key] = value # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
profile_details['json'] = self.objects.profile_brief.ingest(**profile_details['json'])
return profile_details |
def _isstring(dtype):
"""Given a numpy dtype, determines whether it is a string. Returns True
if the dtype is string or unicode.
"""
return dtype.type == numpy.unicode_ or dtype.type == numpy.string_ | def function[_isstring, parameter[dtype]]:
constant[Given a numpy dtype, determines whether it is a string. Returns True
if the dtype is string or unicode.
]
return[<ast.BoolOp object at 0x7da20e9b2950>] | keyword[def] identifier[_isstring] ( identifier[dtype] ):
literal[string]
keyword[return] identifier[dtype] . identifier[type] == identifier[numpy] . identifier[unicode_] keyword[or] identifier[dtype] . identifier[type] == identifier[numpy] . identifier[string_] | def _isstring(dtype):
"""Given a numpy dtype, determines whether it is a string. Returns True
if the dtype is string or unicode.
"""
return dtype.type == numpy.unicode_ or dtype.type == numpy.string_ |
def header(self, definitions, parent, root):
    """Attach the input/output header properties from *root* to *parent*.
    No-op when *root* is None.
    """
    if root is None:
        return
    hdr = Facade('Header')
    parent.headers.append(hdr)
    hdr.use = root.get('use', default='literal')
    ns = root.get('namespace')
    if ns is None:
        # no explicit namespace: fall back to the definitions' target namespace
        hdr.namespace = definitions.tns
    else:
        # pair the namespace with a prefix resolved against the element tree
        hdr.namespace = (root.findPrefix(ns, 'h0'), ns)
    msg = root.get('message')
    if msg is not None:
        hdr.message = msg
    part = root.get('part')
    if part is not None:
        hdr.part = part
constant[ add the input/output header properties ]
if compare[name[root] is constant[None]] begin[:]
return[None]
variable[header] assign[=] call[name[Facade], parameter[constant[Header]]]
call[name[parent].headers.append, parameter[name[header]]]
name[header].use assign[=] call[name[root].get, parameter[constant[use]]]
variable[ns] assign[=] call[name[root].get, parameter[constant[namespace]]]
if compare[name[ns] is constant[None]] begin[:]
name[header].namespace assign[=] name[definitions].tns
variable[msg] assign[=] call[name[root].get, parameter[constant[message]]]
if compare[name[msg] is_not constant[None]] begin[:]
name[header].message assign[=] name[msg]
variable[part] assign[=] call[name[root].get, parameter[constant[part]]]
if compare[name[part] is_not constant[None]] begin[:]
name[header].part assign[=] name[part] | keyword[def] identifier[header] ( identifier[self] , identifier[definitions] , identifier[parent] , identifier[root] ):
literal[string]
keyword[if] identifier[root] keyword[is] keyword[None] :
keyword[return]
identifier[header] = identifier[Facade] ( literal[string] )
identifier[parent] . identifier[headers] . identifier[append] ( identifier[header] )
identifier[header] . identifier[use] = identifier[root] . identifier[get] ( literal[string] , identifier[default] = literal[string] )
identifier[ns] = identifier[root] . identifier[get] ( literal[string] )
keyword[if] identifier[ns] keyword[is] keyword[None] :
identifier[header] . identifier[namespace] = identifier[definitions] . identifier[tns]
keyword[else] :
identifier[prefix] = identifier[root] . identifier[findPrefix] ( identifier[ns] , literal[string] )
identifier[header] . identifier[namespace] =( identifier[prefix] , identifier[ns] )
identifier[msg] = identifier[root] . identifier[get] ( literal[string] )
keyword[if] identifier[msg] keyword[is] keyword[not] keyword[None] :
identifier[header] . identifier[message] = identifier[msg]
identifier[part] = identifier[root] . identifier[get] ( literal[string] )
keyword[if] identifier[part] keyword[is] keyword[not] keyword[None] :
identifier[header] . identifier[part] = identifier[part] | def header(self, definitions, parent, root):
""" add the input/output header properties """
if root is None:
return # depends on [control=['if'], data=[]]
header = Facade('Header')
parent.headers.append(header)
header.use = root.get('use', default='literal')
ns = root.get('namespace')
if ns is None:
header.namespace = definitions.tns # depends on [control=['if'], data=[]]
else:
prefix = root.findPrefix(ns, 'h0')
header.namespace = (prefix, ns)
msg = root.get('message')
if msg is not None:
header.message = msg # depends on [control=['if'], data=['msg']]
part = root.get('part')
if part is not None:
header.part = part # depends on [control=['if'], data=['part']] |
def rename_library(self, from_lib, to_lib):
    """
    Renames a library
    Parameters
    ----------
    from_lib: str
        The name of the library to be renamed
    to_lib: str
        The new name of the library
    """
    new_colname = to_lib
    if '.' in from_lib and '.' in to_lib:
        # dotted names are '<database>.<collection>'; only the collection part
        # may change, the database part must match on both sides
        if from_lib.split('.')[0] != to_lib.split('.')[0]:
            raise ValueError("Collection can only be renamed in the same database")
        new_colname = to_lib.split('.')[1]
    binding = ArcticLibraryBinding(self, from_lib)
    old_colname = binding.get_top_level_collection().name
    logger.info('Renaming collection: %s' % old_colname)
    # rename the top-level collection first, then every dotted sub-collection
    binding._db[old_colname].rename(new_colname)
    child_prefix = old_colname + '.'
    for coll_name in binding._db.list_collection_names():
        if coll_name.startswith(child_prefix):
            binding._db[coll_name].rename(coll_name.replace(old_colname, new_colname))
    # drop any stale cached bindings for the old name
    if from_lib in self._library_cache:
        del self._library_cache[from_lib]
        del self._library_cache[binding.get_name()]
    self._cache.update_item_for_key(
        'list_libraries', self._sanitize_lib_name(from_lib), self._sanitize_lib_name(to_lib))
constant[
Renames a library
Parameters
----------
from_lib: str
The name of the library to be renamed
to_lib: str
The new name of the library
]
variable[to_colname] assign[=] name[to_lib]
if <ast.BoolOp object at 0x7da1b2347910> begin[:]
if compare[call[call[name[from_lib].split, parameter[constant[.]]]][constant[0]] not_equal[!=] call[call[name[to_lib].split, parameter[constant[.]]]][constant[0]]] begin[:]
<ast.Raise object at 0x7da1b2346740>
variable[to_colname] assign[=] call[call[name[to_lib].split, parameter[constant[.]]]][constant[1]]
variable[lib] assign[=] call[name[ArcticLibraryBinding], parameter[name[self], name[from_lib]]]
variable[colname] assign[=] call[name[lib].get_top_level_collection, parameter[]].name
call[name[logger].info, parameter[binary_operation[constant[Renaming collection: %s] <ast.Mod object at 0x7da2590d6920> name[colname]]]]
call[call[name[lib]._db][name[colname]].rename, parameter[name[to_colname]]]
for taget[name[coll]] in starred[call[name[lib]._db.list_collection_names, parameter[]]] begin[:]
if call[name[coll].startswith, parameter[binary_operation[name[colname] + constant[.]]]] begin[:]
call[call[name[lib]._db][name[coll]].rename, parameter[call[name[coll].replace, parameter[name[colname], name[to_colname]]]]]
if compare[name[from_lib] in name[self]._library_cache] begin[:]
<ast.Delete object at 0x7da18fe908e0>
<ast.Delete object at 0x7da18fe91480>
call[name[self]._cache.update_item_for_key, parameter[constant[list_libraries], call[name[self]._sanitize_lib_name, parameter[name[from_lib]]], call[name[self]._sanitize_lib_name, parameter[name[to_lib]]]]] | keyword[def] identifier[rename_library] ( identifier[self] , identifier[from_lib] , identifier[to_lib] ):
literal[string]
identifier[to_colname] = identifier[to_lib]
keyword[if] literal[string] keyword[in] identifier[from_lib] keyword[and] literal[string] keyword[in] identifier[to_lib] :
keyword[if] identifier[from_lib] . identifier[split] ( literal[string] )[ literal[int] ]!= identifier[to_lib] . identifier[split] ( literal[string] )[ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[to_colname] = identifier[to_lib] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[lib] = identifier[ArcticLibraryBinding] ( identifier[self] , identifier[from_lib] )
identifier[colname] = identifier[lib] . identifier[get_top_level_collection] (). identifier[name]
identifier[logger] . identifier[info] ( literal[string] % identifier[colname] )
identifier[lib] . identifier[_db] [ identifier[colname] ]. identifier[rename] ( identifier[to_colname] )
keyword[for] identifier[coll] keyword[in] identifier[lib] . identifier[_db] . identifier[list_collection_names] ():
keyword[if] identifier[coll] . identifier[startswith] ( identifier[colname] + literal[string] ):
identifier[lib] . identifier[_db] [ identifier[coll] ]. identifier[rename] ( identifier[coll] . identifier[replace] ( identifier[colname] , identifier[to_colname] ))
keyword[if] identifier[from_lib] keyword[in] identifier[self] . identifier[_library_cache] :
keyword[del] identifier[self] . identifier[_library_cache] [ identifier[from_lib] ]
keyword[del] identifier[self] . identifier[_library_cache] [ identifier[lib] . identifier[get_name] ()]
identifier[self] . identifier[_cache] . identifier[update_item_for_key] (
literal[string] , identifier[self] . identifier[_sanitize_lib_name] ( identifier[from_lib] ), identifier[self] . identifier[_sanitize_lib_name] ( identifier[to_lib] )) | def rename_library(self, from_lib, to_lib):
"""
Renames a library
Parameters
----------
from_lib: str
The name of the library to be renamed
to_lib: str
The new name of the library
"""
to_colname = to_lib
if '.' in from_lib and '.' in to_lib:
if from_lib.split('.')[0] != to_lib.split('.')[0]:
raise ValueError('Collection can only be renamed in the same database') # depends on [control=['if'], data=[]]
to_colname = to_lib.split('.')[1] # depends on [control=['if'], data=[]]
lib = ArcticLibraryBinding(self, from_lib)
colname = lib.get_top_level_collection().name
logger.info('Renaming collection: %s' % colname)
lib._db[colname].rename(to_colname)
for coll in lib._db.list_collection_names():
if coll.startswith(colname + '.'):
lib._db[coll].rename(coll.replace(colname, to_colname)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['coll']]
if from_lib in self._library_cache:
del self._library_cache[from_lib]
del self._library_cache[lib.get_name()] # depends on [control=['if'], data=['from_lib']]
self._cache.update_item_for_key('list_libraries', self._sanitize_lib_name(from_lib), self._sanitize_lib_name(to_lib)) |
def FormatTypeSummaryTable(self, level_name, name_to_problist):
    """Return an HTML table listing the number of problems by class name.
    Args:
      level_name: string such as "Error" or "Warning"
      name_to_problist: dict mapping class name to an BoundedProblemList object
    Returns:
      HTML in a string
    """
    rows = ['<table>']
    # one row per problem class, in alphabetical order
    for classname in sorted(name_to_problist):
        problist = name_to_problist[classname]
        label = MaybePluralizeWord(problist.count, UnCamelCase(classname))
        rows.append('<tr><td>%d</td><td><a href="#%s%s">%s</a></td></tr>\n' %
                    (problist.count, level_name, classname, label))
    rows.append('</table>\n')
    return ''.join(rows)
constant[Return an HTML table listing the number of problems by class name.
Args:
level_name: string such as "Error" or "Warning"
name_to_problist: dict mapping class name to an BoundedProblemList object
Returns:
HTML in a string
]
variable[output] assign[=] list[[]]
call[name[output].append, parameter[constant[<table>]]]
for taget[name[classname]] in starred[call[name[sorted], parameter[call[name[name_to_problist].keys, parameter[]]]]] begin[:]
variable[problist] assign[=] call[name[name_to_problist]][name[classname]]
variable[human_name] assign[=] call[name[MaybePluralizeWord], parameter[name[problist].count, call[name[UnCamelCase], parameter[name[classname]]]]]
call[name[output].append, parameter[binary_operation[constant[<tr><td>%d</td><td><a href="#%s%s">%s</a></td></tr>
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b2347400>, <ast.Name object at 0x7da1b23453f0>, <ast.Name object at 0x7da1b23457e0>, <ast.Name object at 0x7da1b2345c30>]]]]]
call[name[output].append, parameter[constant[</table>
]]]
return[call[constant[].join, parameter[name[output]]]] | keyword[def] identifier[FormatTypeSummaryTable] ( identifier[self] , identifier[level_name] , identifier[name_to_problist] ):
literal[string]
identifier[output] =[]
identifier[output] . identifier[append] ( literal[string] )
keyword[for] identifier[classname] keyword[in] identifier[sorted] ( identifier[name_to_problist] . identifier[keys] ()):
identifier[problist] = identifier[name_to_problist] [ identifier[classname] ]
identifier[human_name] = identifier[MaybePluralizeWord] ( identifier[problist] . identifier[count] , identifier[UnCamelCase] ( identifier[classname] ))
identifier[output] . identifier[append] ( literal[string] %
( identifier[problist] . identifier[count] , identifier[level_name] , identifier[classname] , identifier[human_name] ))
identifier[output] . identifier[append] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[output] ) | def FormatTypeSummaryTable(self, level_name, name_to_problist):
"""Return an HTML table listing the number of problems by class name.
Args:
level_name: string such as "Error" or "Warning"
name_to_problist: dict mapping class name to an BoundedProblemList object
Returns:
HTML in a string
"""
output = []
output.append('<table>')
for classname in sorted(name_to_problist.keys()):
problist = name_to_problist[classname]
human_name = MaybePluralizeWord(problist.count, UnCamelCase(classname))
output.append('<tr><td>%d</td><td><a href="#%s%s">%s</a></td></tr>\n' % (problist.count, level_name, classname, human_name)) # depends on [control=['for'], data=['classname']]
output.append('</table>\n')
return ''.join(output) |
def count(self, strand, pseudo=False):
    '''Enumerate the total number of secondary structures over the
    structural ensemble of a strand. Runs the 'count' command.
    :param strand: Strand on which to run count. Strands must be either
                   coral.DNA or coral.RNA).
    :type strand: list
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :returns: The count of the number of structures in the structural
              ensemble.
    :rtype: int
    '''
    # command flags for the underlying external tool
    flags = ['-pseudo'] if pseudo else []
    # run the command on the strand's sequence and split stdout into lines
    output_lines = self._run('count', flags, [str(strand)]).split('\n')
    # the count is parsed from the second-to-last line of the output
    return int(float(output_lines[-2]))
constant[Enumerates the total number of secondary structures over the
structural ensemble Ω(π). Runs the 'count' command.
:param strand: Strand on which to run count. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:returns: The count of the number of structures in the structural
ensemble.
:rtype: int
]
if name[pseudo] begin[:]
variable[cmd_args] assign[=] list[[<ast.Constant object at 0x7da1b2345390>]]
variable[stdout] assign[=] call[call[name[self]._run, parameter[constant[count], name[cmd_args], list[[<ast.Call object at 0x7da1b2345b70>]]]].split, parameter[constant[
]]]
return[call[name[int], parameter[call[name[float], parameter[call[name[stdout]][<ast.UnaryOp object at 0x7da1b23479d0>]]]]]] | keyword[def] identifier[count] ( identifier[self] , identifier[strand] , identifier[pseudo] = keyword[False] ):
literal[string]
keyword[if] identifier[pseudo] :
identifier[cmd_args] =[ literal[string] ]
keyword[else] :
identifier[cmd_args] =[]
identifier[stdout] = identifier[self] . identifier[_run] ( literal[string] , identifier[cmd_args] ,[ identifier[str] ( identifier[strand] )]). identifier[split] ( literal[string] )
keyword[return] identifier[int] ( identifier[float] ( identifier[stdout] [- literal[int] ])) | def count(self, strand, pseudo=False):
"""Enumerates the total number of secondary structures over the
structural ensemble Ω(π). Runs the 'count' command.
:param strand: Strand on which to run count. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:returns: The count of the number of structures in the structural
ensemble.
:rtype: int
"""
# Set up command flags
if pseudo:
cmd_args = ['-pseudo'] # depends on [control=['if'], data=[]]
else:
cmd_args = []
# Set up the input file and run the command
stdout = self._run('count', cmd_args, [str(strand)]).split('\n')
# Return the count
return int(float(stdout[-2])) |
def servers(self):
    """gets all the server resources"""
    self.__init()
    resources = []
    # build one Server wrapper per entry that carries an 'id'
    if 'servers' in self._json_dict:
        for entry in self._json_dict['servers']:
            if 'id' in entry:
                resources.append(
                    self.Server(url="%s/%s" % (self.root, entry['id']),
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port))
    return resources
constant[gets all the server resources]
call[name[self].__init, parameter[]]
variable[items] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f09e710>, <ast.Name object at 0x7da18f09f670>]]] in starred[call[name[self]._json_dict.items, parameter[]]] begin[:]
if compare[name[k] equal[==] constant[servers]] begin[:]
for taget[name[s]] in starred[name[v]] begin[:]
if compare[constant[id] in name[s]] begin[:]
variable[url] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f09d180>, <ast.Subscript object at 0x7da18f09d1e0>]]]
call[name[items].append, parameter[call[name[self].Server, parameter[]]]]
<ast.Delete object at 0x7da1b1249690>
return[name[items]] | keyword[def] identifier[servers] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__init] ()
identifier[items] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[_json_dict] . identifier[items] ():
keyword[if] identifier[k] == literal[string] :
keyword[for] identifier[s] keyword[in] identifier[v] :
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[url] = literal[string] %( identifier[self] . identifier[root] , identifier[s] [ literal[string] ])
identifier[items] . identifier[append] (
identifier[self] . identifier[Server] ( identifier[url] = identifier[url] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ))
keyword[del] identifier[k] , identifier[v]
keyword[return] identifier[items] | def servers(self):
"""gets all the server resources"""
self.__init()
items = []
for (k, v) in self._json_dict.items():
if k == 'servers':
for s in v:
if 'id' in s:
url = '%s/%s' % (self.root, s['id'])
items.append(self.Server(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) # depends on [control=['if'], data=['s']] # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]]
del k, v # depends on [control=['for'], data=[]]
return items |
def cat(src_filename, dst_file):
    """Copies the contents of the indicated file to an already opened file."""
    dev, dev_filename = get_dev_and_path(src_filename)
    if dev is not None:
        # remote file: have the device stream its contents back to the host
        filesize = dev.remote_eval(get_filesize, dev_filename)
        return dev.remote(send_file_to_host, dev_filename, dst_file, filesize,
                          xfer_func=recv_file_from_remote)
    # local file: copy it across line by line
    with open(dev_filename, 'rb') as source:
        for chunk in source:
            dst_file.write(chunk)
constant[Copies the contents of the indicated file to an already opened file.]
<ast.Tuple object at 0x7da2047e8760> assign[=] call[name[get_dev_and_path], parameter[name[src_filename]]]
if compare[name[dev] is constant[None]] begin[:]
with call[name[open], parameter[name[dev_filename], constant[rb]]] begin[:]
for taget[name[line]] in starred[name[txtfile]] begin[:]
call[name[dst_file].write, parameter[name[line]]] | keyword[def] identifier[cat] ( identifier[src_filename] , identifier[dst_file] ):
literal[string]
( identifier[dev] , identifier[dev_filename] )= identifier[get_dev_and_path] ( identifier[src_filename] )
keyword[if] identifier[dev] keyword[is] keyword[None] :
keyword[with] identifier[open] ( identifier[dev_filename] , literal[string] ) keyword[as] identifier[txtfile] :
keyword[for] identifier[line] keyword[in] identifier[txtfile] :
identifier[dst_file] . identifier[write] ( identifier[line] )
keyword[else] :
identifier[filesize] = identifier[dev] . identifier[remote_eval] ( identifier[get_filesize] , identifier[dev_filename] )
keyword[return] identifier[dev] . identifier[remote] ( identifier[send_file_to_host] , identifier[dev_filename] , identifier[dst_file] , identifier[filesize] ,
identifier[xfer_func] = identifier[recv_file_from_remote] ) | def cat(src_filename, dst_file):
"""Copies the contents of the indicated file to an already opened file."""
(dev, dev_filename) = get_dev_and_path(src_filename)
if dev is None:
with open(dev_filename, 'rb') as txtfile:
for line in txtfile:
dst_file.write(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['txtfile']] # depends on [control=['if'], data=[]]
else:
filesize = dev.remote_eval(get_filesize, dev_filename)
return dev.remote(send_file_to_host, dev_filename, dst_file, filesize, xfer_func=recv_file_from_remote) |
def _print(self, text, color=None, **kwargs):
"""print text with given color to terminal
"""
COLORS = {
'red': '\033[91m{}\033[00m',
'green': '\033[92m{}\033[00m',
'yellow': '\033[93m{}\033[00m',
'cyan': '\033[96m{}\033[00m'
}
_ = COLORS[color]
six.print_(_.format(text), **kwargs) | def function[_print, parameter[self, text, color]]:
constant[print text with given color to terminal
]
variable[COLORS] assign[=] dictionary[[<ast.Constant object at 0x7da20c6aa2c0>, <ast.Constant object at 0x7da20c6a94b0>, <ast.Constant object at 0x7da20c6aa0e0>, <ast.Constant object at 0x7da20c6aba30>], [<ast.Constant object at 0x7da20c6ab310>, <ast.Constant object at 0x7da20c6a9cc0>, <ast.Constant object at 0x7da20c6aaf80>, <ast.Constant object at 0x7da20c6a8f40>]]
variable[_] assign[=] call[name[COLORS]][name[color]]
call[name[six].print_, parameter[call[name[_].format, parameter[name[text]]]]] | keyword[def] identifier[_print] ( identifier[self] , identifier[text] , identifier[color] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[COLORS] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[_] = identifier[COLORS] [ identifier[color] ]
identifier[six] . identifier[print_] ( identifier[_] . identifier[format] ( identifier[text] ),** identifier[kwargs] ) | def _print(self, text, color=None, **kwargs):
"""print text with given color to terminal
"""
COLORS = {'red': '\x1b[91m{}\x1b[00m', 'green': '\x1b[92m{}\x1b[00m', 'yellow': '\x1b[93m{}\x1b[00m', 'cyan': '\x1b[96m{}\x1b[00m'}
_ = COLORS[color]
six.print_(_.format(text), **kwargs) |
def instruction_LSR_register(self, opcode, register):
""" Logical shift right accumulator """
a = register.value
r = self.LSR(a)
# log.debug("$%x LSR %s value $%x >> 1 = $%x" % (
# self.program_counter,
# register.name, a, r
# ))
register.set(r) | def function[instruction_LSR_register, parameter[self, opcode, register]]:
constant[ Logical shift right accumulator ]
variable[a] assign[=] name[register].value
variable[r] assign[=] call[name[self].LSR, parameter[name[a]]]
call[name[register].set, parameter[name[r]]] | keyword[def] identifier[instruction_LSR_register] ( identifier[self] , identifier[opcode] , identifier[register] ):
literal[string]
identifier[a] = identifier[register] . identifier[value]
identifier[r] = identifier[self] . identifier[LSR] ( identifier[a] )
identifier[register] . identifier[set] ( identifier[r] ) | def instruction_LSR_register(self, opcode, register):
""" Logical shift right accumulator """
a = register.value
r = self.LSR(a)
# log.debug("$%x LSR %s value $%x >> 1 = $%x" % (
# self.program_counter,
# register.name, a, r
# ))
register.set(r) |
def set_notification_callback(self, notification_cb):
"""Set the notifier we'll call when measurements are set."""
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None
return self | def function[set_notification_callback, parameter[self, notification_cb]]:
constant[Set the notifier we'll call when measurements are set.]
name[self]._notification_cb assign[=] name[notification_cb]
if <ast.BoolOp object at 0x7da1b18a90c0> begin[:]
name[self].measured_value.notify_value_set assign[=] constant[None]
return[name[self]] | keyword[def] identifier[set_notification_callback] ( identifier[self] , identifier[notification_cb] ):
literal[string]
identifier[self] . identifier[_notification_cb] = identifier[notification_cb]
keyword[if] keyword[not] identifier[notification_cb] keyword[and] identifier[self] . identifier[dimensions] :
identifier[self] . identifier[measured_value] . identifier[notify_value_set] = keyword[None]
keyword[return] identifier[self] | def set_notification_callback(self, notification_cb):
"""Set the notifier we'll call when measurements are set."""
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None # depends on [control=['if'], data=[]]
return self |
def get_slots(self):
"""Return the current used analyses positions
"""
positions = map(
lambda uid: self.get_item_slot(uid), self.get_analyses_uids())
return map(lambda pos: str(pos), sorted(set(positions))) | def function[get_slots, parameter[self]]:
constant[Return the current used analyses positions
]
variable[positions] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18eb54850>, call[name[self].get_analyses_uids, parameter[]]]]
return[call[name[map], parameter[<ast.Lambda object at 0x7da18eb57250>, call[name[sorted], parameter[call[name[set], parameter[name[positions]]]]]]]] | keyword[def] identifier[get_slots] ( identifier[self] ):
literal[string]
identifier[positions] = identifier[map] (
keyword[lambda] identifier[uid] : identifier[self] . identifier[get_item_slot] ( identifier[uid] ), identifier[self] . identifier[get_analyses_uids] ())
keyword[return] identifier[map] ( keyword[lambda] identifier[pos] : identifier[str] ( identifier[pos] ), identifier[sorted] ( identifier[set] ( identifier[positions] ))) | def get_slots(self):
"""Return the current used analyses positions
"""
positions = map(lambda uid: self.get_item_slot(uid), self.get_analyses_uids())
return map(lambda pos: str(pos), sorted(set(positions))) |
def init_app(self, app):
"""Initializes the Flask application with Common."""
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'common' in app.extensions:
raise RuntimeError("Flask-Common extension already initialized")
app.extensions['common'] = self
self.app = app
if 'COMMON_FILESERVER_DISABLED' not in app.config:
with app.test_request_context():
# Configure WhiteNoise.
app.wsgi_app = WhiteNoise(app.wsgi_app, root=url_for('static', filename='')[1:])
self.cache = Cache(app, config={'CACHE_TYPE': app.config.get("COMMON_CACHE_TYPE", 'simple')})
@app.before_request
def before_request_callback():
request.start_time = maya.now()
@app.after_request
def after_request_callback(response):
if 'COMMON_POWERED_BY_DISABLED' not in current_app.config:
response.headers['X-Powered-By'] = 'Flask'
if 'COMMON_PROCESSED_TIME_DISABLED' not in current_app.config:
response.headers['X-Processed-Time'] = maya.now().epoch - request.start_time.epoch
return response
@app.route('/favicon.ico')
def favicon():
return redirect(url_for('static', filename='favicon.ico'), code=301) | def function[init_app, parameter[self, app]]:
constant[Initializes the Flask application with Common.]
if <ast.UnaryOp object at 0x7da18c4cc700> begin[:]
name[app].extensions assign[=] dictionary[[], []]
if compare[constant[common] in name[app].extensions] begin[:]
<ast.Raise object at 0x7da18c4cea40>
call[name[app].extensions][constant[common]] assign[=] name[self]
name[self].app assign[=] name[app]
if compare[constant[COMMON_FILESERVER_DISABLED] <ast.NotIn object at 0x7da2590d7190> name[app].config] begin[:]
with call[name[app].test_request_context, parameter[]] begin[:]
name[app].wsgi_app assign[=] call[name[WhiteNoise], parameter[name[app].wsgi_app]]
name[self].cache assign[=] call[name[Cache], parameter[name[app]]]
def function[before_request_callback, parameter[]]:
name[request].start_time assign[=] call[name[maya].now, parameter[]]
def function[after_request_callback, parameter[response]]:
if compare[constant[COMMON_POWERED_BY_DISABLED] <ast.NotIn object at 0x7da2590d7190> name[current_app].config] begin[:]
call[name[response].headers][constant[X-Powered-By]] assign[=] constant[Flask]
if compare[constant[COMMON_PROCESSED_TIME_DISABLED] <ast.NotIn object at 0x7da2590d7190> name[current_app].config] begin[:]
call[name[response].headers][constant[X-Processed-Time]] assign[=] binary_operation[call[name[maya].now, parameter[]].epoch - name[request].start_time.epoch]
return[name[response]]
def function[favicon, parameter[]]:
return[call[name[redirect], parameter[call[name[url_for], parameter[constant[static]]]]]] | keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[app] , literal[string] ):
identifier[app] . identifier[extensions] ={}
keyword[if] literal[string] keyword[in] identifier[app] . identifier[extensions] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[app] . identifier[extensions] [ literal[string] ]= identifier[self]
identifier[self] . identifier[app] = identifier[app]
keyword[if] literal[string] keyword[not] keyword[in] identifier[app] . identifier[config] :
keyword[with] identifier[app] . identifier[test_request_context] ():
identifier[app] . identifier[wsgi_app] = identifier[WhiteNoise] ( identifier[app] . identifier[wsgi_app] , identifier[root] = identifier[url_for] ( literal[string] , identifier[filename] = literal[string] )[ literal[int] :])
identifier[self] . identifier[cache] = identifier[Cache] ( identifier[app] , identifier[config] ={ literal[string] : identifier[app] . identifier[config] . identifier[get] ( literal[string] , literal[string] )})
@ identifier[app] . identifier[before_request]
keyword[def] identifier[before_request_callback] ():
identifier[request] . identifier[start_time] = identifier[maya] . identifier[now] ()
@ identifier[app] . identifier[after_request]
keyword[def] identifier[after_request_callback] ( identifier[response] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[current_app] . identifier[config] :
identifier[response] . identifier[headers] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[current_app] . identifier[config] :
identifier[response] . identifier[headers] [ literal[string] ]= identifier[maya] . identifier[now] (). identifier[epoch] - identifier[request] . identifier[start_time] . identifier[epoch]
keyword[return] identifier[response]
@ identifier[app] . identifier[route] ( literal[string] )
keyword[def] identifier[favicon] ():
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[filename] = literal[string] ), identifier[code] = literal[int] ) | def init_app(self, app):
"""Initializes the Flask application with Common."""
if not hasattr(app, 'extensions'):
app.extensions = {} # depends on [control=['if'], data=[]]
if 'common' in app.extensions:
raise RuntimeError('Flask-Common extension already initialized') # depends on [control=['if'], data=[]]
app.extensions['common'] = self
self.app = app
if 'COMMON_FILESERVER_DISABLED' not in app.config:
with app.test_request_context():
# Configure WhiteNoise.
app.wsgi_app = WhiteNoise(app.wsgi_app, root=url_for('static', filename='')[1:]) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
self.cache = Cache(app, config={'CACHE_TYPE': app.config.get('COMMON_CACHE_TYPE', 'simple')})
@app.before_request
def before_request_callback():
request.start_time = maya.now()
@app.after_request
def after_request_callback(response):
if 'COMMON_POWERED_BY_DISABLED' not in current_app.config:
response.headers['X-Powered-By'] = 'Flask' # depends on [control=['if'], data=[]]
if 'COMMON_PROCESSED_TIME_DISABLED' not in current_app.config:
response.headers['X-Processed-Time'] = maya.now().epoch - request.start_time.epoch # depends on [control=['if'], data=[]]
return response
@app.route('/favicon.ico')
def favicon():
return redirect(url_for('static', filename='favicon.ico'), code=301) |
def _extract_table_model(table_data, current, tt):
"""
Add in modelNumber and summaryNumber fields if this is a summary table
:param dict table_data: Table data
:param dict current: LiPD root data
:param str tt: Table type "summ", "ens", "meas"
:return dict current: Current root data
"""
try:
if tt in ["summ", "ens"]:
m = re.match(re_sheet, table_data["tableName"])
if m:
_pc_num= m.group(1) + "Number"
current[_pc_num] = m.group(2)
current["modelNumber"] = m.group(4)
current["tableNumber"] = m.group(6)
else:
logger_ts.error("extract_table_summary: Unable to parse paleo/model/table numbers")
except Exception as e:
logger_ts.error("extract_table_summary: {}".format(e))
return current | def function[_extract_table_model, parameter[table_data, current, tt]]:
constant[
Add in modelNumber and summaryNumber fields if this is a summary table
:param dict table_data: Table data
:param dict current: LiPD root data
:param str tt: Table type "summ", "ens", "meas"
:return dict current: Current root data
]
<ast.Try object at 0x7da20c7cae60>
return[name[current]] | keyword[def] identifier[_extract_table_model] ( identifier[table_data] , identifier[current] , identifier[tt] ):
literal[string]
keyword[try] :
keyword[if] identifier[tt] keyword[in] [ literal[string] , literal[string] ]:
identifier[m] = identifier[re] . identifier[match] ( identifier[re_sheet] , identifier[table_data] [ literal[string] ])
keyword[if] identifier[m] :
identifier[_pc_num] = identifier[m] . identifier[group] ( literal[int] )+ literal[string]
identifier[current] [ identifier[_pc_num] ]= identifier[m] . identifier[group] ( literal[int] )
identifier[current] [ literal[string] ]= identifier[m] . identifier[group] ( literal[int] )
identifier[current] [ literal[string] ]= identifier[m] . identifier[group] ( literal[int] )
keyword[else] :
identifier[logger_ts] . identifier[error] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger_ts] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return] identifier[current] | def _extract_table_model(table_data, current, tt):
"""
Add in modelNumber and summaryNumber fields if this is a summary table
:param dict table_data: Table data
:param dict current: LiPD root data
:param str tt: Table type "summ", "ens", "meas"
:return dict current: Current root data
"""
try:
if tt in ['summ', 'ens']:
m = re.match(re_sheet, table_data['tableName'])
if m:
_pc_num = m.group(1) + 'Number'
current[_pc_num] = m.group(2)
current['modelNumber'] = m.group(4)
current['tableNumber'] = m.group(6) # depends on [control=['if'], data=[]]
else:
logger_ts.error('extract_table_summary: Unable to parse paleo/model/table numbers') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
logger_ts.error('extract_table_summary: {}'.format(e)) # depends on [control=['except'], data=['e']]
return current |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.