code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def create_bar_chart(self, x_labels, y_values, y_label):
"""Creates bar char
:param x_labels: Names for each variable
:param y_values: Values of x labels
:param y_label: Label of y axis
:return: Bar chart
"""
self.setup(0.25)
ax1 = self.get_ax()
ax1.set_xticks(list(range(len(x_labels))))
ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))],
rotation=90)
plt.ylabel(y_label)
x_pos = range(len(x_labels))
plt.bar(x_pos, y_values, align="center")
return ax1 | def function[create_bar_chart, parameter[self, x_labels, y_values, y_label]]:
constant[Creates bar char
:param x_labels: Names for each variable
:param y_values: Values of x labels
:param y_label: Label of y axis
:return: Bar chart
]
call[name[self].setup, parameter[constant[0.25]]]
variable[ax1] assign[=] call[name[self].get_ax, parameter[]]
call[name[ax1].set_xticks, parameter[call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[x_labels]]]]]]]]]
call[name[ax1].set_xticklabels, parameter[<ast.ListComp object at 0x7da18fe938b0>]]
call[name[plt].ylabel, parameter[name[y_label]]]
variable[x_pos] assign[=] call[name[range], parameter[call[name[len], parameter[name[x_labels]]]]]
call[name[plt].bar, parameter[name[x_pos], name[y_values]]]
return[name[ax1]] | keyword[def] identifier[create_bar_chart] ( identifier[self] , identifier[x_labels] , identifier[y_values] , identifier[y_label] ):
literal[string]
identifier[self] . identifier[setup] ( literal[int] )
identifier[ax1] = identifier[self] . identifier[get_ax] ()
identifier[ax1] . identifier[set_xticks] ( identifier[list] ( identifier[range] ( identifier[len] ( identifier[x_labels] ))))
identifier[ax1] . identifier[set_xticklabels] ([ identifier[x_labels] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[x_labels] ))],
identifier[rotation] = literal[int] )
identifier[plt] . identifier[ylabel] ( identifier[y_label] )
identifier[x_pos] = identifier[range] ( identifier[len] ( identifier[x_labels] ))
identifier[plt] . identifier[bar] ( identifier[x_pos] , identifier[y_values] , identifier[align] = literal[string] )
keyword[return] identifier[ax1] | def create_bar_chart(self, x_labels, y_values, y_label):
"""Creates bar char
:param x_labels: Names for each variable
:param y_values: Values of x labels
:param y_label: Label of y axis
:return: Bar chart
"""
self.setup(0.25)
ax1 = self.get_ax()
ax1.set_xticks(list(range(len(x_labels))))
ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90)
plt.ylabel(y_label)
x_pos = range(len(x_labels))
plt.bar(x_pos, y_values, align='center')
return ax1 |
def list_snapshots_days(path, cam_id):
"""Returns a list of (date, dir) in which snapshopts are present"""
screenshoots_path = path + "/" + str(cam_id)
if os.path.exists(screenshoots_path):
days = []
for day_dir in os.listdir(screenshoots_path):
date = datetime.datetime.strptime(day_dir, "%d%m%Y").strftime('%d/%m/%y')
days.append((date, day_dir))
return days
else:
return [] | def function[list_snapshots_days, parameter[path, cam_id]]:
constant[Returns a list of (date, dir) in which snapshopts are present]
variable[screenshoots_path] assign[=] binary_operation[binary_operation[name[path] + constant[/]] + call[name[str], parameter[name[cam_id]]]]
if call[name[os].path.exists, parameter[name[screenshoots_path]]] begin[:]
variable[days] assign[=] list[[]]
for taget[name[day_dir]] in starred[call[name[os].listdir, parameter[name[screenshoots_path]]]] begin[:]
variable[date] assign[=] call[call[name[datetime].datetime.strptime, parameter[name[day_dir], constant[%d%m%Y]]].strftime, parameter[constant[%d/%m/%y]]]
call[name[days].append, parameter[tuple[[<ast.Name object at 0x7da18dc05690>, <ast.Name object at 0x7da18dc06e30>]]]]
return[name[days]] | keyword[def] identifier[list_snapshots_days] ( identifier[path] , identifier[cam_id] ):
literal[string]
identifier[screenshoots_path] = identifier[path] + literal[string] + identifier[str] ( identifier[cam_id] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[screenshoots_path] ):
identifier[days] =[]
keyword[for] identifier[day_dir] keyword[in] identifier[os] . identifier[listdir] ( identifier[screenshoots_path] ):
identifier[date] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[day_dir] , literal[string] ). identifier[strftime] ( literal[string] )
identifier[days] . identifier[append] (( identifier[date] , identifier[day_dir] ))
keyword[return] identifier[days]
keyword[else] :
keyword[return] [] | def list_snapshots_days(path, cam_id):
"""Returns a list of (date, dir) in which snapshopts are present"""
screenshoots_path = path + '/' + str(cam_id)
if os.path.exists(screenshoots_path):
days = []
for day_dir in os.listdir(screenshoots_path):
date = datetime.datetime.strptime(day_dir, '%d%m%Y').strftime('%d/%m/%y')
days.append((date, day_dir)) # depends on [control=['for'], data=['day_dir']]
return days # depends on [control=['if'], data=[]]
else:
return [] |
def _have_conf(self, magic_hash=None):
"""Get the daemon current configuration state
If the daemon has received a configuration from its arbiter, this will
return True
If a `magic_hash` is provided it is compared with the one included in the
daemon configuration and this function returns True only if they match!
:return: boolean indicating if the daemon has a configuration
:rtype: bool
"""
self.app.have_conf = getattr(self.app, 'cur_conf', None) not in [None, {}]
if magic_hash is not None:
# Beware, we got an str in entry, not an int
magic_hash = int(magic_hash)
# I've got a conf and a good one
return self.app.have_conf and self.app.cur_conf.magic_hash == magic_hash
return self.app.have_conf | def function[_have_conf, parameter[self, magic_hash]]:
constant[Get the daemon current configuration state
If the daemon has received a configuration from its arbiter, this will
return True
If a `magic_hash` is provided it is compared with the one included in the
daemon configuration and this function returns True only if they match!
:return: boolean indicating if the daemon has a configuration
:rtype: bool
]
name[self].app.have_conf assign[=] compare[call[name[getattr], parameter[name[self].app, constant[cur_conf], constant[None]]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18bc71300>, <ast.Dict object at 0x7da18bc72590>]]]
if compare[name[magic_hash] is_not constant[None]] begin[:]
variable[magic_hash] assign[=] call[name[int], parameter[name[magic_hash]]]
return[<ast.BoolOp object at 0x7da18bc730d0>]
return[name[self].app.have_conf] | keyword[def] identifier[_have_conf] ( identifier[self] , identifier[magic_hash] = keyword[None] ):
literal[string]
identifier[self] . identifier[app] . identifier[have_conf] = identifier[getattr] ( identifier[self] . identifier[app] , literal[string] , keyword[None] ) keyword[not] keyword[in] [ keyword[None] ,{}]
keyword[if] identifier[magic_hash] keyword[is] keyword[not] keyword[None] :
identifier[magic_hash] = identifier[int] ( identifier[magic_hash] )
keyword[return] identifier[self] . identifier[app] . identifier[have_conf] keyword[and] identifier[self] . identifier[app] . identifier[cur_conf] . identifier[magic_hash] == identifier[magic_hash]
keyword[return] identifier[self] . identifier[app] . identifier[have_conf] | def _have_conf(self, magic_hash=None):
"""Get the daemon current configuration state
If the daemon has received a configuration from its arbiter, this will
return True
If a `magic_hash` is provided it is compared with the one included in the
daemon configuration and this function returns True only if they match!
:return: boolean indicating if the daemon has a configuration
:rtype: bool
"""
self.app.have_conf = getattr(self.app, 'cur_conf', None) not in [None, {}]
if magic_hash is not None:
# Beware, we got an str in entry, not an int
magic_hash = int(magic_hash)
# I've got a conf and a good one
return self.app.have_conf and self.app.cur_conf.magic_hash == magic_hash # depends on [control=['if'], data=['magic_hash']]
return self.app.have_conf |
def nodeInLanguageStem(_: Context, n: Node, s: ShExJ.LanguageStem) -> bool:
""" http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLanguageStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.WildCard` or
#) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
"""
return isinstance(s, ShExJ.Wildcard) or \
(isinstance(n, Literal) and n.language is not None and str(n.language).startswith(str(s))) | def function[nodeInLanguageStem, parameter[_, n, s]]:
constant[ http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLanguageStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.WildCard` or
#) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
]
return[<ast.BoolOp object at 0x7da207f02aa0>] | keyword[def] identifier[nodeInLanguageStem] ( identifier[_] : identifier[Context] , identifier[n] : identifier[Node] , identifier[s] : identifier[ShExJ] . identifier[LanguageStem] )-> identifier[bool] :
literal[string]
keyword[return] identifier[isinstance] ( identifier[s] , identifier[ShExJ] . identifier[Wildcard] ) keyword[or] ( identifier[isinstance] ( identifier[n] , identifier[Literal] ) keyword[and] identifier[n] . identifier[language] keyword[is] keyword[not] keyword[None] keyword[and] identifier[str] ( identifier[n] . identifier[language] ). identifier[startswith] ( identifier[str] ( identifier[s] ))) | def nodeInLanguageStem(_: Context, n: Node, s: ShExJ.LanguageStem) -> bool:
""" http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLanguageStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.WildCard` or
#) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
"""
return isinstance(s, ShExJ.Wildcard) or (isinstance(n, Literal) and n.language is not None and str(n.language).startswith(str(s))) |
def _setup_sentry_client(context):
"""Produce and configure the sentry client."""
# get_secret will be deprecated soon
dsn = os.environ.get("SENTRY_DSN")
try:
client = raven.Client(dsn, sample_rate=SENTRY_SAMPLE_RATE)
client.user_context(_sentry_context_dict(context))
return client
except:
rlogger.error("Raven client error", exc_info=True)
return None | def function[_setup_sentry_client, parameter[context]]:
constant[Produce and configure the sentry client.]
variable[dsn] assign[=] call[name[os].environ.get, parameter[constant[SENTRY_DSN]]]
<ast.Try object at 0x7da1b1452170> | keyword[def] identifier[_setup_sentry_client] ( identifier[context] ):
literal[string]
identifier[dsn] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[try] :
identifier[client] = identifier[raven] . identifier[Client] ( identifier[dsn] , identifier[sample_rate] = identifier[SENTRY_SAMPLE_RATE] )
identifier[client] . identifier[user_context] ( identifier[_sentry_context_dict] ( identifier[context] ))
keyword[return] identifier[client]
keyword[except] :
identifier[rlogger] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] )
keyword[return] keyword[None] | def _setup_sentry_client(context):
"""Produce and configure the sentry client."""
# get_secret will be deprecated soon
dsn = os.environ.get('SENTRY_DSN')
try:
client = raven.Client(dsn, sample_rate=SENTRY_SAMPLE_RATE)
client.user_context(_sentry_context_dict(context))
return client # depends on [control=['try'], data=[]]
except:
rlogger.error('Raven client error', exc_info=True)
return None # depends on [control=['except'], data=[]] |
def create(self, term, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
options['action'] = 'CREATE'
payload = self._build_payload(term, options)
url = self.ALERTS_CREATE_URL.format(requestX=self._state[3])
self._log.debug("Creating alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
if options.get('exact', False):
term = "\"%s\"" % term
return self.list(term) | def function[create, parameter[self, term, options]]:
constant[Create a monitor using passed configuration.]
if <ast.UnaryOp object at 0x7da20c6e4460> begin[:]
<ast.Raise object at 0x7da20c6e77c0>
call[name[options]][constant[action]] assign[=] constant[CREATE]
variable[payload] assign[=] call[name[self]._build_payload, parameter[name[term], name[options]]]
variable[url] assign[=] call[name[self].ALERTS_CREATE_URL.format, parameter[]]
call[name[self]._log.debug, parameter[binary_operation[constant[Creating alert using: %s] <ast.Mod object at 0x7da2590d6920> name[url]]]]
variable[params] assign[=] call[name[json].dumps, parameter[name[payload]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57970>], [<ast.Name object at 0x7da18eb566e0>]]
variable[response] assign[=] call[name[self]._session.post, parameter[name[url]]]
if compare[name[response].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da20c6e63e0>
if call[name[options].get, parameter[constant[exact], constant[False]]] begin[:]
variable[term] assign[=] binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[term]]
return[call[name[self].list, parameter[name[term]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[term] , identifier[options] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_state] :
keyword[raise] identifier[InvalidState] ( literal[string] )
identifier[options] [ literal[string] ]= literal[string]
identifier[payload] = identifier[self] . identifier[_build_payload] ( identifier[term] , identifier[options] )
identifier[url] = identifier[self] . identifier[ALERTS_CREATE_URL] . identifier[format] ( identifier[requestX] = identifier[self] . identifier[_state] [ literal[int] ])
identifier[self] . identifier[_log] . identifier[debug] ( literal[string] % identifier[url] )
identifier[params] = identifier[json] . identifier[dumps] ( identifier[payload] , identifier[separators] =( literal[string] , literal[string] ))
identifier[data] ={ literal[string] : identifier[params] }
identifier[response] = identifier[self] . identifier[_session] . identifier[post] ( identifier[url] , identifier[data] = identifier[data] , identifier[headers] = identifier[self] . identifier[HEADERS] )
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
keyword[raise] identifier[ActionError] ( literal[string]
% identifier[response] . identifier[content] )
keyword[if] identifier[options] . identifier[get] ( literal[string] , keyword[False] ):
identifier[term] = literal[string] % identifier[term]
keyword[return] identifier[self] . identifier[list] ( identifier[term] ) | def create(self, term, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState('State was not properly obtained from the app') # depends on [control=['if'], data=[]]
options['action'] = 'CREATE'
payload = self._build_payload(term, options)
url = self.ALERTS_CREATE_URL.format(requestX=self._state[3])
self._log.debug('Creating alert using: %s' % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError('Failed to create monitor: %s' % response.content) # depends on [control=['if'], data=[]]
if options.get('exact', False):
term = '"%s"' % term # depends on [control=['if'], data=[]]
return self.list(term) |
def description(self):
"""
Retrieve a description of the columns in the current result set
:return: A tuple of seven elements. Only some elements are meaningful:\n
* Element #0 is the name of the column
* Element #1 is the type code of the column
* Element #6 is true if the column may contain ``NULL`` values
"""
if self.result_set:
info = self.result_set.get_column_info()
return [(c.name, c.type_code(), None, None, None, None, c.supports_null_values) for c in info]
else:
return None | def function[description, parameter[self]]:
constant[
Retrieve a description of the columns in the current result set
:return: A tuple of seven elements. Only some elements are meaningful:
* Element #0 is the name of the column
* Element #1 is the type code of the column
* Element #6 is true if the column may contain ``NULL`` values
]
if name[self].result_set begin[:]
variable[info] assign[=] call[name[self].result_set.get_column_info, parameter[]]
return[<ast.ListComp object at 0x7da18bc70b20>] | keyword[def] identifier[description] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[result_set] :
identifier[info] = identifier[self] . identifier[result_set] . identifier[get_column_info] ()
keyword[return] [( identifier[c] . identifier[name] , identifier[c] . identifier[type_code] (), keyword[None] , keyword[None] , keyword[None] , keyword[None] , identifier[c] . identifier[supports_null_values] ) keyword[for] identifier[c] keyword[in] identifier[info] ]
keyword[else] :
keyword[return] keyword[None] | def description(self):
"""
Retrieve a description of the columns in the current result set
:return: A tuple of seven elements. Only some elements are meaningful:
* Element #0 is the name of the column
* Element #1 is the type code of the column
* Element #6 is true if the column may contain ``NULL`` values
"""
if self.result_set:
info = self.result_set.get_column_info()
return [(c.name, c.type_code(), None, None, None, None, c.supports_null_values) for c in info] # depends on [control=['if'], data=[]]
else:
return None |
def create_store():
"""
A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary
"""
new_storage = _proxy('store')
_state.store = type('store', (object,), {})
new_storage.store = dict()
return new_storage.store | def function[create_store, parameter[]]:
constant[
A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary
]
variable[new_storage] assign[=] call[name[_proxy], parameter[constant[store]]]
name[_state].store assign[=] call[name[type], parameter[constant[store], tuple[[<ast.Name object at 0x7da18f58ed70>]], dictionary[[], []]]]
name[new_storage].store assign[=] call[name[dict], parameter[]]
return[name[new_storage].store] | keyword[def] identifier[create_store] ():
literal[string]
identifier[new_storage] = identifier[_proxy] ( literal[string] )
identifier[_state] . identifier[store] = identifier[type] ( literal[string] ,( identifier[object] ,),{})
identifier[new_storage] . identifier[store] = identifier[dict] ()
keyword[return] identifier[new_storage] . identifier[store] | def create_store():
"""
A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary
"""
new_storage = _proxy('store')
_state.store = type('store', (object,), {})
new_storage.store = dict()
return new_storage.store |
def consume(self, consumer_callback=None, exclusive=False):
"""
Initialize consuming of messages from an AMQP RPC queue. Messages will be consumed after start_consuming() is called.
An internal callback will be used to handle incoming RPC responses. Only responses that have been registered with register_response()
will be kept internally, all other responses will be dropped silently. Responses can be accessed by using get_response().
The internal callback will assume that the incoming RPC responses will have a correlation_id property set in the headers.
Additionally, if a general purpose queue was created on construction, the parameters to this function can be used to declare a callback
and options for that queue. A ValueError is raised when trying to set a general purpose callback, but no queue was declared during
construction.
In contrast to the Queue class, the recover parameter is missing from this implementation of consume(). We will always try to requeue
old messages.
Parameters
----------
consumer_callback: callback
Function to call when a message is consumed. The callback function will be called on each delivery,
and will receive three parameters:
* channel
* method_frame
* header_frame
* body
exclusive: boolean
Is this consumer supposed to be the exclusive consumer of the given queue?
"""
if not hasattr(self, "queue_name") and consumer_callback:
raise ValueError("Trying to set a callback, while no general purpose queue was declared.")
self.rpc_consumer_tag = self.channel.basic_consume(consumer_callback=self._rpc_response_callback, queue=self.rpc_queue_name, exclusive=False)
if consumer_callback:
super(Rpc, self).consume(consumer_callback, exclusive, True) | def function[consume, parameter[self, consumer_callback, exclusive]]:
constant[
Initialize consuming of messages from an AMQP RPC queue. Messages will be consumed after start_consuming() is called.
An internal callback will be used to handle incoming RPC responses. Only responses that have been registered with register_response()
will be kept internally, all other responses will be dropped silently. Responses can be accessed by using get_response().
The internal callback will assume that the incoming RPC responses will have a correlation_id property set in the headers.
Additionally, if a general purpose queue was created on construction, the parameters to this function can be used to declare a callback
and options for that queue. A ValueError is raised when trying to set a general purpose callback, but no queue was declared during
construction.
In contrast to the Queue class, the recover parameter is missing from this implementation of consume(). We will always try to requeue
old messages.
Parameters
----------
consumer_callback: callback
Function to call when a message is consumed. The callback function will be called on each delivery,
and will receive three parameters:
* channel
* method_frame
* header_frame
* body
exclusive: boolean
Is this consumer supposed to be the exclusive consumer of the given queue?
]
if <ast.BoolOp object at 0x7da1b1a1c190> begin[:]
<ast.Raise object at 0x7da1b1a1ca90>
name[self].rpc_consumer_tag assign[=] call[name[self].channel.basic_consume, parameter[]]
if name[consumer_callback] begin[:]
call[call[name[super], parameter[name[Rpc], name[self]]].consume, parameter[name[consumer_callback], name[exclusive], constant[True]]] | keyword[def] identifier[consume] ( identifier[self] , identifier[consumer_callback] = keyword[None] , identifier[exclusive] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[consumer_callback] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[rpc_consumer_tag] = identifier[self] . identifier[channel] . identifier[basic_consume] ( identifier[consumer_callback] = identifier[self] . identifier[_rpc_response_callback] , identifier[queue] = identifier[self] . identifier[rpc_queue_name] , identifier[exclusive] = keyword[False] )
keyword[if] identifier[consumer_callback] :
identifier[super] ( identifier[Rpc] , identifier[self] ). identifier[consume] ( identifier[consumer_callback] , identifier[exclusive] , keyword[True] ) | def consume(self, consumer_callback=None, exclusive=False):
"""
Initialize consuming of messages from an AMQP RPC queue. Messages will be consumed after start_consuming() is called.
An internal callback will be used to handle incoming RPC responses. Only responses that have been registered with register_response()
will be kept internally, all other responses will be dropped silently. Responses can be accessed by using get_response().
The internal callback will assume that the incoming RPC responses will have a correlation_id property set in the headers.
Additionally, if a general purpose queue was created on construction, the parameters to this function can be used to declare a callback
and options for that queue. A ValueError is raised when trying to set a general purpose callback, but no queue was declared during
construction.
In contrast to the Queue class, the recover parameter is missing from this implementation of consume(). We will always try to requeue
old messages.
Parameters
----------
consumer_callback: callback
Function to call when a message is consumed. The callback function will be called on each delivery,
and will receive three parameters:
* channel
* method_frame
* header_frame
* body
exclusive: boolean
Is this consumer supposed to be the exclusive consumer of the given queue?
"""
if not hasattr(self, 'queue_name') and consumer_callback:
raise ValueError('Trying to set a callback, while no general purpose queue was declared.') # depends on [control=['if'], data=[]]
self.rpc_consumer_tag = self.channel.basic_consume(consumer_callback=self._rpc_response_callback, queue=self.rpc_queue_name, exclusive=False)
if consumer_callback:
super(Rpc, self).consume(consumer_callback, exclusive, True) # depends on [control=['if'], data=[]] |
def _compute_pga_rock(self, C_PGA, mag, rjb):
"""
Returns the PGA (g) on rock, as defined in equation 15
"""
return np.exp(self._compute_linear_magnitude_term(C_PGA, mag) +
self._compute_simple_distance_term(C_PGA, rjb)) | def function[_compute_pga_rock, parameter[self, C_PGA, mag, rjb]]:
constant[
Returns the PGA (g) on rock, as defined in equation 15
]
return[call[name[np].exp, parameter[binary_operation[call[name[self]._compute_linear_magnitude_term, parameter[name[C_PGA], name[mag]]] + call[name[self]._compute_simple_distance_term, parameter[name[C_PGA], name[rjb]]]]]]] | keyword[def] identifier[_compute_pga_rock] ( identifier[self] , identifier[C_PGA] , identifier[mag] , identifier[rjb] ):
literal[string]
keyword[return] identifier[np] . identifier[exp] ( identifier[self] . identifier[_compute_linear_magnitude_term] ( identifier[C_PGA] , identifier[mag] )+
identifier[self] . identifier[_compute_simple_distance_term] ( identifier[C_PGA] , identifier[rjb] )) | def _compute_pga_rock(self, C_PGA, mag, rjb):
"""
Returns the PGA (g) on rock, as defined in equation 15
"""
return np.exp(self._compute_linear_magnitude_term(C_PGA, mag) + self._compute_simple_distance_term(C_PGA, rjb)) |
def return_data(self, data, format=None):
"""Format and return data appropriate to the requested API format.
data: The data retured by the api request
"""
if format is None:
format = self.format
if format == "json":
formatted_data = json.loads(data)
else:
formatted_data = data
return formatted_data | def function[return_data, parameter[self, data, format]]:
constant[Format and return data appropriate to the requested API format.
data: The data retured by the api request
]
if compare[name[format] is constant[None]] begin[:]
variable[format] assign[=] name[self].format
if compare[name[format] equal[==] constant[json]] begin[:]
variable[formatted_data] assign[=] call[name[json].loads, parameter[name[data]]]
return[name[formatted_data]] | keyword[def] identifier[return_data] ( identifier[self] , identifier[data] , identifier[format] = keyword[None] ):
literal[string]
keyword[if] identifier[format] keyword[is] keyword[None] :
identifier[format] = identifier[self] . identifier[format]
keyword[if] identifier[format] == literal[string] :
identifier[formatted_data] = identifier[json] . identifier[loads] ( identifier[data] )
keyword[else] :
identifier[formatted_data] = identifier[data]
keyword[return] identifier[formatted_data] | def return_data(self, data, format=None):
"""Format and return data appropriate to the requested API format.
data: The data retured by the api request
"""
if format is None:
format = self.format # depends on [control=['if'], data=['format']]
if format == 'json':
formatted_data = json.loads(data) # depends on [control=['if'], data=[]]
else:
formatted_data = data
return formatted_data |
def _seconds_have_elapsed(token, num_seconds):
"""Tests if 'num_seconds' have passed since 'token' was requested.
Not strictly thread-safe - may log with the wrong frequency if called
concurrently from multiple threads. Accuracy depends on resolution of
'timeit.default_timer()'.
Always returns True on the first call for a given 'token'.
Args:
token: The token for which to look up the count.
num_seconds: The number of seconds to test for.
Returns:
Whether it has been >= 'num_seconds' since 'token' was last requested.
"""
now = timeit.default_timer()
then = _log_timer_per_token.get(token, None)
if then is None or (now - then) >= num_seconds:
_log_timer_per_token[token] = now
return True
else:
return False | def function[_seconds_have_elapsed, parameter[token, num_seconds]]:
constant[Tests if 'num_seconds' have passed since 'token' was requested.
Not strictly thread-safe - may log with the wrong frequency if called
concurrently from multiple threads. Accuracy depends on resolution of
'timeit.default_timer()'.
Always returns True on the first call for a given 'token'.
Args:
token: The token for which to look up the count.
num_seconds: The number of seconds to test for.
Returns:
Whether it has been >= 'num_seconds' since 'token' was last requested.
]
variable[now] assign[=] call[name[timeit].default_timer, parameter[]]
variable[then] assign[=] call[name[_log_timer_per_token].get, parameter[name[token], constant[None]]]
if <ast.BoolOp object at 0x7da1b18bdf30> begin[:]
call[name[_log_timer_per_token]][name[token]] assign[=] name[now]
return[constant[True]] | keyword[def] identifier[_seconds_have_elapsed] ( identifier[token] , identifier[num_seconds] ):
literal[string]
identifier[now] = identifier[timeit] . identifier[default_timer] ()
identifier[then] = identifier[_log_timer_per_token] . identifier[get] ( identifier[token] , keyword[None] )
keyword[if] identifier[then] keyword[is] keyword[None] keyword[or] ( identifier[now] - identifier[then] )>= identifier[num_seconds] :
identifier[_log_timer_per_token] [ identifier[token] ]= identifier[now]
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def _seconds_have_elapsed(token, num_seconds):
"""Tests if 'num_seconds' have passed since 'token' was requested.
Not strictly thread-safe - may log with the wrong frequency if called
concurrently from multiple threads. Accuracy depends on resolution of
'timeit.default_timer()'.
Always returns True on the first call for a given 'token'.
Args:
token: The token for which to look up the count.
num_seconds: The number of seconds to test for.
Returns:
Whether it has been >= 'num_seconds' since 'token' was last requested.
"""
now = timeit.default_timer()
then = _log_timer_per_token.get(token, None)
if then is None or now - then >= num_seconds:
_log_timer_per_token[token] = now
return True # depends on [control=['if'], data=[]]
else:
return False |
def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out | def function[_line_by_type, parameter[self, line, header, hgroups, htypes, out, want_type, collapse_quals_fn]]:
constant[Parse out key value pairs for line information based on a group of values.
]
for taget[tuple[[<ast.Name object at 0x7da18dc98430>, <ast.Name object at 0x7da18dc985b0>]]] in starred[<ast.GeneratorExp object at 0x7da18dc9b160>] begin[:]
variable[col] assign[=] call[call[name[hgroups]][name[index]]][constant[0]]
variable[key] assign[=] call[name[header]][name[col]]
if name[collapse_quals_fn] begin[:]
variable[val] assign[=] call[name[collapse_quals_fn], parameter[name[line], name[header], call[name[hgroups]][name[index]]]]
call[call[name[out]][name[key]].add, parameter[name[val]]]
return[name[out]] | keyword[def] identifier[_line_by_type] ( identifier[self] , identifier[line] , identifier[header] , identifier[hgroups] , identifier[htypes] , identifier[out] , identifier[want_type] ,
identifier[collapse_quals_fn] = keyword[None] ):
literal[string]
keyword[for] identifier[index] , identifier[htype] keyword[in] (( identifier[i] , identifier[t] ) keyword[for] identifier[i] , identifier[t] keyword[in] identifier[enumerate] ( identifier[htypes] ) keyword[if] identifier[t] == identifier[want_type] ):
identifier[col] = identifier[hgroups] [ identifier[index] ][ literal[int] ]
identifier[key] = identifier[header] [ identifier[col] ]
keyword[if] identifier[collapse_quals_fn] :
identifier[val] = identifier[collapse_quals_fn] ( identifier[line] , identifier[header] , identifier[hgroups] [ identifier[index] ])
keyword[else] :
identifier[val] = identifier[line] [ identifier[col] ]
identifier[out] [ identifier[key] ]. identifier[add] ( identifier[val] )
keyword[return] identifier[out] | def _line_by_type(self, line, header, hgroups, htypes, out, want_type, collapse_quals_fn=None):
"""Parse out key value pairs for line information based on a group of values.
"""
for (index, htype) in ((i, t) for (i, t) in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col] #self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index]) # depends on [control=['if'], data=[]]
else:
val = line[col]
out[key].add(val) # depends on [control=['for'], data=[]]
return out |
def get_squeezenet(version, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = SqueezeNet(version, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('squeezenet%s'%version, root=root), ctx=ctx)
return net | def function[get_squeezenet, parameter[version, pretrained, ctx, root]]:
constant[SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
]
variable[net] assign[=] call[name[SqueezeNet], parameter[name[version]]]
if name[pretrained] begin[:]
from relative_module[model_store] import module[get_model_file]
call[name[net].load_parameters, parameter[call[name[get_model_file], parameter[binary_operation[constant[squeezenet%s] <ast.Mod object at 0x7da2590d6920> name[version]]]]]]
return[name[net]] | keyword[def] identifier[get_squeezenet] ( identifier[version] , identifier[pretrained] = keyword[False] , identifier[ctx] = identifier[cpu] (),
identifier[root] = identifier[os] . identifier[path] . identifier[join] ( identifier[base] . identifier[data_dir] (), literal[string] ),** identifier[kwargs] ):
literal[string]
identifier[net] = identifier[SqueezeNet] ( identifier[version] ,** identifier[kwargs] )
keyword[if] identifier[pretrained] :
keyword[from] .. identifier[model_store] keyword[import] identifier[get_model_file]
identifier[net] . identifier[load_parameters] ( identifier[get_model_file] ( literal[string] % identifier[version] , identifier[root] = identifier[root] ), identifier[ctx] = identifier[ctx] )
keyword[return] identifier[net] | def get_squeezenet(version, pretrained=False, ctx=cpu(), root=os.path.join(base.data_dir(), 'models'), **kwargs):
"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = SqueezeNet(version, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('squeezenet%s' % version, root=root), ctx=ctx) # depends on [control=['if'], data=[]]
return net |
def do_POST(self):
"""Handles HTTP POST requests."""
post_data = self.rfile.read(int(self.headers['Content-Length']))
json_data = gzip.decompress(post_data)
self._profile_json.update(json.loads(json_data.decode('utf-8')))
self._send_response(
200, headers=(('Content-type', '%s; charset=utf-8' % 'text/json'),
('Content-Encoding', 'gzip'),
('Content-Length', len(post_data)))) | def function[do_POST, parameter[self]]:
constant[Handles HTTP POST requests.]
variable[post_data] assign[=] call[name[self].rfile.read, parameter[call[name[int], parameter[call[name[self].headers][constant[Content-Length]]]]]]
variable[json_data] assign[=] call[name[gzip].decompress, parameter[name[post_data]]]
call[name[self]._profile_json.update, parameter[call[name[json].loads, parameter[call[name[json_data].decode, parameter[constant[utf-8]]]]]]]
call[name[self]._send_response, parameter[constant[200]]] | keyword[def] identifier[do_POST] ( identifier[self] ):
literal[string]
identifier[post_data] = identifier[self] . identifier[rfile] . identifier[read] ( identifier[int] ( identifier[self] . identifier[headers] [ literal[string] ]))
identifier[json_data] = identifier[gzip] . identifier[decompress] ( identifier[post_data] )
identifier[self] . identifier[_profile_json] . identifier[update] ( identifier[json] . identifier[loads] ( identifier[json_data] . identifier[decode] ( literal[string] )))
identifier[self] . identifier[_send_response] (
literal[int] , identifier[headers] =(( literal[string] , literal[string] % literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , identifier[len] ( identifier[post_data] )))) | def do_POST(self):
"""Handles HTTP POST requests."""
post_data = self.rfile.read(int(self.headers['Content-Length']))
json_data = gzip.decompress(post_data)
self._profile_json.update(json.loads(json_data.decode('utf-8')))
self._send_response(200, headers=(('Content-type', '%s; charset=utf-8' % 'text/json'), ('Content-Encoding', 'gzip'), ('Content-Length', len(post_data)))) |
def w_diffuser_outer(sed_inputs=sed_dict):
"""Return the outer width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return (w_diffuser_inner_min(sed_inputs['tank']['W']) +
(2 * sed_inputs['manifold']['diffuser']['thickness_wall'])).to(u.m).magnitude | def function[w_diffuser_outer, parameter[sed_inputs]]:
constant[Return the outer width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
]
return[call[binary_operation[call[name[w_diffuser_inner_min], parameter[call[call[name[sed_inputs]][constant[tank]]][constant[W]]]] + binary_operation[constant[2] * call[call[call[name[sed_inputs]][constant[manifold]]][constant[diffuser]]][constant[thickness_wall]]]].to, parameter[name[u].m]].magnitude] | keyword[def] identifier[w_diffuser_outer] ( identifier[sed_inputs] = identifier[sed_dict] ):
literal[string]
keyword[return] ( identifier[w_diffuser_inner_min] ( identifier[sed_inputs] [ literal[string] ][ literal[string] ])+
( literal[int] * identifier[sed_inputs] [ literal[string] ][ literal[string] ][ literal[string] ])). identifier[to] ( identifier[u] . identifier[m] ). identifier[magnitude] | def w_diffuser_outer(sed_inputs=sed_dict):
"""Return the outer width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return (w_diffuser_inner_min(sed_inputs['tank']['W']) + 2 * sed_inputs['manifold']['diffuser']['thickness_wall']).to(u.m).magnitude |
def get_word_before_cursor(self, WORD=False):
"""
Give the word before the cursor.
If we have whitespace before the cursor this returns an empty string.
"""
if self.text_before_cursor[-1:].isspace():
return ''
else:
return self.text_before_cursor[self.find_start_of_previous_word(WORD=WORD):] | def function[get_word_before_cursor, parameter[self, WORD]]:
constant[
Give the word before the cursor.
If we have whitespace before the cursor this returns an empty string.
]
if call[call[name[self].text_before_cursor][<ast.Slice object at 0x7da1b08a7c40>].isspace, parameter[]] begin[:]
return[constant[]] | keyword[def] identifier[get_word_before_cursor] ( identifier[self] , identifier[WORD] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[text_before_cursor] [- literal[int] :]. identifier[isspace] ():
keyword[return] literal[string]
keyword[else] :
keyword[return] identifier[self] . identifier[text_before_cursor] [ identifier[self] . identifier[find_start_of_previous_word] ( identifier[WORD] = identifier[WORD] ):] | def get_word_before_cursor(self, WORD=False):
"""
Give the word before the cursor.
If we have whitespace before the cursor this returns an empty string.
"""
if self.text_before_cursor[-1:].isspace():
return '' # depends on [control=['if'], data=[]]
else:
return self.text_before_cursor[self.find_start_of_previous_word(WORD=WORD):] |
def pop(self, key, timeout=1, is_async=False, only_read=False):
"""
Test:
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.put('a', 0)
>>> cache.pop('a')
0
>>> cache.pop('b') == None
True
"""
if key not in self.cache_items:
return None
return self.cache_items.pop(key)[key] | def function[pop, parameter[self, key, timeout, is_async, only_read]]:
constant[
Test:
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.put('a', 0)
>>> cache.pop('a')
0
>>> cache.pop('b') == None
True
]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].cache_items] begin[:]
return[constant[None]]
return[call[call[name[self].cache_items.pop, parameter[name[key]]]][name[key]]] | keyword[def] identifier[pop] ( identifier[self] , identifier[key] , identifier[timeout] = literal[int] , identifier[is_async] = keyword[False] , identifier[only_read] = keyword[False] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[cache_items] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[cache_items] . identifier[pop] ( identifier[key] )[ identifier[key] ] | def pop(self, key, timeout=1, is_async=False, only_read=False):
"""
Test:
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.put('a', 0)
>>> cache.pop('a')
0
>>> cache.pop('b') == None
True
"""
if key not in self.cache_items:
return None # depends on [control=['if'], data=[]]
return self.cache_items.pop(key)[key] |
def solve_tsp(V,c):
"""solve_tsp -- solve the traveling salesman problem
- start with assignment model
- check flow from a source to every other node;
- if no flow, a sub-cycle has been found --> add cut
- otherwise, the solution is optimal
Parameters:
- V: set/list of nodes in the graph
- c[i,j]: cost for traversing edge (i,j)
Returns the optimum objective value and the list of edges used.
"""
def addcut(X):
for sink in V[1:]:
mflow = maxflow(V,X,V[0],sink)
mflow.optimize()
f,cons = mflow.data
if mflow.ObjVal < 2-EPS: # no flow to sink, can add cut
break
else:
return False
#add a cut/constraint
CutA = set([V[0]])
for i in cons:
if cons[i].Pi <= -1+EPS:
CutA.add(i)
CutB = set(V) - CutA
main.addCons(
quicksum(x[i,j] for i in CutA for j in CutB if j>i) + \
quicksum(x[j,i] for i in CutA for j in CutB if j<i) >= 2)
print("mflow:",mflow.getObjVal(),"cut:",CutA,"+",CutB,">= 2")
print("mflow:",mflow.getObjVal(),"cut:",[(i,j) for i in CutA for j in CutB if j>i],"+",[(j,i) for i in CutA for j in CutB if j<i],">= 2")
return True
def isMIP(x):
for var in x:
if var.vtype == "CONTINUOUS":
return False
return True
# main part of the solution process:
main = Model("tsp")
x = {}
for i in V:
for j in V:
if j > i:
x[i,j] = main.addVar(ub=1, vtype="C", name="x(%s,%s)"%(i,j))
for i in V:
main.addCons(quicksum(x[j,i] for j in V if j < i) + \
quicksum(x[i,j] for j in V if j > i) == 2, "Degree(%s)"%i)
main.setObjective(quicksum(c[i,j]*x[i,j] for i in V for j in V if j > i), "minimize")
while True:
main.optimize()
z = main.getObjVal()
X = {}
for (i,j) in x:
if main.getVal(x[i,j]) > EPS:
X[i,j] = main.getVal(x[i,j])
if addcut(X) == False: # i.e., components are connected
if isMIP(): # integer variables, components connected: solution found
break
for (i,j) in x: # all components connected, switch to integer model
main.chgVarType(x[i,j], "BINARY")
# process solution
edges = []
for (i,j) in x:
if main.getVal(x[i,j]) > EPS:
edges.append((i,j))
return main.getObjVal(),edges | def function[solve_tsp, parameter[V, c]]:
constant[solve_tsp -- solve the traveling salesman problem
- start with assignment model
- check flow from a source to every other node;
- if no flow, a sub-cycle has been found --> add cut
- otherwise, the solution is optimal
Parameters:
- V: set/list of nodes in the graph
- c[i,j]: cost for traversing edge (i,j)
Returns the optimum objective value and the list of edges used.
]
def function[addcut, parameter[X]]:
for taget[name[sink]] in starred[call[name[V]][<ast.Slice object at 0x7da1b17f7d00>]] begin[:]
variable[mflow] assign[=] call[name[maxflow], parameter[name[V], name[X], call[name[V]][constant[0]], name[sink]]]
call[name[mflow].optimize, parameter[]]
<ast.Tuple object at 0x7da1b17f79a0> assign[=] name[mflow].data
if compare[name[mflow].ObjVal less[<] binary_operation[constant[2] - name[EPS]]] begin[:]
break
variable[CutA] assign[=] call[name[set], parameter[list[[<ast.Subscript object at 0x7da1b17f7580>]]]]
for taget[name[i]] in starred[name[cons]] begin[:]
if compare[call[name[cons]][name[i]].Pi less_or_equal[<=] binary_operation[<ast.UnaryOp object at 0x7da1b17f7310> + name[EPS]]] begin[:]
call[name[CutA].add, parameter[name[i]]]
variable[CutB] assign[=] binary_operation[call[name[set], parameter[name[V]]] - name[CutA]]
call[name[main].addCons, parameter[compare[binary_operation[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b17f6e90>]] + call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b17f6b00>]]] greater_or_equal[>=] constant[2]]]]
call[name[print], parameter[constant[mflow:], call[name[mflow].getObjVal, parameter[]], constant[cut:], name[CutA], constant[+], name[CutB], constant[>= 2]]]
call[name[print], parameter[constant[mflow:], call[name[mflow].getObjVal, parameter[]], constant[cut:], <ast.ListComp object at 0x7da1b17f6380>, constant[+], <ast.ListComp object at 0x7da1b17f6080>, constant[>= 2]]]
return[constant[True]]
def function[isMIP, parameter[x]]:
for taget[name[var]] in starred[name[x]] begin[:]
if compare[name[var].vtype equal[==] constant[CONTINUOUS]] begin[:]
return[constant[False]]
return[constant[True]]
variable[main] assign[=] call[name[Model], parameter[constant[tsp]]]
variable[x] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[V]] begin[:]
for taget[name[j]] in starred[name[V]] begin[:]
if compare[name[j] greater[>] name[i]] begin[:]
call[name[x]][tuple[[<ast.Name object at 0x7da1b17f5570>, <ast.Name object at 0x7da1b17f5540>]]] assign[=] call[name[main].addVar, parameter[]]
for taget[name[i]] in starred[name[V]] begin[:]
call[name[main].addCons, parameter[compare[binary_operation[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b17f5090>]] + call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b17f4dc0>]]] equal[==] constant[2]], binary_operation[constant[Degree(%s)] <ast.Mod object at 0x7da2590d6920> name[i]]]]
call[name[main].setObjective, parameter[call[name[quicksum], parameter[<ast.GeneratorExp object at 0x7da1b17dfc40>]], constant[minimize]]]
while constant[True] begin[:]
call[name[main].optimize, parameter[]]
variable[z] assign[=] call[name[main].getObjVal, parameter[]]
variable[X] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b17df430>, <ast.Name object at 0x7da1b17df400>]]] in starred[name[x]] begin[:]
if compare[call[name[main].getVal, parameter[call[name[x]][tuple[[<ast.Name object at 0x7da1b17df220>, <ast.Name object at 0x7da1b17df1f0>]]]]] greater[>] name[EPS]] begin[:]
call[name[X]][tuple[[<ast.Name object at 0x7da1b17df0d0>, <ast.Name object at 0x7da1b17df0a0>]]] assign[=] call[name[main].getVal, parameter[call[name[x]][tuple[[<ast.Name object at 0x7da1b17def50>, <ast.Name object at 0x7da1b17def20>]]]]]
if compare[call[name[addcut], parameter[name[X]]] equal[==] constant[False]] begin[:]
if call[name[isMIP], parameter[]] begin[:]
break
for taget[tuple[[<ast.Name object at 0x7da1b17dec80>, <ast.Name object at 0x7da1b17dec50>]]] in starred[name[x]] begin[:]
call[name[main].chgVarType, parameter[call[name[x]][tuple[[<ast.Name object at 0x7da1b17deaa0>, <ast.Name object at 0x7da1b17dea70>]]], constant[BINARY]]]
variable[edges] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b17de8f0>, <ast.Name object at 0x7da1b17de8c0>]]] in starred[name[x]] begin[:]
if compare[call[name[main].getVal, parameter[call[name[x]][tuple[[<ast.Name object at 0x7da1b17de6e0>, <ast.Name object at 0x7da1b17de6b0>]]]]] greater[>] name[EPS]] begin[:]
call[name[edges].append, parameter[tuple[[<ast.Name object at 0x7da1b17de560>, <ast.Name object at 0x7da1b17de530>]]]]
return[tuple[[<ast.Call object at 0x7da1b17de470>, <ast.Name object at 0x7da1b17de3e0>]]] | keyword[def] identifier[solve_tsp] ( identifier[V] , identifier[c] ):
literal[string]
keyword[def] identifier[addcut] ( identifier[X] ):
keyword[for] identifier[sink] keyword[in] identifier[V] [ literal[int] :]:
identifier[mflow] = identifier[maxflow] ( identifier[V] , identifier[X] , identifier[V] [ literal[int] ], identifier[sink] )
identifier[mflow] . identifier[optimize] ()
identifier[f] , identifier[cons] = identifier[mflow] . identifier[data]
keyword[if] identifier[mflow] . identifier[ObjVal] < literal[int] - identifier[EPS] :
keyword[break]
keyword[else] :
keyword[return] keyword[False]
identifier[CutA] = identifier[set] ([ identifier[V] [ literal[int] ]])
keyword[for] identifier[i] keyword[in] identifier[cons] :
keyword[if] identifier[cons] [ identifier[i] ]. identifier[Pi] <=- literal[int] + identifier[EPS] :
identifier[CutA] . identifier[add] ( identifier[i] )
identifier[CutB] = identifier[set] ( identifier[V] )- identifier[CutA]
identifier[main] . identifier[addCons] (
identifier[quicksum] ( identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[i] keyword[in] identifier[CutA] keyword[for] identifier[j] keyword[in] identifier[CutB] keyword[if] identifier[j] > identifier[i] )+ identifier[quicksum] ( identifier[x] [ identifier[j] , identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[CutA] keyword[for] identifier[j] keyword[in] identifier[CutB] keyword[if] identifier[j] < identifier[i] )>= literal[int] )
identifier[print] ( literal[string] , identifier[mflow] . identifier[getObjVal] (), literal[string] , identifier[CutA] , literal[string] , identifier[CutB] , literal[string] )
identifier[print] ( literal[string] , identifier[mflow] . identifier[getObjVal] (), literal[string] ,[( identifier[i] , identifier[j] ) keyword[for] identifier[i] keyword[in] identifier[CutA] keyword[for] identifier[j] keyword[in] identifier[CutB] keyword[if] identifier[j] > identifier[i] ], literal[string] ,[( identifier[j] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[CutA] keyword[for] identifier[j] keyword[in] identifier[CutB] keyword[if] identifier[j] < identifier[i] ], literal[string] )
keyword[return] keyword[True]
keyword[def] identifier[isMIP] ( identifier[x] ):
keyword[for] identifier[var] keyword[in] identifier[x] :
keyword[if] identifier[var] . identifier[vtype] == literal[string] :
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[main] = identifier[Model] ( literal[string] )
identifier[x] ={}
keyword[for] identifier[i] keyword[in] identifier[V] :
keyword[for] identifier[j] keyword[in] identifier[V] :
keyword[if] identifier[j] > identifier[i] :
identifier[x] [ identifier[i] , identifier[j] ]= identifier[main] . identifier[addVar] ( identifier[ub] = literal[int] , identifier[vtype] = literal[string] , identifier[name] = literal[string] %( identifier[i] , identifier[j] ))
keyword[for] identifier[i] keyword[in] identifier[V] :
identifier[main] . identifier[addCons] ( identifier[quicksum] ( identifier[x] [ identifier[j] , identifier[i] ] keyword[for] identifier[j] keyword[in] identifier[V] keyword[if] identifier[j] < identifier[i] )+ identifier[quicksum] ( identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[V] keyword[if] identifier[j] > identifier[i] )== literal[int] , literal[string] % identifier[i] )
identifier[main] . identifier[setObjective] ( identifier[quicksum] ( identifier[c] [ identifier[i] , identifier[j] ]* identifier[x] [ identifier[i] , identifier[j] ] keyword[for] identifier[i] keyword[in] identifier[V] keyword[for] identifier[j] keyword[in] identifier[V] keyword[if] identifier[j] > identifier[i] ), literal[string] )
keyword[while] keyword[True] :
identifier[main] . identifier[optimize] ()
identifier[z] = identifier[main] . identifier[getObjVal] ()
identifier[X] ={}
keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[x] :
keyword[if] identifier[main] . identifier[getVal] ( identifier[x] [ identifier[i] , identifier[j] ])> identifier[EPS] :
identifier[X] [ identifier[i] , identifier[j] ]= identifier[main] . identifier[getVal] ( identifier[x] [ identifier[i] , identifier[j] ])
keyword[if] identifier[addcut] ( identifier[X] )== keyword[False] :
keyword[if] identifier[isMIP] ():
keyword[break]
keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[x] :
identifier[main] . identifier[chgVarType] ( identifier[x] [ identifier[i] , identifier[j] ], literal[string] )
identifier[edges] =[]
keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[x] :
keyword[if] identifier[main] . identifier[getVal] ( identifier[x] [ identifier[i] , identifier[j] ])> identifier[EPS] :
identifier[edges] . identifier[append] (( identifier[i] , identifier[j] ))
keyword[return] identifier[main] . identifier[getObjVal] (), identifier[edges] | def solve_tsp(V, c):
"""solve_tsp -- solve the traveling salesman problem
- start with assignment model
- check flow from a source to every other node;
- if no flow, a sub-cycle has been found --> add cut
- otherwise, the solution is optimal
Parameters:
- V: set/list of nodes in the graph
- c[i,j]: cost for traversing edge (i,j)
Returns the optimum objective value and the list of edges used.
"""
def addcut(X):
for sink in V[1:]:
mflow = maxflow(V, X, V[0], sink)
mflow.optimize()
(f, cons) = mflow.data
if mflow.ObjVal < 2 - EPS: # no flow to sink, can add cut
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sink']]
else:
return False
#add a cut/constraint
CutA = set([V[0]])
for i in cons:
if cons[i].Pi <= -1 + EPS:
CutA.add(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
CutB = set(V) - CutA
main.addCons(quicksum((x[i, j] for i in CutA for j in CutB if j > i)) + quicksum((x[j, i] for i in CutA for j in CutB if j < i)) >= 2)
print('mflow:', mflow.getObjVal(), 'cut:', CutA, '+', CutB, '>= 2')
print('mflow:', mflow.getObjVal(), 'cut:', [(i, j) for i in CutA for j in CutB if j > i], '+', [(j, i) for i in CutA for j in CutB if j < i], '>= 2')
return True
def isMIP(x):
for var in x:
if var.vtype == 'CONTINUOUS':
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['var']]
return True
# main part of the solution process:
main = Model('tsp')
x = {}
for i in V:
for j in V:
if j > i:
x[i, j] = main.addVar(ub=1, vtype='C', name='x(%s,%s)' % (i, j)) # depends on [control=['if'], data=['j', 'i']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
for i in V:
main.addCons(quicksum((x[j, i] for j in V if j < i)) + quicksum((x[i, j] for j in V if j > i)) == 2, 'Degree(%s)' % i) # depends on [control=['for'], data=['i']]
main.setObjective(quicksum((c[i, j] * x[i, j] for i in V for j in V if j > i)), 'minimize')
while True:
main.optimize()
z = main.getObjVal()
X = {}
for (i, j) in x:
if main.getVal(x[i, j]) > EPS:
X[i, j] = main.getVal(x[i, j]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if addcut(X) == False: # i.e., components are connected
if isMIP(): # integer variables, components connected: solution found
break # depends on [control=['if'], data=[]]
for (i, j) in x: # all components connected, switch to integer model
main.chgVarType(x[i, j], 'BINARY') # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# process solution
edges = []
for (i, j) in x:
if main.getVal(x[i, j]) > EPS:
edges.append((i, j)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (main.getObjVal(), edges) |
def get_canonical_headers(cls, req, include=None):
"""
Generate the Canonical Headers section of the Canonical Request.
Return the Canonical Headers and the Signed Headers strs as a tuple
(canonical_headers, signed_headers).
req -- Requests PreparedRequest object
include -- List of headers to include in the canonical and signed
headers. It's primarily included to allow testing against
specific examples from Amazon. If omitted or None it
includes host, content-type and any header starting 'x-amz-'
except for x-amz-client context, which appears to break
mobile analytics auth if included. Except for the
x-amz-client-context exclusion these defaults are per the
AWS documentation.
"""
if include is None:
include = cls.default_include_headers
include = [x.lower() for x in include]
headers = req.headers.copy()
# Temporarily include the host header - AWS requires it to be included
# in the signed headers, but Requests doesn't include it in a
# PreparedRequest
if 'host' not in headers:
headers['host'] = urlparse(req.url).netloc.split(':')[0]
# Aggregate for upper/lowercase header name collisions in header names,
# AMZ requires values of colliding headers be concatenated into a
# single header with lowercase name. Although this is not possible with
# Requests, since it uses a case-insensitive dict to hold headers, this
# is here just in case you duck type with a regular dict
cano_headers_dict = {}
for hdr, val in headers.items():
hdr = hdr.strip().lower()
val = cls.amz_norm_whitespace(val).strip()
if (hdr in include or '*' in include or
('x-amz-*' in include and hdr.startswith('x-amz-') and not
hdr == 'x-amz-client-context')):
vals = cano_headers_dict.setdefault(hdr, [])
vals.append(val)
# Flatten cano_headers dict to string and generate signed_headers
cano_headers = ''
signed_headers_list = []
for hdr in sorted(cano_headers_dict):
vals = cano_headers_dict[hdr]
val = ','.join(sorted(vals))
cano_headers += '{}:{}\n'.format(hdr, val)
signed_headers_list.append(hdr)
signed_headers = ';'.join(signed_headers_list)
return (cano_headers, signed_headers) | def function[get_canonical_headers, parameter[cls, req, include]]:
constant[
Generate the Canonical Headers section of the Canonical Request.
Return the Canonical Headers and the Signed Headers strs as a tuple
(canonical_headers, signed_headers).
req -- Requests PreparedRequest object
include -- List of headers to include in the canonical and signed
headers. It's primarily included to allow testing against
specific examples from Amazon. If omitted or None it
includes host, content-type and any header starting 'x-amz-'
except for x-amz-client context, which appears to break
mobile analytics auth if included. Except for the
x-amz-client-context exclusion these defaults are per the
AWS documentation.
]
if compare[name[include] is constant[None]] begin[:]
variable[include] assign[=] name[cls].default_include_headers
variable[include] assign[=] <ast.ListComp object at 0x7da1affc1a50>
variable[headers] assign[=] call[name[req].headers.copy, parameter[]]
if compare[constant[host] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:]
call[name[headers]][constant[host]] assign[=] call[call[call[name[urlparse], parameter[name[req].url]].netloc.split, parameter[constant[:]]]][constant[0]]
variable[cano_headers_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1affc2fe0>, <ast.Name object at 0x7da1affc1690>]]] in starred[call[name[headers].items, parameter[]]] begin[:]
variable[hdr] assign[=] call[call[name[hdr].strip, parameter[]].lower, parameter[]]
variable[val] assign[=] call[call[name[cls].amz_norm_whitespace, parameter[name[val]]].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b00f8c70> begin[:]
variable[vals] assign[=] call[name[cano_headers_dict].setdefault, parameter[name[hdr], list[[]]]]
call[name[vals].append, parameter[name[val]]]
variable[cano_headers] assign[=] constant[]
variable[signed_headers_list] assign[=] list[[]]
for taget[name[hdr]] in starred[call[name[sorted], parameter[name[cano_headers_dict]]]] begin[:]
variable[vals] assign[=] call[name[cano_headers_dict]][name[hdr]]
variable[val] assign[=] call[constant[,].join, parameter[call[name[sorted], parameter[name[vals]]]]]
<ast.AugAssign object at 0x7da1b00fa4d0>
call[name[signed_headers_list].append, parameter[name[hdr]]]
variable[signed_headers] assign[=] call[constant[;].join, parameter[name[signed_headers_list]]]
return[tuple[[<ast.Name object at 0x7da1b00fbd30>, <ast.Name object at 0x7da1b00fb820>]]] | keyword[def] identifier[get_canonical_headers] ( identifier[cls] , identifier[req] , identifier[include] = keyword[None] ):
literal[string]
keyword[if] identifier[include] keyword[is] keyword[None] :
identifier[include] = identifier[cls] . identifier[default_include_headers]
identifier[include] =[ identifier[x] . identifier[lower] () keyword[for] identifier[x] keyword[in] identifier[include] ]
identifier[headers] = identifier[req] . identifier[headers] . identifier[copy] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[headers] :
identifier[headers] [ literal[string] ]= identifier[urlparse] ( identifier[req] . identifier[url] ). identifier[netloc] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[cano_headers_dict] ={}
keyword[for] identifier[hdr] , identifier[val] keyword[in] identifier[headers] . identifier[items] ():
identifier[hdr] = identifier[hdr] . identifier[strip] (). identifier[lower] ()
identifier[val] = identifier[cls] . identifier[amz_norm_whitespace] ( identifier[val] ). identifier[strip] ()
keyword[if] ( identifier[hdr] keyword[in] identifier[include] keyword[or] literal[string] keyword[in] identifier[include] keyword[or]
( literal[string] keyword[in] identifier[include] keyword[and] identifier[hdr] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not]
identifier[hdr] == literal[string] )):
identifier[vals] = identifier[cano_headers_dict] . identifier[setdefault] ( identifier[hdr] ,[])
identifier[vals] . identifier[append] ( identifier[val] )
identifier[cano_headers] = literal[string]
identifier[signed_headers_list] =[]
keyword[for] identifier[hdr] keyword[in] identifier[sorted] ( identifier[cano_headers_dict] ):
identifier[vals] = identifier[cano_headers_dict] [ identifier[hdr] ]
identifier[val] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[vals] ))
identifier[cano_headers] += literal[string] . identifier[format] ( identifier[hdr] , identifier[val] )
identifier[signed_headers_list] . identifier[append] ( identifier[hdr] )
identifier[signed_headers] = literal[string] . identifier[join] ( identifier[signed_headers_list] )
keyword[return] ( identifier[cano_headers] , identifier[signed_headers] ) | def get_canonical_headers(cls, req, include=None):
"""
Generate the Canonical Headers section of the Canonical Request.
Return the Canonical Headers and the Signed Headers strs as a tuple
(canonical_headers, signed_headers).
req -- Requests PreparedRequest object
include -- List of headers to include in the canonical and signed
headers. It's primarily included to allow testing against
specific examples from Amazon. If omitted or None it
includes host, content-type and any header starting 'x-amz-'
except for x-amz-client context, which appears to break
mobile analytics auth if included. Except for the
x-amz-client-context exclusion these defaults are per the
AWS documentation.
"""
if include is None:
include = cls.default_include_headers # depends on [control=['if'], data=['include']]
include = [x.lower() for x in include]
headers = req.headers.copy()
# Temporarily include the host header - AWS requires it to be included
# in the signed headers, but Requests doesn't include it in a
# PreparedRequest
if 'host' not in headers:
headers['host'] = urlparse(req.url).netloc.split(':')[0] # depends on [control=['if'], data=['headers']]
# Aggregate for upper/lowercase header name collisions in header names,
# AMZ requires values of colliding headers be concatenated into a
# single header with lowercase name. Although this is not possible with
# Requests, since it uses a case-insensitive dict to hold headers, this
# is here just in case you duck type with a regular dict
cano_headers_dict = {}
for (hdr, val) in headers.items():
hdr = hdr.strip().lower()
val = cls.amz_norm_whitespace(val).strip()
if hdr in include or '*' in include or ('x-amz-*' in include and hdr.startswith('x-amz-') and (not hdr == 'x-amz-client-context')):
vals = cano_headers_dict.setdefault(hdr, [])
vals.append(val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Flatten cano_headers dict to string and generate signed_headers
cano_headers = ''
signed_headers_list = []
for hdr in sorted(cano_headers_dict):
vals = cano_headers_dict[hdr]
val = ','.join(sorted(vals))
cano_headers += '{}:{}\n'.format(hdr, val)
signed_headers_list.append(hdr) # depends on [control=['for'], data=['hdr']]
signed_headers = ';'.join(signed_headers_list)
return (cano_headers, signed_headers) |
def autoconfig_url_from_registry():
"""
Get the PAC ``AutoConfigURL`` value from the Windows Registry.
This setting is visible as the "use automatic configuration script" field in
Internet Options > Connection > LAN Settings.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotWindowsError: If called on a non-Windows platform.
"""
if not ON_WINDOWS:
raise NotWindowsError()
try:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings') as key:
return winreg.QueryValueEx(key, 'AutoConfigURL')[0]
except WindowsError:
return | def function[autoconfig_url_from_registry, parameter[]]:
constant[
Get the PAC ``AutoConfigURL`` value from the Windows Registry.
This setting is visible as the "use automatic configuration script" field in
Internet Options > Connection > LAN Settings.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotWindowsError: If called on a non-Windows platform.
]
if <ast.UnaryOp object at 0x7da20c7951e0> begin[:]
<ast.Raise object at 0x7da20c795390>
<ast.Try object at 0x7da20c795600> | keyword[def] identifier[autoconfig_url_from_registry] ():
literal[string]
keyword[if] keyword[not] identifier[ON_WINDOWS] :
keyword[raise] identifier[NotWindowsError] ()
keyword[try] :
keyword[with] identifier[winreg] . identifier[OpenKey] ( identifier[winreg] . identifier[HKEY_CURRENT_USER] ,
literal[string] ) keyword[as] identifier[key] :
keyword[return] identifier[winreg] . identifier[QueryValueEx] ( identifier[key] , literal[string] )[ literal[int] ]
keyword[except] identifier[WindowsError] :
keyword[return] | def autoconfig_url_from_registry():
"""
Get the PAC ``AutoConfigURL`` value from the Windows Registry.
This setting is visible as the "use automatic configuration script" field in
Internet Options > Connection > LAN Settings.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotWindowsError: If called on a non-Windows platform.
"""
if not ON_WINDOWS:
raise NotWindowsError() # depends on [control=['if'], data=[]]
try:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings') as key:
return winreg.QueryValueEx(key, 'AutoConfigURL')[0] # depends on [control=['with'], data=['key']] # depends on [control=['try'], data=[]]
except WindowsError:
return # depends on [control=['except'], data=[]] |
def get_author_string(self, links=False):
saved_args = locals()
saved_args = saved_args['links']
"""Returns list of authors as a comma-separated
string (with 'and' before last author)."""
def format_author(author):
if links and author.person.slug:
return '<a href="/authors/%s/">%s</a>' % (author.person.slug, author.person.full_name)
return author.person.full_name
if links == True or links == False:
authors = map(format_author, self.authors.all())
else:
authors = map(format_author, saved_args)
if not authors:
return ""
elif len(authors) == 1:
# If this is the only author, just return author name
return authors[0]
return ", ".join(authors[0:-1]) + " and " + authors[-1] | def function[get_author_string, parameter[self, links]]:
variable[saved_args] assign[=] call[name[locals], parameter[]]
variable[saved_args] assign[=] call[name[saved_args]][constant[links]]
constant[Returns list of authors as a comma-separated
string (with 'and' before last author).]
def function[format_author, parameter[author]]:
if <ast.BoolOp object at 0x7da18c4ccfd0> begin[:]
return[binary_operation[constant[<a href="/authors/%s/">%s</a>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4cdf00>, <ast.Attribute object at 0x7da18c4cfac0>]]]]
return[name[author].person.full_name]
if <ast.BoolOp object at 0x7da18c4cf520> begin[:]
variable[authors] assign[=] call[name[map], parameter[name[format_author], call[name[self].authors.all, parameter[]]]]
if <ast.UnaryOp object at 0x7da18c4cef50> begin[:]
return[constant[]]
return[binary_operation[binary_operation[call[constant[, ].join, parameter[call[name[authors]][<ast.Slice object at 0x7da18c4ce3e0>]]] + constant[ and ]] + call[name[authors]][<ast.UnaryOp object at 0x7da18c4cd6f0>]]] | keyword[def] identifier[get_author_string] ( identifier[self] , identifier[links] = keyword[False] ):
identifier[saved_args] = identifier[locals] ()
identifier[saved_args] = identifier[saved_args] [ literal[string] ]
literal[string]
keyword[def] identifier[format_author] ( identifier[author] ):
keyword[if] identifier[links] keyword[and] identifier[author] . identifier[person] . identifier[slug] :
keyword[return] literal[string] %( identifier[author] . identifier[person] . identifier[slug] , identifier[author] . identifier[person] . identifier[full_name] )
keyword[return] identifier[author] . identifier[person] . identifier[full_name]
keyword[if] identifier[links] == keyword[True] keyword[or] identifier[links] == keyword[False] :
identifier[authors] = identifier[map] ( identifier[format_author] , identifier[self] . identifier[authors] . identifier[all] ())
keyword[else] :
identifier[authors] = identifier[map] ( identifier[format_author] , identifier[saved_args] )
keyword[if] keyword[not] identifier[authors] :
keyword[return] literal[string]
keyword[elif] identifier[len] ( identifier[authors] )== literal[int] :
keyword[return] identifier[authors] [ literal[int] ]
keyword[return] literal[string] . identifier[join] ( identifier[authors] [ literal[int] :- literal[int] ])+ literal[string] + identifier[authors] [- literal[int] ] | def get_author_string(self, links=False):
saved_args = locals()
saved_args = saved_args['links']
"Returns list of authors as a comma-separated\n string (with 'and' before last author)."
def format_author(author):
if links and author.person.slug:
return '<a href="/authors/%s/">%s</a>' % (author.person.slug, author.person.full_name) # depends on [control=['if'], data=[]]
return author.person.full_name
if links == True or links == False:
authors = map(format_author, self.authors.all()) # depends on [control=['if'], data=[]]
else:
authors = map(format_author, saved_args)
if not authors:
return '' # depends on [control=['if'], data=[]]
elif len(authors) == 1:
# If this is the only author, just return author name
return authors[0] # depends on [control=['if'], data=[]]
return ', '.join(authors[0:-1]) + ' and ' + authors[-1] |
def in_array(self, event_property, value):
"""An in-array filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.in_array('path', '/event')
>>> print(filtered)
request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"])
>>> filtered = request_time.in_array('path', ['/event', '/'])
>>> print(filtered)
request(elapsed_ms).in(path, ["/event", "/"])
"""
c = self.copy()
c.filters.append(filters.IN(event_property, value))
return c | def function[in_array, parameter[self, event_property, value]]:
constant[An in-array filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.in_array('path', '/event')
>>> print(filtered)
request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"])
>>> filtered = request_time.in_array('path', ['/event', '/'])
>>> print(filtered)
request(elapsed_ms).in(path, ["/event", "/"])
]
variable[c] assign[=] call[name[self].copy, parameter[]]
call[name[c].filters.append, parameter[call[name[filters].IN, parameter[name[event_property], name[value]]]]]
return[name[c]] | keyword[def] identifier[in_array] ( identifier[self] , identifier[event_property] , identifier[value] ):
literal[string]
identifier[c] = identifier[self] . identifier[copy] ()
identifier[c] . identifier[filters] . identifier[append] ( identifier[filters] . identifier[IN] ( identifier[event_property] , identifier[value] ))
keyword[return] identifier[c] | def in_array(self, event_property, value):
"""An in-array filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.in_array('path', '/event')
>>> print(filtered)
request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"])
>>> filtered = request_time.in_array('path', ['/event', '/'])
>>> print(filtered)
request(elapsed_ms).in(path, ["/event", "/"])
"""
c = self.copy()
c.filters.append(filters.IN(event_property, value))
return c |
def _select_phase_left_bound(self, epoch_number):
"""
Return number of current phase.
Return index of first phase not done after all up to epoch_number were done.
"""
idx = bisect.bisect_left(self.ladder, epoch_number)
if idx >= len(self.ladder):
return len(self.ladder) - 1
elif self.ladder[idx] > epoch_number:
return idx - 1
else:
return idx | def function[_select_phase_left_bound, parameter[self, epoch_number]]:
constant[
Return number of current phase.
Return index of first phase not done after all up to epoch_number were done.
]
variable[idx] assign[=] call[name[bisect].bisect_left, parameter[name[self].ladder, name[epoch_number]]]
if compare[name[idx] greater_or_equal[>=] call[name[len], parameter[name[self].ladder]]] begin[:]
return[binary_operation[call[name[len], parameter[name[self].ladder]] - constant[1]]] | keyword[def] identifier[_select_phase_left_bound] ( identifier[self] , identifier[epoch_number] ):
literal[string]
identifier[idx] = identifier[bisect] . identifier[bisect_left] ( identifier[self] . identifier[ladder] , identifier[epoch_number] )
keyword[if] identifier[idx] >= identifier[len] ( identifier[self] . identifier[ladder] ):
keyword[return] identifier[len] ( identifier[self] . identifier[ladder] )- literal[int]
keyword[elif] identifier[self] . identifier[ladder] [ identifier[idx] ]> identifier[epoch_number] :
keyword[return] identifier[idx] - literal[int]
keyword[else] :
keyword[return] identifier[idx] | def _select_phase_left_bound(self, epoch_number):
"""
Return number of current phase.
Return index of first phase not done after all up to epoch_number were done.
"""
idx = bisect.bisect_left(self.ladder, epoch_number)
if idx >= len(self.ladder):
return len(self.ladder) - 1 # depends on [control=['if'], data=[]]
elif self.ladder[idx] > epoch_number:
return idx - 1 # depends on [control=['if'], data=[]]
else:
return idx |
def start(cls, name):
"""
starts a timer with the given name.
:param name: the name of the timer
:type name: string
"""
if cls.debug:
print("Timer", name, "started ...")
cls.timer_start[name] = time.time() | def function[start, parameter[cls, name]]:
constant[
starts a timer with the given name.
:param name: the name of the timer
:type name: string
]
if name[cls].debug begin[:]
call[name[print], parameter[constant[Timer], name[name], constant[started ...]]]
call[name[cls].timer_start][name[name]] assign[=] call[name[time].time, parameter[]] | keyword[def] identifier[start] ( identifier[cls] , identifier[name] ):
literal[string]
keyword[if] identifier[cls] . identifier[debug] :
identifier[print] ( literal[string] , identifier[name] , literal[string] )
identifier[cls] . identifier[timer_start] [ identifier[name] ]= identifier[time] . identifier[time] () | def start(cls, name):
"""
starts a timer with the given name.
:param name: the name of the timer
:type name: string
"""
if cls.debug:
print('Timer', name, 'started ...') # depends on [control=['if'], data=[]]
cls.timer_start[name] = time.time() |
def meanOmega(self,dangle,oned=False,tdisrupt=None,approx=True,
higherorder=None):
"""
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
approx= (True) if True, compute the mean Omega by direct integration of the spline representation
higherorder= (object-wide default higherorderTrack) if True, include higher-order spline terms in the approximate computation
OUTPUT:
mean Omega
HISTORY:
2015-11-17 - Written - Bovy (UofT)
"""
if higherorder is None: higherorder= self._higherorderTrack
if tdisrupt is None: tdisrupt= self._tdisrupt
if approx:
num= self._meanOmega_num_approx(dangle,tdisrupt,
higherorder=higherorder)
else:
num=\
integrate.quad(lambda T: (T/(1-T*T)\
*numpy.sqrt(self._sortedSigOEig[2])\
+self._meandO)\
*numpy.sqrt(self._sortedSigOEig[2])\
*(1+T*T)/(1-T*T)**2.\
*self.pOparapar(T/(1-T*T)\
*numpy.sqrt(self._sortedSigOEig[2])\
+self._meandO,dangle),
-1.,1.)[0]
denom= self._density_par(dangle,tdisrupt=tdisrupt,approx=approx,
higherorder=higherorder)
dO1D= num/denom
if oned: return dO1D
else:
return self._progenitor_Omega+dO1D*self._dsigomeanProgDirection\
*self._sigMeanSign | def function[meanOmega, parameter[self, dangle, oned, tdisrupt, approx, higherorder]]:
constant[
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
approx= (True) if True, compute the mean Omega by direct integration of the spline representation
higherorder= (object-wide default higherorderTrack) if True, include higher-order spline terms in the approximate computation
OUTPUT:
mean Omega
HISTORY:
2015-11-17 - Written - Bovy (UofT)
]
if compare[name[higherorder] is constant[None]] begin[:]
variable[higherorder] assign[=] name[self]._higherorderTrack
if compare[name[tdisrupt] is constant[None]] begin[:]
variable[tdisrupt] assign[=] name[self]._tdisrupt
if name[approx] begin[:]
variable[num] assign[=] call[name[self]._meanOmega_num_approx, parameter[name[dangle], name[tdisrupt]]]
variable[denom] assign[=] call[name[self]._density_par, parameter[name[dangle]]]
variable[dO1D] assign[=] binary_operation[name[num] / name[denom]]
if name[oned] begin[:]
return[name[dO1D]] | keyword[def] identifier[meanOmega] ( identifier[self] , identifier[dangle] , identifier[oned] = keyword[False] , identifier[tdisrupt] = keyword[None] , identifier[approx] = keyword[True] ,
identifier[higherorder] = keyword[None] ):
literal[string]
keyword[if] identifier[higherorder] keyword[is] keyword[None] : identifier[higherorder] = identifier[self] . identifier[_higherorderTrack]
keyword[if] identifier[tdisrupt] keyword[is] keyword[None] : identifier[tdisrupt] = identifier[self] . identifier[_tdisrupt]
keyword[if] identifier[approx] :
identifier[num] = identifier[self] . identifier[_meanOmega_num_approx] ( identifier[dangle] , identifier[tdisrupt] ,
identifier[higherorder] = identifier[higherorder] )
keyword[else] :
identifier[num] = identifier[integrate] . identifier[quad] ( keyword[lambda] identifier[T] :( identifier[T] /( literal[int] - identifier[T] * identifier[T] )* identifier[numpy] . identifier[sqrt] ( identifier[self] . identifier[_sortedSigOEig] [ literal[int] ])+ identifier[self] . identifier[_meandO] )* identifier[numpy] . identifier[sqrt] ( identifier[self] . identifier[_sortedSigOEig] [ literal[int] ])*( literal[int] + identifier[T] * identifier[T] )/( literal[int] - identifier[T] * identifier[T] )** literal[int] * identifier[self] . identifier[pOparapar] ( identifier[T] /( literal[int] - identifier[T] * identifier[T] )* identifier[numpy] . identifier[sqrt] ( identifier[self] . identifier[_sortedSigOEig] [ literal[int] ])+ identifier[self] . identifier[_meandO] , identifier[dangle] ),
- literal[int] , literal[int] )[ literal[int] ]
identifier[denom] = identifier[self] . identifier[_density_par] ( identifier[dangle] , identifier[tdisrupt] = identifier[tdisrupt] , identifier[approx] = identifier[approx] ,
identifier[higherorder] = identifier[higherorder] )
identifier[dO1D] = identifier[num] / identifier[denom]
keyword[if] identifier[oned] : keyword[return] identifier[dO1D]
keyword[else] :
keyword[return] identifier[self] . identifier[_progenitor_Omega] + identifier[dO1D] * identifier[self] . identifier[_dsigomeanProgDirection] * identifier[self] . identifier[_sigMeanSign] | def meanOmega(self, dangle, oned=False, tdisrupt=None, approx=True, higherorder=None):
"""
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
approx= (True) if True, compute the mean Omega by direct integration of the spline representation
higherorder= (object-wide default higherorderTrack) if True, include higher-order spline terms in the approximate computation
OUTPUT:
mean Omega
HISTORY:
2015-11-17 - Written - Bovy (UofT)
"""
if higherorder is None:
higherorder = self._higherorderTrack # depends on [control=['if'], data=['higherorder']]
if tdisrupt is None:
tdisrupt = self._tdisrupt # depends on [control=['if'], data=['tdisrupt']]
if approx:
num = self._meanOmega_num_approx(dangle, tdisrupt, higherorder=higherorder) # depends on [control=['if'], data=[]]
else:
num = integrate.quad(lambda T: (T / (1 - T * T) * numpy.sqrt(self._sortedSigOEig[2]) + self._meandO) * numpy.sqrt(self._sortedSigOEig[2]) * (1 + T * T) / (1 - T * T) ** 2.0 * self.pOparapar(T / (1 - T * T) * numpy.sqrt(self._sortedSigOEig[2]) + self._meandO, dangle), -1.0, 1.0)[0]
denom = self._density_par(dangle, tdisrupt=tdisrupt, approx=approx, higherorder=higherorder)
dO1D = num / denom
if oned:
return dO1D # depends on [control=['if'], data=[]]
else:
return self._progenitor_Omega + dO1D * self._dsigomeanProgDirection * self._sigMeanSign |
def output(self, stream):
"""Outputs the results of :meth:`.run` to the given stream. The results
are presented similarly to HTTP headers, where each line has a key and
value, separated by ``: ``. The ``Status`` key will always be available
in the output.
:param stream: The output file to write to.
:returns: A return code that would be appropriate to return to the
operating system, e.g. zero means success, non-zero means
failure.
:rtype: int
"""
for key, val in self.results.items():
if isinstance(val, basestring):
print >> stream, '{0}: {1!s}'.format(key, val)
elif isinstance(val, float):
print >> stream, '{0}: {1:.5f}'.format(key, val)
elif val is None:
print >> stream, '{0}: '.format(key)
else:
print >> stream, '{0}: {1!s}'.format(key, val)
if self.results['Status'] == 'OK':
return 0
else:
return 1 | def function[output, parameter[self, stream]]:
constant[Outputs the results of :meth:`.run` to the given stream. The results
are presented similarly to HTTP headers, where each line has a key and
value, separated by ``: ``. The ``Status`` key will always be available
in the output.
:param stream: The output file to write to.
:returns: A return code that would be appropriate to return to the
operating system, e.g. zero means success, non-zero means
failure.
:rtype: int
]
for taget[tuple[[<ast.Name object at 0x7da2046203d0>, <ast.Name object at 0x7da204620f10>]]] in starred[call[name[self].results.items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[val], name[basestring]]] begin[:]
tuple[[<ast.BinOp object at 0x7da2046215d0>, <ast.Call object at 0x7da204621420>]]
if compare[call[name[self].results][constant[Status]] equal[==] constant[OK]] begin[:]
return[constant[0]] | keyword[def] identifier[output] ( identifier[self] , identifier[stream] ):
literal[string]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[self] . identifier[results] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[val] , identifier[basestring] ):
identifier[print] >> identifier[stream] , literal[string] . identifier[format] ( identifier[key] , identifier[val] )
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[float] ):
identifier[print] >> identifier[stream] , literal[string] . identifier[format] ( identifier[key] , identifier[val] )
keyword[elif] identifier[val] keyword[is] keyword[None] :
identifier[print] >> identifier[stream] , literal[string] . identifier[format] ( identifier[key] )
keyword[else] :
identifier[print] >> identifier[stream] , literal[string] . identifier[format] ( identifier[key] , identifier[val] )
keyword[if] identifier[self] . identifier[results] [ literal[string] ]== literal[string] :
keyword[return] literal[int]
keyword[else] :
keyword[return] literal[int] | def output(self, stream):
"""Outputs the results of :meth:`.run` to the given stream. The results
are presented similarly to HTTP headers, where each line has a key and
value, separated by ``: ``. The ``Status`` key will always be available
in the output.
:param stream: The output file to write to.
:returns: A return code that would be appropriate to return to the
operating system, e.g. zero means success, non-zero means
failure.
:rtype: int
"""
for (key, val) in self.results.items():
if isinstance(val, basestring):
(print >> stream, '{0}: {1!s}'.format(key, val)) # depends on [control=['if'], data=[]]
elif isinstance(val, float):
(print >> stream, '{0}: {1:.5f}'.format(key, val)) # depends on [control=['if'], data=[]]
elif val is None:
(print >> stream, '{0}: '.format(key)) # depends on [control=['if'], data=[]]
else:
(print >> stream, '{0}: {1!s}'.format(key, val)) # depends on [control=['for'], data=[]]
if self.results['Status'] == 'OK':
return 0 # depends on [control=['if'], data=[]]
else:
return 1 |
def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip())) | def function[print_stream, parameter[file, name]]:
constant[Print stream from file to logger.]
variable[logger] assign[=] call[name[logging].getLogger, parameter[call[constant[xenon.{}].format, parameter[name[name]]]]]
for taget[name[line]] in starred[name[file]] begin[:]
call[name[logger].info, parameter[call[constant[[{}] {}].format, parameter[name[name], call[name[line].strip, parameter[]]]]]] | keyword[def] identifier[print_stream] ( identifier[file] , identifier[name] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[for] identifier[line] keyword[in] identifier[file] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] , identifier[line] . identifier[strip] ())) | def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip())) # depends on [control=['for'], data=['line']] |
def newton_refine_solve(jac_both, x_val, surf_x, y_val, surf_y):
    r"""Helper for :func:`newton_refine`.

    Solves the 2x2 linear system

    .. code-block:: rest

       [A C][ds] = [E]
       [B D][dt]   [F]

    directly via Cramer's rule rather than a linear algebra utility
    (``A->B->C->D`` matches the order of the entries in ``jac_both``):

    .. code-block:: rest

       ds = (D E - C F) / (A D - B C)
       dt = (A F - B E) / (A D - B C)

    Args:
        jac_both (numpy.ndarray): A ``4 x 1`` matrix of entries in a Jacobian.
        x_val (float): An ``x``-value we are trying to reach.
        surf_x (float): The actual ``x``-value we are currently at.
        y_val (float): An ``y``-value we are trying to reach.
        surf_y (float): The actual ``x``-value we are currently at.

    Returns:
        Tuple[float, float]: The pair of values that solve the
        linear system.
    """
    a_val = jac_both[0, 0]
    b_val = jac_both[1, 0]
    c_val = jac_both[2, 0]
    d_val = jac_both[3, 0]
    # Right-hand side: how far we are from the target point.
    rhs_x = x_val - surf_x
    rhs_y = y_val - surf_y
    det = a_val * d_val - b_val * c_val
    delta_s = (d_val * rhs_x - c_val * rhs_y) / det
    delta_t = (a_val * rhs_y - b_val * rhs_x) / det
    return delta_s, delta_t
constant[Helper for :func:`newton_refine`.
We have a system:
.. code-block:: rest
[A C][ds] = [E]
[B D][dt] [F]
This is not a typo, ``A->B->C->D`` matches the data in ``jac_both``.
We solve directly rather than using a linear algebra utility:
.. code-block:: rest
ds = (D E - C F) / (A D - B C)
dt = (A F - B E) / (A D - B C)
Args:
jac_both (numpy.ndarray): A ``4 x 1`` matrix of entries in a Jacobian.
x_val (float): An ``x``-value we are trying to reach.
surf_x (float): The actual ``x``-value we are currently at.
y_val (float): An ``y``-value we are trying to reach.
surf_y (float): The actual ``x``-value we are currently at.
Returns:
Tuple[float, float]: The pair of values the solve the
linear system.
]
<ast.Tuple object at 0x7da20c6c6710> assign[=] call[name[jac_both]][tuple[[<ast.Slice object at 0x7da20c6c4760>, <ast.Constant object at 0x7da20c6c6bf0>]]]
variable[e_val] assign[=] binary_operation[name[x_val] - name[surf_x]]
variable[f_val] assign[=] binary_operation[name[y_val] - name[surf_y]]
variable[denom] assign[=] binary_operation[binary_operation[name[a_val] * name[d_val]] - binary_operation[name[b_val] * name[c_val]]]
variable[delta_s] assign[=] binary_operation[binary_operation[binary_operation[name[d_val] * name[e_val]] - binary_operation[name[c_val] * name[f_val]]] / name[denom]]
variable[delta_t] assign[=] binary_operation[binary_operation[binary_operation[name[a_val] * name[f_val]] - binary_operation[name[b_val] * name[e_val]]] / name[denom]]
return[tuple[[<ast.Name object at 0x7da20c6c6260>, <ast.Name object at 0x7da20c6c6230>]]] | keyword[def] identifier[newton_refine_solve] ( identifier[jac_both] , identifier[x_val] , identifier[surf_x] , identifier[y_val] , identifier[surf_y] ):
literal[string]
identifier[a_val] , identifier[b_val] , identifier[c_val] , identifier[d_val] = identifier[jac_both] [:, literal[int] ]
identifier[e_val] = identifier[x_val] - identifier[surf_x]
identifier[f_val] = identifier[y_val] - identifier[surf_y]
identifier[denom] = identifier[a_val] * identifier[d_val] - identifier[b_val] * identifier[c_val]
identifier[delta_s] =( identifier[d_val] * identifier[e_val] - identifier[c_val] * identifier[f_val] )/ identifier[denom]
identifier[delta_t] =( identifier[a_val] * identifier[f_val] - identifier[b_val] * identifier[e_val] )/ identifier[denom]
keyword[return] identifier[delta_s] , identifier[delta_t] | def newton_refine_solve(jac_both, x_val, surf_x, y_val, surf_y):
"""Helper for :func:`newton_refine`.
We have a system:
.. code-block:: rest
[A C][ds] = [E]
[B D][dt] [F]
This is not a typo, ``A->B->C->D`` matches the data in ``jac_both``.
We solve directly rather than using a linear algebra utility:
.. code-block:: rest
ds = (D E - C F) / (A D - B C)
dt = (A F - B E) / (A D - B C)
Args:
jac_both (numpy.ndarray): A ``4 x 1`` matrix of entries in a Jacobian.
x_val (float): An ``x``-value we are trying to reach.
surf_x (float): The actual ``x``-value we are currently at.
y_val (float): An ``y``-value we are trying to reach.
surf_y (float): The actual ``x``-value we are currently at.
Returns:
Tuple[float, float]: The pair of values the solve the
linear system.
"""
(a_val, b_val, c_val, d_val) = jac_both[:, 0]
# and
e_val = x_val - surf_x
f_val = y_val - surf_y
# Now solve:
denom = a_val * d_val - b_val * c_val
delta_s = (d_val * e_val - c_val * f_val) / denom
delta_t = (a_val * f_val - b_val * e_val) / denom
return (delta_s, delta_t) |
def get_nodedata(self, sort_names=False):
    """Return ``(idx, names, V)`` for dc nodes from a solved power flow.

    Returns ``None`` when there are no nodes and ``tuple([False] * 7)``
    when the power flow has not been solved.  With ``sort_names`` the
    result is instead a generator yielding the three columns (idx,
    names, V) sorted by node idx.
    """
    if not self.Node.n:
        return
    if not self.pflow.solved:
        logger.error('Power flow not solved when getting bus data.')
        return tuple([False] * 7)
    indices = self.Node.idx
    labels = self.Node.name
    voltages = [self.dae.y[addr] for addr in self.Node.v]
    if not sort_names:
        return indices, labels, voltages
    ordered = sorted(zip(indices, labels, voltages), key=itemgetter(0))
    return (list(column) for column in zip(*ordered))
constant[
get dc node data from solved power flow
]
if <ast.UnaryOp object at 0x7da18bccb5b0> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da18bcc8a90> begin[:]
call[name[logger].error, parameter[constant[Power flow not solved when getting bus data.]]]
return[call[name[tuple], parameter[binary_operation[list[[<ast.Constant object at 0x7da18bcc9cf0>]] * constant[7]]]]]
variable[idx] assign[=] name[self].Node.idx
variable[names] assign[=] name[self].Node.name
variable[V] assign[=] <ast.ListComp object at 0x7da18bcc8f10>
if name[sort_names] begin[:]
variable[ret] assign[=] <ast.GeneratorExp object at 0x7da18bccbfd0>
return[name[ret]] | keyword[def] identifier[get_nodedata] ( identifier[self] , identifier[sort_names] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[Node] . identifier[n] :
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[pflow] . identifier[solved] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] identifier[tuple] ([ keyword[False] ]* literal[int] )
identifier[idx] = identifier[self] . identifier[Node] . identifier[idx]
identifier[names] = identifier[self] . identifier[Node] . identifier[name]
identifier[V] =[ identifier[self] . identifier[dae] . identifier[y] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[Node] . identifier[v] ]
keyword[if] identifier[sort_names] :
identifier[ret] =( identifier[list] ( identifier[x] )
keyword[for] identifier[x] keyword[in] identifier[zip] (* identifier[sorted] ( identifier[zip] ( identifier[idx] , identifier[names] , identifier[V] ), identifier[key] = identifier[itemgetter] ( literal[int] ))))
keyword[else] :
identifier[ret] = identifier[idx] , identifier[names] , identifier[V]
keyword[return] identifier[ret] | def get_nodedata(self, sort_names=False):
"""
get dc node data from solved power flow
"""
if not self.Node.n:
return # depends on [control=['if'], data=[]]
if not self.pflow.solved:
logger.error('Power flow not solved when getting bus data.')
return tuple([False] * 7) # depends on [control=['if'], data=[]]
idx = self.Node.idx
names = self.Node.name
V = [self.dae.y[x] for x in self.Node.v]
if sort_names:
ret = (list(x) for x in zip(*sorted(zip(idx, names, V), key=itemgetter(0)))) # depends on [control=['if'], data=[]]
else:
ret = (idx, names, V)
return ret |
def _jws_payload(expire_at, requrl=None, **kwargs):
    """
    Build a base64url-encoded JWS payload.

    ``expire_at`` becomes the ``exp`` claim (a timestamp after which the
    message must be rejected) and ``requrl`` the ``aud`` (audience)
    claim per the JWT spec.  Any extra keyword arguments are merged into
    the payload as-is and may override either claim.
    """
    # dict(mapping, **kwargs): keyword arguments take precedence over
    # the base claims, matching an explicit update().
    claims = dict({'exp': expire_at, 'aud': requrl}, **kwargs)
    serialized = json.dumps(claims, sort_keys=True).encode('utf8')
    return base64url_encode(serialized)
constant[
Produce a base64-encoded JWS payload.
expire_at, if specified, must be a number that indicates
a timestamp after which the message must be rejected.
requrl, if specified, is used as the "audience" according
to the JWT spec.
Any other parameters are passed as is to the payload.
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57df0>, <ast.Constant object at 0x7da18eb56fb0>], [<ast.Name object at 0x7da18eb54250>, <ast.Name object at 0x7da18eb54cd0>]]
call[name[data].update, parameter[name[kwargs]]]
variable[datajson] assign[=] call[call[name[json].dumps, parameter[name[data]]].encode, parameter[constant[utf8]]]
return[call[name[base64url_encode], parameter[name[datajson]]]] | keyword[def] identifier[_jws_payload] ( identifier[expire_at] , identifier[requrl] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[data] ={
literal[string] : identifier[expire_at] ,
literal[string] : identifier[requrl]
}
identifier[data] . identifier[update] ( identifier[kwargs] )
identifier[datajson] = identifier[json] . identifier[dumps] ( identifier[data] , identifier[sort_keys] = keyword[True] ). identifier[encode] ( literal[string] )
keyword[return] identifier[base64url_encode] ( identifier[datajson] ) | def _jws_payload(expire_at, requrl=None, **kwargs):
"""
Produce a base64-encoded JWS payload.
expire_at, if specified, must be a number that indicates
a timestamp after which the message must be rejected.
requrl, if specified, is used as the "audience" according
to the JWT spec.
Any other parameters are passed as is to the payload.
"""
data = {'exp': expire_at, 'aud': requrl}
data.update(kwargs)
datajson = json.dumps(data, sort_keys=True).encode('utf8')
return base64url_encode(datajson) |
def set_execution_state(self, execution_state):
    """Record a new execution state and refresh the derived fields.

    A falsy state clears ``execution_state``, ``cluster`` and
    ``environ``; otherwise cluster/environ are derived from the state
    and ``zone`` mirrors the cluster.  Watches are triggered either way.
    """
    if execution_state:
        self.execution_state = execution_state
        dc, env = self.get_execution_state_dc_environ(execution_state)
        self.cluster = dc
        self.environ = env
        self.zone = dc
    else:
        self.execution_state = None
        self.cluster = None
        self.environ = None
    self.trigger_watches()
constant[ set exectuion state ]
if <ast.UnaryOp object at 0x7da2054a5420> begin[:]
name[self].execution_state assign[=] constant[None]
name[self].cluster assign[=] constant[None]
name[self].environ assign[=] constant[None]
call[name[self].trigger_watches, parameter[]] | keyword[def] identifier[set_execution_state] ( identifier[self] , identifier[execution_state] ):
literal[string]
keyword[if] keyword[not] identifier[execution_state] :
identifier[self] . identifier[execution_state] = keyword[None]
identifier[self] . identifier[cluster] = keyword[None]
identifier[self] . identifier[environ] = keyword[None]
keyword[else] :
identifier[self] . identifier[execution_state] = identifier[execution_state]
identifier[cluster] , identifier[environ] = identifier[self] . identifier[get_execution_state_dc_environ] ( identifier[execution_state] )
identifier[self] . identifier[cluster] = identifier[cluster]
identifier[self] . identifier[environ] = identifier[environ]
identifier[self] . identifier[zone] = identifier[cluster]
identifier[self] . identifier[trigger_watches] () | def set_execution_state(self, execution_state):
""" set exectuion state """
if not execution_state:
self.execution_state = None
self.cluster = None
self.environ = None # depends on [control=['if'], data=[]]
else:
self.execution_state = execution_state
(cluster, environ) = self.get_execution_state_dc_environ(execution_state)
self.cluster = cluster
self.environ = environ
self.zone = cluster
self.trigger_watches() |
def _recv(self):
    """Drain all currently-available bytes from the socket.

    Returns a (possibly empty) list of parsed responses.  Returns an
    empty tuple when the connection is not in a usable state, and an
    empty list when the socket disconnects or a protocol error forces
    the connection closed.  The instance lock is held for the whole
    call and released on every exit path.
    """
    recvd = []
    self._lock.acquire()
    if not self._can_send_recv():
        log.warning('%s cannot recv: socket not connected', self)
        self._lock.release()
        return ()
    # Read at most sock_chunk_buffer_count chunks per call so one busy
    # connection cannot monopolize the caller.
    while len(recvd) < self.config['sock_chunk_buffer_count']:
        try:
            data = self._sock.recv(self.config['sock_chunk_bytes'])
            # We expect socket.recv to raise an exception if there are no
            # bytes available to read from the socket in non-blocking mode.
            # but if the socket is disconnected, we will get empty data
            # without an exception raised
            if not data:
                log.error('%s: socket disconnected', self)
                self._lock.release()
                self.close(error=Errors.KafkaConnectionError('socket disconnected'))
                return []
            else:
                recvd.append(data)
        except SSLWantReadError:
            # TLS layer needs more raw bytes before it can produce
            # decrypted data; stop reading for now.
            break
        except ConnectionError as e:
            # On Python 2 a non-blocking "no data yet" surfaces as
            # EWOULDBLOCK on ConnectionError — not an actual error.
            if six.PY2 and e.errno == errno.EWOULDBLOCK:
                break
            log.exception('%s: Error receiving network data'
                          ' closing socket', self)
            self._lock.release()
            self.close(error=Errors.KafkaConnectionError(e))
            return []
        except BlockingIOError:
            # Python 3 signals "no data available" this way in
            # non-blocking mode.
            if six.PY3:
                break
            self._lock.release()
            raise
    recvd_data = b''.join(recvd)
    if self._sensors:
        self._sensors.bytes_received.record(len(recvd_data))
    try:
        responses = self._protocol.receive_bytes(recvd_data)
    except Errors.KafkaProtocolError as e:
        # Protocol-level corruption is unrecoverable: close the conn.
        self._lock.release()
        self.close(e)
        return []
    else:
        self._lock.release()
        return responses
constant[Take all available bytes from socket, return list of any responses from parser]
variable[recvd] assign[=] list[[]]
call[name[self]._lock.acquire, parameter[]]
if <ast.UnaryOp object at 0x7da1b21e3a00> begin[:]
call[name[log].warning, parameter[constant[%s cannot recv: socket not connected], name[self]]]
call[name[self]._lock.release, parameter[]]
return[tuple[[]]]
while compare[call[name[len], parameter[name[recvd]]] less[<] call[name[self].config][constant[sock_chunk_buffer_count]]] begin[:]
<ast.Try object at 0x7da1b21e0af0>
variable[recvd_data] assign[=] call[constant[b''].join, parameter[name[recvd]]]
if name[self]._sensors begin[:]
call[name[self]._sensors.bytes_received.record, parameter[call[name[len], parameter[name[recvd_data]]]]]
<ast.Try object at 0x7da1b1c281f0> | keyword[def] identifier[_recv] ( identifier[self] ):
literal[string]
identifier[recvd] =[]
identifier[self] . identifier[_lock] . identifier[acquire] ()
keyword[if] keyword[not] identifier[self] . identifier[_can_send_recv] ():
identifier[log] . identifier[warning] ( literal[string] , identifier[self] )
identifier[self] . identifier[_lock] . identifier[release] ()
keyword[return] ()
keyword[while] identifier[len] ( identifier[recvd] )< identifier[self] . identifier[config] [ literal[string] ]:
keyword[try] :
identifier[data] = identifier[self] . identifier[_sock] . identifier[recv] ( identifier[self] . identifier[config] [ literal[string] ])
keyword[if] keyword[not] identifier[data] :
identifier[log] . identifier[error] ( literal[string] , identifier[self] )
identifier[self] . identifier[_lock] . identifier[release] ()
identifier[self] . identifier[close] ( identifier[error] = identifier[Errors] . identifier[KafkaConnectionError] ( literal[string] ))
keyword[return] []
keyword[else] :
identifier[recvd] . identifier[append] ( identifier[data] )
keyword[except] identifier[SSLWantReadError] :
keyword[break]
keyword[except] identifier[ConnectionError] keyword[as] identifier[e] :
keyword[if] identifier[six] . identifier[PY2] keyword[and] identifier[e] . identifier[errno] == identifier[errno] . identifier[EWOULDBLOCK] :
keyword[break]
identifier[log] . identifier[exception] ( literal[string]
literal[string] , identifier[self] )
identifier[self] . identifier[_lock] . identifier[release] ()
identifier[self] . identifier[close] ( identifier[error] = identifier[Errors] . identifier[KafkaConnectionError] ( identifier[e] ))
keyword[return] []
keyword[except] identifier[BlockingIOError] :
keyword[if] identifier[six] . identifier[PY3] :
keyword[break]
identifier[self] . identifier[_lock] . identifier[release] ()
keyword[raise]
identifier[recvd_data] = literal[string] . identifier[join] ( identifier[recvd] )
keyword[if] identifier[self] . identifier[_sensors] :
identifier[self] . identifier[_sensors] . identifier[bytes_received] . identifier[record] ( identifier[len] ( identifier[recvd_data] ))
keyword[try] :
identifier[responses] = identifier[self] . identifier[_protocol] . identifier[receive_bytes] ( identifier[recvd_data] )
keyword[except] identifier[Errors] . identifier[KafkaProtocolError] keyword[as] identifier[e] :
identifier[self] . identifier[_lock] . identifier[release] ()
identifier[self] . identifier[close] ( identifier[e] )
keyword[return] []
keyword[else] :
identifier[self] . identifier[_lock] . identifier[release] ()
keyword[return] identifier[responses] | def _recv(self):
"""Take all available bytes from socket, return list of any responses from parser"""
recvd = []
self._lock.acquire()
if not self._can_send_recv():
log.warning('%s cannot recv: socket not connected', self)
self._lock.release()
return () # depends on [control=['if'], data=[]]
while len(recvd) < self.config['sock_chunk_buffer_count']:
try:
data = self._sock.recv(self.config['sock_chunk_bytes'])
# We expect socket.recv to raise an exception if there are no
# bytes available to read from the socket in non-blocking mode.
# but if the socket is disconnected, we will get empty data
# without an exception raised
if not data:
log.error('%s: socket disconnected', self)
self._lock.release()
self.close(error=Errors.KafkaConnectionError('socket disconnected'))
return [] # depends on [control=['if'], data=[]]
else:
recvd.append(data) # depends on [control=['try'], data=[]]
except SSLWantReadError:
break # depends on [control=['except'], data=[]]
except ConnectionError as e:
if six.PY2 and e.errno == errno.EWOULDBLOCK:
break # depends on [control=['if'], data=[]]
log.exception('%s: Error receiving network data closing socket', self)
self._lock.release()
self.close(error=Errors.KafkaConnectionError(e))
return [] # depends on [control=['except'], data=['e']]
except BlockingIOError:
if six.PY3:
break # depends on [control=['if'], data=[]]
self._lock.release()
raise # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
recvd_data = b''.join(recvd)
if self._sensors:
self._sensors.bytes_received.record(len(recvd_data)) # depends on [control=['if'], data=[]]
try:
responses = self._protocol.receive_bytes(recvd_data) # depends on [control=['try'], data=[]]
except Errors.KafkaProtocolError as e:
self._lock.release()
self.close(e)
return [] # depends on [control=['except'], data=['e']]
else:
self._lock.release()
return responses |
def transition_matrix_samples(self):
    r"""Samples of the transition matrix.

    :returns: array of shape ``(nsamples, nstates, nstates)`` holding
        one sampled transition matrix per leading-axis slice.
    """
    out = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
    for k in range(self.nsamples):
        out[k] = self._sampled_hmms[k].transition_matrix
    return out
constant[ Samples of the transition matrix ]
variable[res] assign[=] call[name[np].empty, parameter[tuple[[<ast.Attribute object at 0x7da20c6c4580>, <ast.Attribute object at 0x7da20c6c4340>, <ast.Attribute object at 0x7da20c6c7760>]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[self].nsamples]]] begin[:]
call[name[res]][tuple[[<ast.Name object at 0x7da20c6c5f00>, <ast.Slice object at 0x7da20c6c5570>, <ast.Slice object at 0x7da20c6c7df0>]]] assign[=] call[name[self]._sampled_hmms][name[i]].transition_matrix
return[name[res]] | keyword[def] identifier[transition_matrix_samples] ( identifier[self] ):
literal[string]
identifier[res] = identifier[np] . identifier[empty] (( identifier[self] . identifier[nsamples] , identifier[self] . identifier[nstates] , identifier[self] . identifier[nstates] ), identifier[dtype] = identifier[config] . identifier[dtype] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[nsamples] ):
identifier[res] [ identifier[i] ,:,:]= identifier[self] . identifier[_sampled_hmms] [ identifier[i] ]. identifier[transition_matrix]
keyword[return] identifier[res] | def transition_matrix_samples(self):
""" Samples of the transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].transition_matrix # depends on [control=['for'], data=['i']]
return res |
def mark_deactivated(self, request, queryset):
    """Admin action: deactivate every cage in *queryset*.

    Sets the selected cages to ``Active=False`` with ``End`` set to
    today's date, then reports via the admin message framework how many
    cages were updated.
    """
    updated = queryset.update(Active=False, End=datetime.date.today())
    message_bit = "1 cage was" if updated == 1 else "%s cages were" % updated
    self.message_user(request, "%s successfully marked as deactivated." % message_bit)
constant[An admin action for marking several cages as inactive.
This action sets the selected cages as Active=False and Death=today.
This admin action also shows as the output the number of mice sacrificed.]
variable[rows_updated] assign[=] call[name[queryset].update, parameter[]]
if compare[name[rows_updated] equal[==] constant[1]] begin[:]
variable[message_bit] assign[=] constant[1 cage was]
call[name[self].message_user, parameter[name[request], binary_operation[constant[%s successfully marked as deactivated.] <ast.Mod object at 0x7da2590d6920> name[message_bit]]]] | keyword[def] identifier[mark_deactivated] ( identifier[self] , identifier[request] , identifier[queryset] ):
literal[string]
identifier[rows_updated] = identifier[queryset] . identifier[update] ( identifier[Active] = keyword[False] , identifier[End] = identifier[datetime] . identifier[date] . identifier[today] ())
keyword[if] identifier[rows_updated] == literal[int] :
identifier[message_bit] = literal[string]
keyword[else] :
identifier[message_bit] = literal[string] % identifier[rows_updated]
identifier[self] . identifier[message_user] ( identifier[request] , literal[string] % identifier[message_bit] ) | def mark_deactivated(self, request, queryset):
"""An admin action for marking several cages as inactive.
This action sets the selected cages as Active=False and Death=today.
This admin action also shows as the output the number of mice sacrificed."""
rows_updated = queryset.update(Active=False, End=datetime.date.today())
if rows_updated == 1:
message_bit = '1 cage was' # depends on [control=['if'], data=[]]
else:
message_bit = '%s cages were' % rows_updated
self.message_user(request, '%s successfully marked as deactivated.' % message_bit) |
def characters(self):
    """
    The number of characters in this text file.

    Computed as the sum of the character counts of all fragments.

    :rtype: int
    """
    return sum(fragment.characters for fragment in self.fragments)
constant[
The number of characters in this text file.
:rtype: int
]
variable[chars] assign[=] constant[0]
for taget[name[fragment]] in starred[name[self].fragments] begin[:]
<ast.AugAssign object at 0x7da1b1881060>
return[name[chars]] | keyword[def] identifier[characters] ( identifier[self] ):
literal[string]
identifier[chars] = literal[int]
keyword[for] identifier[fragment] keyword[in] identifier[self] . identifier[fragments] :
identifier[chars] += identifier[fragment] . identifier[characters]
keyword[return] identifier[chars] | def characters(self):
"""
The number of characters in this text file.
:rtype: int
"""
chars = 0
for fragment in self.fragments:
chars += fragment.characters # depends on [control=['for'], data=['fragment']]
return chars |
def parse_date(string, formation=None):
    """Parse a date string into a ``datetime.date``.

    :param string: date string; ``.`` and ``/`` separators are
        normalized to ``-``.  Candidate layouts (tried in order): for
        dashed strings ``%Y-%m-%d`` / ``%m-%d-%Y`` / ``%d-%m-%Y`` when a
        4-digit field is present, otherwise the ``%y`` two-digit-year
        variants; for compact strings ``%Y%m%d`` / ``%m%d%Y`` when
        longer than 6 characters, otherwise ``%y%m%d`` / ``%m%d%y``.
    :param formation: explicit ``strptime`` format string; when given it
        is used directly and no guessing happens.
    :return: datetime.date
    :raises ValueError: if no candidate format matches *string*.
    """
    if formation:
        return datetime.datetime.strptime(string, formation).date()
    # Normalize the separator so only '-' needs handling below.
    _string = string.replace('.', '-').replace('/', '-')
    if '-' in _string:
        parts = _string.split('-')
        # A field wider than 3 chars in the first or last slot implies a
        # 4-digit year; otherwise assume a 2-digit year.
        if len(parts[0]) > 3 or len(parts[2]) > 3:
            formats = ('%Y-%m-%d', '%m-%d-%Y', '%d-%m-%Y')
        else:
            formats = ('%y-%m-%d', '%m-%d-%y', '%d-%m-%y')
    elif len(_string) > 6:
        formats = ('%Y%m%d', '%m%d%Y')
    else:
        # len <= 6: the two branches above are exhaustive, so the old
        # unreachable CanNotFormatError branch has been dropped.
        formats = ('%y%m%d', '%m%d%y')
    last_error = None
    for fmt in formats:
        try:
            return datetime.datetime.strptime(_string, fmt).date()
        except ValueError as exc:
            last_error = exc
    # Match the original behaviour: the error from the final format
    # attempt propagates to the caller as a ValueError.
    raise last_error
constant[
string to date stamp
:param string: date string
:param formation: format string
:return: datetime.date
]
if name[formation] begin[:]
variable[_stamp] assign[=] call[call[name[datetime].datetime.strptime, parameter[name[string], name[formation]]].date, parameter[]]
return[name[_stamp]]
variable[_string] assign[=] call[call[name[string].replace, parameter[constant[.], constant[-]]].replace, parameter[constant[/], constant[-]]]
if compare[constant[-] in name[_string]] begin[:]
if <ast.BoolOp object at 0x7da1b0c51c30> begin[:]
<ast.Try object at 0x7da1b0c505b0>
return[name[_stamp]] | keyword[def] identifier[parse_date] ( identifier[string] , identifier[formation] = keyword[None] ):
literal[string]
keyword[if] identifier[formation] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[string] , identifier[formation] ). identifier[date] ()
keyword[return] identifier[_stamp]
identifier[_string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] literal[string] keyword[in] identifier[_string] :
keyword[if] identifier[len] ( identifier[_string] . identifier[split] ( literal[string] )[ literal[int] ])> literal[int] keyword[or] identifier[len] ( identifier[_string] . identifier[split] ( literal[string] )[ literal[int] ])> literal[int] :
keyword[try] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[except] identifier[ValueError] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[else] :
keyword[try] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[except] identifier[ValueError] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[else] :
keyword[if] identifier[len] ( identifier[_string] )> literal[int] :
keyword[try] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[except] identifier[ValueError] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[elif] identifier[len] ( identifier[_string] )<= literal[int] :
keyword[try] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[except] identifier[ValueError] :
identifier[_stamp] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[_string] , literal[string] ). identifier[date] ()
keyword[else] :
keyword[raise] identifier[CanNotFormatError]
keyword[return] identifier[_stamp] | def parse_date(string, formation=None):
"""
string to date stamp
:param string: date string
:param formation: format string
:return: datetime.date
"""
if formation:
_stamp = datetime.datetime.strptime(string, formation).date()
return _stamp # depends on [control=['if'], data=[]]
_string = string.replace('.', '-').replace('/', '-')
if '-' in _string:
if len(_string.split('-')[0]) > 3 or len(_string.split('-')[2]) > 3:
try:
_stamp = datetime.datetime.strptime(_string, '%Y-%m-%d').date() # depends on [control=['try'], data=[]]
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%Y').date() # depends on [control=['try'], data=[]]
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%Y').date() # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
_stamp = datetime.datetime.strptime(_string, '%y-%m-%d').date() # depends on [control=['try'], data=[]]
except ValueError:
try:
_stamp = datetime.datetime.strptime(_string, '%m-%d-%y').date() # depends on [control=['try'], data=[]]
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%d-%m-%y').date() # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['_string']]
elif len(_string) > 6:
try:
_stamp = datetime.datetime.strptime(_string, '%Y%m%d').date() # depends on [control=['try'], data=[]]
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%Y').date() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif len(_string) <= 6:
try:
_stamp = datetime.datetime.strptime(_string, '%y%m%d').date() # depends on [control=['try'], data=[]]
except ValueError:
_stamp = datetime.datetime.strptime(_string, '%m%d%y').date() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
raise CanNotFormatError
return _stamp |
def load_from_config(cp, model, **kwargs):
    r"""Initialize a sampler based on the given config file.

    The ``name`` option in the ``[sampler]`` section selects which
    sampler class to load; that class's ``from_config`` is then invoked.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config parser to read from.
    model : pycbc.inference.model
        Which model to pass to the sampler.
    \**kwargs :
        All other keyword arguments are passed directly to the chosen
        sampler's ``from_config``.

    Returns
    -------
    sampler :
        The initialized sampler.
    """
    sampler_name = cp.get('sampler', 'name')
    sampler_class = samplers[sampler_name]
    return sampler_class.from_config(cp, model, **kwargs)
constant[Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
]
variable[name] assign[=] call[name[cp].get, parameter[constant[sampler], constant[name]]]
return[call[call[name[samplers]][name[name]].from_config, parameter[name[cp], name[model]]]] | keyword[def] identifier[load_from_config] ( identifier[cp] , identifier[model] ,** identifier[kwargs] ):
literal[string]
identifier[name] = identifier[cp] . identifier[get] ( literal[string] , literal[string] )
keyword[return] identifier[samplers] [ identifier[name] ]. identifier[from_config] ( identifier[cp] , identifier[model] ,** identifier[kwargs] ) | def load_from_config(cp, model, **kwargs):
"""Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
"""
name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs) |
def n_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None, relabel=True):
"""Joins other to self at the N-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float
Psi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
omega: float
Omega torsion angle (degrees) between final `Residue` of
other and first `Residue` of self.
phi: float
Phi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
o_c_n_angle: float or None
Desired angle between O, C (final `Residue` of other) and N
(first `Residue` of self) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None
Desired angle between C (final `Residue` of other) and N, CA
(first `Residue` of self) atoms. If `None`, default value is taken
from `ideal_backbone_bond_angles`.
c_n_length: float or None
Desired peptide bond length between final `Residue` of other
and first `Residue` of self. If None, default value is taken
from ideal_backbone_bond_lengths.
relabel: bool
If True, relabel_all is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a `Polypeptide`
"""
if isinstance(other, Residue):
other = Polypeptide([other])
if not isinstance(other, Polypeptide):
raise TypeError(
'Only Polypeptide or Residue objects can be joined to a Polypeptide')
if abs(omega) >= 90:
peptide_conformation = 'trans'
else:
peptide_conformation = 'cis'
if o_c_n_angle is None:
o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
if c_n_ca_angle is None:
c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
if c_n_length is None:
c_n_length = ideal_backbone_bond_lengths['c_n']
r1 = self[0]
r1_n = r1['N']._vector
r1_ca = r1['CA']._vector
r1_c = r1['C']._vector
# p1 is point that will be used to position the C atom of r2.
p1 = r1_ca[:]
# rotate p1 by c_n_ca_angle, about axis perpendicular to the
# r1_n, r1_ca, r1_c plane, passing through r1_ca.
axis = numpy.cross((r1_ca - r1_n), (r1_c - r1_n))
q = Quaternion.angle_and_axis(angle=c_n_ca_angle, axis=axis)
p1 = q.rotate_vector(v=p1, point=r1_n)
# Ensure p1 is separated from r1_n by the correct distance.
p1 = r1_n + (c_n_length * unit_vector(p1 - r1_n))
# translate other so that its final C atom is at p1
other.translate(vector=(p1 - other[-1]['C']._vector))
# Force CA-C=O-N to be in a plane, and fix O=C-N angle accordingly
measured_dihedral = dihedral(
other[-1]['CA'], other[-1]['C'], other[-1]['O'], r1['N'])
desired_dihedral = 180.0
axis = other[-1]['O'] - other[-1]['C']
other.rotate(angle=(measured_dihedral - desired_dihedral),
axis=axis, point=other[-1]['C']._vector)
axis = (numpy.cross(other[-1]['O'] - other[-1]
['C'], r1['N'] - other[-1]['C']))
measured_o_c_n = angle_between_vectors(
other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C'])
other.rotate(angle=(measured_o_c_n - o_c_n_angle),
axis=axis, point=other[-1]['C']._vector)
# rotate other to obtain desired phi, omega, psi values at the join.
measured_phi = dihedral(other[-1]['C'], r1['N'], r1['CA'], r1['C'])
other.rotate(angle=(phi - measured_phi),
axis=(r1_n - r1_ca), point=r1_ca)
measured_omega = dihedral(
other[-1]['CA'], other[-1]['C'], r1['N'], r1['CA'])
other.rotate(angle=(measured_omega - omega),
axis=(r1['N'] - other[-1]['C']), point=r1_n)
measured_psi = dihedral(
other[-1]['N'], other[-1]['CA'], other[-1]['C'], r1['N'])
other.rotate(angle=-(measured_psi - psi), axis=(other[-1]['CA'] - other[-1]['C']),
point=other[-1]['CA']._vector)
self._monomers = other._monomers + self._monomers
if relabel:
self.relabel_all()
self.tags['assigned_ff'] = False
return | def function[n_join, parameter[self, other, psi, omega, phi, o_c_n_angle, c_n_ca_angle, c_n_length, relabel]]:
constant[Joins other to self at the N-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float
Psi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
omega: float
Omega torsion angle (degrees) between final `Residue` of
other and first `Residue` of self.
phi: float
Phi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
o_c_n_angle: float or None
Desired angle between O, C (final `Residue` of other) and N
(first `Residue` of self) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None
Desired angle between C (final `Residue` of other) and N, CA
(first `Residue` of self) atoms. If `None`, default value is taken
from `ideal_backbone_bond_angles`.
c_n_length: float or None
Desired peptide bond length between final `Residue` of other
and first `Residue` of self. If None, default value is taken
from ideal_backbone_bond_lengths.
relabel: bool
If True, relabel_all is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a `Polypeptide`
]
if call[name[isinstance], parameter[name[other], name[Residue]]] begin[:]
variable[other] assign[=] call[name[Polypeptide], parameter[list[[<ast.Name object at 0x7da1b281a290>]]]]
if <ast.UnaryOp object at 0x7da1b281bac0> begin[:]
<ast.Raise object at 0x7da1b2819e10>
if compare[call[name[abs], parameter[name[omega]]] greater_or_equal[>=] constant[90]] begin[:]
variable[peptide_conformation] assign[=] constant[trans]
if compare[name[o_c_n_angle] is constant[None]] begin[:]
variable[o_c_n_angle] assign[=] call[call[name[ideal_backbone_bond_angles]][name[peptide_conformation]]][constant[o_c_n]]
if compare[name[c_n_ca_angle] is constant[None]] begin[:]
variable[c_n_ca_angle] assign[=] call[call[name[ideal_backbone_bond_angles]][name[peptide_conformation]]][constant[c_n_ca]]
if compare[name[c_n_length] is constant[None]] begin[:]
variable[c_n_length] assign[=] call[name[ideal_backbone_bond_lengths]][constant[c_n]]
variable[r1] assign[=] call[name[self]][constant[0]]
variable[r1_n] assign[=] call[name[r1]][constant[N]]._vector
variable[r1_ca] assign[=] call[name[r1]][constant[CA]]._vector
variable[r1_c] assign[=] call[name[r1]][constant[C]]._vector
variable[p1] assign[=] call[name[r1_ca]][<ast.Slice object at 0x7da1b281ada0>]
variable[axis] assign[=] call[name[numpy].cross, parameter[binary_operation[name[r1_ca] - name[r1_n]], binary_operation[name[r1_c] - name[r1_n]]]]
variable[q] assign[=] call[name[Quaternion].angle_and_axis, parameter[]]
variable[p1] assign[=] call[name[q].rotate_vector, parameter[]]
variable[p1] assign[=] binary_operation[name[r1_n] + binary_operation[name[c_n_length] * call[name[unit_vector], parameter[binary_operation[name[p1] - name[r1_n]]]]]]
call[name[other].translate, parameter[]]
variable[measured_dihedral] assign[=] call[name[dihedral], parameter[call[call[name[other]][<ast.UnaryOp object at 0x7da1b281b610>]][constant[CA]], call[call[name[other]][<ast.UnaryOp object at 0x7da1b265dc90>]][constant[C]], call[call[name[other]][<ast.UnaryOp object at 0x7da1b265f070>]][constant[O]], call[name[r1]][constant[N]]]]
variable[desired_dihedral] assign[=] constant[180.0]
variable[axis] assign[=] binary_operation[call[call[name[other]][<ast.UnaryOp object at 0x7da1b265ff10>]][constant[O]] - call[call[name[other]][<ast.UnaryOp object at 0x7da1b265e5f0>]][constant[C]]]
call[name[other].rotate, parameter[]]
variable[axis] assign[=] call[name[numpy].cross, parameter[binary_operation[call[call[name[other]][<ast.UnaryOp object at 0x7da1b265e500>]][constant[O]] - call[call[name[other]][<ast.UnaryOp object at 0x7da1b265f040>]][constant[C]]], binary_operation[call[name[r1]][constant[N]] - call[call[name[other]][<ast.UnaryOp object at 0x7da1b265e410>]][constant[C]]]]]
variable[measured_o_c_n] assign[=] call[name[angle_between_vectors], parameter[binary_operation[call[call[name[other]][<ast.UnaryOp object at 0x7da1b265d360>]][constant[O]] - call[call[name[other]][<ast.UnaryOp object at 0x7da1b265d2a0>]][constant[C]]], binary_operation[call[name[r1]][constant[N]] - call[call[name[other]][<ast.UnaryOp object at 0x7da1b265f1f0>]][constant[C]]]]]
call[name[other].rotate, parameter[]]
variable[measured_phi] assign[=] call[name[dihedral], parameter[call[call[name[other]][<ast.UnaryOp object at 0x7da1b265cb20>]][constant[C]], call[name[r1]][constant[N]], call[name[r1]][constant[CA]], call[name[r1]][constant[C]]]]
call[name[other].rotate, parameter[]]
variable[measured_omega] assign[=] call[name[dihedral], parameter[call[call[name[other]][<ast.UnaryOp object at 0x7da1b2625150>]][constant[CA]], call[call[name[other]][<ast.UnaryOp object at 0x7da1b2624cd0>]][constant[C]], call[name[r1]][constant[N]], call[name[r1]][constant[CA]]]]
call[name[other].rotate, parameter[]]
variable[measured_psi] assign[=] call[name[dihedral], parameter[call[call[name[other]][<ast.UnaryOp object at 0x7da1b2629f30>]][constant[N]], call[call[name[other]][<ast.UnaryOp object at 0x7da1b262a620>]][constant[CA]], call[call[name[other]][<ast.UnaryOp object at 0x7da1b2629450>]][constant[C]], call[name[r1]][constant[N]]]]
call[name[other].rotate, parameter[]]
name[self]._monomers assign[=] binary_operation[name[other]._monomers + name[self]._monomers]
if name[relabel] begin[:]
call[name[self].relabel_all, parameter[]]
call[name[self].tags][constant[assigned_ff]] assign[=] constant[False]
return[None] | keyword[def] identifier[n_join] ( identifier[self] , identifier[other] , identifier[psi] =- literal[int] , identifier[omega] =- literal[int] , identifier[phi] =- literal[int] ,
identifier[o_c_n_angle] = keyword[None] , identifier[c_n_ca_angle] = keyword[None] , identifier[c_n_length] = keyword[None] , identifier[relabel] = keyword[True] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[other] , identifier[Residue] ):
identifier[other] = identifier[Polypeptide] ([ identifier[other] ])
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[Polypeptide] ):
keyword[raise] identifier[TypeError] (
literal[string] )
keyword[if] identifier[abs] ( identifier[omega] )>= literal[int] :
identifier[peptide_conformation] = literal[string]
keyword[else] :
identifier[peptide_conformation] = literal[string]
keyword[if] identifier[o_c_n_angle] keyword[is] keyword[None] :
identifier[o_c_n_angle] = identifier[ideal_backbone_bond_angles] [ identifier[peptide_conformation] ][ literal[string] ]
keyword[if] identifier[c_n_ca_angle] keyword[is] keyword[None] :
identifier[c_n_ca_angle] = identifier[ideal_backbone_bond_angles] [ identifier[peptide_conformation] ][ literal[string] ]
keyword[if] identifier[c_n_length] keyword[is] keyword[None] :
identifier[c_n_length] = identifier[ideal_backbone_bond_lengths] [ literal[string] ]
identifier[r1] = identifier[self] [ literal[int] ]
identifier[r1_n] = identifier[r1] [ literal[string] ]. identifier[_vector]
identifier[r1_ca] = identifier[r1] [ literal[string] ]. identifier[_vector]
identifier[r1_c] = identifier[r1] [ literal[string] ]. identifier[_vector]
identifier[p1] = identifier[r1_ca] [:]
identifier[axis] = identifier[numpy] . identifier[cross] (( identifier[r1_ca] - identifier[r1_n] ),( identifier[r1_c] - identifier[r1_n] ))
identifier[q] = identifier[Quaternion] . identifier[angle_and_axis] ( identifier[angle] = identifier[c_n_ca_angle] , identifier[axis] = identifier[axis] )
identifier[p1] = identifier[q] . identifier[rotate_vector] ( identifier[v] = identifier[p1] , identifier[point] = identifier[r1_n] )
identifier[p1] = identifier[r1_n] +( identifier[c_n_length] * identifier[unit_vector] ( identifier[p1] - identifier[r1_n] ))
identifier[other] . identifier[translate] ( identifier[vector] =( identifier[p1] - identifier[other] [- literal[int] ][ literal[string] ]. identifier[_vector] ))
identifier[measured_dihedral] = identifier[dihedral] (
identifier[other] [- literal[int] ][ literal[string] ], identifier[other] [- literal[int] ][ literal[string] ], identifier[other] [- literal[int] ][ literal[string] ], identifier[r1] [ literal[string] ])
identifier[desired_dihedral] = literal[int]
identifier[axis] = identifier[other] [- literal[int] ][ literal[string] ]- identifier[other] [- literal[int] ][ literal[string] ]
identifier[other] . identifier[rotate] ( identifier[angle] =( identifier[measured_dihedral] - identifier[desired_dihedral] ),
identifier[axis] = identifier[axis] , identifier[point] = identifier[other] [- literal[int] ][ literal[string] ]. identifier[_vector] )
identifier[axis] =( identifier[numpy] . identifier[cross] ( identifier[other] [- literal[int] ][ literal[string] ]- identifier[other] [- literal[int] ]
[ literal[string] ], identifier[r1] [ literal[string] ]- identifier[other] [- literal[int] ][ literal[string] ]))
identifier[measured_o_c_n] = identifier[angle_between_vectors] (
identifier[other] [- literal[int] ][ literal[string] ]- identifier[other] [- literal[int] ][ literal[string] ], identifier[r1] [ literal[string] ]- identifier[other] [- literal[int] ][ literal[string] ])
identifier[other] . identifier[rotate] ( identifier[angle] =( identifier[measured_o_c_n] - identifier[o_c_n_angle] ),
identifier[axis] = identifier[axis] , identifier[point] = identifier[other] [- literal[int] ][ literal[string] ]. identifier[_vector] )
identifier[measured_phi] = identifier[dihedral] ( identifier[other] [- literal[int] ][ literal[string] ], identifier[r1] [ literal[string] ], identifier[r1] [ literal[string] ], identifier[r1] [ literal[string] ])
identifier[other] . identifier[rotate] ( identifier[angle] =( identifier[phi] - identifier[measured_phi] ),
identifier[axis] =( identifier[r1_n] - identifier[r1_ca] ), identifier[point] = identifier[r1_ca] )
identifier[measured_omega] = identifier[dihedral] (
identifier[other] [- literal[int] ][ literal[string] ], identifier[other] [- literal[int] ][ literal[string] ], identifier[r1] [ literal[string] ], identifier[r1] [ literal[string] ])
identifier[other] . identifier[rotate] ( identifier[angle] =( identifier[measured_omega] - identifier[omega] ),
identifier[axis] =( identifier[r1] [ literal[string] ]- identifier[other] [- literal[int] ][ literal[string] ]), identifier[point] = identifier[r1_n] )
identifier[measured_psi] = identifier[dihedral] (
identifier[other] [- literal[int] ][ literal[string] ], identifier[other] [- literal[int] ][ literal[string] ], identifier[other] [- literal[int] ][ literal[string] ], identifier[r1] [ literal[string] ])
identifier[other] . identifier[rotate] ( identifier[angle] =-( identifier[measured_psi] - identifier[psi] ), identifier[axis] =( identifier[other] [- literal[int] ][ literal[string] ]- identifier[other] [- literal[int] ][ literal[string] ]),
identifier[point] = identifier[other] [- literal[int] ][ literal[string] ]. identifier[_vector] )
identifier[self] . identifier[_monomers] = identifier[other] . identifier[_monomers] + identifier[self] . identifier[_monomers]
keyword[if] identifier[relabel] :
identifier[self] . identifier[relabel_all] ()
identifier[self] . identifier[tags] [ literal[string] ]= keyword[False]
keyword[return] | def n_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07, o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None, relabel=True):
"""Joins other to self at the N-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float
Psi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
omega: float
Omega torsion angle (degrees) between final `Residue` of
other and first `Residue` of self.
phi: float
Phi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
o_c_n_angle: float or None
Desired angle between O, C (final `Residue` of other) and N
(first `Residue` of self) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None
Desired angle between C (final `Residue` of other) and N, CA
(first `Residue` of self) atoms. If `None`, default value is taken
from `ideal_backbone_bond_angles`.
c_n_length: float or None
Desired peptide bond length between final `Residue` of other
and first `Residue` of self. If None, default value is taken
from ideal_backbone_bond_lengths.
relabel: bool
If True, relabel_all is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a `Polypeptide`
"""
if isinstance(other, Residue):
other = Polypeptide([other]) # depends on [control=['if'], data=[]]
if not isinstance(other, Polypeptide):
raise TypeError('Only Polypeptide or Residue objects can be joined to a Polypeptide') # depends on [control=['if'], data=[]]
if abs(omega) >= 90:
peptide_conformation = 'trans' # depends on [control=['if'], data=[]]
else:
peptide_conformation = 'cis'
if o_c_n_angle is None:
o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n'] # depends on [control=['if'], data=['o_c_n_angle']]
if c_n_ca_angle is None:
c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca'] # depends on [control=['if'], data=['c_n_ca_angle']]
if c_n_length is None:
c_n_length = ideal_backbone_bond_lengths['c_n'] # depends on [control=['if'], data=['c_n_length']]
r1 = self[0]
r1_n = r1['N']._vector
r1_ca = r1['CA']._vector
r1_c = r1['C']._vector
# p1 is point that will be used to position the C atom of r2.
p1 = r1_ca[:]
# rotate p1 by c_n_ca_angle, about axis perpendicular to the
# r1_n, r1_ca, r1_c plane, passing through r1_ca.
axis = numpy.cross(r1_ca - r1_n, r1_c - r1_n)
q = Quaternion.angle_and_axis(angle=c_n_ca_angle, axis=axis)
p1 = q.rotate_vector(v=p1, point=r1_n)
# Ensure p1 is separated from r1_n by the correct distance.
p1 = r1_n + c_n_length * unit_vector(p1 - r1_n)
# translate other so that its final C atom is at p1
other.translate(vector=p1 - other[-1]['C']._vector)
# Force CA-C=O-N to be in a plane, and fix O=C-N angle accordingly
measured_dihedral = dihedral(other[-1]['CA'], other[-1]['C'], other[-1]['O'], r1['N'])
desired_dihedral = 180.0
axis = other[-1]['O'] - other[-1]['C']
other.rotate(angle=measured_dihedral - desired_dihedral, axis=axis, point=other[-1]['C']._vector)
axis = numpy.cross(other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C'])
measured_o_c_n = angle_between_vectors(other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C'])
other.rotate(angle=measured_o_c_n - o_c_n_angle, axis=axis, point=other[-1]['C']._vector)
# rotate other to obtain desired phi, omega, psi values at the join.
measured_phi = dihedral(other[-1]['C'], r1['N'], r1['CA'], r1['C'])
other.rotate(angle=phi - measured_phi, axis=r1_n - r1_ca, point=r1_ca)
measured_omega = dihedral(other[-1]['CA'], other[-1]['C'], r1['N'], r1['CA'])
other.rotate(angle=measured_omega - omega, axis=r1['N'] - other[-1]['C'], point=r1_n)
measured_psi = dihedral(other[-1]['N'], other[-1]['CA'], other[-1]['C'], r1['N'])
other.rotate(angle=-(measured_psi - psi), axis=other[-1]['CA'] - other[-1]['C'], point=other[-1]['CA']._vector)
self._monomers = other._monomers + self._monomers
if relabel:
self.relabel_all() # depends on [control=['if'], data=[]]
self.tags['assigned_ff'] = False
return |
def clear_cache(backend=None):
'''
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
'''
fileserver = salt.fileserver.Fileserver(__opts__)
cleared, errors = fileserver.clear_cache(back=backend)
ret = {}
if cleared:
ret['cleared'] = cleared
if errors:
ret['errors'] = errors
if not ret:
return 'No cache was cleared'
return ret | def function[clear_cache, parameter[backend]]:
constant[
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
]
variable[fileserver] assign[=] call[name[salt].fileserver.Fileserver, parameter[name[__opts__]]]
<ast.Tuple object at 0x7da18dc9a1d0> assign[=] call[name[fileserver].clear_cache, parameter[]]
variable[ret] assign[=] dictionary[[], []]
if name[cleared] begin[:]
call[name[ret]][constant[cleared]] assign[=] name[cleared]
if name[errors] begin[:]
call[name[ret]][constant[errors]] assign[=] name[errors]
if <ast.UnaryOp object at 0x7da18dc995d0> begin[:]
return[constant[No cache was cleared]]
return[name[ret]] | keyword[def] identifier[clear_cache] ( identifier[backend] = keyword[None] ):
literal[string]
identifier[fileserver] = identifier[salt] . identifier[fileserver] . identifier[Fileserver] ( identifier[__opts__] )
identifier[cleared] , identifier[errors] = identifier[fileserver] . identifier[clear_cache] ( identifier[back] = identifier[backend] )
identifier[ret] ={}
keyword[if] identifier[cleared] :
identifier[ret] [ literal[string] ]= identifier[cleared]
keyword[if] identifier[errors] :
identifier[ret] [ literal[string] ]= identifier[errors]
keyword[if] keyword[not] identifier[ret] :
keyword[return] literal[string]
keyword[return] identifier[ret] | def clear_cache(backend=None):
"""
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
"""
fileserver = salt.fileserver.Fileserver(__opts__)
(cleared, errors) = fileserver.clear_cache(back=backend)
ret = {}
if cleared:
ret['cleared'] = cleared # depends on [control=['if'], data=[]]
if errors:
ret['errors'] = errors # depends on [control=['if'], data=[]]
if not ret:
return 'No cache was cleared' # depends on [control=['if'], data=[]]
return ret |
def delete_preferences(queryset):
"""
Delete preferences objects if they are not present in registry. Return a list of deleted objects
"""
deleted = []
# Iterate through preferences. If an error is raised when accessing preference object, just delete it
for p in queryset:
try:
pref = p.registry.get(section=p.section, name=p.name, fallback=False)
except NotFoundInRegistry:
p.delete()
deleted.append(p)
return deleted | def function[delete_preferences, parameter[queryset]]:
constant[
Delete preferences objects if they are not present in registry. Return a list of deleted objects
]
variable[deleted] assign[=] list[[]]
for taget[name[p]] in starred[name[queryset]] begin[:]
<ast.Try object at 0x7da1b12b5510>
return[name[deleted]] | keyword[def] identifier[delete_preferences] ( identifier[queryset] ):
literal[string]
identifier[deleted] =[]
keyword[for] identifier[p] keyword[in] identifier[queryset] :
keyword[try] :
identifier[pref] = identifier[p] . identifier[registry] . identifier[get] ( identifier[section] = identifier[p] . identifier[section] , identifier[name] = identifier[p] . identifier[name] , identifier[fallback] = keyword[False] )
keyword[except] identifier[NotFoundInRegistry] :
identifier[p] . identifier[delete] ()
identifier[deleted] . identifier[append] ( identifier[p] )
keyword[return] identifier[deleted] | def delete_preferences(queryset):
"""
Delete preferences objects if they are not present in registry. Return a list of deleted objects
"""
deleted = []
# Iterate through preferences. If an error is raised when accessing preference object, just delete it
for p in queryset:
try:
pref = p.registry.get(section=p.section, name=p.name, fallback=False) # depends on [control=['try'], data=[]]
except NotFoundInRegistry:
p.delete()
deleted.append(p) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['p']]
return deleted |
def _get_auth_from_keyring(self):
"""Try to get credentials using `keyring <https://github.com/jaraco/keyring>`_."""
if not keyring:
return None
# Take user from URL if available, else the OS login name
password = self._get_password_from_keyring(self.user or getpass.getuser())
if password is not None:
self.user = self.user or getpass.getuser()
self.password = password
return 'keyring' | def function[_get_auth_from_keyring, parameter[self]]:
constant[Try to get credentials using `keyring <https://github.com/jaraco/keyring>`_.]
if <ast.UnaryOp object at 0x7da204564490> begin[:]
return[constant[None]]
variable[password] assign[=] call[name[self]._get_password_from_keyring, parameter[<ast.BoolOp object at 0x7da204566fe0>]]
if compare[name[password] is_not constant[None]] begin[:]
name[self].user assign[=] <ast.BoolOp object at 0x7da204566320>
name[self].password assign[=] name[password]
return[constant[keyring]] | keyword[def] identifier[_get_auth_from_keyring] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[keyring] :
keyword[return] keyword[None]
identifier[password] = identifier[self] . identifier[_get_password_from_keyring] ( identifier[self] . identifier[user] keyword[or] identifier[getpass] . identifier[getuser] ())
keyword[if] identifier[password] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[user] = identifier[self] . identifier[user] keyword[or] identifier[getpass] . identifier[getuser] ()
identifier[self] . identifier[password] = identifier[password]
keyword[return] literal[string] | def _get_auth_from_keyring(self):
"""Try to get credentials using `keyring <https://github.com/jaraco/keyring>`_."""
if not keyring:
return None # depends on [control=['if'], data=[]]
# Take user from URL if available, else the OS login name
password = self._get_password_from_keyring(self.user or getpass.getuser())
if password is not None:
self.user = self.user or getpass.getuser()
self.password = password # depends on [control=['if'], data=['password']]
return 'keyring' |
def cluster_application_attempt_info(self, application_id, attempt_id):
"""
With the application attempts API, you can obtain an extended info about
an application attempt.
:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}'.format(
appid=application_id, attemptid=attempt_id)
return self.request(path) | def function[cluster_application_attempt_info, parameter[self, application_id, attempt_id]]:
constant[
With the application attempts API, you can obtain an extended info about
an application attempt.
:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
]
variable[path] assign[=] call[constant[/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}].format, parameter[]]
return[call[name[self].request, parameter[name[path]]]] | keyword[def] identifier[cluster_application_attempt_info] ( identifier[self] , identifier[application_id] , identifier[attempt_id] ):
literal[string]
identifier[path] = literal[string] . identifier[format] (
identifier[appid] = identifier[application_id] , identifier[attemptid] = identifier[attempt_id] )
keyword[return] identifier[self] . identifier[request] ( identifier[path] ) | def cluster_application_attempt_info(self, application_id, attempt_id):
"""
With the application attempts API, you can obtain an extended info about
an application attempt.
:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}'.format(appid=application_id, attemptid=attempt_id)
return self.request(path) |
def ReadList(self, *branches, **kwargs):
"""
Same as `phi.dsl.Expression.List` but any string argument `x` is translated to `Read(x)`.
"""
branches = map(lambda x: E.Read(x) if isinstance(x, str) else x, branches)
return self.List(*branches, **kwargs) | def function[ReadList, parameter[self]]:
constant[
Same as `phi.dsl.Expression.List` but any string argument `x` is translated to `Read(x)`.
]
variable[branches] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da207f01330>, name[branches]]]
return[call[name[self].List, parameter[<ast.Starred object at 0x7da207f03760>]]] | keyword[def] identifier[ReadList] ( identifier[self] ,* identifier[branches] ,** identifier[kwargs] ):
literal[string]
identifier[branches] = identifier[map] ( keyword[lambda] identifier[x] : identifier[E] . identifier[Read] ( identifier[x] ) keyword[if] identifier[isinstance] ( identifier[x] , identifier[str] ) keyword[else] identifier[x] , identifier[branches] )
keyword[return] identifier[self] . identifier[List] (* identifier[branches] ,** identifier[kwargs] ) | def ReadList(self, *branches, **kwargs):
"""
Same as `phi.dsl.Expression.List` but any string argument `x` is translated to `Read(x)`.
"""
branches = map(lambda x: E.Read(x) if isinstance(x, str) else x, branches)
return self.List(*branches, **kwargs) |
def kill(arg1, arg2):
"""Stops a proces that contains arg1 and is filtered by arg2
"""
from subprocess import Popen, PIPE
# Wait until ready
t0 = time.time()
# Wait no more than these many seconds
time_out = 30
running = True
while running and time.time() - t0 < time_out:
if os.name == 'nt':
p = Popen(
'tasklist | find "%s"' % arg1,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=False)
else:
p = Popen(
'ps aux | grep %s' % arg1,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=True)
lines = p.stdout.readlines()
running = False
for line in lines:
# this kills all java.exe and python including self in windows
if ('%s' % arg2 in line) or (os.name == 'nt'
and '%s' % arg1 in line):
running = True
# Get pid
fields = line.strip().split()
info('Stopping %s (process number %s)' % (arg1, fields[1]))
if os.name == 'nt':
kill = 'taskkill /F /PID "%s"' % fields[1]
else:
kill = 'kill -9 %s 2> /dev/null' % fields[1]
os.system(kill)
# Give it a little more time
time.sleep(1)
else:
pass
if running:
raise Exception('Could not stop %s: '
'Running processes are\n%s' % (arg1, '\n'.join(
[l.strip() for l in lines]))) | def function[kill, parameter[arg1, arg2]]:
constant[Stops a proces that contains arg1 and is filtered by arg2
]
from relative_module[subprocess] import module[Popen], module[PIPE]
variable[t0] assign[=] call[name[time].time, parameter[]]
variable[time_out] assign[=] constant[30]
variable[running] assign[=] constant[True]
while <ast.BoolOp object at 0x7da1b06a23b0> begin[:]
if compare[name[os].name equal[==] constant[nt]] begin[:]
variable[p] assign[=] call[name[Popen], parameter[binary_operation[constant[tasklist | find "%s"] <ast.Mod object at 0x7da2590d6920> name[arg1]]]]
variable[lines] assign[=] call[name[p].stdout.readlines, parameter[]]
variable[running] assign[=] constant[False]
for taget[name[line]] in starred[name[lines]] begin[:]
if <ast.BoolOp object at 0x7da1b06a1450> begin[:]
variable[running] assign[=] constant[True]
variable[fields] assign[=] call[call[name[line].strip, parameter[]].split, parameter[]]
call[name[info], parameter[binary_operation[constant[Stopping %s (process number %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b06a18a0>, <ast.Subscript object at 0x7da1b06a1780>]]]]]
if compare[name[os].name equal[==] constant[nt]] begin[:]
variable[kill] assign[=] binary_operation[constant[taskkill /F /PID "%s"] <ast.Mod object at 0x7da2590d6920> call[name[fields]][constant[1]]]
call[name[os].system, parameter[name[kill]]]
call[name[time].sleep, parameter[constant[1]]]
if name[running] begin[:]
<ast.Raise object at 0x7da1b06a2b90> | keyword[def] identifier[kill] ( identifier[arg1] , identifier[arg2] ):
literal[string]
keyword[from] identifier[subprocess] keyword[import] identifier[Popen] , identifier[PIPE]
identifier[t0] = identifier[time] . identifier[time] ()
identifier[time_out] = literal[int]
identifier[running] = keyword[True]
keyword[while] identifier[running] keyword[and] identifier[time] . identifier[time] ()- identifier[t0] < identifier[time_out] :
keyword[if] identifier[os] . identifier[name] == literal[string] :
identifier[p] = identifier[Popen] (
literal[string] % identifier[arg1] ,
identifier[shell] = keyword[True] ,
identifier[stdin] = identifier[PIPE] ,
identifier[stdout] = identifier[PIPE] ,
identifier[stderr] = identifier[PIPE] ,
identifier[close_fds] = keyword[False] )
keyword[else] :
identifier[p] = identifier[Popen] (
literal[string] % identifier[arg1] ,
identifier[shell] = keyword[True] ,
identifier[stdin] = identifier[PIPE] ,
identifier[stdout] = identifier[PIPE] ,
identifier[stderr] = identifier[PIPE] ,
identifier[close_fds] = keyword[True] )
identifier[lines] = identifier[p] . identifier[stdout] . identifier[readlines] ()
identifier[running] = keyword[False]
keyword[for] identifier[line] keyword[in] identifier[lines] :
keyword[if] ( literal[string] % identifier[arg2] keyword[in] identifier[line] ) keyword[or] ( identifier[os] . identifier[name] == literal[string]
keyword[and] literal[string] % identifier[arg1] keyword[in] identifier[line] ):
identifier[running] = keyword[True]
identifier[fields] = identifier[line] . identifier[strip] (). identifier[split] ()
identifier[info] ( literal[string] %( identifier[arg1] , identifier[fields] [ literal[int] ]))
keyword[if] identifier[os] . identifier[name] == literal[string] :
identifier[kill] = literal[string] % identifier[fields] [ literal[int] ]
keyword[else] :
identifier[kill] = literal[string] % identifier[fields] [ literal[int] ]
identifier[os] . identifier[system] ( identifier[kill] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[else] :
keyword[pass]
keyword[if] identifier[running] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] %( identifier[arg1] , literal[string] . identifier[join] (
[ identifier[l] . identifier[strip] () keyword[for] identifier[l] keyword[in] identifier[lines] ]))) | def kill(arg1, arg2):
"""Stops a proces that contains arg1 and is filtered by arg2
"""
from subprocess import Popen, PIPE
# Wait until ready
t0 = time.time()
# Wait no more than these many seconds
time_out = 30
running = True
while running and time.time() - t0 < time_out:
if os.name == 'nt':
p = Popen('tasklist | find "%s"' % arg1, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=False) # depends on [control=['if'], data=[]]
else:
p = Popen('ps aux | grep %s' % arg1, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
lines = p.stdout.readlines()
running = False
for line in lines:
# this kills all java.exe and python including self in windows
if '%s' % arg2 in line or (os.name == 'nt' and '%s' % arg1 in line):
running = True
# Get pid
fields = line.strip().split()
info('Stopping %s (process number %s)' % (arg1, fields[1]))
if os.name == 'nt':
kill = 'taskkill /F /PID "%s"' % fields[1] # depends on [control=['if'], data=[]]
else:
kill = 'kill -9 %s 2> /dev/null' % fields[1]
os.system(kill) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
# Give it a little more time
time.sleep(1) # depends on [control=['while'], data=[]]
else:
pass
if running:
raise Exception('Could not stop %s: Running processes are\n%s' % (arg1, '\n'.join([l.strip() for l in lines]))) # depends on [control=['if'], data=[]] |
def erase_disk_partitions(disk_id=None, scsi_address=None,
                          service_instance=None):
    '''
    Erases the partitions on a disk.
    The disk can be specified either by the canonical name, or by the
    scsi_address.
    disk_id
        Canonical name of the disk.
        Either ``disk_id`` or ``scsi_address`` needs to be specified
        (``disk_id`` supersedes ``scsi_address``.
    scsi_address
        Scsi address of the disk.
        ``disk_id`` or ``scsi_address`` needs to be specified
        (``disk_id`` supersedes ``scsi_address``.
    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.
    .. code-block:: bash
        salt '*' vsphere.erase_disk_partitions scsi_address='vmhaba0:C0:T0:L0'
        salt '*' vsphere.erase_disk_partitions disk_id='naa.000000000000001'
    '''
    # At least one way of identifying the disk is mandatory.
    if not (disk_id or scsi_address):
        raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' '
                                 'needs to be specified')
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    if not disk_id:
        # Resolve the scsi address into a canonical disk id.
        lun_map = salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
        if scsi_address not in lun_map:
            raise VMwareObjectRetrievalError(
                'Scsi lun with address \'{0}\' was not found on host \'{1}\''
                ''.format(scsi_address, hostname))
        disk_id = lun_map[scsi_address].canonicalName
        log.trace('[%s] Got disk id \'%s\' for scsi address \'%s\'',
                  hostname, disk_id, scsi_address)
    log.trace('Erasing disk partitions on disk \'%s\' in host \'%s\'',
              disk_id, hostname)
    salt.utils.vmware.erase_disk_partitions(service_instance,
                                            host_ref, disk_id,
                                            hostname=hostname)
    log.info('Erased disk partitions on disk \'%s\' on host \'%s\'',
             disk_id, hostname)
    return True
constant[
Erases the partitions on a disk.
The disk can be specified either by the canonical name, or by the
scsi_address.
disk_id
Canonical name of the disk.
Either ``disk_id`` or ``scsi_address`` needs to be specified
(``disk_id`` supersedes ``scsi_address``.
scsi_address
Scsi address of the disk.
``disk_id`` or ``scsi_address`` needs to be specified
(``disk_id`` supersedes ``scsi_address``.
service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None.
.. code-block:: bash
salt '*' vsphere.erase_disk_partitions scsi_address='vmhaba0:C0:T0:L0'
salt '*' vsphere.erase_disk_partitions disk_id='naa.000000000000001'
]
if <ast.BoolOp object at 0x7da2041dac20> begin[:]
<ast.Raise object at 0x7da2041d80a0>
variable[host_ref] assign[=] call[name[_get_proxy_target], parameter[name[service_instance]]]
variable[hostname] assign[=] call[call[call[name[__proxy__]][constant[esxi.get_details]], parameter[]]][constant[esxi_host]]
if <ast.UnaryOp object at 0x7da2041d94e0> begin[:]
variable[scsi_address_to_lun] assign[=] call[name[salt].utils.vmware.get_scsi_address_to_lun_map, parameter[name[host_ref]]]
if compare[name[scsi_address] <ast.NotIn object at 0x7da2590d7190> name[scsi_address_to_lun]] begin[:]
<ast.Raise object at 0x7da2041d9de0>
variable[disk_id] assign[=] call[name[scsi_address_to_lun]][name[scsi_address]].canonicalName
call[name[log].trace, parameter[constant[[%s] Got disk id '%s' for scsi address '%s'], name[hostname], name[disk_id], name[scsi_address]]]
call[name[log].trace, parameter[constant[Erasing disk partitions on disk '%s' in host '%s'], name[disk_id], name[hostname]]]
call[name[salt].utils.vmware.erase_disk_partitions, parameter[name[service_instance], name[host_ref], name[disk_id]]]
call[name[log].info, parameter[constant[Erased disk partitions on disk '%s' on host '%s'], name[disk_id], name[hostname]]]
return[constant[True]] | keyword[def] identifier[erase_disk_partitions] ( identifier[disk_id] = keyword[None] , identifier[scsi_address] = keyword[None] ,
identifier[service_instance] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[disk_id] keyword[and] keyword[not] identifier[scsi_address] :
keyword[raise] identifier[ArgumentValueError] ( literal[string]
literal[string] )
identifier[host_ref] = identifier[_get_proxy_target] ( identifier[service_instance] )
identifier[hostname] = identifier[__proxy__] [ literal[string] ]()[ literal[string] ]
keyword[if] keyword[not] identifier[disk_id] :
identifier[scsi_address_to_lun] = identifier[salt] . identifier[utils] . identifier[vmware] . identifier[get_scsi_address_to_lun_map] ( identifier[host_ref] )
keyword[if] identifier[scsi_address] keyword[not] keyword[in] identifier[scsi_address_to_lun] :
keyword[raise] identifier[VMwareObjectRetrievalError] (
literal[string]
literal[string] . identifier[format] ( identifier[scsi_address] , identifier[hostname] ))
identifier[disk_id] = identifier[scsi_address_to_lun] [ identifier[scsi_address] ]. identifier[canonicalName]
identifier[log] . identifier[trace] ( literal[string] ,
identifier[hostname] , identifier[disk_id] , identifier[scsi_address] )
identifier[log] . identifier[trace] ( literal[string] ,
identifier[disk_id] , identifier[hostname] )
identifier[salt] . identifier[utils] . identifier[vmware] . identifier[erase_disk_partitions] ( identifier[service_instance] ,
identifier[host_ref] , identifier[disk_id] ,
identifier[hostname] = identifier[hostname] )
identifier[log] . identifier[info] ( literal[string] ,
identifier[disk_id] , identifier[hostname] )
keyword[return] keyword[True] | def erase_disk_partitions(disk_id=None, scsi_address=None, service_instance=None):
"""
Erases the partitions on a disk.
The disk can be specified either by the canonical name, or by the
scsi_address.
disk_id
Canonical name of the disk.
Either ``disk_id`` or ``scsi_address`` needs to be specified
(``disk_id`` supersedes ``scsi_address``.
scsi_address
Scsi address of the disk.
``disk_id`` or ``scsi_address`` needs to be specified
(``disk_id`` supersedes ``scsi_address``.
service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
Default is None.
.. code-block:: bash
salt '*' vsphere.erase_disk_partitions scsi_address='vmhaba0:C0:T0:L0'
salt '*' vsphere.erase_disk_partitions disk_id='naa.000000000000001'
"""
if not disk_id and (not scsi_address):
raise ArgumentValueError("Either 'disk_id' or 'scsi_address' needs to be specified") # depends on [control=['if'], data=[]]
host_ref = _get_proxy_target(service_instance)
hostname = __proxy__['esxi.get_details']()['esxi_host']
if not disk_id:
scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
if scsi_address not in scsi_address_to_lun:
raise VMwareObjectRetrievalError("Scsi lun with address '{0}' was not found on host '{1}'".format(scsi_address, hostname)) # depends on [control=['if'], data=['scsi_address']]
disk_id = scsi_address_to_lun[scsi_address].canonicalName
log.trace("[%s] Got disk id '%s' for scsi address '%s'", hostname, disk_id, scsi_address) # depends on [control=['if'], data=[]]
log.trace("Erasing disk partitions on disk '%s' in host '%s'", disk_id, hostname)
salt.utils.vmware.erase_disk_partitions(service_instance, host_ref, disk_id, hostname=hostname)
log.info("Erased disk partitions on disk '%s' on host '%s'", disk_id, hostname)
return True |
def accept(self):
    """Method invoked when OK button is clicked.

    Validates the output path, runs the metadata conversion, verifies
    that the result file exists, and reports success or failure to the
    user via warning message boxes / the success message bar.
    """
    output_path = self.output_path_line_edit.text()
    # An empty destination cannot be written to; warn and abort.
    if not output_path:
        display_warning_message_box(
            self,
            tr('Empty Output Path'),
            tr('Output path can not be empty'))
        return
    try:
        self.convert_metadata()
    except MetadataConversionError as e:
        # Conversion itself failed; surface the underlying reason.
        display_warning_message_box(
            self,
            tr('Metadata Conversion Failed'),
            str(e))
        return
    # Conversion raised nothing, but double-check the file landed on disk.
    if not os.path.exists(output_path):
        display_warning_message_box(
            self,
            tr('Metadata Conversion Failed'),
            tr('Result file is not found.'))
        return
    display_success_message_bar(
        tr('Metadata Conversion Success'),
        tr('You can find your copied layer with metadata version 3.5 in '
           '%s' % output_path),
        iface_object=self.iface
    )
    # Delegate to the base class to actually close the dialog.
    super(MetadataConverterDialog, self).accept()
constant[Method invoked when OK button is clicked.]
variable[output_path] assign[=] call[name[self].output_path_line_edit.text, parameter[]]
if <ast.UnaryOp object at 0x7da204345f00> begin[:]
call[name[display_warning_message_box], parameter[name[self], call[name[tr], parameter[constant[Empty Output Path]]], call[name[tr], parameter[constant[Output path can not be empty]]]]]
return[None]
<ast.Try object at 0x7da204345690>
if <ast.UnaryOp object at 0x7da204345ff0> begin[:]
call[name[display_warning_message_box], parameter[name[self], call[name[tr], parameter[constant[Metadata Conversion Failed]]], call[name[tr], parameter[constant[Result file is not found.]]]]]
return[None]
call[name[display_success_message_bar], parameter[call[name[tr], parameter[constant[Metadata Conversion Success]]], call[name[tr], parameter[binary_operation[constant[You can find your copied layer with metadata version 3.5 in %s] <ast.Mod object at 0x7da2590d6920> name[output_path]]]]]]
call[call[name[super], parameter[name[MetadataConverterDialog], name[self]]].accept, parameter[]] | keyword[def] identifier[accept] ( identifier[self] ):
literal[string]
identifier[output_path] = identifier[self] . identifier[output_path_line_edit] . identifier[text] ()
keyword[if] keyword[not] identifier[output_path] :
identifier[display_warning_message_box] (
identifier[self] ,
identifier[tr] ( literal[string] ),
identifier[tr] ( literal[string] ))
keyword[return]
keyword[try] :
identifier[self] . identifier[convert_metadata] ()
keyword[except] identifier[MetadataConversionError] keyword[as] identifier[e] :
identifier[display_warning_message_box] (
identifier[self] ,
identifier[tr] ( literal[string] ),
identifier[str] ( identifier[e] ))
keyword[return]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[output_path] ):
identifier[display_warning_message_box] (
identifier[self] ,
identifier[tr] ( literal[string] ),
identifier[tr] ( literal[string] ))
keyword[return]
identifier[display_success_message_bar] (
identifier[tr] ( literal[string] ),
identifier[tr] ( literal[string]
literal[string] % identifier[output_path] ),
identifier[iface_object] = identifier[self] . identifier[iface]
)
identifier[super] ( identifier[MetadataConverterDialog] , identifier[self] ). identifier[accept] () | def accept(self):
"""Method invoked when OK button is clicked."""
output_path = self.output_path_line_edit.text()
if not output_path:
display_warning_message_box(self, tr('Empty Output Path'), tr('Output path can not be empty'))
return # depends on [control=['if'], data=[]]
try:
self.convert_metadata() # depends on [control=['try'], data=[]]
except MetadataConversionError as e:
display_warning_message_box(self, tr('Metadata Conversion Failed'), str(e))
return # depends on [control=['except'], data=['e']]
if not os.path.exists(output_path):
display_warning_message_box(self, tr('Metadata Conversion Failed'), tr('Result file is not found.'))
return # depends on [control=['if'], data=[]]
display_success_message_bar(tr('Metadata Conversion Success'), tr('You can find your copied layer with metadata version 3.5 in %s' % output_path), iface_object=self.iface)
super(MetadataConverterDialog, self).accept() |
def get_files_from_filestore(job, files, work_dir, cache=True, docker=False):
    """
    This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.

    Returns the paths of files from the FileStore if they are not present.
    If docker=True, return the docker path for the file.
    If the file extension is tar.gz, then tar -zxvf it.

    :param job: toil job wrapper providing ``job.fileStore``
    :param dict files: keys = the name of the file to be returned in toil
        space; value = the input value for the file (can be toil temp file)
    :param str work_dir: the location where the file should be stored
    :param bool cache: whether caching should be used when reading the file
    :param bool docker: if True, rewrite the stored paths into the ``/data``
        prefixed paths visible inside a docker container
    :return: the (mutated) ``files`` dict mapping names to local paths
    """
    # Iterate over a snapshot of the keys: the loop body pops and inserts
    # entries, and mutating a dict while iterating its live keys() view
    # raises RuntimeError on Python 3.
    for name in list(files.keys()):
        outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]), cache=cache)
        # If the file pointed to a tarball, extract it to WORK_DIR
        if tarfile.is_tarfile(outfile) and file_xext(outfile).startswith('.tar'):
            untar_name = os.path.basename(strip_xext(outfile))
            files[untar_name] = untargz(outfile, work_dir)
            files.pop(name)
            name = os.path.basename(untar_name)
        # If the file is gzipped but NOT a tarfile, gunzip it to work_dir. However, the file is
        # already named x.gz so we need to write to a temporary file x.gz_temp then do a move
        # operation to overwrite x.gz.
        elif is_gzipfile(outfile) and file_xext(outfile) == '.gz':
            ungz_name = strip_xext(outfile)
            # Binary mode on the output: gzip yields bytes, and a text-mode
            # handle would reject (Python 3) or corrupt the payload.
            with gzip.open(outfile, 'rb') as gz_in, open(ungz_name, 'wb') as ungz_out:
                shutil.copyfileobj(gz_in, ungz_out)
            files[os.path.basename(ungz_name)] = outfile
            files.pop(name)
            name = os.path.basename(ungz_name)
        else:
            files[name] = outfile
        # If the files will be sent to docker, we will mount work_dir to the container as /data and
        # we want the /data prefixed path to the file
        if docker:
            files[name] = docker_path(files[name])
    return files
constant[
This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.
Returns the paths of files from the FileStore if they are not present.
If docker=True, return the docker path for the file.
If the file extension is tar.gz, then tar -zxvf it.
files is a dict with:
keys = the name of the file to be returned in toil space
value = the input value for the file (can be toil temp file)
work_dir is the location where the file should be stored
cache indiciates whether caching should be used
]
for taget[name[name]] in starred[call[name[files].keys, parameter[]]] begin[:]
variable[outfile] assign[=] call[name[job].fileStore.readGlobalFile, parameter[call[name[files]][name[name]], call[constant[/].join, parameter[list[[<ast.Name object at 0x7da207f9a410>, <ast.Name object at 0x7da207f9afe0>]]]]]]
if <ast.BoolOp object at 0x7da207f9a140> begin[:]
variable[untar_name] assign[=] call[name[os].path.basename, parameter[call[name[strip_xext], parameter[name[outfile]]]]]
call[name[files]][name[untar_name]] assign[=] call[name[untargz], parameter[name[outfile], name[work_dir]]]
call[name[files].pop, parameter[name[name]]]
variable[name] assign[=] call[name[os].path.basename, parameter[name[untar_name]]]
if name[docker] begin[:]
call[name[files]][name[name]] assign[=] call[name[docker_path], parameter[call[name[files]][name[name]]]]
return[name[files]] | keyword[def] identifier[get_files_from_filestore] ( identifier[job] , identifier[files] , identifier[work_dir] , identifier[cache] = keyword[True] , identifier[docker] = keyword[False] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[files] . identifier[keys] ():
identifier[outfile] = identifier[job] . identifier[fileStore] . identifier[readGlobalFile] ( identifier[files] [ identifier[name] ], literal[string] . identifier[join] ([ identifier[work_dir] , identifier[name] ]), identifier[cache] = identifier[cache] )
keyword[if] identifier[tarfile] . identifier[is_tarfile] ( identifier[outfile] ) keyword[and] identifier[file_xext] ( identifier[outfile] ). identifier[startswith] ( literal[string] ):
identifier[untar_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[strip_xext] ( identifier[outfile] ))
identifier[files] [ identifier[untar_name] ]= identifier[untargz] ( identifier[outfile] , identifier[work_dir] )
identifier[files] . identifier[pop] ( identifier[name] )
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[untar_name] )
keyword[elif] identifier[is_gzipfile] ( identifier[outfile] ) keyword[and] identifier[file_xext] ( identifier[outfile] )== literal[string] :
identifier[ungz_name] = identifier[strip_xext] ( identifier[outfile] )
keyword[with] identifier[gzip] . identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[gz_in] , identifier[open] ( identifier[ungz_name] , literal[string] ) keyword[as] identifier[ungz_out] :
identifier[shutil] . identifier[copyfileobj] ( identifier[gz_in] , identifier[ungz_out] )
identifier[files] [ identifier[os] . identifier[path] . identifier[basename] ( identifier[ungz_name] )]= identifier[outfile]
identifier[files] . identifier[pop] ( identifier[name] )
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[ungz_name] )
keyword[else] :
identifier[files] [ identifier[name] ]= identifier[outfile]
keyword[if] identifier[docker] :
identifier[files] [ identifier[name] ]= identifier[docker_path] ( identifier[files] [ identifier[name] ])
keyword[return] identifier[files] | def get_files_from_filestore(job, files, work_dir, cache=True, docker=False):
"""
This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.
Returns the paths of files from the FileStore if they are not present.
If docker=True, return the docker path for the file.
If the file extension is tar.gz, then tar -zxvf it.
files is a dict with:
keys = the name of the file to be returned in toil space
value = the input value for the file (can be toil temp file)
work_dir is the location where the file should be stored
cache indiciates whether caching should be used
"""
for name in files.keys():
outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]), cache=cache)
# If the file pointed to a tarball, extract it to WORK_DIR
if tarfile.is_tarfile(outfile) and file_xext(outfile).startswith('.tar'):
untar_name = os.path.basename(strip_xext(outfile))
files[untar_name] = untargz(outfile, work_dir)
files.pop(name)
name = os.path.basename(untar_name) # depends on [control=['if'], data=[]]
# If the file is gzipped but NOT a tarfile, gunzip it to work_dir. However, the file is
# already named x.gz so we need to write to a temporary file x.gz_temp then do a move
# operation to overwrite x.gz.
elif is_gzipfile(outfile) and file_xext(outfile) == '.gz':
ungz_name = strip_xext(outfile)
with gzip.open(outfile, 'rb') as gz_in, open(ungz_name, 'w') as ungz_out:
shutil.copyfileobj(gz_in, ungz_out) # depends on [control=['with'], data=['gz_in']]
files[os.path.basename(ungz_name)] = outfile
files.pop(name)
name = os.path.basename(ungz_name) # depends on [control=['if'], data=[]]
else:
files[name] = outfile
# If the files will be sent to docker, we will mount work_dir to the container as /data and
# we want the /data prefixed path to the file
if docker:
files[name] = docker_path(files[name]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
return files |
def invert_projection(self, X, identities):
    """
    Calculate the inverted projection.

    The inverted projection of a SOM is created by associating each weight
    with the input which matches it the most, thus giving a good
    approximation of the "influence" of each input item.

    Works best for symbolic (instead of continuous) input data.

    Parameters
    ----------
    X : numpy array
        Input data
    identities : list
        A list of names for each of the input data. Must be the same
        length as X.

    Returns
    -------
    m : numpy array
        An array with the same shape as the map
    """
    distances = self.transform(X)
    if len(distances) != len(identities):
        raise ValueError("X and identities are not the same length: "
                         "{0} and {1}".format(len(X), len(identities)))
    # getattr is the idiomatic spelling of __getattribute__; self.argfunc
    # names the reduction over axis 0 (presumably 'argmin' or 'argmax' --
    # confirm against the class definition).
    best_matches = getattr(distances, self.argfunc)(0)
    # Map each map node to the name of its best-matching input item.
    return np.array([identities[d] for d in best_matches])
constant[
Calculate the inverted projection.
The inverted projectio of a SOM is created by association each weight
with the input which matches it the most, thus giving a good
approximation of the "influence" of each input item.
Works best for symbolic (instead of continuous) input data.
Parameters
----------
X : numpy array
Input data
identities : list
A list of names for each of the input data. Must be the same
length as X.
Returns
-------
m : numpy array
An array with the same shape as the map
]
variable[distances] assign[=] call[name[self].transform, parameter[name[X]]]
if compare[call[name[len], parameter[name[distances]]] not_equal[!=] call[name[len], parameter[name[identities]]]] begin[:]
<ast.Raise object at 0x7da20c7c9c00>
variable[node_match] assign[=] list[[]]
for taget[name[d]] in starred[call[call[name[distances].__getattribute__, parameter[name[self].argfunc]], parameter[constant[0]]]] begin[:]
call[name[node_match].append, parameter[call[name[identities]][name[d]]]]
return[call[name[np].array, parameter[name[node_match]]]] | keyword[def] identifier[invert_projection] ( identifier[self] , identifier[X] , identifier[identities] ):
literal[string]
identifier[distances] = identifier[self] . identifier[transform] ( identifier[X] )
keyword[if] identifier[len] ( identifier[distances] )!= identifier[len] ( identifier[identities] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[X] ), identifier[len] ( identifier[identities] )))
identifier[node_match] =[]
keyword[for] identifier[d] keyword[in] identifier[distances] . identifier[__getattribute__] ( identifier[self] . identifier[argfunc] )( literal[int] ):
identifier[node_match] . identifier[append] ( identifier[identities] [ identifier[d] ])
keyword[return] identifier[np] . identifier[array] ( identifier[node_match] ) | def invert_projection(self, X, identities):
"""
Calculate the inverted projection.
The inverted projectio of a SOM is created by association each weight
with the input which matches it the most, thus giving a good
approximation of the "influence" of each input item.
Works best for symbolic (instead of continuous) input data.
Parameters
----------
X : numpy array
Input data
identities : list
A list of names for each of the input data. Must be the same
length as X.
Returns
-------
m : numpy array
An array with the same shape as the map
"""
distances = self.transform(X)
if len(distances) != len(identities):
raise ValueError('X and identities are not the same length: {0} and {1}'.format(len(X), len(identities))) # depends on [control=['if'], data=[]]
node_match = []
for d in distances.__getattribute__(self.argfunc)(0):
node_match.append(identities[d]) # depends on [control=['for'], data=['d']]
return np.array(node_match) |
def has_public_constructor(class_):
    """if class has any public constructor, this function will return list of
    them, otherwise None"""
    declaration = class_traits.get_declaration(class_)
    # Non-copy constructors with public access, looked up on this class
    # only (no recursion into bases); an empty result is tolerated.
    constructors = declaration.constructors(
        lambda ctor: not is_copy_constructor(ctor)
        and ctor.access_type == 'public',
        recursive=False,
        allow_empty=True)
    return constructors if constructors else None
constant[if class has any public constructor, this function will return list of
them, otherwise None]
variable[class_] assign[=] call[name[class_traits].get_declaration, parameter[name[class_]]]
variable[decls] assign[=] call[name[class_].constructors, parameter[<ast.Lambda object at 0x7da204622c50>]]
if name[decls] begin[:]
return[name[decls]] | keyword[def] identifier[has_public_constructor] ( identifier[class_] ):
literal[string]
identifier[class_] = identifier[class_traits] . identifier[get_declaration] ( identifier[class_] )
identifier[decls] = identifier[class_] . identifier[constructors] (
keyword[lambda] identifier[c] : keyword[not] identifier[is_copy_constructor] ( identifier[c] ) keyword[and] identifier[c] . identifier[access_type] == literal[string] ,
identifier[recursive] = keyword[False] ,
identifier[allow_empty] = keyword[True] )
keyword[if] identifier[decls] :
keyword[return] identifier[decls] | def has_public_constructor(class_):
"""if class has any public constructor, this function will return list of
them, otherwise None"""
class_ = class_traits.get_declaration(class_)
decls = class_.constructors(lambda c: not is_copy_constructor(c) and c.access_type == 'public', recursive=False, allow_empty=True)
if decls:
return decls # depends on [control=['if'], data=[]] |
def emit(self, record):
    """Emit record after checking if message triggers later sending of e-mail."""
    threshold = self.triggerLevelNo
    # A record at or above the trigger level marks the buffer for mailing.
    if threshold is not None and record.levelno >= threshold:
        self.triggered = True
    logging.handlers.BufferingHandler.emit(self, record)
constant[Emit record after checking if message triggers later sending of e-mail.]
if <ast.BoolOp object at 0x7da1b2677880> begin[:]
name[self].triggered assign[=] constant[True]
call[name[logging].handlers.BufferingHandler.emit, parameter[name[self], name[record]]] | keyword[def] identifier[emit] ( identifier[self] , identifier[record] ):
literal[string]
keyword[if] identifier[self] . identifier[triggerLevelNo] keyword[is] keyword[not] keyword[None] keyword[and] identifier[record] . identifier[levelno] >= identifier[self] . identifier[triggerLevelNo] :
identifier[self] . identifier[triggered] = keyword[True]
identifier[logging] . identifier[handlers] . identifier[BufferingHandler] . identifier[emit] ( identifier[self] , identifier[record] ) | def emit(self, record):
"""Emit record after checking if message triggers later sending of e-mail."""
if self.triggerLevelNo is not None and record.levelno >= self.triggerLevelNo:
self.triggered = True # depends on [control=['if'], data=[]]
logging.handlers.BufferingHandler.emit(self, record) |
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args[1] is the directory to scan ('.' means the current working
    directory).  For each ``*.spm`` tarball found, the embedded FORMULA
    file is read and its metadata recorded; when several builds of the
    same package are present only the newest (by version, then release)
    is kept.  Superseded files are then ignored, archived, or deleted
    according to the ``spm_repo_dups`` option.

    Raises SPMInvocationError when no directory argument was given.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')
    if args[1] == '.':
        # NOTE(review): os.getcwdu() is Python-2 only (removed in Python 3,
        # where os.getcwd() already returns text) — confirm target runtime.
        repo_path = os.getcwdu()
    else:
        repo_path = args[1]
    # Files superseded by a newer version/release, handled after the scan.
    old_files = []
    # Maps package name -> {'info': FORMULA data, 'filename': spm file}.
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Filename layout is <name>-<version>-<release>.spm; the name
            # itself may contain dashes, so drop only the last two parts.
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file
    # Write the collected metadata for every kept package to SPM-METADATA.
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )
    log.debug('Wrote %s', metadata_filename)
    # Dispose of superseded packages per the spm_repo_dups option.
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
pass | def function[_create_repo, parameter[self, args]]:
constant[
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
]
if compare[call[name[len], parameter[name[args]]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da18c4ceaa0>
if compare[call[name[args]][constant[1]] equal[==] constant[.]] begin[:]
variable[repo_path] assign[=] call[name[os].getcwdu, parameter[]]
variable[old_files] assign[=] list[[]]
variable[repo_metadata] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18c4cc670>, <ast.Name object at 0x7da18c4cf250>, <ast.Name object at 0x7da18c4ce380>]]] in starred[call[name[salt].utils.path.os_walk, parameter[name[repo_path]]]] begin[:]
for taget[name[spm_file]] in starred[name[filenames]] begin[:]
if <ast.UnaryOp object at 0x7da18c4cf2b0> begin[:]
continue
variable[spm_path] assign[=] call[constant[{0}/{1}].format, parameter[name[repo_path], name[spm_file]]]
if <ast.UnaryOp object at 0x7da18c4cd060> begin[:]
continue
variable[comps] assign[=] call[name[spm_file].split, parameter[constant[-]]]
variable[spm_name] assign[=] call[constant[-].join, parameter[call[name[comps]][<ast.Slice object at 0x7da18c4ceda0>]]]
variable[spm_fh] assign[=] call[name[tarfile].open, parameter[name[spm_path], constant[r:bz2]]]
variable[formula_handle] assign[=] call[name[spm_fh].extractfile, parameter[call[constant[{0}/FORMULA].format, parameter[name[spm_name]]]]]
variable[formula_conf] assign[=] call[name[salt].utils.yaml.safe_load, parameter[call[name[formula_handle].read, parameter[]]]]
variable[use_formula] assign[=] constant[True]
if compare[name[spm_name] in name[repo_metadata]] begin[:]
variable[cur_info] assign[=] call[call[name[repo_metadata]][name[spm_name]]][constant[info]]
variable[new_info] assign[=] name[formula_conf]
if compare[call[name[int], parameter[call[name[new_info]][constant[version]]]] equal[==] call[name[int], parameter[call[name[cur_info]][constant[version]]]]] begin[:]
if compare[call[name[int], parameter[call[name[new_info]][constant[release]]]] less[<] call[name[int], parameter[call[name[cur_info]][constant[release]]]]] begin[:]
variable[use_formula] assign[=] constant[False]
if compare[name[use_formula] is constant[True]] begin[:]
call[name[log].debug, parameter[constant[%s %s-%s had been added, but %s-%s will replace it], name[spm_name], call[name[cur_info]][constant[version]], call[name[cur_info]][constant[release]], call[name[new_info]][constant[version]], call[name[new_info]][constant[release]]]]
call[name[old_files].append, parameter[call[call[name[repo_metadata]][name[spm_name]]][constant[filename]]]]
if compare[name[use_formula] is constant[True]] begin[:]
call[name[log].debug, parameter[constant[adding %s-%s-%s to the repo], call[name[formula_conf]][constant[name]], call[name[formula_conf]][constant[version]], call[name[formula_conf]][constant[release]]]]
call[name[repo_metadata]][name[spm_name]] assign[=] dictionary[[<ast.Constant object at 0x7da204621300>], [<ast.Call object at 0x7da204621060>]]
call[call[name[repo_metadata]][name[spm_name]]][constant[filename]] assign[=] name[spm_file]
variable[metadata_filename] assign[=] call[constant[{0}/SPM-METADATA].format, parameter[name[repo_path]]]
with call[name[salt].utils.files.fopen, parameter[name[metadata_filename], constant[w]]] begin[:]
call[name[salt].utils.yaml.safe_dump, parameter[name[repo_metadata], name[mfh]]]
call[name[log].debug, parameter[constant[Wrote %s], name[metadata_filename]]]
for taget[name[file_]] in starred[name[old_files]] begin[:]
if compare[call[name[self].opts][constant[spm_repo_dups]] equal[==] constant[ignore]] begin[:]
call[name[log].debug, parameter[constant[%s will be left in the directory], name[file_]]] | keyword[def] identifier[_create_repo] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
keyword[raise] identifier[SPMInvocationError] ( literal[string] )
keyword[if] identifier[args] [ literal[int] ]== literal[string] :
identifier[repo_path] = identifier[os] . identifier[getcwdu] ()
keyword[else] :
identifier[repo_path] = identifier[args] [ literal[int] ]
identifier[old_files] =[]
identifier[repo_metadata] ={}
keyword[for] ( identifier[dirpath] , identifier[dirnames] , identifier[filenames] ) keyword[in] identifier[salt] . identifier[utils] . identifier[path] . identifier[os_walk] ( identifier[repo_path] ):
keyword[for] identifier[spm_file] keyword[in] identifier[filenames] :
keyword[if] keyword[not] identifier[spm_file] . identifier[endswith] ( literal[string] ):
keyword[continue]
identifier[spm_path] = literal[string] . identifier[format] ( identifier[repo_path] , identifier[spm_file] )
keyword[if] keyword[not] identifier[tarfile] . identifier[is_tarfile] ( identifier[spm_path] ):
keyword[continue]
identifier[comps] = identifier[spm_file] . identifier[split] ( literal[string] )
identifier[spm_name] = literal[string] . identifier[join] ( identifier[comps] [:- literal[int] ])
identifier[spm_fh] = identifier[tarfile] . identifier[open] ( identifier[spm_path] , literal[string] )
identifier[formula_handle] = identifier[spm_fh] . identifier[extractfile] ( literal[string] . identifier[format] ( identifier[spm_name] ))
identifier[formula_conf] = identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_load] ( identifier[formula_handle] . identifier[read] ())
identifier[use_formula] = keyword[True]
keyword[if] identifier[spm_name] keyword[in] identifier[repo_metadata] :
identifier[cur_info] = identifier[repo_metadata] [ identifier[spm_name] ][ literal[string] ]
identifier[new_info] = identifier[formula_conf]
keyword[if] identifier[int] ( identifier[new_info] [ literal[string] ])== identifier[int] ( identifier[cur_info] [ literal[string] ]):
keyword[if] identifier[int] ( identifier[new_info] [ literal[string] ])< identifier[int] ( identifier[cur_info] [ literal[string] ]):
identifier[use_formula] = keyword[False]
keyword[elif] identifier[int] ( identifier[new_info] [ literal[string] ])< identifier[int] ( identifier[cur_info] [ literal[string] ]):
identifier[use_formula] = keyword[False]
keyword[if] identifier[use_formula] keyword[is] keyword[True] :
identifier[log] . identifier[debug] (
literal[string] ,
identifier[spm_name] , identifier[cur_info] [ literal[string] ], identifier[cur_info] [ literal[string] ],
identifier[new_info] [ literal[string] ], identifier[new_info] [ literal[string] ]
)
identifier[old_files] . identifier[append] ( identifier[repo_metadata] [ identifier[spm_name] ][ literal[string] ])
keyword[else] :
identifier[log] . identifier[debug] (
literal[string] ,
identifier[spm_name] , identifier[new_info] [ literal[string] ], identifier[new_info] [ literal[string] ],
identifier[cur_info] [ literal[string] ], identifier[cur_info] [ literal[string] ]
)
identifier[old_files] . identifier[append] ( identifier[spm_file] )
keyword[if] identifier[use_formula] keyword[is] keyword[True] :
identifier[log] . identifier[debug] (
literal[string] ,
identifier[formula_conf] [ literal[string] ], identifier[formula_conf] [ literal[string] ],
identifier[formula_conf] [ literal[string] ]
)
identifier[repo_metadata] [ identifier[spm_name] ]={
literal[string] : identifier[formula_conf] . identifier[copy] (),
}
identifier[repo_metadata] [ identifier[spm_name] ][ literal[string] ]= identifier[spm_file]
identifier[metadata_filename] = literal[string] . identifier[format] ( identifier[repo_path] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[metadata_filename] , literal[string] ) keyword[as] identifier[mfh] :
identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_dump] (
identifier[repo_metadata] ,
identifier[mfh] ,
identifier[indent] = literal[int] ,
identifier[canonical] = keyword[False] ,
identifier[default_flow_style] = keyword[False] ,
)
identifier[log] . identifier[debug] ( literal[string] , identifier[metadata_filename] )
keyword[for] identifier[file_] keyword[in] identifier[old_files] :
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]== literal[string] :
identifier[log] . identifier[debug] ( literal[string] , identifier[file_] )
keyword[elif] identifier[self] . identifier[opts] [ literal[string] ]== literal[string] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] , identifier[file_] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[error] ( literal[string] )
keyword[try] :
identifier[shutil] . identifier[move] ( identifier[file_] , literal[string] )
keyword[except] ( identifier[IOError] , identifier[OSError] ):
identifier[log] . identifier[error] ( literal[string] , identifier[file_] )
keyword[elif] identifier[self] . identifier[opts] [ literal[string] ]== literal[string] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[file_] )
identifier[log] . identifier[debug] ( literal[string] , identifier[file_] )
keyword[except] identifier[IOError] :
identifier[log] . identifier[error] ( literal[string] , identifier[file_] )
keyword[except] identifier[OSError] :
keyword[pass] | def _create_repo(self, args):
"""
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
"""
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified') # depends on [control=['if'], data=[]]
if args[1] == '.':
repo_path = os.getcwdu() # depends on [control=['if'], data=[]]
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue # depends on [control=['if'], data=[]]
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue # depends on [control=['if'], data=[]]
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False # depends on [control=['if'], data=[]]
if use_formula is True:
# Ignore/archive/delete the old version
log.debug('%s %s-%s had been added, but %s-%s will replace it', spm_name, cur_info['version'], cur_info['release'], new_info['version'], new_info['release'])
old_files.append(repo_metadata[spm_name]['filename']) # depends on [control=['if'], data=[]]
else:
# Ignore/archive/delete the new version
log.debug('%s %s-%s has been found, but is older than %s-%s', spm_name, new_info['version'], new_info['release'], cur_info['version'], cur_info['release'])
old_files.append(spm_file) # depends on [control=['if'], data=['spm_name', 'repo_metadata']]
if use_formula is True:
log.debug('adding %s-%s-%s to the repo', formula_conf['name'], formula_conf['version'], formula_conf['release'])
repo_metadata[spm_name] = {'info': formula_conf.copy()}
repo_metadata[spm_name]['filename'] = spm_file # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['spm_file']] # depends on [control=['for'], data=[]]
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(repo_metadata, mfh, indent=4, canonical=False, default_flow_style=False) # depends on [control=['with'], data=['mfh']]
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_) # depends on [control=['if'], data=[]]
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_) # depends on [control=['try'], data=[]]
except IOError:
log.error('Unable to create archive directory') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
try:
shutil.move(file_, './archive') # depends on [control=['try'], data=[]]
except (IOError, OSError):
log.error('Unable to archive %s', file_) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_) # depends on [control=['try'], data=[]]
except IOError:
log.error('Unable to delete %s', file_) # depends on [control=['except'], data=[]]
except OSError:
# The file has already been deleted
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_']] |
def load(self, val, **kwargs):
    """
    Load the file contents into the supplied pandas dataframe or
    HoloViews Table. This allows a selection to be made over the
    metadata before loading the file contents (may be slow).

    Dispatches on the type of *val*; the Table/DataFrame guards also
    skip a branch when the corresponding library is unavailable.
    """
    if Table and isinstance(val, Table):
        return self.load_table(val, **kwargs)
    if DataFrame and isinstance(val, DataFrame):
        return self.load_dframe(val, **kwargs)
    raise Exception("Type %s not a DataFrame or Table." % type(val))
constant[
Load the file contents into the supplied pandas dataframe or
HoloViews Table. This allows a selection to be made over the
metadata before loading the file contents (may be slow).
]
if <ast.BoolOp object at 0x7da1afe0e770> begin[:]
return[call[name[self].load_table, parameter[name[val]]]] | keyword[def] identifier[load] ( identifier[self] , identifier[val] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[Table] keyword[and] identifier[isinstance] ( identifier[val] , identifier[Table] ):
keyword[return] identifier[self] . identifier[load_table] ( identifier[val] ,** identifier[kwargs] )
keyword[elif] identifier[DataFrame] keyword[and] identifier[isinstance] ( identifier[val] , identifier[DataFrame] ):
keyword[return] identifier[self] . identifier[load_dframe] ( identifier[val] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[type] ( identifier[val] )) | def load(self, val, **kwargs):
"""
Load the file contents into the supplied pandas dataframe or
HoloViews Table. This allows a selection to be made over the
metadata before loading the file contents (may be slow).
"""
if Table and isinstance(val, Table):
return self.load_table(val, **kwargs) # depends on [control=['if'], data=[]]
elif DataFrame and isinstance(val, DataFrame):
return self.load_dframe(val, **kwargs) # depends on [control=['if'], data=[]]
else:
raise Exception('Type %s not a DataFrame or Table.' % type(val)) |
def _check_input_files(nspc, parser):
"""check filename args. otherwise if one of the 3 filenames is bad
it's hard to tell which one"""
if not len(nspc.filenames) == 3:
parser.print_help()
msg = """
3 Expected files; Expected content: study population association",
{} Actual files: {}""".format(len(nspc.filenames), ' '.join(nspc.filenames))
raise Exception(msg)
for fin in nspc.filenames:
if not os.path.exists(fin):
return "*{}* does not exist".format(fin)
return False | def function[_check_input_files, parameter[nspc, parser]]:
constant[check filename args. otherwise if one of the 3 filenames is bad
it's hard to tell which one]
if <ast.UnaryOp object at 0x7da18bc70430> begin[:]
call[name[parser].print_help, parameter[]]
variable[msg] assign[=] call[constant[
3 Expected files; Expected content: study population association",
{} Actual files: {}].format, parameter[call[name[len], parameter[name[nspc].filenames]], call[constant[ ].join, parameter[name[nspc].filenames]]]]
<ast.Raise object at 0x7da18bc737c0>
for taget[name[fin]] in starred[name[nspc].filenames] begin[:]
if <ast.UnaryOp object at 0x7da18bc72920> begin[:]
return[call[constant[*{}* does not exist].format, parameter[name[fin]]]]
return[constant[False]] | keyword[def] identifier[_check_input_files] ( identifier[nspc] , identifier[parser] ):
literal[string]
keyword[if] keyword[not] identifier[len] ( identifier[nspc] . identifier[filenames] )== literal[int] :
identifier[parser] . identifier[print_help] ()
identifier[msg] = literal[string] . identifier[format] ( identifier[len] ( identifier[nspc] . identifier[filenames] ), literal[string] . identifier[join] ( identifier[nspc] . identifier[filenames] ))
keyword[raise] identifier[Exception] ( identifier[msg] )
keyword[for] identifier[fin] keyword[in] identifier[nspc] . identifier[filenames] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[fin] ):
keyword[return] literal[string] . identifier[format] ( identifier[fin] )
keyword[return] keyword[False] | def _check_input_files(nspc, parser):
"""check filename args. otherwise if one of the 3 filenames is bad
it's hard to tell which one"""
if not len(nspc.filenames) == 3:
parser.print_help()
msg = '\n 3 Expected files; Expected content: study population association",\n {} Actual files: {}'.format(len(nspc.filenames), ' '.join(nspc.filenames))
raise Exception(msg) # depends on [control=['if'], data=[]]
for fin in nspc.filenames:
if not os.path.exists(fin):
return '*{}* does not exist'.format(fin) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fin']]
return False |
def set(self, property_dict):
    """Attempts to set the given properties of the object.
    An example of this is setting the nickname of the object::
        cdb.set({"nickname": "My new nickname"})
    note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
    """
    # Push the update to the backing store, then cache the server's view
    # of the object as the new local metadata.
    response = self.db.update(self.path, property_dict)
    self.metadata = response.json()
constant[Attempts to set the given properties of the object.
An example of this is setting the nickname of the object::
cdb.set({"nickname": "My new nickname"})
note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
]
name[self].metadata assign[=] call[call[name[self].db.update, parameter[name[self].path, name[property_dict]]].json, parameter[]] | keyword[def] identifier[set] ( identifier[self] , identifier[property_dict] ):
literal[string]
identifier[self] . identifier[metadata] = identifier[self] . identifier[db] . identifier[update] ( identifier[self] . identifier[path] , identifier[property_dict] ). identifier[json] () | def set(self, property_dict):
"""Attempts to set the given properties of the object.
An example of this is setting the nickname of the object::
cdb.set({"nickname": "My new nickname"})
note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
"""
self.metadata = self.db.update(self.path, property_dict).json() |
def required_length(nmin, nmax):
    """
    For use with argparse's action argument. Allows setting a range for nargs.
    Example: nargs='+', action=required_length(2, 3)
    :param int nmin: Minimum number of arguments
    :param int nmax: Maximum number of arguments
    :return: RequiredLength object
    """
    class RequiredLength(argparse.Action):
        # Closes over nmin/nmax from the factory call.
        def __call__(self, parser, args, values, option_string=None):
            count = len(values)
            if count < nmin or count > nmax:
                msg = 'argument "{f}" requires between {nmin} and {nmax} arguments'.format(
                    f=self.dest, nmin=nmin, nmax=nmax)
                raise argparse.ArgumentTypeError(msg)
            setattr(args, self.dest, values)
    return RequiredLength
constant[
For use with argparse's action argument. Allows setting a range for nargs.
Example: nargs='+', action=required_length(2, 3)
:param int nmin: Minimum number of arguments
:param int nmax: Maximum number of arguments
:return: RequiredLength object
]
class class[RequiredLength, parameter[]] begin[:]
def function[__call__, parameter[self, parser, args, values, option_string]]:
if <ast.UnaryOp object at 0x7da2046234c0> begin[:]
variable[msg] assign[=] call[constant[argument "{f}" requires between {nmin} and {nmax} arguments].format, parameter[]]
<ast.Raise object at 0x7da204622080>
call[name[setattr], parameter[name[args], name[self].dest, name[values]]]
return[name[RequiredLength]] | keyword[def] identifier[required_length] ( identifier[nmin] , identifier[nmax] ):
literal[string]
keyword[class] identifier[RequiredLength] ( identifier[argparse] . identifier[Action] ):
keyword[def] identifier[__call__] ( identifier[self] , identifier[parser] , identifier[args] , identifier[values] , identifier[option_string] = keyword[None] ):
keyword[if] keyword[not] identifier[nmin] <= identifier[len] ( identifier[values] )<= identifier[nmax] :
identifier[msg] = literal[string] . identifier[format] (
identifier[f] = identifier[self] . identifier[dest] , identifier[nmin] = identifier[nmin] , identifier[nmax] = identifier[nmax] )
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( identifier[msg] )
identifier[setattr] ( identifier[args] , identifier[self] . identifier[dest] , identifier[values] )
keyword[return] identifier[RequiredLength] | def required_length(nmin, nmax):
"""
For use with argparse's action argument. Allows setting a range for nargs.
Example: nargs='+', action=required_length(2, 3)
:param int nmin: Minimum number of arguments
:param int nmax: Maximum number of arguments
:return: RequiredLength object
"""
class RequiredLength(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if not nmin <= len(values) <= nmax:
msg = 'argument "{f}" requires between {nmin} and {nmax} arguments'.format(f=self.dest, nmin=nmin, nmax=nmax)
raise argparse.ArgumentTypeError(msg) # depends on [control=['if'], data=[]]
setattr(args, self.dest, values)
return RequiredLength |
def grant_authority(self, column=None, value=None, **kwargs):
    """Many-to-many table connecting grants and authority."""
    # Delegate to the generic resolver against the join table.
    table = 'GIC_GRANT_AUTH'
    return self._resolve_call(table, column, value, **kwargs)
constant[Many-to-many table connecting grants and authority.]
return[call[name[self]._resolve_call, parameter[constant[GIC_GRANT_AUTH], name[column], name[value]]]] | keyword[def] identifier[grant_authority] ( identifier[self] , identifier[column] = keyword[None] , identifier[value] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_resolve_call] ( literal[string] , identifier[column] , identifier[value] ,** identifier[kwargs] ) | def grant_authority(self, column=None, value=None, **kwargs):
"""Many-to-many table connecting grants and authority."""
return self._resolve_call('GIC_GRANT_AUTH', column, value, **kwargs) |
def read_meta(self, **kwargs):
    '''
    Read only the annotation of the FCS file (without reading DATA segment).
    It's advised not to use this method, but instead to access
    the meta data through the FCMeasurement.meta attribute.
    '''
    # Propagate the channel_naming choice here (rather than in read_data)
    # because self.readdata_kwargs is forwarded verbatim as **kwargs to
    # read_data, so this is the one place it can be merged for a
    # meta-only parse.
    opts = dict(kwargs)
    if 'channel_naming' in self.readdata_kwargs:
        opts['channel_naming'] = self.readdata_kwargs['channel_naming']
    return parse_fcs(self.datafile,
                     reformat_meta=True,
                     meta_data_only=True, **opts)
constant[
Read only the annotation of the FCS file (without reading DATA segment).
It's advised not to use this method, but instead to access
the meta data through the FCMeasurement.meta attribute.
]
if compare[constant[channel_naming] in name[self].readdata_kwargs] begin[:]
call[name[kwargs]][constant[channel_naming]] assign[=] call[name[self].readdata_kwargs][constant[channel_naming]]
variable[meta] assign[=] call[name[parse_fcs], parameter[name[self].datafile]]
return[name[meta]] | keyword[def] identifier[read_meta] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[readdata_kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[readdata_kwargs] [ literal[string] ]
identifier[meta] = identifier[parse_fcs] ( identifier[self] . identifier[datafile] ,
identifier[reformat_meta] = keyword[True] ,
identifier[meta_data_only] = keyword[True] ,** identifier[kwargs] )
keyword[return] identifier[meta] | def read_meta(self, **kwargs):
"""
Read only the annotation of the FCS file (without reading DATA segment).
It's advised not to use this method, but instead to access
the meta data through the FCMeasurement.meta attribute.
"""
# TODO Try to rewrite the code to be more logical
# The reason the equivalent statement is not in the read_data method
# above is because self.readdata_kwargs are passed
# as **kwargs to the read_data function.
if 'channel_naming' in self.readdata_kwargs:
kwargs['channel_naming'] = self.readdata_kwargs['channel_naming'] # depends on [control=['if'], data=[]]
meta = parse_fcs(self.datafile, reformat_meta=True, meta_data_only=True, **kwargs)
return meta |
def process_all_json_files(build_dir):
    """Return a list of pages to index.

    Walks *build_dir* for Sphinx ``*.fjson`` output files, skipping the
    generated search/index pages, and runs each through ``process_file``.
    Pages whose processing fails are silently skipped so one bad file
    does not abort the whole index build.

    :param build_dir: root directory of the Sphinx JSON build output
    :return: list of results produced by ``process_file``
    """
    # Generated index pages that must not be indexed themselves.
    skip = {'search.fjson', 'genindex.fjson', 'py-modindex.fjson'}
    html_files = []
    for root, _, files in os.walk(build_dir):
        for filename in fnmatch.filter(files, '*.fjson'):
            if filename in skip:
                continue
            html_files.append(os.path.join(root, filename))
    page_list = []
    for filename in html_files:
        # process_file may raise on malformed/unexpected content; only
        # Exception is caught (not a bare except) so KeyboardInterrupt
        # and SystemExit still propagate.
        try:
            result = process_file(filename)
        except Exception:  # noqa
            continue
        if result:
            page_list.append(result)
    return page_list
constant[Return a list of pages to index]
variable[html_files] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b16aabc0>, <ast.Name object at 0x7da1b16a9e10>, <ast.Name object at 0x7da1b16a8580>]]] in starred[call[name[os].walk, parameter[name[build_dir]]]] begin[:]
for taget[name[filename]] in starred[call[name[fnmatch].filter, parameter[name[files], constant[*.fjson]]]] begin[:]
if compare[name[filename] in list[[<ast.Constant object at 0x7da1b16ab160>, <ast.Constant object at 0x7da1b16aa710>, <ast.Constant object at 0x7da1b16abaf0>]]] begin[:]
continue
call[name[html_files].append, parameter[call[name[os].path.join, parameter[name[root], name[filename]]]]]
variable[page_list] assign[=] list[[]]
for taget[name[filename]] in starred[name[html_files]] begin[:]
<ast.Try object at 0x7da1b16a8310>
return[name[page_list]] | keyword[def] identifier[process_all_json_files] ( identifier[build_dir] ):
literal[string]
identifier[html_files] =[]
keyword[for] identifier[root] , identifier[_] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[build_dir] ):
keyword[for] identifier[filename] keyword[in] identifier[fnmatch] . identifier[filter] ( identifier[files] , literal[string] ):
keyword[if] identifier[filename] keyword[in] [ literal[string] , literal[string] ,
literal[string] ]:
keyword[continue]
identifier[html_files] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] ))
identifier[page_list] =[]
keyword[for] identifier[filename] keyword[in] identifier[html_files] :
keyword[try] :
identifier[result] = identifier[process_file] ( identifier[filename] )
keyword[if] identifier[result] :
identifier[page_list] . identifier[append] ( identifier[result] )
keyword[except] :
keyword[pass]
keyword[return] identifier[page_list] | def process_all_json_files(build_dir):
"""Return a list of pages to index"""
html_files = []
for (root, _, files) in os.walk(build_dir):
for filename in fnmatch.filter(files, '*.fjson'):
if filename in ['search.fjson', 'genindex.fjson', 'py-modindex.fjson']:
continue # depends on [control=['if'], data=[]]
html_files.append(os.path.join(root, filename)) # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]]
page_list = []
for filename in html_files:
try:
result = process_file(filename)
if result:
page_list.append(result) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
# we're unsure which exceptions can be raised
except: # noqa
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['filename']]
return page_list |
async def stop(self):
        """Stop the current task process.
        Starts with SIGTERM, gives the process 1 second to terminate, then kills it
        """
        # negate pid so that signals apply to process group
        pgid = -self.process.pid
        try:
            os.kill(pgid, signal.SIGTERM)
            # Grace period: give the group a chance to exit cleanly.
            await asyncio.sleep(1)
            # Escalate: force-kill anything in the group that survived SIGTERM.
            os.kill(pgid, signal.SIGKILL)
        except (OSError, ProcessLookupError):
            # The process group is already gone -- nothing left to signal.
            return
literal[string]
identifier[pgid] =- identifier[self] . identifier[process] . identifier[pid]
keyword[try] :
identifier[os] . identifier[kill] ( identifier[pgid] , identifier[signal] . identifier[SIGTERM] )
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
identifier[os] . identifier[kill] ( identifier[pgid] , identifier[signal] . identifier[SIGKILL] )
keyword[except] ( identifier[OSError] , identifier[ProcessLookupError] ):
keyword[return] | async def stop(self):
"""Stop the current task process.
Starts with SIGTERM, gives the process 1 second to terminate, then kills it
"""
# negate pid so that signals apply to process group
pgid = -self.process.pid
try:
os.kill(pgid, signal.SIGTERM)
await asyncio.sleep(1)
os.kill(pgid, signal.SIGKILL) # depends on [control=['try'], data=[]]
except (OSError, ProcessLookupError):
return # depends on [control=['except'], data=[]] |
def get_requires_for_build_sdist(config_settings):
    """Invoke the optional ``get_requires_for_build_sdist`` hook (PEP 517).

    :param config_settings: backend-specific configuration dictionary,
        passed through to the hook unchanged.
    :returns: whatever the backend hook returns, or ``[]`` when the
        backend does not define the hook.
    """
    # getattr with a default replaces the try/except AttributeError dance;
    # a backend without the hook contributes no extra requirements.
    hook = getattr(_build_backend(), 'get_requires_for_build_sdist', None)
    if hook is None:
        return []
    return hook(config_settings)
constant[Invoke the optional get_requires_for_build_wheel hook
Returns [] if the hook is not defined.
]
variable[backend] assign[=] call[name[_build_backend], parameter[]]
<ast.Try object at 0x7da2041d99f0> | keyword[def] identifier[get_requires_for_build_sdist] ( identifier[config_settings] ):
literal[string]
identifier[backend] = identifier[_build_backend] ()
keyword[try] :
identifier[hook] = identifier[backend] . identifier[get_requires_for_build_sdist]
keyword[except] identifier[AttributeError] :
keyword[return] []
keyword[else] :
keyword[return] identifier[hook] ( identifier[config_settings] ) | def get_requires_for_build_sdist(config_settings):
"""Invoke the optional get_requires_for_build_wheel hook
Returns [] if the hook is not defined.
"""
backend = _build_backend()
try:
hook = backend.get_requires_for_build_sdist # depends on [control=['try'], data=[]]
except AttributeError:
return [] # depends on [control=['except'], data=[]]
else:
return hook(config_settings) |
def end(self):
        """
        Ends the response. Useful for quickly ending connection with no data
        sent
        """
        # Order matters: headers must be emitted before the (empty) body
        # write and the EOF marker.
        self.send_headers()
        self.write()
        self.write_eof()
        # Record that the response is complete.
        self.has_ended = True
constant[
Ends the response. Useful for quickly ending connection with no data
sent
]
call[name[self].send_headers, parameter[]]
call[name[self].write, parameter[]]
call[name[self].write_eof, parameter[]]
name[self].has_ended assign[=] constant[True] | keyword[def] identifier[end] ( identifier[self] ):
literal[string]
identifier[self] . identifier[send_headers] ()
identifier[self] . identifier[write] ()
identifier[self] . identifier[write_eof] ()
identifier[self] . identifier[has_ended] = keyword[True] | def end(self):
"""
Ends the response. Useful for quickly ending connection with no data
sent
"""
self.send_headers()
self.write()
self.write_eof()
self.has_ended = True |
def convert_to_mosek(sdp):
    """Convert an SDP relaxation to a MOSEK task.
    :param sdp: The SDP relaxation to convert.
    :type sdp: :class:`ncpol2sdpa.sdp`.
    :returns: :class:`mosek.Task`.
    """
    import mosek
    # Cheat when variables are complex and convert with PICOS
    if sdp.complex_matrix:
        from .picos_utils import convert_to_picos
        # PICOS realifies the complex problem; reuse its MOSEK instance.
        Problem = convert_to_picos(sdp).to_real()
        Problem._make_mosek_instance()
        task = Problem.msk_task
        if sdp.verbose > 0:
            task.set_Stream(mosek.streamtype.log, streamprinter)
        return task
    # Sparse (row, col, value) triplets for the objective matrix (barc*)
    # and for each constraint matrix (bara*).
    barci, barcj, barcval, barai, baraj, baraval = \
        convert_to_mosek_matrix(sdp)
    # One fixed (equality) constraint per SDP variable; both bounds are the
    # negated objective coefficients.
    bkc = [mosek.boundkey.fx] * sdp.n_vars
    blc = [-v for v in sdp.obj_facvar]
    buc = [-v for v in sdp.obj_facvar]
    env = mosek.Env()
    task = env.Task(0, 0)
    if sdp.verbose > 0:
        task.set_Stream(mosek.streamtype.log, streamprinter)
    # No scalar variables; the whole problem lives in a single semidefinite
    # bar-variable whose dimension is the sum of all block sizes.
    numvar = 0
    numcon = len(bkc)
    BARVARDIM = [sum(sdp.block_struct)]
    task.appendvars(numvar)
    task.appendcons(numcon)
    task.appendbarvars(BARVARDIM)
    for i in range(numcon):
        task.putconbound(i, bkc[i], blc[i], buc[i])
    # Objective: one sparse symmetric coefficient matrix with unit weight.
    symc = task.appendsparsesymmat(BARVARDIM[0], barci, barcj, barcval)
    task.putbarcj(0, [symc], [1.0])
    # One sparse symmetric coefficient matrix per constraint row.
    for i in range(len(barai)):
        syma = task.appendsparsesymmat(BARVARDIM[0], barai[i], baraj[i],
                                       baraval[i])
        task.putbaraij(i, 0, [syma], [1.0])
    # Input the objective sense (minimize/maximize)
    task.putobjsense(mosek.objsense.minimize)
    return task
constant[Convert an SDP relaxation to a MOSEK task.
:param sdp: The SDP relaxation to convert.
:type sdp: :class:`ncpol2sdpa.sdp`.
:returns: :class:`mosek.Task`.
]
import module[mosek]
if name[sdp].complex_matrix begin[:]
from relative_module[picos_utils] import module[convert_to_picos]
variable[Problem] assign[=] call[call[name[convert_to_picos], parameter[name[sdp]]].to_real, parameter[]]
call[name[Problem]._make_mosek_instance, parameter[]]
variable[task] assign[=] name[Problem].msk_task
if compare[name[sdp].verbose greater[>] constant[0]] begin[:]
call[name[task].set_Stream, parameter[name[mosek].streamtype.log, name[streamprinter]]]
return[name[task]]
<ast.Tuple object at 0x7da1b10d6440> assign[=] call[name[convert_to_mosek_matrix], parameter[name[sdp]]]
variable[bkc] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b10d54b0>]] * name[sdp].n_vars]
variable[blc] assign[=] <ast.ListComp object at 0x7da1b10d5450>
variable[buc] assign[=] <ast.ListComp object at 0x7da1b10d6500>
variable[env] assign[=] call[name[mosek].Env, parameter[]]
variable[task] assign[=] call[name[env].Task, parameter[constant[0], constant[0]]]
if compare[name[sdp].verbose greater[>] constant[0]] begin[:]
call[name[task].set_Stream, parameter[name[mosek].streamtype.log, name[streamprinter]]]
variable[numvar] assign[=] constant[0]
variable[numcon] assign[=] call[name[len], parameter[name[bkc]]]
variable[BARVARDIM] assign[=] list[[<ast.Call object at 0x7da1b10d74f0>]]
call[name[task].appendvars, parameter[name[numvar]]]
call[name[task].appendcons, parameter[name[numcon]]]
call[name[task].appendbarvars, parameter[name[BARVARDIM]]]
for taget[name[i]] in starred[call[name[range], parameter[name[numcon]]]] begin[:]
call[name[task].putconbound, parameter[name[i], call[name[bkc]][name[i]], call[name[blc]][name[i]], call[name[buc]][name[i]]]]
variable[symc] assign[=] call[name[task].appendsparsesymmat, parameter[call[name[BARVARDIM]][constant[0]], name[barci], name[barcj], name[barcval]]]
call[name[task].putbarcj, parameter[constant[0], list[[<ast.Name object at 0x7da1b10d4ac0>]], list[[<ast.Constant object at 0x7da1b10d6830>]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[barai]]]]]] begin[:]
variable[syma] assign[=] call[name[task].appendsparsesymmat, parameter[call[name[BARVARDIM]][constant[0]], call[name[barai]][name[i]], call[name[baraj]][name[i]], call[name[baraval]][name[i]]]]
call[name[task].putbaraij, parameter[name[i], constant[0], list[[<ast.Name object at 0x7da20e963b50>]], list[[<ast.Constant object at 0x7da20e963e80>]]]]
call[name[task].putobjsense, parameter[name[mosek].objsense.minimize]]
return[name[task]] | keyword[def] identifier[convert_to_mosek] ( identifier[sdp] ):
literal[string]
keyword[import] identifier[mosek]
keyword[if] identifier[sdp] . identifier[complex_matrix] :
keyword[from] . identifier[picos_utils] keyword[import] identifier[convert_to_picos]
identifier[Problem] = identifier[convert_to_picos] ( identifier[sdp] ). identifier[to_real] ()
identifier[Problem] . identifier[_make_mosek_instance] ()
identifier[task] = identifier[Problem] . identifier[msk_task]
keyword[if] identifier[sdp] . identifier[verbose] > literal[int] :
identifier[task] . identifier[set_Stream] ( identifier[mosek] . identifier[streamtype] . identifier[log] , identifier[streamprinter] )
keyword[return] identifier[task]
identifier[barci] , identifier[barcj] , identifier[barcval] , identifier[barai] , identifier[baraj] , identifier[baraval] = identifier[convert_to_mosek_matrix] ( identifier[sdp] )
identifier[bkc] =[ identifier[mosek] . identifier[boundkey] . identifier[fx] ]* identifier[sdp] . identifier[n_vars]
identifier[blc] =[- identifier[v] keyword[for] identifier[v] keyword[in] identifier[sdp] . identifier[obj_facvar] ]
identifier[buc] =[- identifier[v] keyword[for] identifier[v] keyword[in] identifier[sdp] . identifier[obj_facvar] ]
identifier[env] = identifier[mosek] . identifier[Env] ()
identifier[task] = identifier[env] . identifier[Task] ( literal[int] , literal[int] )
keyword[if] identifier[sdp] . identifier[verbose] > literal[int] :
identifier[task] . identifier[set_Stream] ( identifier[mosek] . identifier[streamtype] . identifier[log] , identifier[streamprinter] )
identifier[numvar] = literal[int]
identifier[numcon] = identifier[len] ( identifier[bkc] )
identifier[BARVARDIM] =[ identifier[sum] ( identifier[sdp] . identifier[block_struct] )]
identifier[task] . identifier[appendvars] ( identifier[numvar] )
identifier[task] . identifier[appendcons] ( identifier[numcon] )
identifier[task] . identifier[appendbarvars] ( identifier[BARVARDIM] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[numcon] ):
identifier[task] . identifier[putconbound] ( identifier[i] , identifier[bkc] [ identifier[i] ], identifier[blc] [ identifier[i] ], identifier[buc] [ identifier[i] ])
identifier[symc] = identifier[task] . identifier[appendsparsesymmat] ( identifier[BARVARDIM] [ literal[int] ], identifier[barci] , identifier[barcj] , identifier[barcval] )
identifier[task] . identifier[putbarcj] ( literal[int] ,[ identifier[symc] ],[ literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[barai] )):
identifier[syma] = identifier[task] . identifier[appendsparsesymmat] ( identifier[BARVARDIM] [ literal[int] ], identifier[barai] [ identifier[i] ], identifier[baraj] [ identifier[i] ],
identifier[baraval] [ identifier[i] ])
identifier[task] . identifier[putbaraij] ( identifier[i] , literal[int] ,[ identifier[syma] ],[ literal[int] ])
identifier[task] . identifier[putobjsense] ( identifier[mosek] . identifier[objsense] . identifier[minimize] )
keyword[return] identifier[task] | def convert_to_mosek(sdp):
"""Convert an SDP relaxation to a MOSEK task.
:param sdp: The SDP relaxation to convert.
:type sdp: :class:`ncpol2sdpa.sdp`.
:returns: :class:`mosek.Task`.
"""
import mosek
# Cheat when variables are complex and convert with PICOS
if sdp.complex_matrix:
from .picos_utils import convert_to_picos
Problem = convert_to_picos(sdp).to_real()
Problem._make_mosek_instance()
task = Problem.msk_task
if sdp.verbose > 0:
task.set_Stream(mosek.streamtype.log, streamprinter) # depends on [control=['if'], data=[]]
return task # depends on [control=['if'], data=[]]
(barci, barcj, barcval, barai, baraj, baraval) = convert_to_mosek_matrix(sdp)
bkc = [mosek.boundkey.fx] * sdp.n_vars
blc = [-v for v in sdp.obj_facvar]
buc = [-v for v in sdp.obj_facvar]
env = mosek.Env()
task = env.Task(0, 0)
if sdp.verbose > 0:
task.set_Stream(mosek.streamtype.log, streamprinter) # depends on [control=['if'], data=[]]
numvar = 0
numcon = len(bkc)
BARVARDIM = [sum(sdp.block_struct)]
task.appendvars(numvar)
task.appendcons(numcon)
task.appendbarvars(BARVARDIM)
for i in range(numcon):
task.putconbound(i, bkc[i], blc[i], buc[i]) # depends on [control=['for'], data=['i']]
symc = task.appendsparsesymmat(BARVARDIM[0], barci, barcj, barcval)
task.putbarcj(0, [symc], [1.0])
for i in range(len(barai)):
syma = task.appendsparsesymmat(BARVARDIM[0], barai[i], baraj[i], baraval[i])
task.putbaraij(i, 0, [syma], [1.0]) # depends on [control=['for'], data=['i']]
# Input the objective sense (minimize/maximize)
task.putobjsense(mosek.objsense.minimize)
return task |
def load_sst(path=None,
             url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
    """
    Download and read in the Stanford Sentiment Treebank dataset
    into a dictionary with a 'train', 'dev', and 'test' keys. The
    dictionary keys point to lists of LabeledTrees.
    Arguments:
    ----------
    path : str, (optional defaults to ~/stanford_sentiment_treebank),
           directory where the corpus should be downloaded (and
           imported from).
    url : str, where the corpus should be downloaded from (defaults
          to nlp.stanford.edu address).
    Returns:
    --------
    dict : loaded dataset
    """
    if path is None:
        # fall back to a per-user default location
        path = os.path.expanduser("~/stanford_sentiment_treebank/")
    makedirs(path, exist_ok=True)
    corpus_files = download_sst(path, url)
    dataset = {}
    for split_name, fname in corpus_files.items():
        dataset[split_name] = import_tree_corpus(fname)
    return dataset
constant[
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
]
if compare[name[path] is constant[None]] begin[:]
variable[path] assign[=] call[name[os].path.expanduser, parameter[constant[~/stanford_sentiment_treebank/]]]
call[name[makedirs], parameter[name[path]]]
variable[fnames] assign[=] call[name[download_sst], parameter[name[path], name[url]]]
return[<ast.DictComp object at 0x7da1b12b7c40>] | keyword[def] identifier[load_sst] ( identifier[path] = keyword[None] ,
identifier[url] = literal[string] ):
literal[string]
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
identifier[makedirs] ( identifier[path] , identifier[exist_ok] = keyword[True] )
identifier[fnames] = identifier[download_sst] ( identifier[path] , identifier[url] )
keyword[return] { identifier[key] : identifier[import_tree_corpus] ( identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[fnames] . identifier[items] ()} | def load_sst(path=None, url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with a 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser('~/stanford_sentiment_treebank/')
makedirs(path, exist_ok=True) # depends on [control=['if'], data=['path']]
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for (key, value) in fnames.items()} |
def find_similar(self, doc, min_score=0.0, max_results=100):
        """
        Find `max_results` most similar articles in the index, each having similarity
        score of at least `min_score`. The resulting list may be shorter than `max_results`,
        in case there are not enough matching documents.
        `doc` is either a string (=document id, previously indexed) or a
        dict containing a 'tokens' key. These tokens are processed to produce a
        vector, which is then used as a query against the index.
        The similar documents are returned in decreasing similarity order, as
        `(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
        is identical to what was supplied for this document during indexing.
        """
        logger.debug("received query call with %r" % doc)
        if self.is_locked():
            # An index update is in flight; refuse to serve possibly
            # inconsistent results.
            msg = "cannot query while the server is being updated"
            logger.error(msg)
            raise RuntimeError(msg)
        sims_opt, sims_fresh = None, None
        # Cap how many raw similarities each (existing) index may return.
        for index in [self.fresh_index, self.opt_index]:
            if index is not None:
                index.topsims = max_results
        if isinstance(doc, basestring):
            # query by direct document id
            docid = doc
            if self.opt_index is not None and docid in self.opt_index:
                sims_opt = self.opt_index.sims_by_id(docid)
                if self.fresh_index is not None:
                    # Re-query the other index with this document's vector.
                    vec = self.opt_index.vec_by_id(docid)
                    sims_fresh = self.fresh_index.sims_by_vec(vec, normalize=False)
            elif self.fresh_index is not None and docid in self.fresh_index:
                sims_fresh = self.fresh_index.sims_by_id(docid)
                if self.opt_index is not None:
                    vec = self.fresh_index.vec_by_id(docid)
                    sims_opt = self.opt_index.sims_by_vec(vec, normalize=False)
            else:
                raise ValueError("document %r not in index" % docid)
        else:
            if 'topics' in doc:
                # user supplied vector directly => use that
                vec = gensim.matutils.any2sparse(doc['topics'])
            else:
                # query by an arbitrary text (=tokens) inside doc['tokens']
                vec = self.model.doc2vec(doc)  # convert document (text) to vector
            if self.opt_index is not None:
                sims_opt = self.opt_index.sims_by_vec(vec)
            if self.fresh_index is not None:
                sims_fresh = self.fresh_index.sims_by_vec(vec)
        # Combine hits from the optimized and fresh indexes into one list.
        merged = merge_sims(sims_opt, sims_fresh)
        logger.debug("got %s raw similars, pruning with max_results=%s, min_score=%s" %
                     (len(merged), max_results, min_score))
        result = []
        for docid, score in merged:
            # NOTE(review): the early break assumes `merged` is sorted by
            # decreasing score (consistent with the docstring contract).
            if score < min_score or 0 < max_results <= len(result):
                break
            result.append((docid, float(score), self.payload.get(docid, None)))
        return result
constant[
Find `max_results` most similar articles in the index, each having similarity
score of at least `min_score`. The resulting list may be shorter than `max_results`,
in case there are not enough matching documents.
`doc` is either a string (=document id, previously indexed) or a
dict containing a 'tokens' key. These tokens are processed to produce a
vector, which is then used as a query against the index.
The similar documents are returned in decreasing similarity order, as
`(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
is identical to what was supplied for this document during indexing.
]
call[name[logger].debug, parameter[binary_operation[constant[received query call with %r] <ast.Mod object at 0x7da2590d6920> name[doc]]]]
if call[name[self].is_locked, parameter[]] begin[:]
variable[msg] assign[=] constant[cannot query while the server is being updated]
call[name[logger].error, parameter[name[msg]]]
<ast.Raise object at 0x7da1b0fd0a60>
<ast.Tuple object at 0x7da1b0fd0f70> assign[=] tuple[[<ast.Constant object at 0x7da1b0fd0fd0>, <ast.Constant object at 0x7da1b0fd0d00>]]
for taget[name[index]] in starred[list[[<ast.Attribute object at 0x7da1b0fd0fa0>, <ast.Attribute object at 0x7da1b0fd0dc0>]]] begin[:]
if compare[name[index] is_not constant[None]] begin[:]
name[index].topsims assign[=] name[max_results]
if call[name[isinstance], parameter[name[doc], name[basestring]]] begin[:]
variable[docid] assign[=] name[doc]
if <ast.BoolOp object at 0x7da1b0fd0bb0> begin[:]
variable[sims_opt] assign[=] call[name[self].opt_index.sims_by_id, parameter[name[docid]]]
if compare[name[self].fresh_index is_not constant[None]] begin[:]
variable[vec] assign[=] call[name[self].opt_index.vec_by_id, parameter[name[docid]]]
variable[sims_fresh] assign[=] call[name[self].fresh_index.sims_by_vec, parameter[name[vec]]]
variable[merged] assign[=] call[name[merge_sims], parameter[name[sims_opt], name[sims_fresh]]]
call[name[logger].debug, parameter[binary_operation[constant[got %s raw similars, pruning with max_results=%s, min_score=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0f900a0>, <ast.Name object at 0x7da1b0f91960>, <ast.Name object at 0x7da1b0f91d20>]]]]]
variable[result] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0f90f10>, <ast.Name object at 0x7da1b0f90e50>]]] in starred[name[merged]] begin[:]
if <ast.BoolOp object at 0x7da1b0f904c0> begin[:]
break
call[name[result].append, parameter[tuple[[<ast.Name object at 0x7da1b0f92ce0>, <ast.Call object at 0x7da1b0f91360>, <ast.Call object at 0x7da1b0f903a0>]]]]
return[name[result]] | keyword[def] identifier[find_similar] ( identifier[self] , identifier[doc] , identifier[min_score] = literal[int] , identifier[max_results] = literal[int] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] % identifier[doc] )
keyword[if] identifier[self] . identifier[is_locked] ():
identifier[msg] = literal[string]
identifier[logger] . identifier[error] ( identifier[msg] )
keyword[raise] identifier[RuntimeError] ( identifier[msg] )
identifier[sims_opt] , identifier[sims_fresh] = keyword[None] , keyword[None]
keyword[for] identifier[index] keyword[in] [ identifier[self] . identifier[fresh_index] , identifier[self] . identifier[opt_index] ]:
keyword[if] identifier[index] keyword[is] keyword[not] keyword[None] :
identifier[index] . identifier[topsims] = identifier[max_results]
keyword[if] identifier[isinstance] ( identifier[doc] , identifier[basestring] ):
identifier[docid] = identifier[doc]
keyword[if] identifier[self] . identifier[opt_index] keyword[is] keyword[not] keyword[None] keyword[and] identifier[docid] keyword[in] identifier[self] . identifier[opt_index] :
identifier[sims_opt] = identifier[self] . identifier[opt_index] . identifier[sims_by_id] ( identifier[docid] )
keyword[if] identifier[self] . identifier[fresh_index] keyword[is] keyword[not] keyword[None] :
identifier[vec] = identifier[self] . identifier[opt_index] . identifier[vec_by_id] ( identifier[docid] )
identifier[sims_fresh] = identifier[self] . identifier[fresh_index] . identifier[sims_by_vec] ( identifier[vec] , identifier[normalize] = keyword[False] )
keyword[elif] identifier[self] . identifier[fresh_index] keyword[is] keyword[not] keyword[None] keyword[and] identifier[docid] keyword[in] identifier[self] . identifier[fresh_index] :
identifier[sims_fresh] = identifier[self] . identifier[fresh_index] . identifier[sims_by_id] ( identifier[docid] )
keyword[if] identifier[self] . identifier[opt_index] keyword[is] keyword[not] keyword[None] :
identifier[vec] = identifier[self] . identifier[fresh_index] . identifier[vec_by_id] ( identifier[docid] )
identifier[sims_opt] = identifier[self] . identifier[opt_index] . identifier[sims_by_vec] ( identifier[vec] , identifier[normalize] = keyword[False] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[docid] )
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[doc] :
identifier[vec] = identifier[gensim] . identifier[matutils] . identifier[any2sparse] ( identifier[doc] [ literal[string] ])
keyword[else] :
identifier[vec] = identifier[self] . identifier[model] . identifier[doc2vec] ( identifier[doc] )
keyword[if] identifier[self] . identifier[opt_index] keyword[is] keyword[not] keyword[None] :
identifier[sims_opt] = identifier[self] . identifier[opt_index] . identifier[sims_by_vec] ( identifier[vec] )
keyword[if] identifier[self] . identifier[fresh_index] keyword[is] keyword[not] keyword[None] :
identifier[sims_fresh] = identifier[self] . identifier[fresh_index] . identifier[sims_by_vec] ( identifier[vec] )
identifier[merged] = identifier[merge_sims] ( identifier[sims_opt] , identifier[sims_fresh] )
identifier[logger] . identifier[debug] ( literal[string] %
( identifier[len] ( identifier[merged] ), identifier[max_results] , identifier[min_score] ))
identifier[result] =[]
keyword[for] identifier[docid] , identifier[score] keyword[in] identifier[merged] :
keyword[if] identifier[score] < identifier[min_score] keyword[or] literal[int] < identifier[max_results] <= identifier[len] ( identifier[result] ):
keyword[break]
identifier[result] . identifier[append] (( identifier[docid] , identifier[float] ( identifier[score] ), identifier[self] . identifier[payload] . identifier[get] ( identifier[docid] , keyword[None] )))
keyword[return] identifier[result] | def find_similar(self, doc, min_score=0.0, max_results=100):
"""
Find `max_results` most similar articles in the index, each having similarity
score of at least `min_score`. The resulting list may be shorter than `max_results`,
in case there are not enough matching documents.
`doc` is either a string (=document id, previously indexed) or a
dict containing a 'tokens' key. These tokens are processed to produce a
vector, which is then used as a query against the index.
The similar documents are returned in decreasing similarity order, as
`(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
is identical to what was supplied for this document during indexing.
"""
logger.debug('received query call with %r' % doc)
if self.is_locked():
msg = 'cannot query while the server is being updated'
logger.error(msg)
raise RuntimeError(msg) # depends on [control=['if'], data=[]]
(sims_opt, sims_fresh) = (None, None)
for index in [self.fresh_index, self.opt_index]:
if index is not None:
index.topsims = max_results # depends on [control=['if'], data=['index']] # depends on [control=['for'], data=['index']]
if isinstance(doc, basestring):
# query by direct document id
docid = doc
if self.opt_index is not None and docid in self.opt_index:
sims_opt = self.opt_index.sims_by_id(docid)
if self.fresh_index is not None:
vec = self.opt_index.vec_by_id(docid)
sims_fresh = self.fresh_index.sims_by_vec(vec, normalize=False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.fresh_index is not None and docid in self.fresh_index:
sims_fresh = self.fresh_index.sims_by_id(docid)
if self.opt_index is not None:
vec = self.fresh_index.vec_by_id(docid)
sims_opt = self.opt_index.sims_by_vec(vec, normalize=False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('document %r not in index' % docid) # depends on [control=['if'], data=[]]
else:
if 'topics' in doc:
# user supplied vector directly => use that
vec = gensim.matutils.any2sparse(doc['topics']) # depends on [control=['if'], data=['doc']]
else:
# query by an arbitrary text (=tokens) inside doc['tokens']
vec = self.model.doc2vec(doc) # convert document (text) to vector
if self.opt_index is not None:
sims_opt = self.opt_index.sims_by_vec(vec) # depends on [control=['if'], data=[]]
if self.fresh_index is not None:
sims_fresh = self.fresh_index.sims_by_vec(vec) # depends on [control=['if'], data=[]]
merged = merge_sims(sims_opt, sims_fresh)
logger.debug('got %s raw similars, pruning with max_results=%s, min_score=%s' % (len(merged), max_results, min_score))
result = []
for (docid, score) in merged:
if score < min_score or 0 < max_results <= len(result):
break # depends on [control=['if'], data=[]]
result.append((docid, float(score), self.payload.get(docid, None))) # depends on [control=['for'], data=[]]
return result |
def parse_config(config, env, as_dict=True):
    """ Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
    a Python dictionary then recursively replace any variable references using the supplied
    env dictionary.
    """
    if config is None:
        return None
    body = config.strip()
    if not body:
        parsed = {}
    elif body.startswith('{'):
        parsed = json.loads(config)
    else:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load here.
        parsed = yaml.load(config)
    if as_dict:
        parsed = dict(parsed)
    # Walk the parsed structure recursively, substituting '$name' variables.
    replace_vars(parsed, env)
    return parsed
constant[ Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
]
if compare[name[config] is constant[None]] begin[:]
return[constant[None]]
variable[stripped] assign[=] call[name[config].strip, parameter[]]
if compare[call[name[len], parameter[name[stripped]]] equal[==] constant[0]] begin[:]
variable[config] assign[=] dictionary[[], []]
if name[as_dict] begin[:]
variable[config] assign[=] call[name[dict], parameter[name[config]]]
call[name[replace_vars], parameter[name[config], name[env]]]
return[name[config]] | keyword[def] identifier[parse_config] ( identifier[config] , identifier[env] , identifier[as_dict] = keyword[True] ):
literal[string]
keyword[if] identifier[config] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[stripped] = identifier[config] . identifier[strip] ()
keyword[if] identifier[len] ( identifier[stripped] )== literal[int] :
identifier[config] ={}
keyword[elif] identifier[stripped] [ literal[int] ]== literal[string] :
identifier[config] = identifier[json] . identifier[loads] ( identifier[config] )
keyword[else] :
identifier[config] = identifier[yaml] . identifier[load] ( identifier[config] )
keyword[if] identifier[as_dict] :
identifier[config] = identifier[dict] ( identifier[config] )
identifier[replace_vars] ( identifier[config] , identifier[env] )
keyword[return] identifier[config] | def parse_config(config, env, as_dict=True):
""" Parse a config from a magic cell body. This could be JSON or YAML. We turn it into
a Python dictionary then recursively replace any variable references using the supplied
env dictionary.
"""
if config is None:
return None # depends on [control=['if'], data=[]]
stripped = config.strip()
if len(stripped) == 0:
config = {} # depends on [control=['if'], data=[]]
elif stripped[0] == '{':
config = json.loads(config) # depends on [control=['if'], data=[]]
else:
config = yaml.load(config)
if as_dict:
config = dict(config) # depends on [control=['if'], data=[]]
# Now we need to walk the config dictionary recursively replacing any '$name' vars.
replace_vars(config, env)
return config |
def hsl(self, *args):
        """ Translate hsl(...) to color string
        raises:
            ValueError
        returns:
            str
        """
        if len(args) == 4:
            # Four components means an alpha channel is present.
            return self.hsla(*args)
        if len(args) == 3:
            hue, saturation, lightness = args
            # colorsys expects (h, l, s) with all components in [0, 1].
            channels = colorsys.hls_to_rgb(
                int(hue) / 360.0,
                utility.pc_or_float(lightness),
                utility.pc_or_float(saturation))
            scaled = (utility.convergent_round(c * 255) for c in channels)
            return self._rgbatohex(scaled)
        raise ValueError('Illegal color values')
constant[ Translate hsl(...) to color string
raises:
ValueError
returns:
str
]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[4]] begin[:]
return[call[name[self].hsla, parameter[<ast.Starred object at 0x7da1aff01f30>]]]
<ast.Raise object at 0x7da1b00b6050> | keyword[def] identifier[hsl] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
keyword[return] identifier[self] . identifier[hsla] (* identifier[args] )
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
identifier[h] , identifier[s] , identifier[l] = identifier[args]
identifier[rgb] = identifier[colorsys] . identifier[hls_to_rgb] (
identifier[int] ( identifier[h] )/ literal[int] , identifier[utility] . identifier[pc_or_float] ( identifier[l] ), identifier[utility] . identifier[pc_or_float] ( identifier[s] ))
identifier[color] =( identifier[utility] . identifier[convergent_round] ( identifier[c] * literal[int] ) keyword[for] identifier[c] keyword[in] identifier[rgb] )
keyword[return] identifier[self] . identifier[_rgbatohex] ( identifier[color] )
keyword[raise] identifier[ValueError] ( literal[string] ) | def hsl(self, *args):
""" Translate hsl(...) to color string
raises:
ValueError
returns:
str
"""
if len(args) == 4:
return self.hsla(*args) # depends on [control=['if'], data=[]]
elif len(args) == 3:
(h, s, l) = args
rgb = colorsys.hls_to_rgb(int(h) / 360.0, utility.pc_or_float(l), utility.pc_or_float(s))
color = (utility.convergent_round(c * 255) for c in rgb)
return self._rgbatohex(color) # depends on [control=['if'], data=[]]
raise ValueError('Illegal color values') |
def summary(args):
    """
    %prog summary input.bed scaffolds.fasta
    Print out summary statistics per map, followed by consensus summary of
    scaffold anchoring based on multiple maps.
    """
    # NOTE: the docstring doubles as the CLI usage text (fed to OptionParser
    # below), so its %prog wording must not change.
    p = OptionParser(summary.__doc__)
    p.set_table(sep="|", align=True)  # adds --sep / --align table options
    p.set_outfile()                   # adds --outfile (file or stdout)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    inputbed, scaffolds = args
    # Derived file names share the input prefix: <pf>.bed holds the marker
    # map, <pf>.chr.agp the consensus chromosome AGP built from it.
    pf = inputbed.rsplit(".", 1)[0]
    mapbed = pf + ".bed"
    chr_agp = pf + ".chr.agp"
    sep = opts.sep
    align = opts.align
    cc = Map(mapbed)          # all markers, across all maps
    mapnames = cc.mapnames
    s = Sizes(scaffolds)      # scaffold sizes from the FASTA
    total, l50, n50 = s.summary
    r = {}
    maps = []
    fw = must_open(opts.outfile, "w")
    print("*** Summary for each individual map ***", file=fw)
    for mapname in mapnames:
        # Restrict to the markers contributed by this one map.
        markers = [x for x in cc if x.mapname == mapname]
        ms = MapSummary(markers, l50, s)
        r["Linkage Groups", mapname] = ms.num_lgs
        ms.export_table(r, mapname, total)
        maps.append(ms)  # collected but not consumed further in this function
    print(tabulate(r, sep=sep, align=align), file=fw)
    r = {}
    agp = AGP(chr_agp)
    print("*** Summary for consensus map ***", file=fw)
    # Anchored: placed in the consensus AGP; Oriented: placed with a known
    # strand; Unplaced: scaffolds in the FASTA but absent from the AGP.
    consensus_scaffolds = set(x.component_id for x in agp if not x.is_gap)
    oriented_scaffolds = set(x.component_id for x in agp \
                        if (not x.is_gap) and x.orientation != '?')
    unplaced_scaffolds = set(s.mapping.keys()) - consensus_scaffolds
    for mapname, sc in (("Anchored", consensus_scaffolds),
                        ("Oriented", oriented_scaffolds),
                        ("Unplaced", unplaced_scaffolds)):
        markers = [x for x in cc if x.seqid in sc]
        ms = MapSummary(markers, l50, s, scaffolds=sc)
        ms.export_table(r, mapname, total)
    print(tabulate(r, sep=sep, align=align), file=fw)
constant[
%prog summary input.bed scaffolds.fasta
Print out summary statistics per map, followed by consensus summary of
scaffold anchoring based on multiple maps.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[summary].__doc__]]
call[name[p].set_table, parameter[]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da207f98d60> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da207f98370>]]
<ast.Tuple object at 0x7da207f9b0a0> assign[=] name[args]
variable[pf] assign[=] call[call[name[inputbed].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[mapbed] assign[=] binary_operation[name[pf] + constant[.bed]]
variable[chr_agp] assign[=] binary_operation[name[pf] + constant[.chr.agp]]
variable[sep] assign[=] name[opts].sep
variable[align] assign[=] name[opts].align
variable[cc] assign[=] call[name[Map], parameter[name[mapbed]]]
variable[mapnames] assign[=] name[cc].mapnames
variable[s] assign[=] call[name[Sizes], parameter[name[scaffolds]]]
<ast.Tuple object at 0x7da18bc70ee0> assign[=] name[s].summary
variable[r] assign[=] dictionary[[], []]
variable[maps] assign[=] list[[]]
variable[fw] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]]
call[name[print], parameter[constant[*** Summary for each individual map ***]]]
for taget[name[mapname]] in starred[name[mapnames]] begin[:]
variable[markers] assign[=] <ast.ListComp object at 0x7da18bc72bf0>
variable[ms] assign[=] call[name[MapSummary], parameter[name[markers], name[l50], name[s]]]
call[name[r]][tuple[[<ast.Constant object at 0x7da18bc739a0>, <ast.Name object at 0x7da18bc70b20>]]] assign[=] name[ms].num_lgs
call[name[ms].export_table, parameter[name[r], name[mapname], name[total]]]
call[name[maps].append, parameter[name[ms]]]
call[name[print], parameter[call[name[tabulate], parameter[name[r]]]]]
variable[r] assign[=] dictionary[[], []]
variable[agp] assign[=] call[name[AGP], parameter[name[chr_agp]]]
call[name[print], parameter[constant[*** Summary for consensus map ***]]]
variable[consensus_scaffolds] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da18bc71510>]]
variable[oriented_scaffolds] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da18bc724d0>]]
variable[unplaced_scaffolds] assign[=] binary_operation[call[name[set], parameter[call[name[s].mapping.keys, parameter[]]]] - name[consensus_scaffolds]]
for taget[tuple[[<ast.Name object at 0x7da18bc71e10>, <ast.Name object at 0x7da18bc70b50>]]] in starred[tuple[[<ast.Tuple object at 0x7da18bc707c0>, <ast.Tuple object at 0x7da18bc726e0>, <ast.Tuple object at 0x7da18bc701c0>]]] begin[:]
variable[markers] assign[=] <ast.ListComp object at 0x7da18bc719c0>
variable[ms] assign[=] call[name[MapSummary], parameter[name[markers], name[l50], name[s]]]
call[name[ms].export_table, parameter[name[r], name[mapname], name[total]]]
call[name[print], parameter[call[name[tabulate], parameter[name[r]]]]] | keyword[def] identifier[summary] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[summary] . identifier[__doc__] )
identifier[p] . identifier[set_table] ( identifier[sep] = literal[string] , identifier[align] = keyword[True] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[inputbed] , identifier[scaffolds] = identifier[args]
identifier[pf] = identifier[inputbed] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[mapbed] = identifier[pf] + literal[string]
identifier[chr_agp] = identifier[pf] + literal[string]
identifier[sep] = identifier[opts] . identifier[sep]
identifier[align] = identifier[opts] . identifier[align]
identifier[cc] = identifier[Map] ( identifier[mapbed] )
identifier[mapnames] = identifier[cc] . identifier[mapnames]
identifier[s] = identifier[Sizes] ( identifier[scaffolds] )
identifier[total] , identifier[l50] , identifier[n50] = identifier[s] . identifier[summary]
identifier[r] ={}
identifier[maps] =[]
identifier[fw] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] )
identifier[print] ( literal[string] , identifier[file] = identifier[fw] )
keyword[for] identifier[mapname] keyword[in] identifier[mapnames] :
identifier[markers] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[cc] keyword[if] identifier[x] . identifier[mapname] == identifier[mapname] ]
identifier[ms] = identifier[MapSummary] ( identifier[markers] , identifier[l50] , identifier[s] )
identifier[r] [ literal[string] , identifier[mapname] ]= identifier[ms] . identifier[num_lgs]
identifier[ms] . identifier[export_table] ( identifier[r] , identifier[mapname] , identifier[total] )
identifier[maps] . identifier[append] ( identifier[ms] )
identifier[print] ( identifier[tabulate] ( identifier[r] , identifier[sep] = identifier[sep] , identifier[align] = identifier[align] ), identifier[file] = identifier[fw] )
identifier[r] ={}
identifier[agp] = identifier[AGP] ( identifier[chr_agp] )
identifier[print] ( literal[string] , identifier[file] = identifier[fw] )
identifier[consensus_scaffolds] = identifier[set] ( identifier[x] . identifier[component_id] keyword[for] identifier[x] keyword[in] identifier[agp] keyword[if] keyword[not] identifier[x] . identifier[is_gap] )
identifier[oriented_scaffolds] = identifier[set] ( identifier[x] . identifier[component_id] keyword[for] identifier[x] keyword[in] identifier[agp] keyword[if] ( keyword[not] identifier[x] . identifier[is_gap] ) keyword[and] identifier[x] . identifier[orientation] != literal[string] )
identifier[unplaced_scaffolds] = identifier[set] ( identifier[s] . identifier[mapping] . identifier[keys] ())- identifier[consensus_scaffolds]
keyword[for] identifier[mapname] , identifier[sc] keyword[in] (( literal[string] , identifier[consensus_scaffolds] ),
( literal[string] , identifier[oriented_scaffolds] ),
( literal[string] , identifier[unplaced_scaffolds] )):
identifier[markers] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[cc] keyword[if] identifier[x] . identifier[seqid] keyword[in] identifier[sc] ]
identifier[ms] = identifier[MapSummary] ( identifier[markers] , identifier[l50] , identifier[s] , identifier[scaffolds] = identifier[sc] )
identifier[ms] . identifier[export_table] ( identifier[r] , identifier[mapname] , identifier[total] )
identifier[print] ( identifier[tabulate] ( identifier[r] , identifier[sep] = identifier[sep] , identifier[align] = identifier[align] ), identifier[file] = identifier[fw] ) | def summary(args):
"""
%prog summary input.bed scaffolds.fasta
Print out summary statistics per map, followed by consensus summary of
scaffold anchoring based on multiple maps.
"""
p = OptionParser(summary.__doc__)
p.set_table(sep='|', align=True)
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(inputbed, scaffolds) = args
pf = inputbed.rsplit('.', 1)[0]
mapbed = pf + '.bed'
chr_agp = pf + '.chr.agp'
sep = opts.sep
align = opts.align
cc = Map(mapbed)
mapnames = cc.mapnames
s = Sizes(scaffolds)
(total, l50, n50) = s.summary
r = {}
maps = []
fw = must_open(opts.outfile, 'w')
print('*** Summary for each individual map ***', file=fw)
for mapname in mapnames:
markers = [x for x in cc if x.mapname == mapname]
ms = MapSummary(markers, l50, s)
r['Linkage Groups', mapname] = ms.num_lgs
ms.export_table(r, mapname, total)
maps.append(ms) # depends on [control=['for'], data=['mapname']]
print(tabulate(r, sep=sep, align=align), file=fw)
r = {}
agp = AGP(chr_agp)
print('*** Summary for consensus map ***', file=fw)
consensus_scaffolds = set((x.component_id for x in agp if not x.is_gap))
oriented_scaffolds = set((x.component_id for x in agp if not x.is_gap and x.orientation != '?'))
unplaced_scaffolds = set(s.mapping.keys()) - consensus_scaffolds
for (mapname, sc) in (('Anchored', consensus_scaffolds), ('Oriented', oriented_scaffolds), ('Unplaced', unplaced_scaffolds)):
markers = [x for x in cc if x.seqid in sc]
ms = MapSummary(markers, l50, s, scaffolds=sc)
ms.export_table(r, mapname, total) # depends on [control=['for'], data=[]]
print(tabulate(r, sep=sep, align=align), file=fw) |
def merge(self, other):
    """Merges a set of build file aliases and returns a new set of aliases containing both.
    Any duplicate aliases from `other` will trump.
    :API: public
    :param other: The BuildFileAliases to merge in.
    :type other: :class:`BuildFileAliases`
    :returns: A new BuildFileAliases containing `other`'s aliases merged into ours.
    :rtype: :class:`BuildFileAliases`
    """
    if not isinstance(other, BuildFileAliases):
        raise TypeError('Can only merge other BuildFileAliases, given {0}'.format(other))

    # Renamed from `merge` to avoid shadowing the enclosing method's name.
    def merge_dicts(*items):
        # Later dicts win on key collisions, which gives `other` precedence.
        merged = {}
        for item in items:
            merged.update(item)
        return merged

    targets = merge_dicts(self.target_types, self.target_macro_factories,
                          other.target_types, other.target_macro_factories)
    objects = merge_dicts(self.objects, other.objects)
    context_aware_object_factories = merge_dicts(self.context_aware_object_factories,
                                                 other.context_aware_object_factories)
    return BuildFileAliases(targets=targets,
                            objects=objects,
                            context_aware_object_factories=context_aware_object_factories)
constant[Merges a set of build file aliases and returns a new set of aliases containing both.
Any duplicate aliases from `other` will trump.
:API: public
:param other: The BuildFileAliases to merge in.
:type other: :class:`BuildFileAliases`
:returns: A new BuildFileAliases containing `other`'s aliases merged into ours.
:rtype: :class:`BuildFileAliases`
]
if <ast.UnaryOp object at 0x7da1b22a4700> begin[:]
<ast.Raise object at 0x7da1b22a6410>
def function[merge, parameter[]]:
variable[merged] assign[=] dictionary[[], []]
for taget[name[item]] in starred[name[items]] begin[:]
call[name[merged].update, parameter[name[item]]]
return[name[merged]]
variable[targets] assign[=] call[name[merge], parameter[name[self].target_types, name[self].target_macro_factories, name[other].target_types, name[other].target_macro_factories]]
variable[objects] assign[=] call[name[merge], parameter[name[self].objects, name[other].objects]]
variable[context_aware_object_factories] assign[=] call[name[merge], parameter[name[self].context_aware_object_factories, name[other].context_aware_object_factories]]
return[call[name[BuildFileAliases], parameter[]]] | keyword[def] identifier[merge] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[BuildFileAliases] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[other] ))
keyword[def] identifier[merge] (* identifier[items] ):
identifier[merged] ={}
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[merged] . identifier[update] ( identifier[item] )
keyword[return] identifier[merged]
identifier[targets] = identifier[merge] ( identifier[self] . identifier[target_types] , identifier[self] . identifier[target_macro_factories] ,
identifier[other] . identifier[target_types] , identifier[other] . identifier[target_macro_factories] )
identifier[objects] = identifier[merge] ( identifier[self] . identifier[objects] , identifier[other] . identifier[objects] )
identifier[context_aware_object_factories] = identifier[merge] ( identifier[self] . identifier[context_aware_object_factories] ,
identifier[other] . identifier[context_aware_object_factories] )
keyword[return] identifier[BuildFileAliases] ( identifier[targets] = identifier[targets] ,
identifier[objects] = identifier[objects] ,
identifier[context_aware_object_factories] = identifier[context_aware_object_factories] ) | def merge(self, other):
"""Merges a set of build file aliases and returns a new set of aliases containing both.
Any duplicate aliases from `other` will trump.
:API: public
:param other: The BuildFileAliases to merge in.
:type other: :class:`BuildFileAliases`
:returns: A new BuildFileAliases containing `other`'s aliases merged into ours.
:rtype: :class:`BuildFileAliases`
"""
if not isinstance(other, BuildFileAliases):
raise TypeError('Can only merge other BuildFileAliases, given {0}'.format(other)) # depends on [control=['if'], data=[]]
def merge(*items):
merged = {}
for item in items:
merged.update(item) # depends on [control=['for'], data=['item']]
return merged
targets = merge(self.target_types, self.target_macro_factories, other.target_types, other.target_macro_factories)
objects = merge(self.objects, other.objects)
context_aware_object_factories = merge(self.context_aware_object_factories, other.context_aware_object_factories)
return BuildFileAliases(targets=targets, objects=objects, context_aware_object_factories=context_aware_object_factories) |
def s_supply(self, bus):
    """ Returns the total complex power generation capacity.
    """
    # Apparent power S = P + jQ for every true generator attached to `bus`
    # (dispatchable loads are modelled as generators with is_load set).
    injections = [complex(gen.p, gen.q)
                  for gen in self.generators
                  if gen.bus == bus and not gen.is_load]
    if not injections:
        return 0 + 0j
    return sum(array(injections, dtype=complex64))
constant[ Returns the total complex power generation capacity.
]
variable[Sg] assign[=] call[name[array], parameter[<ast.ListComp object at 0x7da1b25d1ae0>]]
if call[name[len], parameter[name[Sg]]] begin[:]
return[call[name[sum], parameter[name[Sg]]]] | keyword[def] identifier[s_supply] ( identifier[self] , identifier[bus] ):
literal[string]
identifier[Sg] = identifier[array] ([ identifier[complex] ( identifier[g] . identifier[p] , identifier[g] . identifier[q] ) keyword[for] identifier[g] keyword[in] identifier[self] . identifier[generators] keyword[if]
( identifier[g] . identifier[bus] == identifier[bus] ) keyword[and] keyword[not] identifier[g] . identifier[is_load] ], identifier[dtype] = identifier[complex64] )
keyword[if] identifier[len] ( identifier[Sg] ):
keyword[return] identifier[sum] ( identifier[Sg] )
keyword[else] :
keyword[return] literal[int] + literal[int] | def s_supply(self, bus):
""" Returns the total complex power generation capacity.
"""
Sg = array([complex(g.p, g.q) for g in self.generators if g.bus == bus and (not g.is_load)], dtype=complex64)
if len(Sg):
return sum(Sg) # depends on [control=['if'], data=[]]
else:
return 0 + 0j |
def check_environment_presets():
    """
    Checks for environment variables that can cause problems with supernova
    Returns True when no conflicting ``NOVA_*`` / ``OS_*`` variables are set;
    otherwise prints a warning listing them and returns False.
    """
    # str.startswith accepts a tuple of prefixes, so a single pass over the
    # live environment suffices (no copy needed -- nothing is mutated here).
    presets = [name for name in os.environ
               if name.startswith(('NOVA_', 'OS_'))]
    if not presets:
        return True
    click.echo("_" * 80)
    click.echo("*WARNING* Found existing environment variables that may "
               "cause conflicts:")
    for preset in presets:
        click.echo("  - %s" % preset)
    click.echo("_" * 80)
    return False
constant[
Checks for environment variables that can cause problems with supernova
]
variable[presets] assign[=] <ast.ListComp object at 0x7da1b27e2050>
if compare[call[name[len], parameter[name[presets]]] less[<] constant[1]] begin[:]
return[constant[True]] | keyword[def] identifier[check_environment_presets] ():
literal[string]
identifier[presets] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[os] . identifier[environ] . identifier[copy] (). identifier[keys] () keyword[if] identifier[x] . identifier[startswith] ( literal[string] ) keyword[or]
identifier[x] . identifier[startswith] ( literal[string] )]
keyword[if] identifier[len] ( identifier[presets] )< literal[int] :
keyword[return] keyword[True]
keyword[else] :
identifier[click] . identifier[echo] ( literal[string] * literal[int] )
identifier[click] . identifier[echo] ( literal[string]
literal[string] )
keyword[for] identifier[preset] keyword[in] identifier[presets] :
identifier[click] . identifier[echo] ( literal[string] % identifier[preset] )
identifier[click] . identifier[echo] ( literal[string] * literal[int] )
keyword[return] keyword[False] | def check_environment_presets():
"""
Checks for environment variables that can cause problems with supernova
"""
presets = [x for x in os.environ.copy().keys() if x.startswith('NOVA_') or x.startswith('OS_')]
if len(presets) < 1:
return True # depends on [control=['if'], data=[]]
else:
click.echo('_' * 80)
click.echo('*WARNING* Found existing environment variables that may cause conflicts:')
for preset in presets:
click.echo(' - %s' % preset) # depends on [control=['for'], data=['preset']]
click.echo('_' * 80)
return False |
def remove_interface_router(self, router, body=None):
    """Removes an internal network interface from the specified router."""
    # The detach endpoint lives under the individual router resource.
    path = (self.router_path % router) + "/remove_router_interface"
    return self.put(path, body=body)
constant[Removes an internal network interface from the specified router.]
return[call[name[self].put, parameter[binary_operation[binary_operation[name[self].router_path <ast.Mod object at 0x7da2590d6920> name[router]] + constant[/remove_router_interface]]]]] | keyword[def] identifier[remove_interface_router] ( identifier[self] , identifier[router] , identifier[body] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[put] (( identifier[self] . identifier[router_path] % identifier[router] )+
literal[string] , identifier[body] = identifier[body] ) | def remove_interface_router(self, router, body=None):
"""Removes an internal network interface from the specified router."""
return self.put(self.router_path % router + '/remove_router_interface', body=body) |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # NOTE(review): this overwrites the caller-supplied vs30 in place with a
    # uniform 700 m/s value -- a side effect on `sites` that callers should
    # be aware of.
    sites.vs30 = 700 * np.ones(len(sites.vs30))
    # Base-model prediction, computed at the fixed reference vs30.
    mean, stddevs = super().get_mean_and_stddevs(
        sites, rup, dists, imt, stddev_types)
    C = CauzziFaccioli2008SWISS01.COEFFS
    tau_ss = 'tau'
    # ln(10) factor: presumably converts a log10-based single-station phi to
    # natural-log units for _apply_adjustments -- TODO confirm against the
    # adjustment routine's expectations.
    log_phi_ss = np.log(10)
    mean, stddevs = _apply_adjustments(
        C, self.COEFFS_FS_ROCK[imt], tau_ss,
        mean, stddevs, sites, rup, dists.rhypo, imt, stddev_types,
        log_phi_ss)
    # log(10 ** x) converts the adjusted stddevs from log10 to natural log.
    return mean, np.log(10 ** np.array(stddevs))
constant[
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
]
name[sites].vs30 assign[=] binary_operation[constant[700] * call[name[np].ones, parameter[call[name[len], parameter[name[sites].vs30]]]]]
<ast.Tuple object at 0x7da18ede4910> assign[=] call[call[name[super], parameter[]].get_mean_and_stddevs, parameter[name[sites], name[rup], name[dists], name[imt], name[stddev_types]]]
variable[C] assign[=] name[CauzziFaccioli2008SWISS01].COEFFS
variable[tau_ss] assign[=] constant[tau]
variable[log_phi_ss] assign[=] call[name[np].log, parameter[constant[10]]]
<ast.Tuple object at 0x7da18ede76d0> assign[=] call[name[_apply_adjustments], parameter[name[C], call[name[self].COEFFS_FS_ROCK][name[imt]], name[tau_ss], name[mean], name[stddevs], name[sites], name[rup], name[dists].rhypo, name[imt], name[stddev_types], name[log_phi_ss]]]
return[tuple[[<ast.Name object at 0x7da18ede69b0>, <ast.Call object at 0x7da2041da1d0>]]] | keyword[def] identifier[get_mean_and_stddevs] ( identifier[self] , identifier[sites] , identifier[rup] , identifier[dists] , identifier[imt] , identifier[stddev_types] ):
literal[string]
identifier[sites] . identifier[vs30] = literal[int] * identifier[np] . identifier[ones] ( identifier[len] ( identifier[sites] . identifier[vs30] ))
identifier[mean] , identifier[stddevs] = identifier[super] (). identifier[get_mean_and_stddevs] (
identifier[sites] , identifier[rup] , identifier[dists] , identifier[imt] , identifier[stddev_types] )
identifier[C] = identifier[CauzziFaccioli2008SWISS01] . identifier[COEFFS]
identifier[tau_ss] = literal[string]
identifier[log_phi_ss] = identifier[np] . identifier[log] ( literal[int] )
identifier[mean] , identifier[stddevs] = identifier[_apply_adjustments] (
identifier[C] , identifier[self] . identifier[COEFFS_FS_ROCK] [ identifier[imt] ], identifier[tau_ss] ,
identifier[mean] , identifier[stddevs] , identifier[sites] , identifier[rup] , identifier[dists] . identifier[rhypo] , identifier[imt] , identifier[stddev_types] ,
identifier[log_phi_ss] )
keyword[return] identifier[mean] , identifier[np] . identifier[log] ( literal[int] ** identifier[np] . identifier[array] ( identifier[stddevs] )) | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
sites.vs30 = 700 * np.ones(len(sites.vs30))
(mean, stddevs) = super().get_mean_and_stddevs(sites, rup, dists, imt, stddev_types)
C = CauzziFaccioli2008SWISS01.COEFFS
tau_ss = 'tau'
log_phi_ss = np.log(10)
(mean, stddevs) = _apply_adjustments(C, self.COEFFS_FS_ROCK[imt], tau_ss, mean, stddevs, sites, rup, dists.rhypo, imt, stddev_types, log_phi_ss)
return (mean, np.log(10 ** np.array(stddevs))) |
def method_not_allowed(cls, errors=None):
    """Shortcut API for HTTP 405 `Method not allowed` response.
    Args:
        errors (list): Response key/value data.
    Returns:
        WSResponse Instance.
    """
    if cls.expose_status:  # pragma: no cover
        # Mirror the 405 status onto the underlying HTTP response object.
        response = cls.response
        response.content_type = 'application/json'
        response._status_line = '405 Method Not Allowed'
    wrapped = cls(405, None, errors)
    return wrapped.to_json
constant[Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
]
if name[cls].expose_status begin[:]
name[cls].response.content_type assign[=] constant[application/json]
name[cls].response._status_line assign[=] constant[405 Method Not Allowed]
return[call[name[cls], parameter[constant[405], constant[None], name[errors]]].to_json] | keyword[def] identifier[method_not_allowed] ( identifier[cls] , identifier[errors] = keyword[None] ):
literal[string]
keyword[if] identifier[cls] . identifier[expose_status] :
identifier[cls] . identifier[response] . identifier[content_type] = literal[string]
identifier[cls] . identifier[response] . identifier[_status_line] = literal[string]
keyword[return] identifier[cls] ( literal[int] , keyword[None] , identifier[errors] ). identifier[to_json] | def method_not_allowed(cls, errors=None):
"""Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed' # depends on [control=['if'], data=[]]
return cls(405, None, errors).to_json |
def install_notebook_hook(notebook_type, load, show_doc, show_app, overwrite=False):
    ''' Install a new notebook display hook.
    Bokeh comes with support for Jupyter notebooks built-in. However, there are
    other kinds of notebooks in use by different communities. This function
    provides a mechanism for other projects to instruct Bokeh how to display
    content in other notebooks.
    This function is primarily of use to developers wishing to integrate Bokeh
    with new notebook types.
    Args:
        notebook_type (str) :
            A name for the notebook type, e.e. ``'Jupyter'`` or ``'Zeppelin'``
            If the name has previously been installed, a ``RuntimeError`` will
            be raised, unless ``overwrite=True``
        load (callable) :
            A function for loading BokehJS in a notebook type. The function
            will be called with the following arguments:
            .. code-block:: python
                load(
                    resources,   # A Resources object for how to load BokehJS
                    verbose,     # Whether to display verbose loading banner
                    hide_banner, # Whether to hide the output banner entirely
                    load_timeout # Time after which to report a load fail error
                )
        show_doc (callable) :
            A function for displaying Bokeh standalone documents in the
            notebook type. This function will be called with the following
            arguments:
            .. code-block:: python
                show_doc(
                    obj,            # the Bokeh object to display
                    state,          # current bokeh.io "state"
                    notebook_handle # whether a notebook handle was requested
                )
            If the notebook platform is capable of supporting in-place updates
            to plots then this function may return an opaque notebook handle
            that can be used for that purpose. The handle will be returned by
            ``show()``, and can be used by as appropriate to update plots, etc.
            by additional functions in the library that installed the hooks.
        show_app (callable) :
            A function for displaying Bokeh applications in the notebook
            type. This function will be called with the following arguments:
            .. code-block:: python
                show_app(
                    app,          # the Bokeh Application to display
                    state,        # current bokeh.io "state"
                    notebook_url, # URL to the current active notebook page
                    **kw          # any backend-specific keywords passed as-is
                )
        overwrite (bool, optional) :
            Whether to allow an existing hook to be overwritten by a new
            definition (default: False)
    Returns:
        None
    Raises:
        RuntimeError
            If ``notebook_type`` is already installed and ``overwrite=False``
    '''
    # Refuse to clobber an existing registration unless explicitly allowed.
    already_installed = notebook_type in _HOOKS
    if already_installed and not overwrite:
        raise RuntimeError("hook for notebook type %r already exists" % notebook_type)
    _HOOKS[notebook_type] = {'load': load, 'doc': show_doc, 'app': show_app}
constant[ Install a new notebook display hook.
Bokeh comes with support for Jupyter notebooks built-in. However, there are
other kinds of notebooks in use by different communities. This function
provides a mechanism for other projects to instruct Bokeh how to display
content in other notebooks.
This function is primarily of use to developers wishing to integrate Bokeh
with new notebook types.
Args:
notebook_type (str) :
A name for the notebook type, e.e. ``'Jupyter'`` or ``'Zeppelin'``
If the name has previously been installed, a ``RuntimeError`` will
be raised, unless ``overwrite=True``
load (callable) :
A function for loading BokehJS in a notebook type. The function
will be called with the following arguments:
.. code-block:: python
load(
resources, # A Resources object for how to load BokehJS
verbose, # Whether to display verbose loading banner
hide_banner, # Whether to hide the output banner entirely
load_timeout # Time after which to report a load fail error
)
show_doc (callable) :
A function for displaying Bokeh standalone documents in the
notebook type. This function will be called with the following
arguments:
.. code-block:: python
show_doc(
obj, # the Bokeh object to display
state, # current bokeh.io "state"
notebook_handle # whether a notebook handle was requested
)
If the notebook platform is capable of supporting in-place updates
to plots then this function may return an opaque notebook handle
that can be used for that purpose. The handle will be returned by
``show()``, and can be used by as appropriate to update plots, etc.
by additional functions in the library that installed the hooks.
show_app (callable) :
A function for displaying Bokeh applications in the notebook
type. This function will be called with the following arguments:
.. code-block:: python
show_app(
app, # the Bokeh Application to display
state, # current bokeh.io "state"
notebook_url, # URL to the current active notebook page
**kw # any backend-specific keywords passed as-is
)
overwrite (bool, optional) :
Whether to allow an existing hook to be overwritten by a new
definition (default: False)
Returns:
None
Raises:
RuntimeError
If ``notebook_type`` is already installed and ``overwrite=False``
]
if <ast.BoolOp object at 0x7da20c6c7100> begin[:]
<ast.Raise object at 0x7da1b1f611e0>
call[name[_HOOKS]][name[notebook_type]] assign[=] call[name[dict], parameter[]] | keyword[def] identifier[install_notebook_hook] ( identifier[notebook_type] , identifier[load] , identifier[show_doc] , identifier[show_app] , identifier[overwrite] = keyword[False] ):
literal[string]
keyword[if] identifier[notebook_type] keyword[in] identifier[_HOOKS] keyword[and] keyword[not] identifier[overwrite] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[notebook_type] )
identifier[_HOOKS] [ identifier[notebook_type] ]= identifier[dict] ( identifier[load] = identifier[load] , identifier[doc] = identifier[show_doc] , identifier[app] = identifier[show_app] ) | def install_notebook_hook(notebook_type, load, show_doc, show_app, overwrite=False):
""" Install a new notebook display hook.
Bokeh comes with support for Jupyter notebooks built-in. However, there are
other kinds of notebooks in use by different communities. This function
provides a mechanism for other projects to instruct Bokeh how to display
content in other notebooks.
This function is primarily of use to developers wishing to integrate Bokeh
with new notebook types.
Args:
notebook_type (str) :
A name for the notebook type, e.e. ``'Jupyter'`` or ``'Zeppelin'``
If the name has previously been installed, a ``RuntimeError`` will
be raised, unless ``overwrite=True``
load (callable) :
A function for loading BokehJS in a notebook type. The function
will be called with the following arguments:
.. code-block:: python
load(
resources, # A Resources object for how to load BokehJS
verbose, # Whether to display verbose loading banner
hide_banner, # Whether to hide the output banner entirely
load_timeout # Time after which to report a load fail error
)
show_doc (callable) :
A function for displaying Bokeh standalone documents in the
notebook type. This function will be called with the following
arguments:
.. code-block:: python
show_doc(
obj, # the Bokeh object to display
state, # current bokeh.io "state"
notebook_handle # whether a notebook handle was requested
)
If the notebook platform is capable of supporting in-place updates
to plots then this function may return an opaque notebook handle
that can be used for that purpose. The handle will be returned by
``show()``, and can be used by as appropriate to update plots, etc.
by additional functions in the library that installed the hooks.
show_app (callable) :
A function for displaying Bokeh applications in the notebook
type. This function will be called with the following arguments:
.. code-block:: python
show_app(
app, # the Bokeh Application to display
state, # current bokeh.io "state"
notebook_url, # URL to the current active notebook page
**kw # any backend-specific keywords passed as-is
)
overwrite (bool, optional) :
Whether to allow an existing hook to be overwritten by a new
definition (default: False)
Returns:
None
Raises:
RuntimeError
If ``notebook_type`` is already installed and ``overwrite=False``
"""
if notebook_type in _HOOKS and (not overwrite):
raise RuntimeError('hook for notebook type %r already exists' % notebook_type) # depends on [control=['if'], data=[]]
_HOOKS[notebook_type] = dict(load=load, doc=show_doc, app=show_app) |
def write_seqinfo(self, out_fp, include_name=True):
    """
    Write a simple seq_info file, suitable for use in taxtastic.
    Useful for printing out the results of collapsing tax nodes - super
    bare bones, just tax_id and seqname.
    If include_name is True, a column with the taxon name is included.
    """
    fieldnames = ['seqname', 'tax_id']
    if include_name:
        fieldnames.append('tax_name')
    writer = csv.DictWriter(out_fp, fieldnames, quoting=csv.QUOTE_NONNUMERIC,
                            lineterminator='\n', extrasaction='ignore')
    writer.writeheader()
    # One row per sequence id of each node; 'tax_name' is silently dropped
    # by extrasaction='ignore' when include_name is False.
    for node in self:
        for seq_id in node.sequence_ids:
            writer.writerow({'seqname': seq_id,
                             'tax_id': node.tax_id,
                             'tax_name': node.name})
constant[
Write a simple seq_info file, suitable for use in taxtastic.
Useful for printing out the results of collapsing tax nodes - super
bare bones, just tax_id and seqname.
If include_name is True, a column with the taxon name is included.
]
variable[header] assign[=] list[[<ast.Constant object at 0x7da1b198e800>, <ast.Constant object at 0x7da1b198f070>]]
if name[include_name] begin[:]
call[name[header].append, parameter[constant[tax_name]]]
variable[w] assign[=] call[name[csv].DictWriter, parameter[name[out_fp], name[header]]]
call[name[w].writeheader, parameter[]]
variable[rows] assign[=] <ast.GeneratorExp object at 0x7da1b198d8a0>
call[name[w].writerows, parameter[name[rows]]] | keyword[def] identifier[write_seqinfo] ( identifier[self] , identifier[out_fp] , identifier[include_name] = keyword[True] ):
literal[string]
identifier[header] =[ literal[string] , literal[string] ]
keyword[if] identifier[include_name] :
identifier[header] . identifier[append] ( literal[string] )
identifier[w] = identifier[csv] . identifier[DictWriter] ( identifier[out_fp] , identifier[header] , identifier[quoting] = identifier[csv] . identifier[QUOTE_NONNUMERIC] ,
identifier[lineterminator] = literal[string] , identifier[extrasaction] = literal[string] )
identifier[w] . identifier[writeheader] ()
identifier[rows] =({ literal[string] : identifier[seq_id] ,
literal[string] : identifier[node] . identifier[tax_id] ,
literal[string] : identifier[node] . identifier[name] }
keyword[for] identifier[node] keyword[in] identifier[self]
keyword[for] identifier[seq_id] keyword[in] identifier[node] . identifier[sequence_ids] )
identifier[w] . identifier[writerows] ( identifier[rows] ) | def write_seqinfo(self, out_fp, include_name=True):
"""
Write a simple seq_info file, suitable for use in taxtastic.
Useful for printing out the results of collapsing tax nodes - super
bare bones, just tax_id and seqname.
If include_name is True, a column with the taxon name is included.
"""
header = ['seqname', 'tax_id']
if include_name:
header.append('tax_name') # depends on [control=['if'], data=[]]
w = csv.DictWriter(out_fp, header, quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n', extrasaction='ignore')
w.writeheader()
rows = ({'seqname': seq_id, 'tax_id': node.tax_id, 'tax_name': node.name} for node in self for seq_id in node.sequence_ids)
w.writerows(rows) |
def host_key_checking(enable):
    """ Temporarily disables host_key_checking, which is set globally. """
    # Mirror the boolean into the environment variable Ansible reads.
    flag = 'True' if enable else 'False'
    with environment_variable('ANSIBLE_HOST_KEY_CHECKING', flag):
        saved = ansible.constants.HOST_KEY_CHECKING
        ansible.constants.HOST_KEY_CHECKING = enable
        yield
        # Restore the global once the caller's block finishes.
        ansible.constants.HOST_KEY_CHECKING = saved
constant[ Temporarily disables host_key_checking, which is set globally. ]
def function[as_string, parameter[b]]:
return[<ast.BoolOp object at 0x7da1b184bfd0>]
with call[name[environment_variable], parameter[constant[ANSIBLE_HOST_KEY_CHECKING], call[name[as_string], parameter[name[enable]]]]] begin[:]
variable[previous] assign[=] name[ansible].constants.HOST_KEY_CHECKING
name[ansible].constants.HOST_KEY_CHECKING assign[=] name[enable]
<ast.Yield object at 0x7da1b184b130>
name[ansible].constants.HOST_KEY_CHECKING assign[=] name[previous] | keyword[def] identifier[host_key_checking] ( identifier[enable] ):
literal[string]
keyword[def] identifier[as_string] ( identifier[b] ):
keyword[return] identifier[b] keyword[and] literal[string] keyword[or] literal[string]
keyword[with] identifier[environment_variable] ( literal[string] , identifier[as_string] ( identifier[enable] )):
identifier[previous] = identifier[ansible] . identifier[constants] . identifier[HOST_KEY_CHECKING]
identifier[ansible] . identifier[constants] . identifier[HOST_KEY_CHECKING] = identifier[enable]
keyword[yield]
identifier[ansible] . identifier[constants] . identifier[HOST_KEY_CHECKING] = identifier[previous] | def host_key_checking(enable):
""" Temporarily disables host_key_checking, which is set globally. """
def as_string(b):
return b and 'True' or 'False'
with environment_variable('ANSIBLE_HOST_KEY_CHECKING', as_string(enable)):
previous = ansible.constants.HOST_KEY_CHECKING
ansible.constants.HOST_KEY_CHECKING = enable
yield
ansible.constants.HOST_KEY_CHECKING = previous # depends on [control=['with'], data=[]] |
def evaluate(self, genomes, config):
    """Evaluate the genomes"""
    if not self.working:
        self.start()
    # Fan the work out to the input queue, counting outstanding jobs.
    pending = 0
    for genome_id, genome in genomes:
        pending += 1
        self.inqueue.put((genome_id, genome, config))
    # Collect one result per submitted job and assign the fitness back
    # to each genome object returned on the output queue.
    while pending:
        pending -= 1
        _ignored_id, genome, fitness = self.outqueue.get()
        genome.fitness = fitness
genome.fitness = fitness | def function[evaluate, parameter[self, genomes, config]]:
constant[Evaluate the genomes]
if <ast.UnaryOp object at 0x7da204564dc0> begin[:]
call[name[self].start, parameter[]]
variable[p] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da2045661a0>, <ast.Name object at 0x7da1b18afca0>]]] in starred[name[genomes]] begin[:]
<ast.AugAssign object at 0x7da1b18aec20>
call[name[self].inqueue.put, parameter[tuple[[<ast.Name object at 0x7da1b18ae1a0>, <ast.Name object at 0x7da1b18ad2d0>, <ast.Name object at 0x7da1b18af3a0>]]]]
while compare[name[p] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b18af8b0>
<ast.Tuple object at 0x7da1b18ac250> assign[=] call[name[self].outqueue.get, parameter[]]
name[genome].fitness assign[=] name[fitness] | keyword[def] identifier[evaluate] ( identifier[self] , identifier[genomes] , identifier[config] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[working] :
identifier[self] . identifier[start] ()
identifier[p] = literal[int]
keyword[for] identifier[genome_id] , identifier[genome] keyword[in] identifier[genomes] :
identifier[p] += literal[int]
identifier[self] . identifier[inqueue] . identifier[put] (( identifier[genome_id] , identifier[genome] , identifier[config] ))
keyword[while] identifier[p] > literal[int] :
identifier[p] -= literal[int]
identifier[ignored_genome_id] , identifier[genome] , identifier[fitness] = identifier[self] . identifier[outqueue] . identifier[get] ()
identifier[genome] . identifier[fitness] = identifier[fitness] | def evaluate(self, genomes, config):
"""Evaluate the genomes"""
if not self.working:
self.start() # depends on [control=['if'], data=[]]
p = 0
for (genome_id, genome) in genomes:
p += 1
self.inqueue.put((genome_id, genome, config)) # depends on [control=['for'], data=[]]
# assign the fitness back to each genome
while p > 0:
p -= 1
(ignored_genome_id, genome, fitness) = self.outqueue.get()
genome.fitness = fitness # depends on [control=['while'], data=['p']] |
def Connect(self, Skype):
    """Attach this call channel manager to a running Skype instance.

    Must be called before using any other functionality of this object.

    :Parameters:
      Skype : `Skype`
        The Skype object.

    :see: `Disconnect`
    """
    self._Skype = Skype
    Skype.RegisterEventHandler('CallStatus', self._CallStatus)
    # Drop any channels left over from a previous connection.
    del self._Channels[:]
constant[Connects this call channel manager instance to Skype. This is the first thing you should
do after creating this object.
:Parameters:
Skype : `Skype`
The Skype object.
:see: `Disconnect`
]
name[self]._Skype assign[=] name[Skype]
call[name[self]._Skype.RegisterEventHandler, parameter[constant[CallStatus], name[self]._CallStatus]]
<ast.Delete object at 0x7da1b0666d10> | keyword[def] identifier[Connect] ( identifier[self] , identifier[Skype] ):
literal[string]
identifier[self] . identifier[_Skype] = identifier[Skype]
identifier[self] . identifier[_Skype] . identifier[RegisterEventHandler] ( literal[string] , identifier[self] . identifier[_CallStatus] )
keyword[del] identifier[self] . identifier[_Channels] [:] | def Connect(self, Skype):
"""Connects this call channel manager instance to Skype. This is the first thing you should
do after creating this object.
:Parameters:
Skype : `Skype`
The Skype object.
:see: `Disconnect`
"""
self._Skype = Skype
self._Skype.RegisterEventHandler('CallStatus', self._CallStatus)
del self._Channels[:] |
def bosonic_constraints(a):
    """Return a set of constraints that define bosonic ladder operators.

    NOTE(review): the original docstring said "fermionic", but the function
    name and the substitutions below (``a_i a_i^dag -> 1 + a_i^dag a_i`` with
    a plus sign, and plain reordering of cross-mode products) are the bosonic
    canonical commutation relations.

    :param a: The non-Hermitian variables.
    :type a: list of :class:`sympy.physics.quantum.operator.Operator`.
    :returns: a dict of substitutions.
    """
    substitutions = {}
    for i, ai in enumerate(a):
        # Same-mode relation: a_i a_i^dag = 1 + a_i^dag a_i.
        substitutions[ai * Dagger(ai)] = 1.0 + Dagger(ai) * ai
        # Operators acting on distinct modes commute; normal-order each
        # cross product (i < j) so all products share a canonical form.
        for aj in a[i+1:]:
            # substitutions[ai*Dagger(aj)] = -Dagger(ai)*aj
            substitutions[ai*Dagger(aj)] = Dagger(aj)*ai
            substitutions[Dagger(ai)*aj] = aj*Dagger(ai)
            substitutions[ai*aj] = aj*ai
            substitutions[Dagger(ai) * Dagger(aj)] = Dagger(aj) * Dagger(ai)
    return substitutions
constant[Return a set of constraints that define fermionic ladder operators.
:param a: The non-Hermitian variables.
:type a: list of :class:`sympy.physics.quantum.operator.Operator`.
:returns: a dict of substitutions.
]
variable[substitutions] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c795870>, <ast.Name object at 0x7da20c796650>]]] in starred[call[name[enumerate], parameter[name[a]]]] begin[:]
call[name[substitutions]][binary_operation[name[ai] * call[name[Dagger], parameter[name[ai]]]]] assign[=] binary_operation[constant[1.0] + binary_operation[call[name[Dagger], parameter[name[ai]]] * name[ai]]]
for taget[name[aj]] in starred[call[name[a]][<ast.Slice object at 0x7da2054a6710>]] begin[:]
call[name[substitutions]][binary_operation[name[ai] * call[name[Dagger], parameter[name[aj]]]]] assign[=] binary_operation[call[name[Dagger], parameter[name[aj]]] * name[ai]]
call[name[substitutions]][binary_operation[call[name[Dagger], parameter[name[ai]]] * name[aj]]] assign[=] binary_operation[name[aj] * call[name[Dagger], parameter[name[ai]]]]
call[name[substitutions]][binary_operation[name[ai] * name[aj]]] assign[=] binary_operation[name[aj] * name[ai]]
call[name[substitutions]][binary_operation[call[name[Dagger], parameter[name[ai]]] * call[name[Dagger], parameter[name[aj]]]]] assign[=] binary_operation[call[name[Dagger], parameter[name[aj]]] * call[name[Dagger], parameter[name[ai]]]]
return[name[substitutions]] | keyword[def] identifier[bosonic_constraints] ( identifier[a] ):
literal[string]
identifier[substitutions] ={}
keyword[for] identifier[i] , identifier[ai] keyword[in] identifier[enumerate] ( identifier[a] ):
identifier[substitutions] [ identifier[ai] * identifier[Dagger] ( identifier[ai] )]= literal[int] + identifier[Dagger] ( identifier[ai] )* identifier[ai]
keyword[for] identifier[aj] keyword[in] identifier[a] [ identifier[i] + literal[int] :]:
identifier[substitutions] [ identifier[ai] * identifier[Dagger] ( identifier[aj] )]= identifier[Dagger] ( identifier[aj] )* identifier[ai]
identifier[substitutions] [ identifier[Dagger] ( identifier[ai] )* identifier[aj] ]= identifier[aj] * identifier[Dagger] ( identifier[ai] )
identifier[substitutions] [ identifier[ai] * identifier[aj] ]= identifier[aj] * identifier[ai]
identifier[substitutions] [ identifier[Dagger] ( identifier[ai] )* identifier[Dagger] ( identifier[aj] )]= identifier[Dagger] ( identifier[aj] )* identifier[Dagger] ( identifier[ai] )
keyword[return] identifier[substitutions] | def bosonic_constraints(a):
"""Return a set of constraints that define fermionic ladder operators.
:param a: The non-Hermitian variables.
:type a: list of :class:`sympy.physics.quantum.operator.Operator`.
:returns: a dict of substitutions.
"""
substitutions = {}
for (i, ai) in enumerate(a):
substitutions[ai * Dagger(ai)] = 1.0 + Dagger(ai) * ai
for aj in a[i + 1:]:
# substitutions[ai*Dagger(aj)] = -Dagger(ai)*aj
substitutions[ai * Dagger(aj)] = Dagger(aj) * ai
substitutions[Dagger(ai) * aj] = aj * Dagger(ai)
substitutions[ai * aj] = aj * ai
substitutions[Dagger(ai) * Dagger(aj)] = Dagger(aj) * Dagger(ai) # depends on [control=['for'], data=['aj']] # depends on [control=['for'], data=[]]
return substitutions |
def windows_dir_format(host_dir, user):
    """Format a string for the location of the user's folder on the Windows (TJ03) fileserver."""
    # Without a user (or a grade on the user) there is nothing to substitute.
    if not (user and user.grade):
        return host_dir
    grade = int(user.grade)
    # High-school grades (9-12) get a per-user subdirectory; others get none.
    win_path = "/{}/".format(user.username) if 9 <= grade <= 12 else ""
    return host_dir.replace("{win}", win_path)
constant[Format a string for the location of the user's folder on the Windows (TJ03) fileserver.]
if <ast.BoolOp object at 0x7da1b04bc460> begin[:]
variable[grade] assign[=] call[name[int], parameter[name[user].grade]]
if compare[name[grade] in call[name[range], parameter[constant[9], constant[13]]]] begin[:]
variable[win_path] assign[=] call[constant[/{}/].format, parameter[name[user].username]]
return[call[name[host_dir].replace, parameter[constant[{win}], name[win_path]]]] | keyword[def] identifier[windows_dir_format] ( identifier[host_dir] , identifier[user] ):
literal[string]
keyword[if] identifier[user] keyword[and] identifier[user] . identifier[grade] :
identifier[grade] = identifier[int] ( identifier[user] . identifier[grade] )
keyword[else] :
keyword[return] identifier[host_dir]
keyword[if] identifier[grade] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[win_path] = literal[string] . identifier[format] ( identifier[user] . identifier[username] )
keyword[else] :
identifier[win_path] = literal[string]
keyword[return] identifier[host_dir] . identifier[replace] ( literal[string] , identifier[win_path] ) | def windows_dir_format(host_dir, user):
"""Format a string for the location of the user's folder on the Windows (TJ03) fileserver."""
if user and user.grade:
grade = int(user.grade) # depends on [control=['if'], data=[]]
else:
return host_dir
if grade in range(9, 13):
win_path = '/{}/'.format(user.username) # depends on [control=['if'], data=[]]
else:
win_path = ''
return host_dir.replace('{win}', win_path) |
def _find_v1_settings(self, settings):
    """Parse a v1 module_settings.json file.

    The v1 layout keeps all module information under a ``modules``
    dictionary (optionally with a top-level ``module_name`` key) that could
    in principle describe several modules; exactly one module per directory
    is supported, and a DataError is raised otherwise.
    """
    if 'module_name' in settings:
        modname = settings['module_name']
        # A missing or empty 'modules' mapping is falsy either way.
        if not settings.get('modules'):
            raise DataError("No modules defined in module_settings.json file")
        if len(settings['modules']) > 1:
            raise DataError("Multiple modules defined in module_settings.json file",
                            modules=list(settings['modules']))
    else:
        # No explicit name: take the first (and only expected) entry.
        modname = list(settings['modules'])[0]
        if modname not in settings['modules']:
            raise DataError("Module name does not correspond with an entry in the modules directory",
                            name=modname, modules=list(settings['modules']))
    release_info = self._load_release_info(settings)
    modsettings = settings['modules'][modname]
    architectures = settings.get('architectures', {})
    targets = settings.get('module_targets', {}).get(modname, [])
    return TileInfo(modname, modsettings, architectures, targets, release_info)
constant[Parse a v1 module_settings.json file.
V1 is the older file format that requires a modules dictionary with a
module_name and modules key that could in theory hold information on
multiple modules in a single directory.
]
if compare[constant[module_name] in name[settings]] begin[:]
variable[modname] assign[=] call[name[settings]][constant[module_name]]
if <ast.BoolOp object at 0x7da1b26affa0> begin[:]
<ast.Raise object at 0x7da1b26aff10>
if compare[name[modname] <ast.NotIn object at 0x7da2590d7190> call[name[settings]][constant[modules]]] begin[:]
<ast.Raise object at 0x7da1b26ada50>
variable[release_info] assign[=] call[name[self]._load_release_info, parameter[name[settings]]]
variable[modsettings] assign[=] call[call[name[settings]][constant[modules]]][name[modname]]
variable[architectures] assign[=] call[name[settings].get, parameter[constant[architectures], dictionary[[], []]]]
variable[target_defs] assign[=] call[name[settings].get, parameter[constant[module_targets], dictionary[[], []]]]
variable[targets] assign[=] call[name[target_defs].get, parameter[name[modname], list[[]]]]
return[call[name[TileInfo], parameter[name[modname], name[modsettings], name[architectures], name[targets], name[release_info]]]] | keyword[def] identifier[_find_v1_settings] ( identifier[self] , identifier[settings] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[settings] :
identifier[modname] = identifier[settings] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[settings] keyword[or] identifier[len] ( identifier[settings] [ literal[string] ])== literal[int] :
keyword[raise] identifier[DataError] ( literal[string] )
keyword[elif] identifier[len] ( identifier[settings] [ literal[string] ])> literal[int] :
keyword[raise] identifier[DataError] ( literal[string] ,
identifier[modules] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[settings] [ literal[string] ]])
keyword[else] :
identifier[modname] = identifier[list] ( identifier[settings] [ literal[string] ])[ literal[int] ]
keyword[if] identifier[modname] keyword[not] keyword[in] identifier[settings] [ literal[string] ]:
keyword[raise] identifier[DataError] ( literal[string] ,
identifier[name] = identifier[modname] , identifier[modules] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[settings] [ literal[string] ]])
identifier[release_info] = identifier[self] . identifier[_load_release_info] ( identifier[settings] )
identifier[modsettings] = identifier[settings] [ literal[string] ][ identifier[modname] ]
identifier[architectures] = identifier[settings] . identifier[get] ( literal[string] ,{})
identifier[target_defs] = identifier[settings] . identifier[get] ( literal[string] ,{})
identifier[targets] = identifier[target_defs] . identifier[get] ( identifier[modname] ,[])
keyword[return] identifier[TileInfo] ( identifier[modname] , identifier[modsettings] , identifier[architectures] , identifier[targets] , identifier[release_info] ) | def _find_v1_settings(self, settings):
"""Parse a v1 module_settings.json file.
V1 is the older file format that requires a modules dictionary with a
module_name and modules key that could in theory hold information on
multiple modules in a single directory.
"""
if 'module_name' in settings:
modname = settings['module_name'] # depends on [control=['if'], data=['settings']]
if 'modules' not in settings or len(settings['modules']) == 0:
raise DataError('No modules defined in module_settings.json file') # depends on [control=['if'], data=[]]
elif len(settings['modules']) > 1:
raise DataError('Multiple modules defined in module_settings.json file', modules=[x for x in settings['modules']]) # depends on [control=['if'], data=[]]
else:
modname = list(settings['modules'])[0]
if modname not in settings['modules']:
raise DataError('Module name does not correspond with an entry in the modules directory', name=modname, modules=[x for x in settings['modules']]) # depends on [control=['if'], data=['modname']]
release_info = self._load_release_info(settings)
modsettings = settings['modules'][modname]
architectures = settings.get('architectures', {})
target_defs = settings.get('module_targets', {})
targets = target_defs.get(modname, [])
return TileInfo(modname, modsettings, architectures, targets, release_info) |
def receive_empty(self, message):
    """
    Pair an incoming empty message (ACK/RST/empty CON) with its transaction.

    Marks the matched transaction's request/response as acknowledged or
    rejected according to the message type and stops any pending
    retransmission timer.

    :type message: Message
    :param message: the received message
    :rtype : Transaction
    :return: the transaction to which the message belongs to, or None if no
        transaction matches
    """
    logger.debug("receive_empty - " + str(message))
    try:
        host, port = message.source
    except AttributeError:
        # A message without a source cannot be matched to a transaction.
        return
    key_mid = str_append_hash(host, port, message.mid)
    key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid)
    key_token = str_append_hash(host, port, message.token)
    key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token)
    # Look up by MID first, then token; fall back to the multicast address
    # for exchanges that were started towards ALL_COAP_NODES.
    if key_mid in self._transactions:
        transaction = self._transactions[key_mid]
    elif key_token in self._transactions_token:
        transaction = self._transactions_token[key_token]
    elif key_mid_multicast in self._transactions:
        transaction = self._transactions[key_mid_multicast]
    elif key_token_multicast in self._transactions_token:
        transaction = self._transactions_token[key_token_multicast]
    else:
        logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port))
        return None
    if message.type == defines.Types["ACK"]:
        if not transaction.request.acknowledged:
            transaction.request.acknowledged = True
        elif (transaction.response is not None) and (not transaction.response.acknowledged):
            transaction.response.acknowledged = True
    elif message.type == defines.Types["RST"]:
        if not transaction.request.acknowledged:
            transaction.request.rejected = True
        # Bug fix: guard against transaction.response being None (an RST can
        # arrive before any response was stored), mirroring the ACK branch.
        elif (transaction.response is not None) and (not transaction.response.acknowledged):
            transaction.response.rejected = True
    elif message.type == defines.Types["CON"]:
        # implicit ACK (might have been lost)
        logger.debug("Implicit ACK on received CON for waiting transaction")
        transaction.request.acknowledged = True
    else:
        logger.warning("Unhandled message type...")
    if transaction.retransmit_stop is not None:
        transaction.retransmit_stop.set()
    return transaction
constant[
Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to
]
call[name[logger].debug, parameter[binary_operation[constant[receive_empty - ] + call[name[str], parameter[name[message]]]]]]
<ast.Try object at 0x7da1b0617b80>
variable[key_mid] assign[=] call[name[str_append_hash], parameter[name[host], name[port], name[message].mid]]
variable[key_mid_multicast] assign[=] call[name[str_append_hash], parameter[name[defines].ALL_COAP_NODES, name[port], name[message].mid]]
variable[key_token] assign[=] call[name[str_append_hash], parameter[name[host], name[port], name[message].token]]
variable[key_token_multicast] assign[=] call[name[str_append_hash], parameter[name[defines].ALL_COAP_NODES, name[port], name[message].token]]
if compare[name[key_mid] in call[name[list], parameter[call[name[self]._transactions.keys, parameter[]]]]] begin[:]
variable[transaction] assign[=] call[name[self]._transactions][name[key_mid]]
if compare[name[message].type equal[==] call[name[defines].Types][constant[ACK]]] begin[:]
if <ast.UnaryOp object at 0x7da18f722320> begin[:]
name[transaction].request.acknowledged assign[=] constant[True]
if compare[name[transaction].retransmit_stop is_not constant[None]] begin[:]
call[name[transaction].retransmit_stop.set, parameter[]]
return[name[transaction]] | keyword[def] identifier[receive_empty] ( identifier[self] , identifier[message] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] + identifier[str] ( identifier[message] ))
keyword[try] :
identifier[host] , identifier[port] = identifier[message] . identifier[source]
keyword[except] identifier[AttributeError] :
keyword[return]
identifier[key_mid] = identifier[str_append_hash] ( identifier[host] , identifier[port] , identifier[message] . identifier[mid] )
identifier[key_mid_multicast] = identifier[str_append_hash] ( identifier[defines] . identifier[ALL_COAP_NODES] , identifier[port] , identifier[message] . identifier[mid] )
identifier[key_token] = identifier[str_append_hash] ( identifier[host] , identifier[port] , identifier[message] . identifier[token] )
identifier[key_token_multicast] = identifier[str_append_hash] ( identifier[defines] . identifier[ALL_COAP_NODES] , identifier[port] , identifier[message] . identifier[token] )
keyword[if] identifier[key_mid] keyword[in] identifier[list] ( identifier[self] . identifier[_transactions] . identifier[keys] ()):
identifier[transaction] = identifier[self] . identifier[_transactions] [ identifier[key_mid] ]
keyword[elif] identifier[key_token] keyword[in] identifier[self] . identifier[_transactions_token] :
identifier[transaction] = identifier[self] . identifier[_transactions_token] [ identifier[key_token] ]
keyword[elif] identifier[key_mid_multicast] keyword[in] identifier[list] ( identifier[self] . identifier[_transactions] . identifier[keys] ()):
identifier[transaction] = identifier[self] . identifier[_transactions] [ identifier[key_mid_multicast] ]
keyword[elif] identifier[key_token_multicast] keyword[in] identifier[self] . identifier[_transactions_token] :
identifier[transaction] = identifier[self] . identifier[_transactions_token] [ identifier[key_token_multicast] ]
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] + identifier[str] ( identifier[host] )+ literal[string] + identifier[str] ( identifier[port] ))
keyword[return] keyword[None]
keyword[if] identifier[message] . identifier[type] == identifier[defines] . identifier[Types] [ literal[string] ]:
keyword[if] keyword[not] identifier[transaction] . identifier[request] . identifier[acknowledged] :
identifier[transaction] . identifier[request] . identifier[acknowledged] = keyword[True]
keyword[elif] ( identifier[transaction] . identifier[response] keyword[is] keyword[not] keyword[None] ) keyword[and] ( keyword[not] identifier[transaction] . identifier[response] . identifier[acknowledged] ):
identifier[transaction] . identifier[response] . identifier[acknowledged] = keyword[True]
keyword[elif] identifier[message] . identifier[type] == identifier[defines] . identifier[Types] [ literal[string] ]:
keyword[if] keyword[not] identifier[transaction] . identifier[request] . identifier[acknowledged] :
identifier[transaction] . identifier[request] . identifier[rejected] = keyword[True]
keyword[elif] keyword[not] identifier[transaction] . identifier[response] . identifier[acknowledged] :
identifier[transaction] . identifier[response] . identifier[rejected] = keyword[True]
keyword[elif] identifier[message] . identifier[type] == identifier[defines] . identifier[Types] [ literal[string] ]:
identifier[logger] . identifier[debug] ( literal[string] )
identifier[transaction] . identifier[request] . identifier[acknowledged] = keyword[True]
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[if] identifier[transaction] . identifier[retransmit_stop] keyword[is] keyword[not] keyword[None] :
identifier[transaction] . identifier[retransmit_stop] . identifier[set] ()
keyword[return] identifier[transaction] | def receive_empty(self, message):
"""
Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to
"""
logger.debug('receive_empty - ' + str(message))
try:
(host, port) = message.source # depends on [control=['try'], data=[]]
except AttributeError:
return # depends on [control=['except'], data=[]]
key_mid = str_append_hash(host, port, message.mid)
key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid)
key_token = str_append_hash(host, port, message.token)
key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token)
if key_mid in list(self._transactions.keys()):
transaction = self._transactions[key_mid] # depends on [control=['if'], data=['key_mid']]
elif key_token in self._transactions_token:
transaction = self._transactions_token[key_token] # depends on [control=['if'], data=['key_token']]
elif key_mid_multicast in list(self._transactions.keys()):
transaction = self._transactions[key_mid_multicast] # depends on [control=['if'], data=['key_mid_multicast']]
elif key_token_multicast in self._transactions_token:
transaction = self._transactions_token[key_token_multicast] # depends on [control=['if'], data=['key_token_multicast']]
else:
logger.warning('Un-Matched incoming empty message ' + str(host) + ':' + str(port))
return None
if message.type == defines.Types['ACK']:
if not transaction.request.acknowledged:
transaction.request.acknowledged = True # depends on [control=['if'], data=[]]
elif transaction.response is not None and (not transaction.response.acknowledged):
transaction.response.acknowledged = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif message.type == defines.Types['RST']:
if not transaction.request.acknowledged:
transaction.request.rejected = True # depends on [control=['if'], data=[]]
elif not transaction.response.acknowledged:
transaction.response.rejected = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif message.type == defines.Types['CON']:
#implicit ACK (might have been lost)
logger.debug('Implicit ACK on received CON for waiting transaction')
transaction.request.acknowledged = True # depends on [control=['if'], data=[]]
else:
logger.warning('Unhandled message type...')
if transaction.retransmit_stop is not None:
transaction.retransmit_stop.set() # depends on [control=['if'], data=[]]
return transaction |
def filter_zone(self, data):
    """Return True when *data* describes this zone (matching domain and,
    when configured, the expected private/public setting)."""
    if self.private_zone is not None:
        expected_private = self.str2bool(self.private_zone)
        if data['Config']['PrivateZone'] != expected_private:
            return False
    # The Route53-style zone name carries a trailing dot.
    return data['Name'] == '{0}.'.format(self.domain)
constant[Check if a zone is private]
if compare[name[self].private_zone is_not constant[None]] begin[:]
if compare[call[call[name[data]][constant[Config]]][constant[PrivateZone]] not_equal[!=] call[name[self].str2bool, parameter[name[self].private_zone]]] begin[:]
return[constant[False]]
if compare[call[name[data]][constant[Name]] not_equal[!=] call[constant[{0}.].format, parameter[name[self].domain]]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[filter_zone] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[self] . identifier[private_zone] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[data] [ literal[string] ][ literal[string] ]!= identifier[self] . identifier[str2bool] ( identifier[self] . identifier[private_zone] ):
keyword[return] keyword[False]
keyword[if] identifier[data] [ literal[string] ]!= literal[string] . identifier[format] ( identifier[self] . identifier[domain] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def filter_zone(self, data):
"""Check if a zone is private"""
if self.private_zone is not None:
if data['Config']['PrivateZone'] != self.str2bool(self.private_zone):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if data['Name'] != '{0}.'.format(self.domain):
return False # depends on [control=['if'], data=[]]
return True |
def to_type(upcast_type, varlist):
    """Loop over all elements of varlist and convert them to upcasttype.
    Parameters
    ----------
    upcast_type : data type
        e.g. complex, float64 or complex128
    varlist : list
        list may contain arrays, mat's, sparse matrices, or scalars
        the elements may be float, int or complex
    Returns
    -------
    Returns upcast-ed varlist to upcast_type.  Note that varlist is
    modified in place; the same list object is returned for convenience.
    Notes
    -----
    Useful when harmonizing the types of variables, such as
    if A and b are complex, but x,y and z are not.
    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.util.utils import to_type
    >>> from scipy.sparse.sputils import upcast
    >>> x = np.ones((5,1))
    >>> y = 2.0j*np.ones((5,1))
    >>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
    """
    for i, var in enumerate(varlist):
        if np.isscalar(var):
            # Scalars: round-trip through a one-element array to coerce the type.
            varlist[i] = np.array([var], upcast_type)[0]
        else:
            # Arrays and sparse/dense matrices: cast only when the dtype differs.
            try:
                if var.dtype != upcast_type:
                    varlist[i] = var.astype(upcast_type)
            except AttributeError:
                # Object has neither dtype nor astype; leave it unchanged.
                warn('Failed to cast in to_type')
    return varlist
constant[Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[varlist]]]]]] begin[:]
if call[name[np].isscalar, parameter[call[name[varlist]][name[i]]]] begin[:]
call[name[varlist]][name[i]] assign[=] call[call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da18fe90d60>]], name[upcast_type]]]][constant[0]]
return[name[varlist]] | keyword[def] identifier[to_type] ( identifier[upcast_type] , identifier[varlist] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[varlist] )):
keyword[if] identifier[np] . identifier[isscalar] ( identifier[varlist] [ identifier[i] ]):
identifier[varlist] [ identifier[i] ]= identifier[np] . identifier[array] ([ identifier[varlist] [ identifier[i] ]], identifier[upcast_type] )[ literal[int] ]
keyword[else] :
keyword[try] :
keyword[if] identifier[varlist] [ identifier[i] ]. identifier[dtype] != identifier[upcast_type] :
identifier[varlist] [ identifier[i] ]= identifier[varlist] [ identifier[i] ]. identifier[astype] ( identifier[upcast_type] )
keyword[except] identifier[AttributeError] :
identifier[warn] ( literal[string] )
keyword[pass]
keyword[return] identifier[varlist] | def to_type(upcast_type, varlist):
"""Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0] # depends on [control=['if'], data=[]]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type) # depends on [control=['if'], data=['upcast_type']] # depends on [control=['try'], data=[]]
except AttributeError:
warn('Failed to cast in to_type')
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']]
return varlist |
def parse(self, string):
    '''Parse the given string against this Grammar.

    Returns a Result object with the following attributes:
    - is_valid: True when the entire string was parsed successfully
      by the Grammar.
    - pos: position in the string where parsing stopped
      (equal to the end of the string when is_valid is True).
    - expecting: a list of possible elements at position 'pos'
      in the string.
    - tree: the parse tree holding the structured result for the
      given string.
    '''
    # Reset per-parse state.
    self._string = string
    self._expecting = Expecting()
    self._cached_kw_match.clear()
    self._len_string = len(string)
    self._pos = None

    tree = Node(self._element, string, 0, self._len_string)
    walk_result = self._walk(
        self._element,
        0,
        tree.children,
        self._element,
        True)
    node_res = Result(*walk_result)

    # Anything left over after the parsed portion (ignoring whitespace)?
    leftover = self._string[node_res.pos:].lstrip()
    if leftover:
        # Unconsumed input invalidates the parse.
        if node_res.is_valid:
            node_res.is_valid = False
        # Offer end_of_statement as a possibility when nothing is required.
        if not self._expecting.required:
            self._expecting.set_mode_required(node_res.pos, True)
            self._expecting.update(end_of_statement, node_res.pos)

    node_res.expecting = self._expecting.get_expecting()
    # On failure, report the position where an expectation was unmet.
    if not node_res.is_valid:
        node_res.pos = self._expecting.pos
    node_res.tree = tree
    return node_res
constant[Parse some string to the Grammar.
Returns a nodeResult with the following attributes:
- is_valid: True when the string is successfully parsed
by the Grammar.
- pos: position in the string where parsing ended.
(this is the end of the string when is_valid is True)
- expecting: a list containing possible elements at position
'pos' in the string.
- tree: the parse_tree containing a structured
result for the given string.
]
name[self]._string assign[=] name[string]
name[self]._expecting assign[=] call[name[Expecting], parameter[]]
call[name[self]._cached_kw_match.clear, parameter[]]
name[self]._len_string assign[=] call[name[len], parameter[name[string]]]
name[self]._pos assign[=] constant[None]
variable[tree] assign[=] call[name[Node], parameter[name[self]._element, name[string], constant[0], name[self]._len_string]]
variable[node_res] assign[=] call[name[Result], parameter[<ast.Starred object at 0x7da204622b30>]]
variable[rest] assign[=] call[call[name[self]._string][<ast.Slice object at 0x7da204621ea0>].lstrip, parameter[]]
if <ast.BoolOp object at 0x7da204621c00> begin[:]
name[node_res].is_valid assign[=] constant[False]
if <ast.BoolOp object at 0x7da20c76ec80> begin[:]
call[name[self]._expecting.set_mode_required, parameter[name[node_res].pos, constant[True]]]
call[name[self]._expecting.update, parameter[name[end_of_statement], name[node_res].pos]]
name[node_res].expecting assign[=] call[name[self]._expecting.get_expecting, parameter[]]
if <ast.UnaryOp object at 0x7da18dc982e0> begin[:]
name[node_res].pos assign[=] name[self]._expecting.pos
name[node_res].tree assign[=] name[tree]
return[name[node_res]] | keyword[def] identifier[parse] ( identifier[self] , identifier[string] ):
literal[string]
identifier[self] . identifier[_string] = identifier[string]
identifier[self] . identifier[_expecting] = identifier[Expecting] ()
identifier[self] . identifier[_cached_kw_match] . identifier[clear] ()
identifier[self] . identifier[_len_string] = identifier[len] ( identifier[string] )
identifier[self] . identifier[_pos] = keyword[None]
identifier[tree] = identifier[Node] ( identifier[self] . identifier[_element] , identifier[string] , literal[int] , identifier[self] . identifier[_len_string] )
identifier[node_res] = identifier[Result] (* identifier[self] . identifier[_walk] (
identifier[self] . identifier[_element] ,
literal[int] ,
identifier[tree] . identifier[children] ,
identifier[self] . identifier[_element] ,
keyword[True] ))
identifier[rest] = identifier[self] . identifier[_string] [ identifier[node_res] . identifier[pos] :]. identifier[lstrip] ()
keyword[if] identifier[node_res] . identifier[is_valid] keyword[and] identifier[rest] :
identifier[node_res] . identifier[is_valid] = keyword[False]
keyword[if] keyword[not] identifier[self] . identifier[_expecting] . identifier[required] keyword[and] identifier[rest] :
identifier[self] . identifier[_expecting] . identifier[set_mode_required] ( identifier[node_res] . identifier[pos] , keyword[True] )
identifier[self] . identifier[_expecting] . identifier[update] ( identifier[end_of_statement] , identifier[node_res] . identifier[pos] )
identifier[node_res] . identifier[expecting] = identifier[self] . identifier[_expecting] . identifier[get_expecting] ()
keyword[if] keyword[not] identifier[node_res] . identifier[is_valid] :
identifier[node_res] . identifier[pos] = identifier[self] . identifier[_expecting] . identifier[pos]
identifier[node_res] . identifier[tree] = identifier[tree]
keyword[return] identifier[node_res] | def parse(self, string):
"""Parse some string to the Grammar.
Returns a nodeResult with the following attributes:
- is_valid: True when the string is successfully parsed
by the Grammar.
- pos: position in the string where parsing ended.
(this is the end of the string when is_valid is True)
- expecting: a list containing possible elements at position
'pos' in the string.
- tree: the parse_tree containing a structured
result for the given string.
"""
self._string = string
self._expecting = Expecting()
self._cached_kw_match.clear()
self._len_string = len(string)
self._pos = None
tree = Node(self._element, string, 0, self._len_string)
node_res = Result(*self._walk(self._element, 0, tree.children, self._element, True))
# get rest if anything
rest = self._string[node_res.pos:].lstrip()
# set is_valid to False if we have 'rest' left.
if node_res.is_valid and rest:
node_res.is_valid = False # depends on [control=['if'], data=[]]
# add end_of_statement to expecting if this is possible
if not self._expecting.required and rest:
self._expecting.set_mode_required(node_res.pos, True)
self._expecting.update(end_of_statement, node_res.pos) # depends on [control=['if'], data=[]]
node_res.expecting = self._expecting.get_expecting()
# add expecting and correct pos to node_res if node_res is not valid
if not node_res.is_valid:
node_res.pos = self._expecting.pos # depends on [control=['if'], data=[]]
node_res.tree = tree
return node_res |
def event_types(self):
    """Return the 'type' attribute of every event of the selected rater.

    Raises
    ------
    IndexError
        When there is no selected rater
    """
    try:
        found = self.rater.find('events')
    except AttributeError:
        # No rater selected yet (self.rater is missing/None-like).
        raise IndexError('You need to have at least one rater')
    types = []
    for event in found:
        types.append(event.get('type'))
    return types
constant[
Raises
------
IndexError
When there is no selected rater
]
<ast.Try object at 0x7da1b0d76830>
return[<ast.ListComp object at 0x7da207f03160>] | keyword[def] identifier[event_types] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[events] = identifier[self] . identifier[rater] . identifier[find] ( literal[string] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[IndexError] ( literal[string] )
keyword[return] [ identifier[x] . identifier[get] ( literal[string] ) keyword[for] identifier[x] keyword[in] identifier[events] ] | def event_types(self):
"""
Raises
------
IndexError
When there is no selected rater
"""
try:
events = self.rater.find('events') # depends on [control=['try'], data=[]]
except AttributeError:
raise IndexError('You need to have at least one rater') # depends on [control=['except'], data=[]]
return [x.get('type') for x in events] |
def manageInputCopies(filelist, **workinplace):
    """
    Creates copies of all input images in a sub-directory.

    The copies are made prior to any processing being done to the images at all,
    including updating the WCS keywords. If there are already copies present,
    they will NOT be overwritten, but instead will be used to over-write the
    current working copies.

    Parameters
    ----------
    filelist : list of str
        Names of the input files, relative to the current working directory.
    workinplace : dict
        Boolean flags; each defaults to False when absent:
        ``overwrite`` forcibly re-archives every original,
        ``preserve`` archives an original only when no archive exists yet,
        ``restore`` copies an existing archive back over the working file.
    """
    # Find out what directory is being used for processing
    workingdir = os.getcwd()
    # Only create sub-directory for copies of inputs, if copies are requested
    # Create name of sub-directory for copies
    origdir = os.path.join(workingdir, 'OrIg_files')
    # Missing flags now behave as False instead of raising KeyError.
    overwrite = workinplace.get('overwrite', False)
    preserve = workinplace.get('preserve', False)
    restore = workinplace.get('restore', False)

    if overwrite or preserve:
        # if sub-directory does not exist yet, create it
        if not os.path.exists(origdir):
            os.mkdir(origdir)

    printMsg = True
    # check to see if copies already exist for each file
    for fname in filelist:
        copymade = False  # If a copy is made, no need to restore
        copyname = os.path.join(origdir, fname)
        short_copyname = os.path.join('OrIg_files', fname)
        if overwrite:
            print('Forcibly archiving original of: ', fname, 'as ', short_copyname)
            # make a copy of the file in the sub-directory
            if os.path.exists(copyname):
                os.chmod(copyname, 0o666)  # make any existing archive writable
            shutil.copy(fname, copyname)
            os.chmod(copyname, 0o444)  # read-only protects the archive
            if printMsg:
                print('\nTurning OFF "preserve" and "restore" actions...\n')
                printMsg = False  # We only need to print this one time...
            copymade = True
        if preserve and not os.path.exists(copyname) and not overwrite:
            # Preserving a copy of the input, but only if not already archived
            print('Preserving original of: ', fname, 'as ', short_copyname)
            # make a copy of the file in the sub-directory
            shutil.copy(fname, copyname)
            os.chmod(copyname, 0o444)  # read-only protects the archive
            copymade = True
        if restore and not copymade:
            if os.path.exists(copyname) and not overwrite:
                print('Restoring original input for ', fname, ' from ', short_copyname)
                # replace current files with original version
                os.chmod(fname, 0o666)  # ensure working copy is writable
                shutil.copy(copyname, fname)
                os.chmod(fname, 0o666)
constant[
Creates copies of all input images in a sub-directory.
The copies are made prior to any processing being done to the images at all,
including updating the WCS keywords. If there are already copies present,
they will NOT be overwritten, but instead will be used to over-write the
current working copies.
]
variable[workingdir] assign[=] call[name[os].getcwd, parameter[]]
variable[origdir] assign[=] call[name[os].path.join, parameter[name[workingdir], constant[OrIg_files]]]
if <ast.BoolOp object at 0x7da1b1b5bb80> begin[:]
if <ast.UnaryOp object at 0x7da1b1b5ba00> begin[:]
call[name[os].mkdir, parameter[name[origdir]]]
variable[printMsg] assign[=] constant[True]
for taget[name[fname]] in starred[name[filelist]] begin[:]
variable[copymade] assign[=] constant[False]
variable[copyname] assign[=] call[name[os].path.join, parameter[name[origdir], name[fname]]]
variable[short_copyname] assign[=] call[name[os].path.join, parameter[constant[OrIg_files], name[fname]]]
if call[name[workinplace]][constant[overwrite]] begin[:]
call[name[print], parameter[constant[Forcibly archiving original of: ], name[fname], constant[as ], name[short_copyname]]]
if call[name[os].path.exists, parameter[name[copyname]]] begin[:]
call[name[os].chmod, parameter[name[copyname], constant[438]]]
call[name[shutil].copy, parameter[name[fname], name[copyname]]]
call[name[os].chmod, parameter[name[copyname], constant[292]]]
if name[printMsg] begin[:]
call[name[print], parameter[constant[
Turning OFF "preserve" and "restore" actions...
]]]
variable[printMsg] assign[=] constant[False]
variable[copymade] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1b5a8c0> begin[:]
call[name[print], parameter[constant[Preserving original of: ], name[fname], constant[as ], name[short_copyname]]]
call[name[shutil].copy, parameter[name[fname], name[copyname]]]
call[name[os].chmod, parameter[name[copyname], constant[292]]]
variable[copymade] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1b5a110> begin[:]
if <ast.BoolOp object at 0x7da1b1b594b0> begin[:]
call[name[print], parameter[constant[Restoring original input for ], name[fname], constant[ from ], name[short_copyname]]]
call[name[os].chmod, parameter[name[fname], constant[438]]]
call[name[shutil].copy, parameter[name[copyname], name[fname]]]
call[name[os].chmod, parameter[name[fname], constant[438]]] | keyword[def] identifier[manageInputCopies] ( identifier[filelist] ,** identifier[workinplace] ):
literal[string]
identifier[workingdir] = identifier[os] . identifier[getcwd] ()
identifier[origdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[workingdir] , literal[string] )
keyword[if] identifier[workinplace] [ literal[string] ] keyword[or] identifier[workinplace] [ literal[string] ]:
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[origdir] ):
identifier[os] . identifier[mkdir] ( identifier[origdir] )
identifier[printMsg] = keyword[True]
keyword[for] identifier[fname] keyword[in] identifier[filelist] :
identifier[copymade] = keyword[False]
identifier[copyname] = identifier[os] . identifier[path] . identifier[join] ( identifier[origdir] , identifier[fname] )
identifier[short_copyname] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[fname] )
keyword[if] identifier[workinplace] [ literal[string] ]:
identifier[print] ( literal[string] , identifier[fname] , literal[string] , identifier[short_copyname] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[copyname] ): identifier[os] . identifier[chmod] ( identifier[copyname] , literal[int] )
identifier[shutil] . identifier[copy] ( identifier[fname] , identifier[copyname] )
identifier[os] . identifier[chmod] ( identifier[copyname] , literal[int] )
keyword[if] identifier[printMsg] :
identifier[print] ( literal[string] )
identifier[printMsg] = keyword[False]
identifier[copymade] = keyword[True]
keyword[if] ( identifier[workinplace] [ literal[string] ] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[copyname] )) keyword[and] keyword[not] identifier[workinplace] [ literal[string] ]:
identifier[print] ( literal[string] , identifier[fname] , literal[string] , identifier[short_copyname] )
identifier[shutil] . identifier[copy] ( identifier[fname] , identifier[copyname] )
identifier[os] . identifier[chmod] ( identifier[copyname] , literal[int] )
identifier[copymade] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[workinplace] keyword[and] keyword[not] identifier[copymade] :
keyword[if] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[copyname] ) keyword[and] identifier[workinplace] [ literal[string] ]) keyword[and] keyword[not] identifier[workinplace] [ literal[string] ]:
identifier[print] ( literal[string] , identifier[fname] , literal[string] , identifier[short_copyname] )
identifier[os] . identifier[chmod] ( identifier[fname] , literal[int] )
identifier[shutil] . identifier[copy] ( identifier[copyname] , identifier[fname] )
identifier[os] . identifier[chmod] ( identifier[fname] , literal[int] ) | def manageInputCopies(filelist, **workinplace):
"""
Creates copies of all input images in a sub-directory.
The copies are made prior to any processing being done to the images at all,
including updating the WCS keywords. If there are already copies present,
they will NOT be overwritten, but instead will be used to over-write the
current working copies.
"""
# Find out what directory is being used for processing
workingdir = os.getcwd()
# Only create sub-directory for copies of inputs, if copies are requested
# Create name of sub-directory for copies
origdir = os.path.join(workingdir, 'OrIg_files')
if workinplace['overwrite'] or workinplace['preserve']:
# if sub-directory does not exist yet, create it
if not os.path.exists(origdir):
os.mkdir(origdir) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
printMsg = True
# check to see if copies already exist for each file
for fname in filelist:
copymade = False # If a copy is made, no need to restore
copyname = os.path.join(origdir, fname)
short_copyname = os.path.join('OrIg_files', fname)
if workinplace['overwrite']:
print('Forcibly archiving original of: ', fname, 'as ', short_copyname)
# make a copy of the file in the sub-directory
if os.path.exists(copyname):
os.chmod(copyname, 438) # octal 666 # depends on [control=['if'], data=[]]
shutil.copy(fname, copyname)
os.chmod(copyname, 292) # octal 444 makes files read-only
if printMsg:
print('\nTurning OFF "preserve" and "restore" actions...\n')
printMsg = False # We only need to print this one time... # depends on [control=['if'], data=[]]
copymade = True # depends on [control=['if'], data=[]]
if (workinplace['preserve'] and (not os.path.exists(copyname))) and (not workinplace['overwrite']):
# Preserving a copy of the input, but only if not already archived
print('Preserving original of: ', fname, 'as ', short_copyname)
# make a copy of the file in the sub-directory
shutil.copy(fname, copyname)
os.chmod(copyname, 292) # octal 444 makes files read-only
copymade = True # depends on [control=['if'], data=[]]
if 'restore' in workinplace and (not copymade):
if (os.path.exists(copyname) and workinplace['restore']) and (not workinplace['overwrite']):
print('Restoring original input for ', fname, ' from ', short_copyname)
# replace current files with original version
os.chmod(fname, 438) # octal 666
shutil.copy(copyname, fname)
os.chmod(fname, 438) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']] |
def getVarianceComps(self, univariance=False):
    """
    Return the estimated variance components
    Args:
        univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait
    Returns:
        variance components of all random effects on all phenotypes [P, n_randEffs matrix]
    """
    variance = sp.zeros((self.P, self.n_randEffs))
    # One column per random effect: the diagonal of its trait covariance.
    for effect_idx in range(self.n_randEffs):
        variance[:, effect_idx] = self.getTraitCovar(effect_idx).diagonal()
    if univariance:
        # Normalize each row (trait) so its components sum to one.
        row_totals = variance.sum(1)
        variance = variance / row_totals[:, sp.newaxis]
    return variance
return RV | def function[getVarianceComps, parameter[self, univariance]]:
constant[
Return the estimated variance components
Args:
univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait
Returns:
variance components of all random effects on all phenotypes [P, n_randEffs matrix]
]
variable[RV] assign[=] call[name[sp].zeros, parameter[tuple[[<ast.Attribute object at 0x7da1b0ae1de0>, <ast.Attribute object at 0x7da1b0ae2500>]]]]
for taget[name[term_i]] in starred[call[name[range], parameter[name[self].n_randEffs]]] begin[:]
call[name[RV]][tuple[[<ast.Slice object at 0x7da1b0ae0130>, <ast.Name object at 0x7da1b0ae1ea0>]]] assign[=] call[call[name[self].getTraitCovar, parameter[name[term_i]]].diagonal, parameter[]]
if name[univariance] begin[:]
<ast.AugAssign object at 0x7da1b0ae3160>
return[name[RV]] | keyword[def] identifier[getVarianceComps] ( identifier[self] , identifier[univariance] = keyword[False] ):
literal[string]
identifier[RV] = identifier[sp] . identifier[zeros] (( identifier[self] . identifier[P] , identifier[self] . identifier[n_randEffs] ))
keyword[for] identifier[term_i] keyword[in] identifier[range] ( identifier[self] . identifier[n_randEffs] ):
identifier[RV] [:, identifier[term_i] ]= identifier[self] . identifier[getTraitCovar] ( identifier[term_i] ). identifier[diagonal] ()
keyword[if] identifier[univariance] :
identifier[RV] /= identifier[RV] . identifier[sum] ( literal[int] )[:, identifier[sp] . identifier[newaxis] ]
keyword[return] identifier[RV] | def getVarianceComps(self, univariance=False):
"""
Return the estimated variance components
Args:
univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait
Returns:
variance components of all random effects on all phenotypes [P, n_randEffs matrix]
"""
RV = sp.zeros((self.P, self.n_randEffs))
for term_i in range(self.n_randEffs):
RV[:, term_i] = self.getTraitCovar(term_i).diagonal() # depends on [control=['for'], data=['term_i']]
if univariance:
RV /= RV.sum(1)[:, sp.newaxis] # depends on [control=['if'], data=[]]
return RV |
def parse_simplersp(self, tup_tree):
    """
    Parse a SIMPLERSP element and return its (name, attrs, child) triple.
    ::
      <!ELEMENT SIMPLERSP (METHODRESPONSE | IMETHODRESPONSE)>
    """
    self.check_node(tup_tree, 'SIMPLERSP')
    # Exactly one response child is allowed, of either flavor.
    response = self.one_child(tup_tree, ('METHODRESPONSE', 'IMETHODRESPONSE'))
    return (name(tup_tree), attrs(tup_tree), response)
constant[
Parse for SIMPLERSP Element.
::
<!ELEMENT SIMPLERSP (METHODRESPONSE | IMETHODRESPONSE)>
]
call[name[self].check_node, parameter[name[tup_tree], constant[SIMPLERSP]]]
variable[child] assign[=] call[name[self].one_child, parameter[name[tup_tree], tuple[[<ast.Constant object at 0x7da18bcca200>, <ast.Constant object at 0x7da18bcc8ca0>]]]]
return[tuple[[<ast.Call object at 0x7da18bccb010>, <ast.Call object at 0x7da18bcc82b0>, <ast.Name object at 0x7da18bcc8f40>]]] | keyword[def] identifier[parse_simplersp] ( identifier[self] , identifier[tup_tree] ):
literal[string]
identifier[self] . identifier[check_node] ( identifier[tup_tree] , literal[string] )
identifier[child] = identifier[self] . identifier[one_child] ( identifier[tup_tree] ,( literal[string] , literal[string] ))
keyword[return] identifier[name] ( identifier[tup_tree] ), identifier[attrs] ( identifier[tup_tree] ), identifier[child] | def parse_simplersp(self, tup_tree):
"""
Parse for SIMPLERSP Element.
::
<!ELEMENT SIMPLERSP (METHODRESPONSE | IMETHODRESPONSE)>
"""
self.check_node(tup_tree, 'SIMPLERSP')
child = self.one_child(tup_tree, ('METHODRESPONSE', 'IMETHODRESPONSE'))
return (name(tup_tree), attrs(tup_tree), child) |
def calculate_connvectivity_radius(self, amount_clusters, maximum_iterations = 100):
    """!
    @brief Calculates connectivity radius of allocation specified amount of clusters using ordering diagram and marks borders of clusters using indexes of values of ordering diagram.
    @details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters.
    @param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius.
    @param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations).
    @return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]'
             if connectivity radius hasn't been found for the specified amount of iterations.
    """
    largest_distance = max(self.__ordering)
    radius_found = None

    # With the largest radius everything merges; if even that yields more
    # clusters than requested, no radius can help — bail out immediately.
    amount, borders = self.extract_cluster_amount(largest_distance)
    if amount > amount_clusters:
        return radius_found, borders

    # Bisection over [0, largest_distance] for the requested cluster count.
    lower, upper = 0.0, largest_distance
    for _ in range(maximum_iterations):
        candidate = (lower + upper) / 2.0
        amount, borders = self.extract_cluster_amount(candidate)
        if amount == amount_clusters:
            radius_found = candidate
            break
        if amount == 0:
            break
        if amount > amount_clusters:
            lower = candidate
        else:
            upper = candidate

    return radius_found, borders
constant[!
@brief Calculates connectivity radius of allocation specified amount of clusters using ordering diagram and marks borders of clusters using indexes of values of ordering diagram.
@details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters.
@param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius.
@param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations).
@return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]'
if connectivity radius hasn't been found for the specified amount of iterations.
]
variable[maximum_distance] assign[=] call[name[max], parameter[name[self].__ordering]]
variable[upper_distance] assign[=] name[maximum_distance]
variable[lower_distance] assign[=] constant[0.0]
variable[result] assign[=] constant[None]
<ast.Tuple object at 0x7da1b01e2ec0> assign[=] call[name[self].extract_cluster_amount, parameter[name[maximum_distance]]]
if compare[name[amount] less_or_equal[<=] name[amount_clusters]] begin[:]
for taget[name[_]] in starred[call[name[range], parameter[name[maximum_iterations]]]] begin[:]
variable[radius] assign[=] binary_operation[binary_operation[name[lower_distance] + name[upper_distance]] / constant[2.0]]
<ast.Tuple object at 0x7da1b01e21a0> assign[=] call[name[self].extract_cluster_amount, parameter[name[radius]]]
if compare[name[amount] equal[==] name[amount_clusters]] begin[:]
variable[result] assign[=] name[radius]
break
return[tuple[[<ast.Name object at 0x7da1b01e11b0>, <ast.Name object at 0x7da1b01e1120>]]] | keyword[def] identifier[calculate_connvectivity_radius] ( identifier[self] , identifier[amount_clusters] , identifier[maximum_iterations] = literal[int] ):
literal[string]
identifier[maximum_distance] = identifier[max] ( identifier[self] . identifier[__ordering] )
identifier[upper_distance] = identifier[maximum_distance]
identifier[lower_distance] = literal[int]
identifier[result] = keyword[None]
identifier[amount] , identifier[borders] = identifier[self] . identifier[extract_cluster_amount] ( identifier[maximum_distance] )
keyword[if] identifier[amount] <= identifier[amount_clusters] :
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[maximum_iterations] ):
identifier[radius] =( identifier[lower_distance] + identifier[upper_distance] )/ literal[int]
identifier[amount] , identifier[borders] = identifier[self] . identifier[extract_cluster_amount] ( identifier[radius] )
keyword[if] identifier[amount] == identifier[amount_clusters] :
identifier[result] = identifier[radius]
keyword[break]
keyword[elif] identifier[amount] == literal[int] :
keyword[break]
keyword[elif] identifier[amount] > identifier[amount_clusters] :
identifier[lower_distance] = identifier[radius]
keyword[elif] identifier[amount] < identifier[amount_clusters] :
identifier[upper_distance] = identifier[radius]
keyword[return] identifier[result] , identifier[borders] | def calculate_connvectivity_radius(self, amount_clusters, maximum_iterations=100):
"""!
@brief Calculates connectivity radius of allocation specified amount of clusters using ordering diagram and marks borders of clusters using indexes of values of ordering diagram.
@details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters.
@param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius.
@param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations).
@return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]'
if connectivity radius hasn't been found for the specified amount of iterations.
"""
maximum_distance = max(self.__ordering)
upper_distance = maximum_distance
lower_distance = 0.0
result = None
(amount, borders) = self.extract_cluster_amount(maximum_distance)
if amount <= amount_clusters:
for _ in range(maximum_iterations):
radius = (lower_distance + upper_distance) / 2.0
(amount, borders) = self.extract_cluster_amount(radius)
if amount == amount_clusters:
result = radius
break # depends on [control=['if'], data=[]]
elif amount == 0:
break # depends on [control=['if'], data=[]]
elif amount > amount_clusters:
lower_distance = radius # depends on [control=['if'], data=[]]
elif amount < amount_clusters:
upper_distance = radius # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['amount', 'amount_clusters']]
return (result, borders) |
def _join_summary_file(data, summary_filename="msd_summary_file.h5"):
""" Gets the trackinfo array by joining taste profile to the track summary file """
msd = h5py.File(summary_filename)
# create a lookup table of trackid -> position
track_lookup = dict((t.encode("utf8"), i) for i, t in enumerate(data['track'].cat.categories))
# join on trackid to the summary file to get the artist/album/songname
track_info = np.empty(shape=(len(track_lookup), 4), dtype=np.object)
with tqdm.tqdm(total=len(track_info)) as progress:
for song in msd['metadata']['songs']:
trackid = song[17]
if trackid in track_lookup:
pos = track_lookup[trackid]
track_info[pos] = [x.decode("utf8") for x in (trackid, song[9], song[14], song[18])]
progress.update(1)
return track_info | def function[_join_summary_file, parameter[data, summary_filename]]:
constant[ Gets the trackinfo array by joining taste profile to the track summary file ]
variable[msd] assign[=] call[name[h5py].File, parameter[name[summary_filename]]]
variable[track_lookup] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b2347250>]]
variable[track_info] assign[=] call[name[np].empty, parameter[]]
with call[name[tqdm].tqdm, parameter[]] begin[:]
for taget[name[song]] in starred[call[call[name[msd]][constant[metadata]]][constant[songs]]] begin[:]
variable[trackid] assign[=] call[name[song]][constant[17]]
if compare[name[trackid] in name[track_lookup]] begin[:]
variable[pos] assign[=] call[name[track_lookup]][name[trackid]]
call[name[track_info]][name[pos]] assign[=] <ast.ListComp object at 0x7da1b2346080>
call[name[progress].update, parameter[constant[1]]]
return[name[track_info]] | keyword[def] identifier[_join_summary_file] ( identifier[data] , identifier[summary_filename] = literal[string] ):
literal[string]
identifier[msd] = identifier[h5py] . identifier[File] ( identifier[summary_filename] )
identifier[track_lookup] = identifier[dict] (( identifier[t] . identifier[encode] ( literal[string] ), identifier[i] ) keyword[for] identifier[i] , identifier[t] keyword[in] identifier[enumerate] ( identifier[data] [ literal[string] ]. identifier[cat] . identifier[categories] ))
identifier[track_info] = identifier[np] . identifier[empty] ( identifier[shape] =( identifier[len] ( identifier[track_lookup] ), literal[int] ), identifier[dtype] = identifier[np] . identifier[object] )
keyword[with] identifier[tqdm] . identifier[tqdm] ( identifier[total] = identifier[len] ( identifier[track_info] )) keyword[as] identifier[progress] :
keyword[for] identifier[song] keyword[in] identifier[msd] [ literal[string] ][ literal[string] ]:
identifier[trackid] = identifier[song] [ literal[int] ]
keyword[if] identifier[trackid] keyword[in] identifier[track_lookup] :
identifier[pos] = identifier[track_lookup] [ identifier[trackid] ]
identifier[track_info] [ identifier[pos] ]=[ identifier[x] . identifier[decode] ( literal[string] ) keyword[for] identifier[x] keyword[in] ( identifier[trackid] , identifier[song] [ literal[int] ], identifier[song] [ literal[int] ], identifier[song] [ literal[int] ])]
identifier[progress] . identifier[update] ( literal[int] )
keyword[return] identifier[track_info] | def _join_summary_file(data, summary_filename='msd_summary_file.h5'):
""" Gets the trackinfo array by joining taste profile to the track summary file """
msd = h5py.File(summary_filename)
# create a lookup table of trackid -> position
track_lookup = dict(((t.encode('utf8'), i) for (i, t) in enumerate(data['track'].cat.categories)))
# join on trackid to the summary file to get the artist/album/songname
track_info = np.empty(shape=(len(track_lookup), 4), dtype=np.object)
with tqdm.tqdm(total=len(track_info)) as progress:
for song in msd['metadata']['songs']:
trackid = song[17]
if trackid in track_lookup:
pos = track_lookup[trackid]
track_info[pos] = [x.decode('utf8') for x in (trackid, song[9], song[14], song[18])]
progress.update(1) # depends on [control=['if'], data=['trackid', 'track_lookup']] # depends on [control=['for'], data=['song']] # depends on [control=['with'], data=['progress']]
return track_info |
def calculate(self, T, method):
r'''Method to calculate surface tension of a liquid at temperature `T`
with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate heat capacity, [K]
method : str
Method name to use
Returns
-------
Cp : float
Calculated heat capacity, [J/mol/K]
'''
if method == TRCIG:
Cp = TRCCp(T, *self.TRCIG_coefs)
elif method == COOLPROP:
Cp = PropsSI('Cp0molar', 'T', T,'P', 101325.0, self.CASRN)
elif method == POLING:
Cp = R*(self.POLING_coefs[0] + self.POLING_coefs[1]*T
+ self.POLING_coefs[2]*T**2 + self.POLING_coefs[3]*T**3
+ self.POLING_coefs[4]*T**4)
elif method == POLING_CONST:
Cp = self.POLING_constant
elif method == CRCSTD:
Cp = self.CRCSTD_constant
elif method == LASTOVKA_SHAW:
Cp = Lastovka_Shaw(T, self.similarity_variable)
Cp = property_mass_to_molar(Cp, self.MW)
elif method in self.tabular_data:
Cp = self.interpolate(T, method)
return Cp | def function[calculate, parameter[self, T, method]]:
constant[Method to calculate surface tension of a liquid at temperature `T`
with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate heat capacity, [K]
method : str
Method name to use
Returns
-------
Cp : float
Calculated heat capacity, [J/mol/K]
]
if compare[name[method] equal[==] name[TRCIG]] begin[:]
variable[Cp] assign[=] call[name[TRCCp], parameter[name[T], <ast.Starred object at 0x7da20c76db10>]]
return[name[Cp]] | keyword[def] identifier[calculate] ( identifier[self] , identifier[T] , identifier[method] ):
literal[string]
keyword[if] identifier[method] == identifier[TRCIG] :
identifier[Cp] = identifier[TRCCp] ( identifier[T] ,* identifier[self] . identifier[TRCIG_coefs] )
keyword[elif] identifier[method] == identifier[COOLPROP] :
identifier[Cp] = identifier[PropsSI] ( literal[string] , literal[string] , identifier[T] , literal[string] , literal[int] , identifier[self] . identifier[CASRN] )
keyword[elif] identifier[method] == identifier[POLING] :
identifier[Cp] = identifier[R] *( identifier[self] . identifier[POLING_coefs] [ literal[int] ]+ identifier[self] . identifier[POLING_coefs] [ literal[int] ]* identifier[T]
+ identifier[self] . identifier[POLING_coefs] [ literal[int] ]* identifier[T] ** literal[int] + identifier[self] . identifier[POLING_coefs] [ literal[int] ]* identifier[T] ** literal[int]
+ identifier[self] . identifier[POLING_coefs] [ literal[int] ]* identifier[T] ** literal[int] )
keyword[elif] identifier[method] == identifier[POLING_CONST] :
identifier[Cp] = identifier[self] . identifier[POLING_constant]
keyword[elif] identifier[method] == identifier[CRCSTD] :
identifier[Cp] = identifier[self] . identifier[CRCSTD_constant]
keyword[elif] identifier[method] == identifier[LASTOVKA_SHAW] :
identifier[Cp] = identifier[Lastovka_Shaw] ( identifier[T] , identifier[self] . identifier[similarity_variable] )
identifier[Cp] = identifier[property_mass_to_molar] ( identifier[Cp] , identifier[self] . identifier[MW] )
keyword[elif] identifier[method] keyword[in] identifier[self] . identifier[tabular_data] :
identifier[Cp] = identifier[self] . identifier[interpolate] ( identifier[T] , identifier[method] )
keyword[return] identifier[Cp] | def calculate(self, T, method):
"""Method to calculate surface tension of a liquid at temperature `T`
with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate heat capacity, [K]
method : str
Method name to use
Returns
-------
Cp : float
Calculated heat capacity, [J/mol/K]
"""
if method == TRCIG:
Cp = TRCCp(T, *self.TRCIG_coefs) # depends on [control=['if'], data=[]]
elif method == COOLPROP:
Cp = PropsSI('Cp0molar', 'T', T, 'P', 101325.0, self.CASRN) # depends on [control=['if'], data=[]]
elif method == POLING:
Cp = R * (self.POLING_coefs[0] + self.POLING_coefs[1] * T + self.POLING_coefs[2] * T ** 2 + self.POLING_coefs[3] * T ** 3 + self.POLING_coefs[4] * T ** 4) # depends on [control=['if'], data=[]]
elif method == POLING_CONST:
Cp = self.POLING_constant # depends on [control=['if'], data=[]]
elif method == CRCSTD:
Cp = self.CRCSTD_constant # depends on [control=['if'], data=[]]
elif method == LASTOVKA_SHAW:
Cp = Lastovka_Shaw(T, self.similarity_variable)
Cp = property_mass_to_molar(Cp, self.MW) # depends on [control=['if'], data=[]]
elif method in self.tabular_data:
Cp = self.interpolate(T, method) # depends on [control=['if'], data=['method']]
return Cp |
def show_version(a_device):
"""Execute show version command using Netmiko."""
remote_conn = ConnectHandler(**a_device)
print()
print("#" * 80)
print(remote_conn.send_command("show version"))
print("#" * 80)
print() | def function[show_version, parameter[a_device]]:
constant[Execute show version command using Netmiko.]
variable[remote_conn] assign[=] call[name[ConnectHandler], parameter[]]
call[name[print], parameter[]]
call[name[print], parameter[binary_operation[constant[#] * constant[80]]]]
call[name[print], parameter[call[name[remote_conn].send_command, parameter[constant[show version]]]]]
call[name[print], parameter[binary_operation[constant[#] * constant[80]]]]
call[name[print], parameter[]] | keyword[def] identifier[show_version] ( identifier[a_device] ):
literal[string]
identifier[remote_conn] = identifier[ConnectHandler] (** identifier[a_device] )
identifier[print] ()
identifier[print] ( literal[string] * literal[int] )
identifier[print] ( identifier[remote_conn] . identifier[send_command] ( literal[string] ))
identifier[print] ( literal[string] * literal[int] )
identifier[print] () | def show_version(a_device):
"""Execute show version command using Netmiko."""
remote_conn = ConnectHandler(**a_device)
print()
print('#' * 80)
print(remote_conn.send_command('show version'))
print('#' * 80)
print() |
def commit(self, repository=None, tag=None, **kwargs):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.commit(self.id, repository=repository, tag=tag,
**kwargs)
return self.client.images.get(resp['Id']) | def function[commit, parameter[self, repository, tag]]:
constant[
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
]
variable[resp] assign[=] call[name[self].client.api.commit, parameter[name[self].id]]
return[call[name[self].client.images.get, parameter[call[name[resp]][constant[Id]]]]] | keyword[def] identifier[commit] ( identifier[self] , identifier[repository] = keyword[None] , identifier[tag] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[resp] = identifier[self] . identifier[client] . identifier[api] . identifier[commit] ( identifier[self] . identifier[id] , identifier[repository] = identifier[repository] , identifier[tag] = identifier[tag] ,
** identifier[kwargs] )
keyword[return] identifier[self] . identifier[client] . identifier[images] . identifier[get] ( identifier[resp] [ literal[string] ]) | def commit(self, repository=None, tag=None, **kwargs):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.commit(self.id, repository=repository, tag=tag, **kwargs)
return self.client.images.get(resp['Id']) |
def get_first_of_element(element, sub, contype=None):
"""抽取lxml.etree库中elem对象中文字
Args:
element: lxml.etree.Element
sub: str
Returns:
elem中文字
"""
content = element.xpath(sub)
return list_or_empty(content, contype) | def function[get_first_of_element, parameter[element, sub, contype]]:
constant[抽取lxml.etree库中elem对象中文字
Args:
element: lxml.etree.Element
sub: str
Returns:
elem中文字
]
variable[content] assign[=] call[name[element].xpath, parameter[name[sub]]]
return[call[name[list_or_empty], parameter[name[content], name[contype]]]] | keyword[def] identifier[get_first_of_element] ( identifier[element] , identifier[sub] , identifier[contype] = keyword[None] ):
literal[string]
identifier[content] = identifier[element] . identifier[xpath] ( identifier[sub] )
keyword[return] identifier[list_or_empty] ( identifier[content] , identifier[contype] ) | def get_first_of_element(element, sub, contype=None):
"""抽取lxml.etree库中elem对象中文字
Args:
element: lxml.etree.Element
sub: str
Returns:
elem中文字
"""
content = element.xpath(sub)
return list_or_empty(content, contype) |
def inasafe_exposure_summary_field_values(field, feature, parent):
"""Retrieve all values from a field in the exposure summary layer.
"""
_ = feature, parent # NOQA
layer = exposure_summary_layer()
if not layer:
return None
index = layer.fields().lookupField(field)
if index < 0:
return None
values = []
for feat in layer.getFeatures():
value = feat[index]
values.append(value)
return str(values) | def function[inasafe_exposure_summary_field_values, parameter[field, feature, parent]]:
constant[Retrieve all values from a field in the exposure summary layer.
]
variable[_] assign[=] tuple[[<ast.Name object at 0x7da1b0c3ee90>, <ast.Name object at 0x7da1b0c3e140>]]
variable[layer] assign[=] call[name[exposure_summary_layer], parameter[]]
if <ast.UnaryOp object at 0x7da1b0c3ff10> begin[:]
return[constant[None]]
variable[index] assign[=] call[call[name[layer].fields, parameter[]].lookupField, parameter[name[field]]]
if compare[name[index] less[<] constant[0]] begin[:]
return[constant[None]]
variable[values] assign[=] list[[]]
for taget[name[feat]] in starred[call[name[layer].getFeatures, parameter[]]] begin[:]
variable[value] assign[=] call[name[feat]][name[index]]
call[name[values].append, parameter[name[value]]]
return[call[name[str], parameter[name[values]]]] | keyword[def] identifier[inasafe_exposure_summary_field_values] ( identifier[field] , identifier[feature] , identifier[parent] ):
literal[string]
identifier[_] = identifier[feature] , identifier[parent]
identifier[layer] = identifier[exposure_summary_layer] ()
keyword[if] keyword[not] identifier[layer] :
keyword[return] keyword[None]
identifier[index] = identifier[layer] . identifier[fields] (). identifier[lookupField] ( identifier[field] )
keyword[if] identifier[index] < literal[int] :
keyword[return] keyword[None]
identifier[values] =[]
keyword[for] identifier[feat] keyword[in] identifier[layer] . identifier[getFeatures] ():
identifier[value] = identifier[feat] [ identifier[index] ]
identifier[values] . identifier[append] ( identifier[value] )
keyword[return] identifier[str] ( identifier[values] ) | def inasafe_exposure_summary_field_values(field, feature, parent):
"""Retrieve all values from a field in the exposure summary layer.
"""
_ = (feature, parent) # NOQA
layer = exposure_summary_layer()
if not layer:
return None # depends on [control=['if'], data=[]]
index = layer.fields().lookupField(field)
if index < 0:
return None # depends on [control=['if'], data=[]]
values = []
for feat in layer.getFeatures():
value = feat[index]
values.append(value) # depends on [control=['for'], data=['feat']]
return str(values) |
def _common_parser():
"""Returns a parser with common command-line options for all the scripts
in the fortpy suite.
"""
import argparse
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-examples", action="store_true",
help="See detailed help and examples for this script.")
parser.add_argument("-verbose", action="store_true",
help="See verbose output as the script runs.")
parser.add_argument('-action', nargs=1, choices=['save','print'], default='print',
help="Specify what to do with the output (print or save)")
parser.add_argument("-debug", action="store_true",
help="Print verbose calculation information for debugging.")
return parser | def function[_common_parser, parameter[]]:
constant[Returns a parser with common command-line options for all the scripts
in the fortpy suite.
]
import module[argparse]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-examples]]]
call[name[parser].add_argument, parameter[constant[-verbose]]]
call[name[parser].add_argument, parameter[constant[-action]]]
call[name[parser].add_argument, parameter[constant[-debug]]]
return[name[parser]] | keyword[def] identifier[_common_parser] ():
literal[string]
keyword[import] identifier[argparse]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[add_help] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[int] , identifier[choices] =[ literal[string] , literal[string] ], identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
keyword[return] identifier[parser] | def _common_parser():
"""Returns a parser with common command-line options for all the scripts
in the fortpy suite.
"""
import argparse
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-examples', action='store_true', help='See detailed help and examples for this script.')
parser.add_argument('-verbose', action='store_true', help='See verbose output as the script runs.')
parser.add_argument('-action', nargs=1, choices=['save', 'print'], default='print', help='Specify what to do with the output (print or save)')
parser.add_argument('-debug', action='store_true', help='Print verbose calculation information for debugging.')
return parser |
def accept(self, message=None, expires_at=None):
"""Accept request."""
with db.session.begin_nested():
if self.status != RequestStatus.PENDING:
raise InvalidRequestStateError(RequestStatus.PENDING)
self.status = RequestStatus.ACCEPTED
request_accepted.send(self, message=message, expires_at=expires_at) | def function[accept, parameter[self, message, expires_at]]:
constant[Accept request.]
with call[name[db].session.begin_nested, parameter[]] begin[:]
if compare[name[self].status not_equal[!=] name[RequestStatus].PENDING] begin[:]
<ast.Raise object at 0x7da2044c1d50>
name[self].status assign[=] name[RequestStatus].ACCEPTED
call[name[request_accepted].send, parameter[name[self]]] | keyword[def] identifier[accept] ( identifier[self] , identifier[message] = keyword[None] , identifier[expires_at] = keyword[None] ):
literal[string]
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
keyword[if] identifier[self] . identifier[status] != identifier[RequestStatus] . identifier[PENDING] :
keyword[raise] identifier[InvalidRequestStateError] ( identifier[RequestStatus] . identifier[PENDING] )
identifier[self] . identifier[status] = identifier[RequestStatus] . identifier[ACCEPTED]
identifier[request_accepted] . identifier[send] ( identifier[self] , identifier[message] = identifier[message] , identifier[expires_at] = identifier[expires_at] ) | def accept(self, message=None, expires_at=None):
"""Accept request."""
with db.session.begin_nested():
if self.status != RequestStatus.PENDING:
raise InvalidRequestStateError(RequestStatus.PENDING) # depends on [control=['if'], data=[]]
self.status = RequestStatus.ACCEPTED # depends on [control=['with'], data=[]]
request_accepted.send(self, message=message, expires_at=expires_at) |
def fn_minimum_argcount(callable):
"""Returns the minimum number of arguments that must be provided for the call to succeed."""
fn = get_fn(callable)
available_argcount = fn_available_argcount(callable)
try:
return available_argcount - len(fn.__defaults__)
except TypeError:
return available_argcount | def function[fn_minimum_argcount, parameter[callable]]:
constant[Returns the minimum number of arguments that must be provided for the call to succeed.]
variable[fn] assign[=] call[name[get_fn], parameter[name[callable]]]
variable[available_argcount] assign[=] call[name[fn_available_argcount], parameter[name[callable]]]
<ast.Try object at 0x7da18f813550> | keyword[def] identifier[fn_minimum_argcount] ( identifier[callable] ):
literal[string]
identifier[fn] = identifier[get_fn] ( identifier[callable] )
identifier[available_argcount] = identifier[fn_available_argcount] ( identifier[callable] )
keyword[try] :
keyword[return] identifier[available_argcount] - identifier[len] ( identifier[fn] . identifier[__defaults__] )
keyword[except] identifier[TypeError] :
keyword[return] identifier[available_argcount] | def fn_minimum_argcount(callable):
"""Returns the minimum number of arguments that must be provided for the call to succeed."""
fn = get_fn(callable)
available_argcount = fn_available_argcount(callable)
try:
return available_argcount - len(fn.__defaults__) # depends on [control=['try'], data=[]]
except TypeError:
return available_argcount # depends on [control=['except'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.