code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def handle_fork(self):
"""
Forks happen. Here we handle them.
"""
self.reset()
self.sensor.handle_fork()
instana.singletons.tracer.handle_fork() | def function[handle_fork, parameter[self]]:
constant[
Forks happen. Here we handle them.
]
call[name[self].reset, parameter[]]
call[name[self].sensor.handle_fork, parameter[]]
call[name[instana].singletons.tracer.handle_fork, parameter[]] | keyword[def] identifier[handle_fork] ( identifier[self] ):
literal[string]
identifier[self] . identifier[reset] ()
identifier[self] . identifier[sensor] . identifier[handle_fork] ()
identifier[instana] . identifier[singletons] . identifier[tracer] . identifier[handle_fork] () | def handle_fork(self):
"""
Forks happen. Here we handle them.
"""
self.reset()
self.sensor.handle_fork()
instana.singletons.tracer.handle_fork() |
def connectionLost(self, reason=None):
"""Callback handler from twisted when a connection was lost."""
try:
self.connected = False
self.stop_block_loop()
self.stop_peerinfo_loop()
self.stop_header_loop()
self.ReleaseBlockRequests()
self.leader.RemoveConnectedPeer(self)
time_expired = self.time_expired(HEARTBEAT_BLOCKS)
# some NEO-cli versions have a 30s timeout to receive block/consensus or tx messages. By default neo-python doesn't respond to these requests
if time_expired > 20:
self.address.last_connection = Address.Now()
self.leader.AddDeadAddress(self.address, reason=f"{self.prefix} Premature disconnect")
if reason and reason.check(twisted_error.ConnectionDone):
# this might happen if they close our connection because they've reached max peers or something similar
logger.debug(f"{self.prefix} disconnected normally with reason:{reason.value}")
self._check_for_consecutive_disconnects("connection done")
elif reason and reason.check(twisted_error.ConnectionLost):
# Can be due to a timeout. Only if this happened again within 5 minutes do we label the node as bad
# because then it clearly doesn't want to talk to us or we have a bad connection to them.
# Otherwise allow for the node to be queued again by NodeLeader.
logger.debug(f"{self.prefix} disconnected with connectionlost reason: {reason.value}")
self._check_for_consecutive_disconnects("connection lost")
else:
logger.debug(f"{self.prefix} disconnected with reason: {reason.value}")
except Exception as e:
logger.error("Error with connection lost: %s " % e)
def try_me(err):
err.check(error.ConnectionAborted)
if self.disconnect_deferred:
d, self.disconnect_deferred = self.disconnect_deferred, None # type: defer.Deferred
d.addErrback(try_me)
if len(d.callbacks) > 0:
d.callback(reason)
else:
print("connLost, disconnect_deferred cancelling!")
d.cancel() | def function[connectionLost, parameter[self, reason]]:
constant[Callback handler from twisted when a connection was lost.]
<ast.Try object at 0x7da1b1ec1930>
def function[try_me, parameter[err]]:
call[name[err].check, parameter[name[error].ConnectionAborted]]
if name[self].disconnect_deferred begin[:]
<ast.Tuple object at 0x7da20e9b0a00> assign[=] tuple[[<ast.Attribute object at 0x7da20e9b0910>, <ast.Constant object at 0x7da20e9b15a0>]]
call[name[d].addErrback, parameter[name[try_me]]]
if compare[call[name[len], parameter[name[d].callbacks]] greater[>] constant[0]] begin[:]
call[name[d].callback, parameter[name[reason]]] | keyword[def] identifier[connectionLost] ( identifier[self] , identifier[reason] = keyword[None] ):
literal[string]
keyword[try] :
identifier[self] . identifier[connected] = keyword[False]
identifier[self] . identifier[stop_block_loop] ()
identifier[self] . identifier[stop_peerinfo_loop] ()
identifier[self] . identifier[stop_header_loop] ()
identifier[self] . identifier[ReleaseBlockRequests] ()
identifier[self] . identifier[leader] . identifier[RemoveConnectedPeer] ( identifier[self] )
identifier[time_expired] = identifier[self] . identifier[time_expired] ( identifier[HEARTBEAT_BLOCKS] )
keyword[if] identifier[time_expired] > literal[int] :
identifier[self] . identifier[address] . identifier[last_connection] = identifier[Address] . identifier[Now] ()
identifier[self] . identifier[leader] . identifier[AddDeadAddress] ( identifier[self] . identifier[address] , identifier[reason] = literal[string] )
keyword[if] identifier[reason] keyword[and] identifier[reason] . identifier[check] ( identifier[twisted_error] . identifier[ConnectionDone] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_check_for_consecutive_disconnects] ( literal[string] )
keyword[elif] identifier[reason] keyword[and] identifier[reason] . identifier[check] ( identifier[twisted_error] . identifier[ConnectionLost] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_check_for_consecutive_disconnects] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] % identifier[e] )
keyword[def] identifier[try_me] ( identifier[err] ):
identifier[err] . identifier[check] ( identifier[error] . identifier[ConnectionAborted] )
keyword[if] identifier[self] . identifier[disconnect_deferred] :
identifier[d] , identifier[self] . identifier[disconnect_deferred] = identifier[self] . identifier[disconnect_deferred] , keyword[None]
identifier[d] . identifier[addErrback] ( identifier[try_me] )
keyword[if] identifier[len] ( identifier[d] . identifier[callbacks] )> literal[int] :
identifier[d] . identifier[callback] ( identifier[reason] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[d] . identifier[cancel] () | def connectionLost(self, reason=None):
"""Callback handler from twisted when a connection was lost."""
try:
self.connected = False
self.stop_block_loop()
self.stop_peerinfo_loop()
self.stop_header_loop()
self.ReleaseBlockRequests()
self.leader.RemoveConnectedPeer(self)
time_expired = self.time_expired(HEARTBEAT_BLOCKS)
# some NEO-cli versions have a 30s timeout to receive block/consensus or tx messages. By default neo-python doesn't respond to these requests
if time_expired > 20:
self.address.last_connection = Address.Now()
self.leader.AddDeadAddress(self.address, reason=f'{self.prefix} Premature disconnect') # depends on [control=['if'], data=[]]
if reason and reason.check(twisted_error.ConnectionDone):
# this might happen if they close our connection because they've reached max peers or something similar
logger.debug(f'{self.prefix} disconnected normally with reason:{reason.value}')
self._check_for_consecutive_disconnects('connection done') # depends on [control=['if'], data=[]]
elif reason and reason.check(twisted_error.ConnectionLost):
# Can be due to a timeout. Only if this happened again within 5 minutes do we label the node as bad
# because then it clearly doesn't want to talk to us or we have a bad connection to them.
# Otherwise allow for the node to be queued again by NodeLeader.
logger.debug(f'{self.prefix} disconnected with connectionlost reason: {reason.value}')
self._check_for_consecutive_disconnects('connection lost') # depends on [control=['if'], data=[]]
else:
logger.debug(f'{self.prefix} disconnected with reason: {reason.value}') # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Error with connection lost: %s ' % e) # depends on [control=['except'], data=['e']]
def try_me(err):
err.check(error.ConnectionAborted)
if self.disconnect_deferred:
(d, self.disconnect_deferred) = (self.disconnect_deferred, None) # type: defer.Deferred
d.addErrback(try_me)
if len(d.callbacks) > 0:
d.callback(reason) # depends on [control=['if'], data=[]]
else:
print('connLost, disconnect_deferred cancelling!')
d.cancel() # depends on [control=['if'], data=[]] |
def _close(self, args):
"""
request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
# self.close_ok()
# def close_ok(self):
# """
# confirm a channel close
#
# This method confirms a Channel.Close method and tells the
# recipient that it is safe to release resources for the channel
# and close the socket.
#
# RULE:
#
# A peer that detects a socket closure without having
# received a Channel.Close-Ok handshake method SHOULD log
# the error.
#
# """
self._send_method((20, 41))
self._do_close()
raise AMQPChannelException(reply_code, reply_text,
(class_id, method_id)) | def function[_close, parameter[self, args]]:
constant[
request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
]
variable[reply_code] assign[=] call[name[args].read_short, parameter[]]
variable[reply_text] assign[=] call[name[args].read_shortstr, parameter[]]
variable[class_id] assign[=] call[name[args].read_short, parameter[]]
variable[method_id] assign[=] call[name[args].read_short, parameter[]]
call[name[self]._send_method, parameter[tuple[[<ast.Constant object at 0x7da20c992d40>, <ast.Constant object at 0x7da20c991ae0>]]]]
call[name[self]._do_close, parameter[]]
<ast.Raise object at 0x7da20c990dc0> | keyword[def] identifier[_close] ( identifier[self] , identifier[args] ):
literal[string]
identifier[reply_code] = identifier[args] . identifier[read_short] ()
identifier[reply_text] = identifier[args] . identifier[read_shortstr] ()
identifier[class_id] = identifier[args] . identifier[read_short] ()
identifier[method_id] = identifier[args] . identifier[read_short] ()
identifier[self] . identifier[_send_method] (( literal[int] , literal[int] ))
identifier[self] . identifier[_do_close] ()
keyword[raise] identifier[AMQPChannelException] ( identifier[reply_code] , identifier[reply_text] ,
( identifier[class_id] , identifier[method_id] )) | def _close(self, args):
"""
request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
# self.close_ok()
# def close_ok(self):
# """
# confirm a channel close
#
# This method confirms a Channel.Close method and tells the
# recipient that it is safe to release resources for the channel
# and close the socket.
#
# RULE:
#
# A peer that detects a socket closure without having
# received a Channel.Close-Ok handshake method SHOULD log
# the error.
#
# """
self._send_method((20, 41))
self._do_close()
raise AMQPChannelException(reply_code, reply_text, (class_id, method_id)) |
def parse_events(content, start=None, end=None, default_span=timedelta(days=7)):
"""
Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list
"""
if not start:
start = now()
if not end:
end = start + default_span
if not content:
raise ValueError('Content is invalid!')
calendar = Calendar.from_ical(content)
# Find the calendar's timezone info, or use UTC
for c in calendar.walk():
if c.name == 'VTIMEZONE':
cal_tz = gettz(str(c['TZID']))
break;
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == "VEVENT":
e = create_event(component)
if e.recurring:
# Unfold recurring events according to their rrule
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend(e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))
elif e.end >= start and e.start <= end:
found.append(e)
return found | def function[parse_events, parameter[content, start, end, default_span]]:
constant[
Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list
]
if <ast.UnaryOp object at 0x7da18f58e500> begin[:]
variable[start] assign[=] call[name[now], parameter[]]
if <ast.UnaryOp object at 0x7da18f58fd30> begin[:]
variable[end] assign[=] binary_operation[name[start] + name[default_span]]
if <ast.UnaryOp object at 0x7da18f58c880> begin[:]
<ast.Raise object at 0x7da18f58d0c0>
variable[calendar] assign[=] call[name[Calendar].from_ical, parameter[name[content]]]
for taget[name[c]] in starred[call[name[calendar].walk, parameter[]]] begin[:]
if compare[name[c].name equal[==] constant[VTIMEZONE]] begin[:]
variable[cal_tz] assign[=] call[name[gettz], parameter[call[name[str], parameter[call[name[c]][constant[TZID]]]]]]
break
variable[start] assign[=] call[name[normalize], parameter[name[start], name[cal_tz]]]
variable[end] assign[=] call[name[normalize], parameter[name[end], name[cal_tz]]]
variable[found] assign[=] list[[]]
for taget[name[component]] in starred[call[name[calendar].walk, parameter[]]] begin[:]
if compare[name[component].name equal[==] constant[VEVENT]] begin[:]
variable[e] assign[=] call[name[create_event], parameter[name[component]]]
if name[e].recurring begin[:]
variable[rule] assign[=] call[name[parse_rrule], parameter[name[component], name[cal_tz]]]
variable[dur] assign[=] binary_operation[name[e].end - name[e].start]
call[name[found].extend, parameter[<ast.GeneratorExp object at 0x7da1b07f9900>]]
return[name[found]] | keyword[def] identifier[parse_events] ( identifier[content] , identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[default_span] = identifier[timedelta] ( identifier[days] = literal[int] )):
literal[string]
keyword[if] keyword[not] identifier[start] :
identifier[start] = identifier[now] ()
keyword[if] keyword[not] identifier[end] :
identifier[end] = identifier[start] + identifier[default_span]
keyword[if] keyword[not] identifier[content] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[calendar] = identifier[Calendar] . identifier[from_ical] ( identifier[content] )
keyword[for] identifier[c] keyword[in] identifier[calendar] . identifier[walk] ():
keyword[if] identifier[c] . identifier[name] == literal[string] :
identifier[cal_tz] = identifier[gettz] ( identifier[str] ( identifier[c] [ literal[string] ]))
keyword[break] ;
keyword[else] :
identifier[cal_tz] = identifier[UTC]
identifier[start] = identifier[normalize] ( identifier[start] , identifier[cal_tz] )
identifier[end] = identifier[normalize] ( identifier[end] , identifier[cal_tz] )
identifier[found] =[]
keyword[for] identifier[component] keyword[in] identifier[calendar] . identifier[walk] ():
keyword[if] identifier[component] . identifier[name] == literal[string] :
identifier[e] = identifier[create_event] ( identifier[component] )
keyword[if] identifier[e] . identifier[recurring] :
identifier[rule] = identifier[parse_rrule] ( identifier[component] , identifier[cal_tz] )
identifier[dur] = identifier[e] . identifier[end] - identifier[e] . identifier[start]
identifier[found] . identifier[extend] ( identifier[e] . identifier[copy_to] ( identifier[dt] ) keyword[for] identifier[dt] keyword[in] identifier[rule] . identifier[between] ( identifier[start] - identifier[dur] , identifier[end] , identifier[inc] = keyword[True] ))
keyword[elif] identifier[e] . identifier[end] >= identifier[start] keyword[and] identifier[e] . identifier[start] <= identifier[end] :
identifier[found] . identifier[append] ( identifier[e] )
keyword[return] identifier[found] | def parse_events(content, start=None, end=None, default_span=timedelta(days=7)):
"""
Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list
"""
if not start:
start = now() # depends on [control=['if'], data=[]]
if not end:
end = start + default_span # depends on [control=['if'], data=[]]
if not content:
raise ValueError('Content is invalid!') # depends on [control=['if'], data=[]]
calendar = Calendar.from_ical(content)
# Find the calendar's timezone info, or use UTC
for c in calendar.walk():
if c.name == 'VTIMEZONE':
cal_tz = gettz(str(c['TZID']))
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == 'VEVENT':
e = create_event(component)
if e.recurring:
# Unfold recurring events according to their rrule
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend((e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))) # depends on [control=['if'], data=[]]
elif e.end >= start and e.start <= end:
found.append(e) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['component']]
return found |
def _store(self, uid, content, data=None):
"""Store the given dict of content at uid. Nothing returned."""
doc = dict(uid=uid)
if data:
gfs = gridfs.GridFS(self.db)
id = gfs.put(data, encoding='utf-8')
doc.update(data_id=id)
doc.update(content)
self.db.pastes.insert_one(doc) | def function[_store, parameter[self, uid, content, data]]:
constant[Store the given dict of content at uid. Nothing returned.]
variable[doc] assign[=] call[name[dict], parameter[]]
if name[data] begin[:]
variable[gfs] assign[=] call[name[gridfs].GridFS, parameter[name[self].db]]
variable[id] assign[=] call[name[gfs].put, parameter[name[data]]]
call[name[doc].update, parameter[]]
call[name[doc].update, parameter[name[content]]]
call[name[self].db.pastes.insert_one, parameter[name[doc]]] | keyword[def] identifier[_store] ( identifier[self] , identifier[uid] , identifier[content] , identifier[data] = keyword[None] ):
literal[string]
identifier[doc] = identifier[dict] ( identifier[uid] = identifier[uid] )
keyword[if] identifier[data] :
identifier[gfs] = identifier[gridfs] . identifier[GridFS] ( identifier[self] . identifier[db] )
identifier[id] = identifier[gfs] . identifier[put] ( identifier[data] , identifier[encoding] = literal[string] )
identifier[doc] . identifier[update] ( identifier[data_id] = identifier[id] )
identifier[doc] . identifier[update] ( identifier[content] )
identifier[self] . identifier[db] . identifier[pastes] . identifier[insert_one] ( identifier[doc] ) | def _store(self, uid, content, data=None):
"""Store the given dict of content at uid. Nothing returned."""
doc = dict(uid=uid)
if data:
gfs = gridfs.GridFS(self.db)
id = gfs.put(data, encoding='utf-8')
doc.update(data_id=id) # depends on [control=['if'], data=[]]
doc.update(content)
self.db.pastes.insert_one(doc) |
def __get_max_min_time_2(table, terms, exact):
"""
Search for either Age or Year to calculate the max, min, and time unit for this table/file.
Preference: Look for Age first, and then Year second (if needed)
:param dict table: Table data
:param list terms: age, yearbp, yrbp, or year, yr
:param bool exact: Look for exact key match, or no
:return bool: found age or year info
"""
vals = []
unit = ""
try:
for k, v in table["columns"].items():
if exact:
if k.lower() in terms:
try:
vals = v["values"]
unit = v["units"]
break
except KeyError:
pass
elif not exact:
for term in terms:
if term in k:
try:
vals = v["values"]
unit = v["units"]
break
except KeyError:
pass
except Exception as e:
logger_lpd_noaa.debug("get_max_min_time_3: {}".format(e))
return vals, unit | def function[__get_max_min_time_2, parameter[table, terms, exact]]:
constant[
Search for either Age or Year to calculate the max, min, and time unit for this table/file.
Preference: Look for Age first, and then Year second (if needed)
:param dict table: Table data
:param list terms: age, yearbp, yrbp, or year, yr
:param bool exact: Look for exact key match, or no
:return bool: found age or year info
]
variable[vals] assign[=] list[[]]
variable[unit] assign[=] constant[]
<ast.Try object at 0x7da18f09f100>
return[tuple[[<ast.Name object at 0x7da18c4cc5e0>, <ast.Name object at 0x7da18c4cea40>]]] | keyword[def] identifier[__get_max_min_time_2] ( identifier[table] , identifier[terms] , identifier[exact] ):
literal[string]
identifier[vals] =[]
identifier[unit] = literal[string]
keyword[try] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[table] [ literal[string] ]. identifier[items] ():
keyword[if] identifier[exact] :
keyword[if] identifier[k] . identifier[lower] () keyword[in] identifier[terms] :
keyword[try] :
identifier[vals] = identifier[v] [ literal[string] ]
identifier[unit] = identifier[v] [ literal[string] ]
keyword[break]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[elif] keyword[not] identifier[exact] :
keyword[for] identifier[term] keyword[in] identifier[terms] :
keyword[if] identifier[term] keyword[in] identifier[k] :
keyword[try] :
identifier[vals] = identifier[v] [ literal[string] ]
identifier[unit] = identifier[v] [ literal[string] ]
keyword[break]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger_lpd_noaa] . identifier[debug] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return] identifier[vals] , identifier[unit] | def __get_max_min_time_2(table, terms, exact):
"""
Search for either Age or Year to calculate the max, min, and time unit for this table/file.
Preference: Look for Age first, and then Year second (if needed)
:param dict table: Table data
:param list terms: age, yearbp, yrbp, or year, yr
:param bool exact: Look for exact key match, or no
:return bool: found age or year info
"""
vals = []
unit = ''
try:
for (k, v) in table['columns'].items():
if exact:
if k.lower() in terms:
try:
vals = v['values']
unit = v['units']
break # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not exact:
for term in terms:
if term in k:
try:
vals = v['values']
unit = v['units']
break # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['term']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
logger_lpd_noaa.debug('get_max_min_time_3: {}'.format(e)) # depends on [control=['except'], data=['e']]
return (vals, unit) |
def gaussian_overlapping_coefficient(means_0, stds_0, means_1, stds_1, lower=None, upper=None):
"""Compute the overlapping coefficient of two Gaussian continuous_distributions.
This computes the :math:`\int_{-\infty}^{\infty}{\min(f(x), g(x))\partial x}` where
:math:`f \sim \mathcal{N}(\mu_0, \sigma_0^{2})` and :math:`f \sim \mathcal{N}(\mu_1, \sigma_1^{2})` are normally
distributed variables.
This will compute the overlap for each element in the first dimension.
Args:
means_0 (ndarray): the set of means of the first distribution
stds_0 (ndarray): the set of stds of the fist distribution
means_1 (ndarray): the set of means of the second distribution
stds_1 (ndarray): the set of stds of the second distribution
lower (float): the lower limit of the integration. If not set we set it to -inf.
upper (float): the upper limit of the integration. If not set we set it to +inf.
"""
if lower is None:
lower = -np.inf
if upper is None:
upper = np.inf
def point_iterator():
for ind in range(means_0.shape[0]):
yield np.squeeze(means_0[ind]), np.squeeze(stds_0[ind]), np.squeeze(means_1[ind]), np.squeeze(stds_1[ind])
return np.array(list(multiprocess_mapping(_ComputeGaussianOverlap(lower, upper), point_iterator()))) | def function[gaussian_overlapping_coefficient, parameter[means_0, stds_0, means_1, stds_1, lower, upper]]:
constant[Compute the overlapping coefficient of two Gaussian continuous_distributions.
This computes the :math:`\int_{-\infty}^{\infty}{\min(f(x), g(x))\partial x}` where
:math:`f \sim \mathcal{N}(\mu_0, \sigma_0^{2})` and :math:`f \sim \mathcal{N}(\mu_1, \sigma_1^{2})` are normally
distributed variables.
This will compute the overlap for each element in the first dimension.
Args:
means_0 (ndarray): the set of means of the first distribution
stds_0 (ndarray): the set of stds of the fist distribution
means_1 (ndarray): the set of means of the second distribution
stds_1 (ndarray): the set of stds of the second distribution
lower (float): the lower limit of the integration. If not set we set it to -inf.
upper (float): the upper limit of the integration. If not set we set it to +inf.
]
if compare[name[lower] is constant[None]] begin[:]
variable[lower] assign[=] <ast.UnaryOp object at 0x7da1b0403b80>
if compare[name[upper] is constant[None]] begin[:]
variable[upper] assign[=] name[np].inf
def function[point_iterator, parameter[]]:
for taget[name[ind]] in starred[call[name[range], parameter[call[name[means_0].shape][constant[0]]]]] begin[:]
<ast.Yield object at 0x7da1b0400940>
return[call[name[np].array, parameter[call[name[list], parameter[call[name[multiprocess_mapping], parameter[call[name[_ComputeGaussianOverlap], parameter[name[lower], name[upper]]], call[name[point_iterator], parameter[]]]]]]]]] | keyword[def] identifier[gaussian_overlapping_coefficient] ( identifier[means_0] , identifier[stds_0] , identifier[means_1] , identifier[stds_1] , identifier[lower] = keyword[None] , identifier[upper] = keyword[None] ):
literal[string]
keyword[if] identifier[lower] keyword[is] keyword[None] :
identifier[lower] =- identifier[np] . identifier[inf]
keyword[if] identifier[upper] keyword[is] keyword[None] :
identifier[upper] = identifier[np] . identifier[inf]
keyword[def] identifier[point_iterator] ():
keyword[for] identifier[ind] keyword[in] identifier[range] ( identifier[means_0] . identifier[shape] [ literal[int] ]):
keyword[yield] identifier[np] . identifier[squeeze] ( identifier[means_0] [ identifier[ind] ]), identifier[np] . identifier[squeeze] ( identifier[stds_0] [ identifier[ind] ]), identifier[np] . identifier[squeeze] ( identifier[means_1] [ identifier[ind] ]), identifier[np] . identifier[squeeze] ( identifier[stds_1] [ identifier[ind] ])
keyword[return] identifier[np] . identifier[array] ( identifier[list] ( identifier[multiprocess_mapping] ( identifier[_ComputeGaussianOverlap] ( identifier[lower] , identifier[upper] ), identifier[point_iterator] ()))) | def gaussian_overlapping_coefficient(means_0, stds_0, means_1, stds_1, lower=None, upper=None):
"""Compute the overlapping coefficient of two Gaussian continuous_distributions.
This computes the :math:`\\int_{-\\infty}^{\\infty}{\\min(f(x), g(x))\\partial x}` where
:math:`f \\sim \\mathcal{N}(\\mu_0, \\sigma_0^{2})` and :math:`f \\sim \\mathcal{N}(\\mu_1, \\sigma_1^{2})` are normally
distributed variables.
This will compute the overlap for each element in the first dimension.
Args:
means_0 (ndarray): the set of means of the first distribution
stds_0 (ndarray): the set of stds of the fist distribution
means_1 (ndarray): the set of means of the second distribution
stds_1 (ndarray): the set of stds of the second distribution
lower (float): the lower limit of the integration. If not set we set it to -inf.
upper (float): the upper limit of the integration. If not set we set it to +inf.
"""
if lower is None:
lower = -np.inf # depends on [control=['if'], data=['lower']]
if upper is None:
upper = np.inf # depends on [control=['if'], data=['upper']]
def point_iterator():
for ind in range(means_0.shape[0]):
yield (np.squeeze(means_0[ind]), np.squeeze(stds_0[ind]), np.squeeze(means_1[ind]), np.squeeze(stds_1[ind])) # depends on [control=['for'], data=['ind']]
return np.array(list(multiprocess_mapping(_ComputeGaussianOverlap(lower, upper), point_iterator()))) |
def finalizeOp(self, ops, account, permission, **kwargs):
""" This method obtains the required private keys if present in
the wallet, finalizes the transaction, signs it and
broadacasts it
:param operation ops: The operation (or list of operaions) to
broadcast
:param operation account: The account that authorizes the
operation
:param string permission: The required permission for
signing (active, owner, posting)
:param object append_to: This allows to provide an instance of
ProposalsBuilder (see :func:`new_proposal`) or
TransactionBuilder (see :func:`new_tx()`) to specify
where to put a specific operation.
... note:: ``append_to`` is exposed to every method used in the
this class
... note::
If ``ops`` is a list of operation, they all need to be
signable by the same key! Thus, you cannot combine ops
that require active permission with ops that require
posting permission. Neither can you use different
accounts for different operations!
... note:: This uses ``txbuffer`` as instance of
:class:`transactionbuilder.TransactionBuilder`.
You may want to use your own txbuffer
"""
if "append_to" in kwargs and kwargs["append_to"]:
if self.proposer:
log.warning(
"You may not use append_to and self.proposer at "
"the same time. Append new_proposal(..) instead"
)
# Append to the append_to and return
append_to = kwargs["append_to"]
parent = append_to.get_parent()
assert isinstance(
append_to, (self.transactionbuilder_class, self.proposalbuilder_class)
)
append_to.appendOps(ops)
# Add the signer to the buffer so we sign the tx properly
if isinstance(append_to, self.proposalbuilder_class):
parent.appendSigner(append_to.proposer, permission)
else:
parent.appendSigner(account, permission)
# This returns as we used append_to, it does NOT broadcast, or sign
return append_to.get_parent()
elif self.proposer:
# Legacy proposer mode!
proposal = self.proposal()
proposal.set_proposer(self.proposer)
proposal.set_expiration(self.proposal_expiration)
proposal.set_review(self.proposal_review)
proposal.appendOps(ops)
# Go forward to see what the other options do ...
else:
# Append tot he default buffer
self.txbuffer.appendOps(ops)
# The API that obtains the fee only allows to specify one particular
# fee asset for all operations in that transaction even though the
# blockchain itself could allow to pay multiple operations with
# different fee assets.
if "fee_asset" in kwargs and kwargs["fee_asset"]:
self.txbuffer.set_fee_asset(kwargs["fee_asset"])
# Add signing information, signer, sign and optionally broadcast
if self.unsigned:
# In case we don't want to sign anything
self.txbuffer.addSigningInformation(account, permission)
return self.txbuffer
elif self.bundle:
# In case we want to add more ops to the tx (bundle)
self.txbuffer.appendSigner(account, permission)
return self.txbuffer.json()
else:
# default behavior: sign + broadcast
self.txbuffer.appendSigner(account, permission)
self.txbuffer.sign()
return self.txbuffer.broadcast() | def function[finalizeOp, parameter[self, ops, account, permission]]:
constant[ This method obtains the required private keys if present in
the wallet, finalizes the transaction, signs it and
broadacasts it
:param operation ops: The operation (or list of operaions) to
broadcast
:param operation account: The account that authorizes the
operation
:param string permission: The required permission for
signing (active, owner, posting)
:param object append_to: This allows to provide an instance of
ProposalsBuilder (see :func:`new_proposal`) or
TransactionBuilder (see :func:`new_tx()`) to specify
where to put a specific operation.
... note:: ``append_to`` is exposed to every method used in the
this class
... note::
If ``ops`` is a list of operation, they all need to be
signable by the same key! Thus, you cannot combine ops
that require active permission with ops that require
posting permission. Neither can you use different
accounts for different operations!
... note:: This uses ``txbuffer`` as instance of
:class:`transactionbuilder.TransactionBuilder`.
You may want to use your own txbuffer
]
if <ast.BoolOp object at 0x7da1b0060220> begin[:]
if name[self].proposer begin[:]
call[name[log].warning, parameter[constant[You may not use append_to and self.proposer at the same time. Append new_proposal(..) instead]]]
variable[append_to] assign[=] call[name[kwargs]][constant[append_to]]
variable[parent] assign[=] call[name[append_to].get_parent, parameter[]]
assert[call[name[isinstance], parameter[name[append_to], tuple[[<ast.Attribute object at 0x7da1b0062680>, <ast.Attribute object at 0x7da1b0063220>]]]]]
call[name[append_to].appendOps, parameter[name[ops]]]
if call[name[isinstance], parameter[name[append_to], name[self].proposalbuilder_class]] begin[:]
call[name[parent].appendSigner, parameter[name[append_to].proposer, name[permission]]]
return[call[name[append_to].get_parent, parameter[]]]
if <ast.BoolOp object at 0x7da1b00639a0> begin[:]
call[name[self].txbuffer.set_fee_asset, parameter[call[name[kwargs]][constant[fee_asset]]]]
if name[self].unsigned begin[:]
call[name[self].txbuffer.addSigningInformation, parameter[name[account], name[permission]]]
return[name[self].txbuffer] | keyword[def] identifier[finalizeOp] ( identifier[self] , identifier[ops] , identifier[account] , identifier[permission] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ]:
keyword[if] identifier[self] . identifier[proposer] :
identifier[log] . identifier[warning] (
literal[string]
literal[string]
)
identifier[append_to] = identifier[kwargs] [ literal[string] ]
identifier[parent] = identifier[append_to] . identifier[get_parent] ()
keyword[assert] identifier[isinstance] (
identifier[append_to] ,( identifier[self] . identifier[transactionbuilder_class] , identifier[self] . identifier[proposalbuilder_class] )
)
identifier[append_to] . identifier[appendOps] ( identifier[ops] )
keyword[if] identifier[isinstance] ( identifier[append_to] , identifier[self] . identifier[proposalbuilder_class] ):
identifier[parent] . identifier[appendSigner] ( identifier[append_to] . identifier[proposer] , identifier[permission] )
keyword[else] :
identifier[parent] . identifier[appendSigner] ( identifier[account] , identifier[permission] )
keyword[return] identifier[append_to] . identifier[get_parent] ()
keyword[elif] identifier[self] . identifier[proposer] :
identifier[proposal] = identifier[self] . identifier[proposal] ()
identifier[proposal] . identifier[set_proposer] ( identifier[self] . identifier[proposer] )
identifier[proposal] . identifier[set_expiration] ( identifier[self] . identifier[proposal_expiration] )
identifier[proposal] . identifier[set_review] ( identifier[self] . identifier[proposal_review] )
identifier[proposal] . identifier[appendOps] ( identifier[ops] )
keyword[else] :
identifier[self] . identifier[txbuffer] . identifier[appendOps] ( identifier[ops] )
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ]:
identifier[self] . identifier[txbuffer] . identifier[set_fee_asset] ( identifier[kwargs] [ literal[string] ])
keyword[if] identifier[self] . identifier[unsigned] :
identifier[self] . identifier[txbuffer] . identifier[addSigningInformation] ( identifier[account] , identifier[permission] )
keyword[return] identifier[self] . identifier[txbuffer]
keyword[elif] identifier[self] . identifier[bundle] :
identifier[self] . identifier[txbuffer] . identifier[appendSigner] ( identifier[account] , identifier[permission] )
keyword[return] identifier[self] . identifier[txbuffer] . identifier[json] ()
keyword[else] :
identifier[self] . identifier[txbuffer] . identifier[appendSigner] ( identifier[account] , identifier[permission] )
identifier[self] . identifier[txbuffer] . identifier[sign] ()
keyword[return] identifier[self] . identifier[txbuffer] . identifier[broadcast] () | def finalizeOp(self, ops, account, permission, **kwargs):
""" This method obtains the required private keys if present in
the wallet, finalizes the transaction, signs it and
broadacasts it
:param operation ops: The operation (or list of operaions) to
broadcast
:param operation account: The account that authorizes the
operation
:param string permission: The required permission for
signing (active, owner, posting)
:param object append_to: This allows to provide an instance of
ProposalsBuilder (see :func:`new_proposal`) or
TransactionBuilder (see :func:`new_tx()`) to specify
where to put a specific operation.
... note:: ``append_to`` is exposed to every method used in the
this class
... note::
If ``ops`` is a list of operation, they all need to be
signable by the same key! Thus, you cannot combine ops
that require active permission with ops that require
posting permission. Neither can you use different
accounts for different operations!
... note:: This uses ``txbuffer`` as instance of
:class:`transactionbuilder.TransactionBuilder`.
You may want to use your own txbuffer
"""
if 'append_to' in kwargs and kwargs['append_to']:
if self.proposer:
log.warning('You may not use append_to and self.proposer at the same time. Append new_proposal(..) instead') # depends on [control=['if'], data=[]]
# Append to the append_to and return
append_to = kwargs['append_to']
parent = append_to.get_parent()
assert isinstance(append_to, (self.transactionbuilder_class, self.proposalbuilder_class))
append_to.appendOps(ops)
# Add the signer to the buffer so we sign the tx properly
if isinstance(append_to, self.proposalbuilder_class):
parent.appendSigner(append_to.proposer, permission) # depends on [control=['if'], data=[]]
else:
parent.appendSigner(account, permission)
# This returns as we used append_to, it does NOT broadcast, or sign
return append_to.get_parent() # depends on [control=['if'], data=[]]
elif self.proposer:
# Legacy proposer mode!
proposal = self.proposal()
proposal.set_proposer(self.proposer)
proposal.set_expiration(self.proposal_expiration)
proposal.set_review(self.proposal_review)
proposal.appendOps(ops) # depends on [control=['if'], data=[]]
else:
# Go forward to see what the other options do ...
# Append tot he default buffer
self.txbuffer.appendOps(ops)
# The API that obtains the fee only allows to specify one particular
# fee asset for all operations in that transaction even though the
# blockchain itself could allow to pay multiple operations with
# different fee assets.
if 'fee_asset' in kwargs and kwargs['fee_asset']:
self.txbuffer.set_fee_asset(kwargs['fee_asset']) # depends on [control=['if'], data=[]]
# Add signing information, signer, sign and optionally broadcast
if self.unsigned:
# In case we don't want to sign anything
self.txbuffer.addSigningInformation(account, permission)
return self.txbuffer # depends on [control=['if'], data=[]]
elif self.bundle:
# In case we want to add more ops to the tx (bundle)
self.txbuffer.appendSigner(account, permission)
return self.txbuffer.json() # depends on [control=['if'], data=[]]
else:
# default behavior: sign + broadcast
self.txbuffer.appendSigner(account, permission)
self.txbuffer.sign()
return self.txbuffer.broadcast() |
def plot_ts(fignum, dates, ts):
"""
plot the geomagnetic polarity time scale
Parameters
__________
fignum : matplotlib figure number
dates : bounding dates for plot
ts : time scale ck95, gts04, or gts12
"""
vertical_plot_init(fignum, 10, 3)
TS, Chrons = pmag.get_ts(ts)
p = 1
X, Y = [], []
for d in TS:
if d <= dates[1]:
if d >= dates[0]:
if len(X) == 0:
ind = TS.index(d)
X.append(TS[ind - 1])
Y.append(p % 2)
X.append(d)
Y.append(p % 2)
p += 1
X.append(d)
Y.append(p % 2)
else:
X.append(dates[1])
Y.append(p % 2)
plt.plot(X, Y, 'k')
plot_vs(fignum, dates, 'w', '-')
plot_hs(fignum, [1.1, -.1], 'w', '-')
plt.xlabel("Age (Ma): " + ts)
isign = -1
for c in Chrons:
off = -.1
isign = -1 * isign
if isign > 0:
off = 1.05
if c[1] >= X[0] and c[1] < X[-1]:
plt.text(c[1] - .2, off, c[0])
return | def function[plot_ts, parameter[fignum, dates, ts]]:
constant[
plot the geomagnetic polarity time scale
Parameters
__________
fignum : matplotlib figure number
dates : bounding dates for plot
ts : time scale ck95, gts04, or gts12
]
call[name[vertical_plot_init], parameter[name[fignum], constant[10], constant[3]]]
<ast.Tuple object at 0x7da20c992ce0> assign[=] call[name[pmag].get_ts, parameter[name[ts]]]
variable[p] assign[=] constant[1]
<ast.Tuple object at 0x7da20c992e90> assign[=] tuple[[<ast.List object at 0x7da20c9925c0>, <ast.List object at 0x7da20c9911b0>]]
for taget[name[d]] in starred[name[TS]] begin[:]
if compare[name[d] less_or_equal[<=] call[name[dates]][constant[1]]] begin[:]
if compare[name[d] greater_or_equal[>=] call[name[dates]][constant[0]]] begin[:]
if compare[call[name[len], parameter[name[X]]] equal[==] constant[0]] begin[:]
variable[ind] assign[=] call[name[TS].index, parameter[name[d]]]
call[name[X].append, parameter[call[name[TS]][binary_operation[name[ind] - constant[1]]]]]
call[name[Y].append, parameter[binary_operation[name[p] <ast.Mod object at 0x7da2590d6920> constant[2]]]]
call[name[X].append, parameter[name[d]]]
call[name[Y].append, parameter[binary_operation[name[p] <ast.Mod object at 0x7da2590d6920> constant[2]]]]
<ast.AugAssign object at 0x7da20c993280>
call[name[X].append, parameter[name[d]]]
call[name[Y].append, parameter[binary_operation[name[p] <ast.Mod object at 0x7da2590d6920> constant[2]]]] | keyword[def] identifier[plot_ts] ( identifier[fignum] , identifier[dates] , identifier[ts] ):
literal[string]
identifier[vertical_plot_init] ( identifier[fignum] , literal[int] , literal[int] )
identifier[TS] , identifier[Chrons] = identifier[pmag] . identifier[get_ts] ( identifier[ts] )
identifier[p] = literal[int]
identifier[X] , identifier[Y] =[],[]
keyword[for] identifier[d] keyword[in] identifier[TS] :
keyword[if] identifier[d] <= identifier[dates] [ literal[int] ]:
keyword[if] identifier[d] >= identifier[dates] [ literal[int] ]:
keyword[if] identifier[len] ( identifier[X] )== literal[int] :
identifier[ind] = identifier[TS] . identifier[index] ( identifier[d] )
identifier[X] . identifier[append] ( identifier[TS] [ identifier[ind] - literal[int] ])
identifier[Y] . identifier[append] ( identifier[p] % literal[int] )
identifier[X] . identifier[append] ( identifier[d] )
identifier[Y] . identifier[append] ( identifier[p] % literal[int] )
identifier[p] += literal[int]
identifier[X] . identifier[append] ( identifier[d] )
identifier[Y] . identifier[append] ( identifier[p] % literal[int] )
keyword[else] :
identifier[X] . identifier[append] ( identifier[dates] [ literal[int] ])
identifier[Y] . identifier[append] ( identifier[p] % literal[int] )
identifier[plt] . identifier[plot] ( identifier[X] , identifier[Y] , literal[string] )
identifier[plot_vs] ( identifier[fignum] , identifier[dates] , literal[string] , literal[string] )
identifier[plot_hs] ( identifier[fignum] ,[ literal[int] ,- literal[int] ], literal[string] , literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] + identifier[ts] )
identifier[isign] =- literal[int]
keyword[for] identifier[c] keyword[in] identifier[Chrons] :
identifier[off] =- literal[int]
identifier[isign] =- literal[int] * identifier[isign]
keyword[if] identifier[isign] > literal[int] :
identifier[off] = literal[int]
keyword[if] identifier[c] [ literal[int] ]>= identifier[X] [ literal[int] ] keyword[and] identifier[c] [ literal[int] ]< identifier[X] [- literal[int] ]:
identifier[plt] . identifier[text] ( identifier[c] [ literal[int] ]- literal[int] , identifier[off] , identifier[c] [ literal[int] ])
keyword[return] | def plot_ts(fignum, dates, ts):
"""
plot the geomagnetic polarity time scale
Parameters
__________
fignum : matplotlib figure number
dates : bounding dates for plot
ts : time scale ck95, gts04, or gts12
"""
vertical_plot_init(fignum, 10, 3)
(TS, Chrons) = pmag.get_ts(ts)
p = 1
(X, Y) = ([], [])
for d in TS:
if d <= dates[1]:
if d >= dates[0]:
if len(X) == 0:
ind = TS.index(d)
X.append(TS[ind - 1])
Y.append(p % 2) # depends on [control=['if'], data=[]]
X.append(d)
Y.append(p % 2)
p += 1
X.append(d)
Y.append(p % 2) # depends on [control=['if'], data=['d']] # depends on [control=['if'], data=['d']]
else:
X.append(dates[1])
Y.append(p % 2)
plt.plot(X, Y, 'k')
plot_vs(fignum, dates, 'w', '-')
plot_hs(fignum, [1.1, -0.1], 'w', '-')
plt.xlabel('Age (Ma): ' + ts)
isign = -1
for c in Chrons:
off = -0.1
isign = -1 * isign
if isign > 0:
off = 1.05 # depends on [control=['if'], data=[]]
if c[1] >= X[0] and c[1] < X[-1]:
plt.text(c[1] - 0.2, off, c[0]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return # depends on [control=['for'], data=['d']] |
def fix_indentation(code, new_indents):
"""Change the indentation of `code` to `new_indents`"""
min_indents = find_minimum_indents(code)
return indent_lines(code, new_indents - min_indents) | def function[fix_indentation, parameter[code, new_indents]]:
constant[Change the indentation of `code` to `new_indents`]
variable[min_indents] assign[=] call[name[find_minimum_indents], parameter[name[code]]]
return[call[name[indent_lines], parameter[name[code], binary_operation[name[new_indents] - name[min_indents]]]]] | keyword[def] identifier[fix_indentation] ( identifier[code] , identifier[new_indents] ):
literal[string]
identifier[min_indents] = identifier[find_minimum_indents] ( identifier[code] )
keyword[return] identifier[indent_lines] ( identifier[code] , identifier[new_indents] - identifier[min_indents] ) | def fix_indentation(code, new_indents):
"""Change the indentation of `code` to `new_indents`"""
min_indents = find_minimum_indents(code)
return indent_lines(code, new_indents - min_indents) |
def get_child_vaults(self, vault_id):
"""Gets the children of the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
return: (osid.authorization.VaultList) - the children of the
vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bins
if self._catalog_session is not None:
return self._catalog_session.get_child_catalogs(catalog_id=vault_id)
return VaultLookupSession(
self._proxy,
self._runtime).get_vaults_by_ids(
list(self.get_child_vault_ids(vault_id))) | def function[get_child_vaults, parameter[self, vault_id]]:
constant[Gets the children of the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
return: (osid.authorization.VaultList) - the children of the
vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.get_child_catalogs, parameter[]]]
return[call[call[name[VaultLookupSession], parameter[name[self]._proxy, name[self]._runtime]].get_vaults_by_ids, parameter[call[name[list], parameter[call[name[self].get_child_vault_ids, parameter[name[vault_id]]]]]]]] | keyword[def] identifier[get_child_vaults] ( identifier[self] , identifier[vault_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[get_child_catalogs] ( identifier[catalog_id] = identifier[vault_id] )
keyword[return] identifier[VaultLookupSession] (
identifier[self] . identifier[_proxy] ,
identifier[self] . identifier[_runtime] ). identifier[get_vaults_by_ids] (
identifier[list] ( identifier[self] . identifier[get_child_vault_ids] ( identifier[vault_id] ))) | def get_child_vaults(self, vault_id):
"""Gets the children of the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
return: (osid.authorization.VaultList) - the children of the
vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bins
if self._catalog_session is not None:
return self._catalog_session.get_child_catalogs(catalog_id=vault_id) # depends on [control=['if'], data=[]]
return VaultLookupSession(self._proxy, self._runtime).get_vaults_by_ids(list(self.get_child_vault_ids(vault_id))) |
def close(self):
"""Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
All pending, unsent messages will remain uncleared to allow
them to be inspected and queued to a new client.
"""
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._shutdown = True
if self._keep_alive_thread:
self._keep_alive_thread.join()
self._keep_alive_thread = None
if not self._session:
return # already closed.
if not self._connection.cbs:
_logger.debug("Closing non-CBS session.")
self._session.destroy()
else:
_logger.debug("CBS session pending.")
self._session = None
if not self._ext_connection:
_logger.debug("Closing exclusive connection.")
self._connection.destroy()
else:
_logger.debug("Shared connection remaining open.")
self._connection = None | def function[close, parameter[self]]:
constant[Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
All pending, unsent messages will remain uncleared to allow
them to be inspected and queued to a new client.
]
if name[self].message_handler begin[:]
call[name[self].message_handler.destroy, parameter[]]
name[self].message_handler assign[=] constant[None]
name[self]._shutdown assign[=] constant[True]
if name[self]._keep_alive_thread begin[:]
call[name[self]._keep_alive_thread.join, parameter[]]
name[self]._keep_alive_thread assign[=] constant[None]
if <ast.UnaryOp object at 0x7da20c7ca8c0> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da20c7c8c70> begin[:]
call[name[_logger].debug, parameter[constant[Closing non-CBS session.]]]
call[name[self]._session.destroy, parameter[]]
name[self]._session assign[=] constant[None]
if <ast.UnaryOp object at 0x7da20c7cb6a0> begin[:]
call[name[_logger].debug, parameter[constant[Closing exclusive connection.]]]
call[name[self]._connection.destroy, parameter[]]
name[self]._connection assign[=] constant[None] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[message_handler] :
identifier[self] . identifier[message_handler] . identifier[destroy] ()
identifier[self] . identifier[message_handler] = keyword[None]
identifier[self] . identifier[_shutdown] = keyword[True]
keyword[if] identifier[self] . identifier[_keep_alive_thread] :
identifier[self] . identifier[_keep_alive_thread] . identifier[join] ()
identifier[self] . identifier[_keep_alive_thread] = keyword[None]
keyword[if] keyword[not] identifier[self] . identifier[_session] :
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[_connection] . identifier[cbs] :
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_session] . identifier[destroy] ()
keyword[else] :
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_session] = keyword[None]
keyword[if] keyword[not] identifier[self] . identifier[_ext_connection] :
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_connection] . identifier[destroy] ()
keyword[else] :
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_connection] = keyword[None] | def close(self):
"""Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
All pending, unsent messages will remain uncleared to allow
them to be inspected and queued to a new client.
"""
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None # depends on [control=['if'], data=[]]
self._shutdown = True
if self._keep_alive_thread:
self._keep_alive_thread.join()
self._keep_alive_thread = None # depends on [control=['if'], data=[]]
if not self._session:
return # already closed. # depends on [control=['if'], data=[]]
if not self._connection.cbs:
_logger.debug('Closing non-CBS session.')
self._session.destroy() # depends on [control=['if'], data=[]]
else:
_logger.debug('CBS session pending.')
self._session = None
if not self._ext_connection:
_logger.debug('Closing exclusive connection.')
self._connection.destroy() # depends on [control=['if'], data=[]]
else:
_logger.debug('Shared connection remaining open.')
self._connection = None |
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
if file is None:
file = sys.stderr
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
_print(file,
' File "%s", line %d, in %s' % (filename, lineno, name))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: _print(file, ' ' + line.strip())
tb = tb.tb_next
n = n+1 | def function[print_tb, parameter[tb, limit, file]]:
constant[Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
]
if compare[name[file] is constant[None]] begin[:]
variable[file] assign[=] name[sys].stderr
if compare[name[limit] is constant[None]] begin[:]
if call[name[hasattr], parameter[name[sys], constant[tracebacklimit]]] begin[:]
variable[limit] assign[=] name[sys].tracebacklimit
variable[n] assign[=] constant[0]
while <ast.BoolOp object at 0x7da18bccbfd0> begin[:]
variable[f] assign[=] name[tb].tb_frame
variable[lineno] assign[=] name[tb].tb_lineno
variable[co] assign[=] name[f].f_code
variable[filename] assign[=] name[co].co_filename
variable[name] assign[=] name[co].co_name
call[name[_print], parameter[name[file], binary_operation[constant[ File "%s", line %d, in %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bcc8760>, <ast.Name object at 0x7da18bccbcd0>, <ast.Name object at 0x7da18bcc9090>]]]]]
call[name[linecache].checkcache, parameter[name[filename]]]
variable[line] assign[=] call[name[linecache].getline, parameter[name[filename], name[lineno], name[f].f_globals]]
if name[line] begin[:]
call[name[_print], parameter[name[file], binary_operation[constant[ ] + call[name[line].strip, parameter[]]]]]
variable[tb] assign[=] name[tb].tb_next
variable[n] assign[=] binary_operation[name[n] + constant[1]] | keyword[def] identifier[print_tb] ( identifier[tb] , identifier[limit] = keyword[None] , identifier[file] = keyword[None] ):
literal[string]
keyword[if] identifier[file] keyword[is] keyword[None] :
identifier[file] = identifier[sys] . identifier[stderr]
keyword[if] identifier[limit] keyword[is] keyword[None] :
keyword[if] identifier[hasattr] ( identifier[sys] , literal[string] ):
identifier[limit] = identifier[sys] . identifier[tracebacklimit]
identifier[n] = literal[int]
keyword[while] identifier[tb] keyword[is] keyword[not] keyword[None] keyword[and] ( identifier[limit] keyword[is] keyword[None] keyword[or] identifier[n] < identifier[limit] ):
identifier[f] = identifier[tb] . identifier[tb_frame]
identifier[lineno] = identifier[tb] . identifier[tb_lineno]
identifier[co] = identifier[f] . identifier[f_code]
identifier[filename] = identifier[co] . identifier[co_filename]
identifier[name] = identifier[co] . identifier[co_name]
identifier[_print] ( identifier[file] ,
literal[string] %( identifier[filename] , identifier[lineno] , identifier[name] ))
identifier[linecache] . identifier[checkcache] ( identifier[filename] )
identifier[line] = identifier[linecache] . identifier[getline] ( identifier[filename] , identifier[lineno] , identifier[f] . identifier[f_globals] )
keyword[if] identifier[line] : identifier[_print] ( identifier[file] , literal[string] + identifier[line] . identifier[strip] ())
identifier[tb] = identifier[tb] . identifier[tb_next]
identifier[n] = identifier[n] + literal[int] | def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
if file is None:
file = sys.stderr # depends on [control=['if'], data=['file']]
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['limit']]
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
_print(file, ' File "%s", line %d, in %s' % (filename, lineno, name))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
_print(file, ' ' + line.strip()) # depends on [control=['if'], data=[]]
tb = tb.tb_next
n = n + 1 # depends on [control=['while'], data=[]] |
def version(path):
"""Obtain the packge version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
"""
version_file = read(path)
version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.") | def function[version, parameter[path]]:
constant[Obtain the packge version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
]
variable[version_file] assign[=] call[name[read], parameter[name[path]]]
variable[version_match] assign[=] call[name[re].search, parameter[constant[^__version__ = ['"]([^'"]*)['"]], name[version_file], name[re].M]]
if name[version_match] begin[:]
return[call[name[version_match].group, parameter[constant[1]]]]
<ast.Raise object at 0x7da204961d80> | keyword[def] identifier[version] ( identifier[path] ):
literal[string]
identifier[version_file] = identifier[read] ( identifier[path] )
identifier[version_match] = identifier[re] . identifier[search] ( literal[string] ,
identifier[version_file] , identifier[re] . identifier[M] )
keyword[if] identifier[version_match] :
keyword[return] identifier[version_match] . identifier[group] ( literal[int] )
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def version(path):
"""Obtain the packge version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
"""
version_file = read(path)
version_match = re.search('^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.M)
if version_match:
return version_match.group(1) # depends on [control=['if'], data=[]]
raise RuntimeError('Unable to find version string.') |
def _check_for_encoding(b):
"""You can use a different encoding from UTF-8 by putting a specially-formatted
comment as the first or second line of the source code."""
eol = b.find(b'\n')
if eol < 0:
return _check_line_for_encoding(b)[0]
enc, again = _check_line_for_encoding(b[:eol])
if enc or not again:
return enc
eol2 = b.find(b'\n', eol + 1)
if eol2 < 0:
return _check_line_for_encoding(b[eol + 1:])[0]
return _check_line_for_encoding(b[eol + 1:eol2])[0] | def function[_check_for_encoding, parameter[b]]:
constant[You can use a different encoding from UTF-8 by putting a specially-formatted
comment as the first or second line of the source code.]
variable[eol] assign[=] call[name[b].find, parameter[constant[b'\n']]]
if compare[name[eol] less[<] constant[0]] begin[:]
return[call[call[name[_check_line_for_encoding], parameter[name[b]]]][constant[0]]]
<ast.Tuple object at 0x7da204963340> assign[=] call[name[_check_line_for_encoding], parameter[call[name[b]][<ast.Slice object at 0x7da18bc72830>]]]
if <ast.BoolOp object at 0x7da18bc70e50> begin[:]
return[name[enc]]
variable[eol2] assign[=] call[name[b].find, parameter[constant[b'\n'], binary_operation[name[eol] + constant[1]]]]
if compare[name[eol2] less[<] constant[0]] begin[:]
return[call[call[name[_check_line_for_encoding], parameter[call[name[b]][<ast.Slice object at 0x7da18bc71d20>]]]][constant[0]]]
return[call[call[name[_check_line_for_encoding], parameter[call[name[b]][<ast.Slice object at 0x7da18bc72380>]]]][constant[0]]] | keyword[def] identifier[_check_for_encoding] ( identifier[b] ):
literal[string]
identifier[eol] = identifier[b] . identifier[find] ( literal[string] )
keyword[if] identifier[eol] < literal[int] :
keyword[return] identifier[_check_line_for_encoding] ( identifier[b] )[ literal[int] ]
identifier[enc] , identifier[again] = identifier[_check_line_for_encoding] ( identifier[b] [: identifier[eol] ])
keyword[if] identifier[enc] keyword[or] keyword[not] identifier[again] :
keyword[return] identifier[enc]
identifier[eol2] = identifier[b] . identifier[find] ( literal[string] , identifier[eol] + literal[int] )
keyword[if] identifier[eol2] < literal[int] :
keyword[return] identifier[_check_line_for_encoding] ( identifier[b] [ identifier[eol] + literal[int] :])[ literal[int] ]
keyword[return] identifier[_check_line_for_encoding] ( identifier[b] [ identifier[eol] + literal[int] : identifier[eol2] ])[ literal[int] ] | def _check_for_encoding(b):
"""You can use a different encoding from UTF-8 by putting a specially-formatted
comment as the first or second line of the source code."""
eol = b.find(b'\n')
if eol < 0:
return _check_line_for_encoding(b)[0] # depends on [control=['if'], data=[]]
(enc, again) = _check_line_for_encoding(b[:eol])
if enc or not again:
return enc # depends on [control=['if'], data=[]]
eol2 = b.find(b'\n', eol + 1)
if eol2 < 0:
return _check_line_for_encoding(b[eol + 1:])[0] # depends on [control=['if'], data=[]]
return _check_line_for_encoding(b[eol + 1:eol2])[0] |
def request(self, server=None):
""" Sends the request """
request = {
"headers": {"Content-Type": "application/json"},
"timeout": self._pump.timeout,
"data": self.context,
}
url = "{proto}://{server}/{endpoint}".format(
proto=self._pump.protocol,
server=server or self.server,
endpoint=self.ENDPOINT,
)
response = self._pump._requester(requests.post, url, **request)
try:
server_data = response.json()
except ValueError:
raise ClientException(response.content)
if "error" in server_data:
raise ClientException(server_data["error"], self.context)
_log.debug("Client registration recieved: %(id)s %(secret)s %(expire)s", {
"id": server_data["client_id"],
"secret": server_data["client_secret"],
"expire": server_data["expires_at"],
})
return server_data | def function[request, parameter[self, server]]:
constant[ Sends the request ]
variable[request] assign[=] dictionary[[<ast.Constant object at 0x7da1b26081f0>, <ast.Constant object at 0x7da1b2608850>, <ast.Constant object at 0x7da1b2609480>], [<ast.Dict object at 0x7da1b260a020>, <ast.Attribute object at 0x7da1b2609f60>, <ast.Attribute object at 0x7da1b260bc70>]]
variable[url] assign[=] call[constant[{proto}://{server}/{endpoint}].format, parameter[]]
variable[response] assign[=] call[name[self]._pump._requester, parameter[name[requests].post, name[url]]]
<ast.Try object at 0x7da1b2609240>
if compare[constant[error] in name[server_data]] begin[:]
<ast.Raise object at 0x7da1b2839780>
call[name[_log].debug, parameter[constant[Client registration recieved: %(id)s %(secret)s %(expire)s], dictionary[[<ast.Constant object at 0x7da1b283ace0>, <ast.Constant object at 0x7da1b2838b50>, <ast.Constant object at 0x7da1b283b070>], [<ast.Subscript object at 0x7da1b2839b40>, <ast.Subscript object at 0x7da1b283bf70>, <ast.Subscript object at 0x7da1b283af50>]]]]
return[name[server_data]] | keyword[def] identifier[request] ( identifier[self] , identifier[server] = keyword[None] ):
literal[string]
identifier[request] ={
literal[string] :{ literal[string] : literal[string] },
literal[string] : identifier[self] . identifier[_pump] . identifier[timeout] ,
literal[string] : identifier[self] . identifier[context] ,
}
identifier[url] = literal[string] . identifier[format] (
identifier[proto] = identifier[self] . identifier[_pump] . identifier[protocol] ,
identifier[server] = identifier[server] keyword[or] identifier[self] . identifier[server] ,
identifier[endpoint] = identifier[self] . identifier[ENDPOINT] ,
)
identifier[response] = identifier[self] . identifier[_pump] . identifier[_requester] ( identifier[requests] . identifier[post] , identifier[url] ,** identifier[request] )
keyword[try] :
identifier[server_data] = identifier[response] . identifier[json] ()
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ClientException] ( identifier[response] . identifier[content] )
keyword[if] literal[string] keyword[in] identifier[server_data] :
keyword[raise] identifier[ClientException] ( identifier[server_data] [ literal[string] ], identifier[self] . identifier[context] )
identifier[_log] . identifier[debug] ( literal[string] ,{
literal[string] : identifier[server_data] [ literal[string] ],
literal[string] : identifier[server_data] [ literal[string] ],
literal[string] : identifier[server_data] [ literal[string] ],
})
keyword[return] identifier[server_data] | def request(self, server=None):
""" Sends the request """
request = {'headers': {'Content-Type': 'application/json'}, 'timeout': self._pump.timeout, 'data': self.context}
url = '{proto}://{server}/{endpoint}'.format(proto=self._pump.protocol, server=server or self.server, endpoint=self.ENDPOINT)
response = self._pump._requester(requests.post, url, **request)
try:
server_data = response.json() # depends on [control=['try'], data=[]]
except ValueError:
raise ClientException(response.content) # depends on [control=['except'], data=[]]
if 'error' in server_data:
raise ClientException(server_data['error'], self.context) # depends on [control=['if'], data=['server_data']]
_log.debug('Client registration recieved: %(id)s %(secret)s %(expire)s', {'id': server_data['client_id'], 'secret': server_data['client_secret'], 'expire': server_data['expires_at']})
return server_data |
def upload(directory, metadata_csv, master_token=None, member=None,
access_token=None, safe=False, sync=False, max_size='128m',
mode='default', verbose=False, debug=False):
"""
Upload files for the project to Open Humans member accounts.
If using a master access token and not specifying member ID:
(1) Files should be organized in subdirectories according to project
member ID, e.g.:
main_directory/01234567/data.json
main_directory/12345678/data.json
main_directory/23456789/data.json
(2) The metadata CSV should have the following format:
1st column: Project member ID
2nd column: filenames
3rd & additional columns: Metadata fields (see below)
If uploading for a specific member:
(1) The local directory should not contain subdirectories.
(2) The metadata CSV should have the following format:
1st column: filenames
2nd & additional columns: Metadata fields (see below)
The default behavior is to overwrite files with matching filenames on
Open Humans, but not otherwise delete files. (Use --safe or --sync to
change this behavior.)
If included, the following metadata columns should be correctly formatted:
'tags': should be comma-separated strings
'md5': should match the file's md5 hexdigest
'creation_date', 'start_date', 'end_date': ISO 8601 dates or datetimes
Other metedata fields (e.g. 'description') can be arbitrary strings.
Either specify sync as True or safe as True but not both.
:param directory: This field is the target directory from which data will
be uploaded.
:param metadata_csv: This field is the filepath of the metadata csv file.
:param master_token: This field is the master access token for the project.
It's default value is None.
:param member: This field is specific member whose project data is
downloaded. It's default value is None.
:param access_token: This field is the user specific access token. It's
default value is None.
:param safe: This boolean field will overwrite matching filename. It's
default value is False.
:param sync: This boolean field will delete files on Open Humans that are
not in the local directory. It's default value is False.
:param max_size: This field is the maximum file size. It's default value is
None.
:param mode: This field takes three value default, sync, safe. It's default
value is 'default'.
:param verbose: This boolean field is the logging level. It's default value
is False.
:param debug: This boolean field is the logging level. It's default value
is False.
"""
if safe and sync:
raise UsageError('Safe (--safe) and sync (--sync) modes are mutually '
'incompatible!')
if not (master_token or access_token) or (master_token and access_token):
raise UsageError('Please specify either a master access token (-T), '
'or an OAuth2 user access token (-t).')
set_log_level(debug, verbose)
if sync:
mode = 'sync'
elif safe:
mode = 'safe'
metadata = load_metadata_csv(metadata_csv)
subdirs = [i for i in os.listdir(directory) if
os.path.isdir(os.path.join(directory, i))]
if subdirs:
if not all([re.match(r'^[0-9]{8}$', d) for d in subdirs]):
raise UsageError(
"Subdirs expected to match project member ID format!")
if (master_token and member) or not master_token:
raise UsageError(
"Subdirs shouldn't exist if uploading for specific member!")
project = OHProject(master_access_token=master_token)
for member_id in subdirs:
subdir_path = os.path.join(directory, member_id)
project.upload_member_from_dir(
member_data=project.project_data[member_id],
target_member_dir=subdir_path,
metadata=metadata[member_id],
mode=mode,
access_token=project.master_access_token,
)
else:
if master_token and not (master_token and member):
raise UsageError('No member specified!')
if master_token:
project = OHProject(master_access_token=master_token)
project.upload_member_from_dir(
member_data=project.project_data[member],
target_member_dir=directory,
metadata=metadata,
mode=mode,
access_token=project.master_access_token,
)
else:
member_data = exchange_oauth2_member(access_token)
OHProject.upload_member_from_dir(
member_data=member_data,
target_member_dir=directory,
metadata=metadata,
mode=mode,
access_token=access_token,
) | def function[upload, parameter[directory, metadata_csv, master_token, member, access_token, safe, sync, max_size, mode, verbose, debug]]:
constant[
Upload files for the project to Open Humans member accounts.
If using a master access token and not specifying member ID:
(1) Files should be organized in subdirectories according to project
member ID, e.g.:
main_directory/01234567/data.json
main_directory/12345678/data.json
main_directory/23456789/data.json
(2) The metadata CSV should have the following format:
1st column: Project member ID
2nd column: filenames
3rd & additional columns: Metadata fields (see below)
If uploading for a specific member:
(1) The local directory should not contain subdirectories.
(2) The metadata CSV should have the following format:
1st column: filenames
2nd & additional columns: Metadata fields (see below)
The default behavior is to overwrite files with matching filenames on
Open Humans, but not otherwise delete files. (Use --safe or --sync to
change this behavior.)
If included, the following metadata columns should be correctly formatted:
'tags': should be comma-separated strings
'md5': should match the file's md5 hexdigest
'creation_date', 'start_date', 'end_date': ISO 8601 dates or datetimes
Other metedata fields (e.g. 'description') can be arbitrary strings.
Either specify sync as True or safe as True but not both.
:param directory: This field is the target directory from which data will
be uploaded.
:param metadata_csv: This field is the filepath of the metadata csv file.
:param master_token: This field is the master access token for the project.
It's default value is None.
:param member: This field is specific member whose project data is
downloaded. It's default value is None.
:param access_token: This field is the user specific access token. It's
default value is None.
:param safe: This boolean field will overwrite matching filename. It's
default value is False.
:param sync: This boolean field will delete files on Open Humans that are
not in the local directory. It's default value is False.
:param max_size: This field is the maximum file size. It's default value is
None.
:param mode: This field takes three value default, sync, safe. It's default
value is 'default'.
:param verbose: This boolean field is the logging level. It's default value
is False.
:param debug: This boolean field is the logging level. It's default value
is False.
]
if <ast.BoolOp object at 0x7da1b11e2140> begin[:]
<ast.Raise object at 0x7da1b11e0490>
if <ast.BoolOp object at 0x7da1b11e0220> begin[:]
<ast.Raise object at 0x7da1b11e10c0>
call[name[set_log_level], parameter[name[debug], name[verbose]]]
if name[sync] begin[:]
variable[mode] assign[=] constant[sync]
variable[metadata] assign[=] call[name[load_metadata_csv], parameter[name[metadata_csv]]]
variable[subdirs] assign[=] <ast.ListComp object at 0x7da1b0f39780>
if name[subdirs] begin[:]
if <ast.UnaryOp object at 0x7da1b0f38a30> begin[:]
<ast.Raise object at 0x7da1b0f3a380>
if <ast.BoolOp object at 0x7da1b0f3b1f0> begin[:]
<ast.Raise object at 0x7da1b0f381f0>
variable[project] assign[=] call[name[OHProject], parameter[]]
for taget[name[member_id]] in starred[name[subdirs]] begin[:]
variable[subdir_path] assign[=] call[name[os].path.join, parameter[name[directory], name[member_id]]]
call[name[project].upload_member_from_dir, parameter[]] | keyword[def] identifier[upload] ( identifier[directory] , identifier[metadata_csv] , identifier[master_token] = keyword[None] , identifier[member] = keyword[None] ,
identifier[access_token] = keyword[None] , identifier[safe] = keyword[False] , identifier[sync] = keyword[False] , identifier[max_size] = literal[string] ,
identifier[mode] = literal[string] , identifier[verbose] = keyword[False] , identifier[debug] = keyword[False] ):
literal[string]
keyword[if] identifier[safe] keyword[and] identifier[sync] :
keyword[raise] identifier[UsageError] ( literal[string]
literal[string] )
keyword[if] keyword[not] ( identifier[master_token] keyword[or] identifier[access_token] ) keyword[or] ( identifier[master_token] keyword[and] identifier[access_token] ):
keyword[raise] identifier[UsageError] ( literal[string]
literal[string] )
identifier[set_log_level] ( identifier[debug] , identifier[verbose] )
keyword[if] identifier[sync] :
identifier[mode] = literal[string]
keyword[elif] identifier[safe] :
identifier[mode] = literal[string]
identifier[metadata] = identifier[load_metadata_csv] ( identifier[metadata_csv] )
identifier[subdirs] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] ) keyword[if]
identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[i] ))]
keyword[if] identifier[subdirs] :
keyword[if] keyword[not] identifier[all] ([ identifier[re] . identifier[match] ( literal[string] , identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[subdirs] ]):
keyword[raise] identifier[UsageError] (
literal[string] )
keyword[if] ( identifier[master_token] keyword[and] identifier[member] ) keyword[or] keyword[not] identifier[master_token] :
keyword[raise] identifier[UsageError] (
literal[string] )
identifier[project] = identifier[OHProject] ( identifier[master_access_token] = identifier[master_token] )
keyword[for] identifier[member_id] keyword[in] identifier[subdirs] :
identifier[subdir_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[member_id] )
identifier[project] . identifier[upload_member_from_dir] (
identifier[member_data] = identifier[project] . identifier[project_data] [ identifier[member_id] ],
identifier[target_member_dir] = identifier[subdir_path] ,
identifier[metadata] = identifier[metadata] [ identifier[member_id] ],
identifier[mode] = identifier[mode] ,
identifier[access_token] = identifier[project] . identifier[master_access_token] ,
)
keyword[else] :
keyword[if] identifier[master_token] keyword[and] keyword[not] ( identifier[master_token] keyword[and] identifier[member] ):
keyword[raise] identifier[UsageError] ( literal[string] )
keyword[if] identifier[master_token] :
identifier[project] = identifier[OHProject] ( identifier[master_access_token] = identifier[master_token] )
identifier[project] . identifier[upload_member_from_dir] (
identifier[member_data] = identifier[project] . identifier[project_data] [ identifier[member] ],
identifier[target_member_dir] = identifier[directory] ,
identifier[metadata] = identifier[metadata] ,
identifier[mode] = identifier[mode] ,
identifier[access_token] = identifier[project] . identifier[master_access_token] ,
)
keyword[else] :
identifier[member_data] = identifier[exchange_oauth2_member] ( identifier[access_token] )
identifier[OHProject] . identifier[upload_member_from_dir] (
identifier[member_data] = identifier[member_data] ,
identifier[target_member_dir] = identifier[directory] ,
identifier[metadata] = identifier[metadata] ,
identifier[mode] = identifier[mode] ,
identifier[access_token] = identifier[access_token] ,
) | def upload(directory, metadata_csv, master_token=None, member=None, access_token=None, safe=False, sync=False, max_size='128m', mode='default', verbose=False, debug=False):
"""
Upload files for the project to Open Humans member accounts.
If using a master access token and not specifying member ID:
(1) Files should be organized in subdirectories according to project
member ID, e.g.:
main_directory/01234567/data.json
main_directory/12345678/data.json
main_directory/23456789/data.json
(2) The metadata CSV should have the following format:
1st column: Project member ID
2nd column: filenames
3rd & additional columns: Metadata fields (see below)
If uploading for a specific member:
(1) The local directory should not contain subdirectories.
(2) The metadata CSV should have the following format:
1st column: filenames
2nd & additional columns: Metadata fields (see below)
The default behavior is to overwrite files with matching filenames on
Open Humans, but not otherwise delete files. (Use --safe or --sync to
change this behavior.)
If included, the following metadata columns should be correctly formatted:
'tags': should be comma-separated strings
'md5': should match the file's md5 hexdigest
'creation_date', 'start_date', 'end_date': ISO 8601 dates or datetimes
Other metedata fields (e.g. 'description') can be arbitrary strings.
Either specify sync as True or safe as True but not both.
:param directory: This field is the target directory from which data will
be uploaded.
:param metadata_csv: This field is the filepath of the metadata csv file.
:param master_token: This field is the master access token for the project.
It's default value is None.
:param member: This field is specific member whose project data is
downloaded. It's default value is None.
:param access_token: This field is the user specific access token. It's
default value is None.
:param safe: This boolean field will overwrite matching filename. It's
default value is False.
:param sync: This boolean field will delete files on Open Humans that are
not in the local directory. It's default value is False.
:param max_size: This field is the maximum file size. It's default value is
None.
:param mode: This field takes three value default, sync, safe. It's default
value is 'default'.
:param verbose: This boolean field is the logging level. It's default value
is False.
:param debug: This boolean field is the logging level. It's default value
is False.
"""
if safe and sync:
raise UsageError('Safe (--safe) and sync (--sync) modes are mutually incompatible!') # depends on [control=['if'], data=[]]
if not (master_token or access_token) or (master_token and access_token):
raise UsageError('Please specify either a master access token (-T), or an OAuth2 user access token (-t).') # depends on [control=['if'], data=[]]
set_log_level(debug, verbose)
if sync:
mode = 'sync' # depends on [control=['if'], data=[]]
elif safe:
mode = 'safe' # depends on [control=['if'], data=[]]
metadata = load_metadata_csv(metadata_csv)
subdirs = [i for i in os.listdir(directory) if os.path.isdir(os.path.join(directory, i))]
if subdirs:
if not all([re.match('^[0-9]{8}$', d) for d in subdirs]):
raise UsageError('Subdirs expected to match project member ID format!') # depends on [control=['if'], data=[]]
if master_token and member or not master_token:
raise UsageError("Subdirs shouldn't exist if uploading for specific member!") # depends on [control=['if'], data=[]]
project = OHProject(master_access_token=master_token)
for member_id in subdirs:
subdir_path = os.path.join(directory, member_id)
project.upload_member_from_dir(member_data=project.project_data[member_id], target_member_dir=subdir_path, metadata=metadata[member_id], mode=mode, access_token=project.master_access_token) # depends on [control=['for'], data=['member_id']] # depends on [control=['if'], data=[]]
else:
if master_token and (not (master_token and member)):
raise UsageError('No member specified!') # depends on [control=['if'], data=[]]
if master_token:
project = OHProject(master_access_token=master_token)
project.upload_member_from_dir(member_data=project.project_data[member], target_member_dir=directory, metadata=metadata, mode=mode, access_token=project.master_access_token) # depends on [control=['if'], data=[]]
else:
member_data = exchange_oauth2_member(access_token)
OHProject.upload_member_from_dir(member_data=member_data, target_member_dir=directory, metadata=metadata, mode=mode, access_token=access_token) |
def check_sizes(size, width, height):
"""
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ProtocolError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ProtocolError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ProtocolError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size | def function[check_sizes, parameter[size, width, height]]:
constant[
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
]
if <ast.UnaryOp object at 0x7da1b06fe650> begin[:]
return[tuple[[<ast.Name object at 0x7da1b06fd330>, <ast.Name object at 0x7da1b06fdc60>]]]
if compare[call[name[len], parameter[name[size]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b06fc070>
if <ast.BoolOp object at 0x7da1b06fd4b0> begin[:]
<ast.Raise object at 0x7da1b06fcd60>
if <ast.BoolOp object at 0x7da1b06fc160> begin[:]
<ast.Raise object at 0x7da1b06fc880>
return[name[size]] | keyword[def] identifier[check_sizes] ( identifier[size] , identifier[width] , identifier[height] ):
literal[string]
keyword[if] keyword[not] identifier[size] :
keyword[return] identifier[width] , identifier[height]
keyword[if] identifier[len] ( identifier[size] )!= literal[int] :
keyword[raise] identifier[ProtocolError] (
literal[string] )
keyword[if] identifier[width] keyword[is] keyword[not] keyword[None] keyword[and] identifier[width] != identifier[size] [ literal[int] ]:
keyword[raise] identifier[ProtocolError] (
literal[string]
%( identifier[size] [ literal[int] ], identifier[width] ))
keyword[if] identifier[height] keyword[is] keyword[not] keyword[None] keyword[and] identifier[height] != identifier[size] [ literal[int] ]:
keyword[raise] identifier[ProtocolError] (
literal[string]
%( identifier[size] [ literal[int] ], identifier[height] ))
keyword[return] identifier[size] | def check_sizes(size, width, height):
"""
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return (width, height) # depends on [control=['if'], data=[]]
if len(size) != 2:
raise ProtocolError('size argument should be a pair (width, height)') # depends on [control=['if'], data=[]]
if width is not None and width != size[0]:
raise ProtocolError('size[0] (%r) and width (%r) should match when both are used.' % (size[0], width)) # depends on [control=['if'], data=[]]
if height is not None and height != size[1]:
raise ProtocolError('size[1] (%r) and height (%r) should match when both are used.' % (size[1], height)) # depends on [control=['if'], data=[]]
return size |
def get_proj(geom, proj_list=None):
"""Determine best projection for input geometry
"""
out_srs = None
if proj_list is None:
proj_list = gen_proj_list()
#Go through user-defined projeciton list
for projbox in proj_list:
if projbox.geom.Intersects(geom):
out_srs = projbox.srs
break
#If geom doesn't fall in any of the user projection bbox, use UTM
if out_srs is None:
out_srs = getUTMsrs(geom)
return out_srs | def function[get_proj, parameter[geom, proj_list]]:
constant[Determine best projection for input geometry
]
variable[out_srs] assign[=] constant[None]
if compare[name[proj_list] is constant[None]] begin[:]
variable[proj_list] assign[=] call[name[gen_proj_list], parameter[]]
for taget[name[projbox]] in starred[name[proj_list]] begin[:]
if call[name[projbox].geom.Intersects, parameter[name[geom]]] begin[:]
variable[out_srs] assign[=] name[projbox].srs
break
if compare[name[out_srs] is constant[None]] begin[:]
variable[out_srs] assign[=] call[name[getUTMsrs], parameter[name[geom]]]
return[name[out_srs]] | keyword[def] identifier[get_proj] ( identifier[geom] , identifier[proj_list] = keyword[None] ):
literal[string]
identifier[out_srs] = keyword[None]
keyword[if] identifier[proj_list] keyword[is] keyword[None] :
identifier[proj_list] = identifier[gen_proj_list] ()
keyword[for] identifier[projbox] keyword[in] identifier[proj_list] :
keyword[if] identifier[projbox] . identifier[geom] . identifier[Intersects] ( identifier[geom] ):
identifier[out_srs] = identifier[projbox] . identifier[srs]
keyword[break]
keyword[if] identifier[out_srs] keyword[is] keyword[None] :
identifier[out_srs] = identifier[getUTMsrs] ( identifier[geom] )
keyword[return] identifier[out_srs] | def get_proj(geom, proj_list=None):
"""Determine best projection for input geometry
"""
out_srs = None
if proj_list is None:
proj_list = gen_proj_list() # depends on [control=['if'], data=['proj_list']]
#Go through user-defined projeciton list
for projbox in proj_list:
if projbox.geom.Intersects(geom):
out_srs = projbox.srs
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['projbox']]
#If geom doesn't fall in any of the user projection bbox, use UTM
if out_srs is None:
out_srs = getUTMsrs(geom) # depends on [control=['if'], data=['out_srs']]
return out_srs |
def finish(self, data: bytes=b"") -> None:
"""
Finish the stream.
"""
if self.finished():
if self._exc:
raise self._exc
if data:
raise WriteAfterFinishedError
return
try:
self._delegate.write_data(data, finished=True)
except BaseWriteException as e:
if self._exc is None:
self._exc = e
raise
finally:
self._finished.set() | def function[finish, parameter[self, data]]:
constant[
Finish the stream.
]
if call[name[self].finished, parameter[]] begin[:]
if name[self]._exc begin[:]
<ast.Raise object at 0x7da207f981f0>
if name[data] begin[:]
<ast.Raise object at 0x7da207f9b9a0>
return[None]
<ast.Try object at 0x7da207f98a00> | keyword[def] identifier[finish] ( identifier[self] , identifier[data] : identifier[bytes] = literal[string] )-> keyword[None] :
literal[string]
keyword[if] identifier[self] . identifier[finished] ():
keyword[if] identifier[self] . identifier[_exc] :
keyword[raise] identifier[self] . identifier[_exc]
keyword[if] identifier[data] :
keyword[raise] identifier[WriteAfterFinishedError]
keyword[return]
keyword[try] :
identifier[self] . identifier[_delegate] . identifier[write_data] ( identifier[data] , identifier[finished] = keyword[True] )
keyword[except] identifier[BaseWriteException] keyword[as] identifier[e] :
keyword[if] identifier[self] . identifier[_exc] keyword[is] keyword[None] :
identifier[self] . identifier[_exc] = identifier[e]
keyword[raise]
keyword[finally] :
identifier[self] . identifier[_finished] . identifier[set] () | def finish(self, data: bytes=b'') -> None:
"""
Finish the stream.
"""
if self.finished():
if self._exc:
raise self._exc # depends on [control=['if'], data=[]]
if data:
raise WriteAfterFinishedError # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
try:
self._delegate.write_data(data, finished=True) # depends on [control=['try'], data=[]]
except BaseWriteException as e:
if self._exc is None:
self._exc = e # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['e']]
finally:
self._finished.set() |
def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) | def function[delete, parameter[cls, uuid]]:
constant[Delete a workflow.]
variable[to_delete] assign[=] call[name[Workflow].query.get, parameter[name[uuid]]]
call[name[db].session.delete, parameter[name[to_delete]]] | keyword[def] identifier[delete] ( identifier[cls] , identifier[uuid] ):
literal[string]
identifier[to_delete] = identifier[Workflow] . identifier[query] . identifier[get] ( identifier[uuid] )
identifier[db] . identifier[session] . identifier[delete] ( identifier[to_delete] ) | def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) |
def diagonal_gaussian_posterior_builder(
getter, name, shape=None, *args, **kwargs):
"""A pre-canned builder for diagonal gaussian posterior distributions.
Given a true `getter` function and arguments forwarded from `tf.get_variable`,
return a distribution object for a diagonal posterior over a variable of the
requisite shape.
Args:
getter: The `getter` passed to a `custom_getter`. Please see the
documentation for `tf.get_variable`.
name: The `name` argument passed to `tf.get_variable`.
shape: The `shape` argument passed to `tf.get_variable`.
*args: See positional arguments passed to `tf.get_variable`.
**kwargs: See keyword arguments passed to `tf.get_variable`.
Returns:
An instance of `tfp.distributions.Normal` representing the posterior
distribution over the variable in question.
"""
# Please see the documentation for
# `tfp.distributions.param_static_shapes`.
parameter_shapes = tfp.distributions.Normal.param_static_shapes(shape)
loc_var = getter(
name + "/posterior_loc", shape=parameter_shapes["loc"], *args, **kwargs)
scale_var = getter(
name + "/posterior_scale",
shape=parameter_shapes["scale"],
*args,
**kwargs)
posterior = tfp.distributions.Normal(
loc=loc_var,
scale=tf.nn.softplus(scale_var),
name="{}_posterior_dist".format(name))
return posterior | def function[diagonal_gaussian_posterior_builder, parameter[getter, name, shape]]:
constant[A pre-canned builder for diagonal gaussian posterior distributions.
Given a true `getter` function and arguments forwarded from `tf.get_variable`,
return a distribution object for a diagonal posterior over a variable of the
requisite shape.
Args:
getter: The `getter` passed to a `custom_getter`. Please see the
documentation for `tf.get_variable`.
name: The `name` argument passed to `tf.get_variable`.
shape: The `shape` argument passed to `tf.get_variable`.
*args: See positional arguments passed to `tf.get_variable`.
**kwargs: See keyword arguments passed to `tf.get_variable`.
Returns:
An instance of `tfp.distributions.Normal` representing the posterior
distribution over the variable in question.
]
variable[parameter_shapes] assign[=] call[name[tfp].distributions.Normal.param_static_shapes, parameter[name[shape]]]
variable[loc_var] assign[=] call[name[getter], parameter[binary_operation[name[name] + constant[/posterior_loc]], <ast.Starred object at 0x7da1b1c62800>]]
variable[scale_var] assign[=] call[name[getter], parameter[binary_operation[name[name] + constant[/posterior_scale]], <ast.Starred object at 0x7da1b1c604c0>]]
variable[posterior] assign[=] call[name[tfp].distributions.Normal, parameter[]]
return[name[posterior]] | keyword[def] identifier[diagonal_gaussian_posterior_builder] (
identifier[getter] , identifier[name] , identifier[shape] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[parameter_shapes] = identifier[tfp] . identifier[distributions] . identifier[Normal] . identifier[param_static_shapes] ( identifier[shape] )
identifier[loc_var] = identifier[getter] (
identifier[name] + literal[string] , identifier[shape] = identifier[parameter_shapes] [ literal[string] ],* identifier[args] ,** identifier[kwargs] )
identifier[scale_var] = identifier[getter] (
identifier[name] + literal[string] ,
identifier[shape] = identifier[parameter_shapes] [ literal[string] ],
* identifier[args] ,
** identifier[kwargs] )
identifier[posterior] = identifier[tfp] . identifier[distributions] . identifier[Normal] (
identifier[loc] = identifier[loc_var] ,
identifier[scale] = identifier[tf] . identifier[nn] . identifier[softplus] ( identifier[scale_var] ),
identifier[name] = literal[string] . identifier[format] ( identifier[name] ))
keyword[return] identifier[posterior] | def diagonal_gaussian_posterior_builder(getter, name, shape=None, *args, **kwargs):
"""A pre-canned builder for diagonal gaussian posterior distributions.
Given a true `getter` function and arguments forwarded from `tf.get_variable`,
return a distribution object for a diagonal posterior over a variable of the
requisite shape.
Args:
getter: The `getter` passed to a `custom_getter`. Please see the
documentation for `tf.get_variable`.
name: The `name` argument passed to `tf.get_variable`.
shape: The `shape` argument passed to `tf.get_variable`.
*args: See positional arguments passed to `tf.get_variable`.
**kwargs: See keyword arguments passed to `tf.get_variable`.
Returns:
An instance of `tfp.distributions.Normal` representing the posterior
distribution over the variable in question.
"""
# Please see the documentation for
# `tfp.distributions.param_static_shapes`.
parameter_shapes = tfp.distributions.Normal.param_static_shapes(shape)
loc_var = getter(name + '/posterior_loc', *args, shape=parameter_shapes['loc'], **kwargs)
scale_var = getter(name + '/posterior_scale', *args, shape=parameter_shapes['scale'], **kwargs)
posterior = tfp.distributions.Normal(loc=loc_var, scale=tf.nn.softplus(scale_var), name='{}_posterior_dist'.format(name))
return posterior |
def batch_encode(self, iterator, *args, dim=0, **kwargs):
"""
Args:
iterator (iterator): Batch of text to encode.
*args: Arguments passed onto ``Encoder.__init__``.
dim (int, optional): Dimension along which to concatenate tensors.
**kwargs: Keyword arguments passed onto ``Encoder.__init__``.
Returns
torch.Tensor, list of int: Encoded and padded batch of sequences; Original lengths of
sequences.
"""
return stack_and_pad_tensors(
super().batch_encode(iterator), padding_index=self.padding_index, dim=dim) | def function[batch_encode, parameter[self, iterator]]:
constant[
Args:
iterator (iterator): Batch of text to encode.
*args: Arguments passed onto ``Encoder.__init__``.
dim (int, optional): Dimension along which to concatenate tensors.
**kwargs: Keyword arguments passed onto ``Encoder.__init__``.
Returns
torch.Tensor, list of int: Encoded and padded batch of sequences; Original lengths of
sequences.
]
return[call[name[stack_and_pad_tensors], parameter[call[call[name[super], parameter[]].batch_encode, parameter[name[iterator]]]]]] | keyword[def] identifier[batch_encode] ( identifier[self] , identifier[iterator] ,* identifier[args] , identifier[dim] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[stack_and_pad_tensors] (
identifier[super] (). identifier[batch_encode] ( identifier[iterator] ), identifier[padding_index] = identifier[self] . identifier[padding_index] , identifier[dim] = identifier[dim] ) | def batch_encode(self, iterator, *args, dim=0, **kwargs):
"""
Args:
iterator (iterator): Batch of text to encode.
*args: Arguments passed onto ``Encoder.__init__``.
dim (int, optional): Dimension along which to concatenate tensors.
**kwargs: Keyword arguments passed onto ``Encoder.__init__``.
Returns
torch.Tensor, list of int: Encoded and padded batch of sequences; Original lengths of
sequences.
"""
return stack_and_pad_tensors(super().batch_encode(iterator), padding_index=self.padding_index, dim=dim) |
def points(self):
'''Return unordered array with all the points in this neurite'''
# add all points in a section except the first one, which is a duplicate
_pts = [v for s in self.root_node.ipreorder()
for v in s.points[1:, COLS.XYZR]]
# except for the very first point, which is not a duplicate
_pts.insert(0, self.root_node.points[0][COLS.XYZR])
return np.array(_pts) | def function[points, parameter[self]]:
constant[Return unordered array with all the points in this neurite]
variable[_pts] assign[=] <ast.ListComp object at 0x7da20e960280>
call[name[_pts].insert, parameter[constant[0], call[call[name[self].root_node.points][constant[0]]][name[COLS].XYZR]]]
return[call[name[np].array, parameter[name[_pts]]]] | keyword[def] identifier[points] ( identifier[self] ):
literal[string]
identifier[_pts] =[ identifier[v] keyword[for] identifier[s] keyword[in] identifier[self] . identifier[root_node] . identifier[ipreorder] ()
keyword[for] identifier[v] keyword[in] identifier[s] . identifier[points] [ literal[int] :, identifier[COLS] . identifier[XYZR] ]]
identifier[_pts] . identifier[insert] ( literal[int] , identifier[self] . identifier[root_node] . identifier[points] [ literal[int] ][ identifier[COLS] . identifier[XYZR] ])
keyword[return] identifier[np] . identifier[array] ( identifier[_pts] ) | def points(self):
"""Return unordered array with all the points in this neurite"""
# add all points in a section except the first one, which is a duplicate
_pts = [v for s in self.root_node.ipreorder() for v in s.points[1:, COLS.XYZR]]
# except for the very first point, which is not a duplicate
_pts.insert(0, self.root_node.points[0][COLS.XYZR])
return np.array(_pts) |
def duplicate(self, contributor=None):
"""Duplicate (make a copy) ``Data`` objects.
:param contributor: Duplication user
"""
bundle = [
{'original': data, 'copy': data.duplicate(contributor=contributor)}
for data in self
]
bundle = rewire_inputs(bundle)
duplicated = [item['copy'] for item in bundle]
return duplicated | def function[duplicate, parameter[self, contributor]]:
constant[Duplicate (make a copy) ``Data`` objects.
:param contributor: Duplication user
]
variable[bundle] assign[=] <ast.ListComp object at 0x7da18c4ce860>
variable[bundle] assign[=] call[name[rewire_inputs], parameter[name[bundle]]]
variable[duplicated] assign[=] <ast.ListComp object at 0x7da20e9b1510>
return[name[duplicated]] | keyword[def] identifier[duplicate] ( identifier[self] , identifier[contributor] = keyword[None] ):
literal[string]
identifier[bundle] =[
{ literal[string] : identifier[data] , literal[string] : identifier[data] . identifier[duplicate] ( identifier[contributor] = identifier[contributor] )}
keyword[for] identifier[data] keyword[in] identifier[self]
]
identifier[bundle] = identifier[rewire_inputs] ( identifier[bundle] )
identifier[duplicated] =[ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[bundle] ]
keyword[return] identifier[duplicated] | def duplicate(self, contributor=None):
"""Duplicate (make a copy) ``Data`` objects.
:param contributor: Duplication user
"""
bundle = [{'original': data, 'copy': data.duplicate(contributor=contributor)} for data in self]
bundle = rewire_inputs(bundle)
duplicated = [item['copy'] for item in bundle]
return duplicated |
def _filtercomment(
self,
sql):
"Get rid of comments starting with --"
import os
fsql = ''
for line in sql.split('\n'):
fsql += line.split('--')[0] + ' ' + os.linesep
return fsql | def function[_filtercomment, parameter[self, sql]]:
constant[Get rid of comments starting with --]
import module[os]
variable[fsql] assign[=] constant[]
for taget[name[line]] in starred[call[name[sql].split, parameter[constant[
]]]] begin[:]
<ast.AugAssign object at 0x7da1b15b52a0>
return[name[fsql]] | keyword[def] identifier[_filtercomment] (
identifier[self] ,
identifier[sql] ):
literal[string]
keyword[import] identifier[os]
identifier[fsql] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[sql] . identifier[split] ( literal[string] ):
identifier[fsql] += identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] + identifier[os] . identifier[linesep]
keyword[return] identifier[fsql] | def _filtercomment(self, sql):
"""Get rid of comments starting with --"""
import os
fsql = ''
for line in sql.split('\n'):
fsql += line.split('--')[0] + ' ' + os.linesep # depends on [control=['for'], data=['line']]
return fsql |
def associate_notification_template(self, job_template,
notification_template, status):
"""Associate a notification template from this job template.
=====API DOCS=====
Associate a notification template from this job template.
:param job_template: The job template to associate to.
:type job_template: str
:param notification_template: The notification template to be associated.
:type notification_template: str
:param status: type of notification this notification template should be associated to.
:type status: str
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS=====
"""
return self._assoc('notification_templates_%s' % status,
job_template, notification_template) | def function[associate_notification_template, parameter[self, job_template, notification_template, status]]:
constant[Associate a notification template from this job template.
=====API DOCS=====
Associate a notification template from this job template.
:param job_template: The job template to associate to.
:type job_template: str
:param notification_template: The notification template to be associated.
:type notification_template: str
:param status: type of notification this notification template should be associated to.
:type status: str
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS=====
]
return[call[name[self]._assoc, parameter[binary_operation[constant[notification_templates_%s] <ast.Mod object at 0x7da2590d6920> name[status]], name[job_template], name[notification_template]]]] | keyword[def] identifier[associate_notification_template] ( identifier[self] , identifier[job_template] ,
identifier[notification_template] , identifier[status] ):
literal[string]
keyword[return] identifier[self] . identifier[_assoc] ( literal[string] % identifier[status] ,
identifier[job_template] , identifier[notification_template] ) | def associate_notification_template(self, job_template, notification_template, status):
"""Associate a notification template from this job template.
=====API DOCS=====
Associate a notification template from this job template.
:param job_template: The job template to associate to.
:type job_template: str
:param notification_template: The notification template to be associated.
:type notification_template: str
:param status: type of notification this notification template should be associated to.
:type status: str
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS=====
"""
return self._assoc('notification_templates_%s' % status, job_template, notification_template) |
def peek_openssl_error():
"""
Peeks into the error stack and pulls out the lib, func and reason
:return:
A three-element tuple of integers (lib, func, reason)
"""
error = libcrypto.ERR_peek_error()
lib = int((error >> 24) & 0xff)
func = int((error >> 12) & 0xfff)
reason = int(error & 0xfff)
return (lib, func, reason) | def function[peek_openssl_error, parameter[]]:
constant[
Peeks into the error stack and pulls out the lib, func and reason
:return:
A three-element tuple of integers (lib, func, reason)
]
variable[error] assign[=] call[name[libcrypto].ERR_peek_error, parameter[]]
variable[lib] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[error] <ast.RShift object at 0x7da2590d6a40> constant[24]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
variable[func] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[error] <ast.RShift object at 0x7da2590d6a40> constant[12]] <ast.BitAnd object at 0x7da2590d6b60> constant[4095]]]]
variable[reason] assign[=] call[name[int], parameter[binary_operation[name[error] <ast.BitAnd object at 0x7da2590d6b60> constant[4095]]]]
return[tuple[[<ast.Name object at 0x7da1aff3da80>, <ast.Name object at 0x7da1aff3e170>, <ast.Name object at 0x7da1aff3e500>]]] | keyword[def] identifier[peek_openssl_error] ():
literal[string]
identifier[error] = identifier[libcrypto] . identifier[ERR_peek_error] ()
identifier[lib] = identifier[int] (( identifier[error] >> literal[int] )& literal[int] )
identifier[func] = identifier[int] (( identifier[error] >> literal[int] )& literal[int] )
identifier[reason] = identifier[int] ( identifier[error] & literal[int] )
keyword[return] ( identifier[lib] , identifier[func] , identifier[reason] ) | def peek_openssl_error():
"""
Peeks into the error stack and pulls out the lib, func and reason
:return:
A three-element tuple of integers (lib, func, reason)
"""
error = libcrypto.ERR_peek_error()
lib = int(error >> 24 & 255)
func = int(error >> 12 & 4095)
reason = int(error & 4095)
return (lib, func, reason) |
def fetch_results_from_source(self, *fields, dataframe=False):
"""
Get values for specific fields in the elasticsearch index, from source
:param fields: a list of fields that have to be retrieved from the index
:param dataframe: if true, will return the data in the form of a pandas.DataFrame
:returns: a list of dicts(key_val pairs) containing the values for the applied fields
if dataframe=True, will return the a dataframe containing the data in rows
and the fields representing column names
"""
if not fields:
raise AttributeError("Please provide the fields to get from elasticsearch!")
self.reset_aggregations()
self.search = self.search.extra(_source=fields)
self.search = self.search.extra(size=self.size)
response = self.search.execute()
hits = response.to_dict()['hits']['hits']
data = [item["_source"] for item in hits]
if dataframe:
df = pd.DataFrame.from_records(data)
return df.fillna(0)
return data | def function[fetch_results_from_source, parameter[self]]:
constant[
Get values for specific fields in the elasticsearch index, from source
:param fields: a list of fields that have to be retrieved from the index
:param dataframe: if true, will return the data in the form of a pandas.DataFrame
:returns: a list of dicts(key_val pairs) containing the values for the applied fields
if dataframe=True, will return the a dataframe containing the data in rows
and the fields representing column names
]
if <ast.UnaryOp object at 0x7da1b268ef80> begin[:]
<ast.Raise object at 0x7da1b268d7b0>
call[name[self].reset_aggregations, parameter[]]
name[self].search assign[=] call[name[self].search.extra, parameter[]]
name[self].search assign[=] call[name[self].search.extra, parameter[]]
variable[response] assign[=] call[name[self].search.execute, parameter[]]
variable[hits] assign[=] call[call[call[name[response].to_dict, parameter[]]][constant[hits]]][constant[hits]]
variable[data] assign[=] <ast.ListComp object at 0x7da1b268d5d0>
if name[dataframe] begin[:]
variable[df] assign[=] call[name[pd].DataFrame.from_records, parameter[name[data]]]
return[call[name[df].fillna, parameter[constant[0]]]]
return[name[data]] | keyword[def] identifier[fetch_results_from_source] ( identifier[self] ,* identifier[fields] , identifier[dataframe] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[fields] :
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[self] . identifier[reset_aggregations] ()
identifier[self] . identifier[search] = identifier[self] . identifier[search] . identifier[extra] ( identifier[_source] = identifier[fields] )
identifier[self] . identifier[search] = identifier[self] . identifier[search] . identifier[extra] ( identifier[size] = identifier[self] . identifier[size] )
identifier[response] = identifier[self] . identifier[search] . identifier[execute] ()
identifier[hits] = identifier[response] . identifier[to_dict] ()[ literal[string] ][ literal[string] ]
identifier[data] =[ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[hits] ]
keyword[if] identifier[dataframe] :
identifier[df] = identifier[pd] . identifier[DataFrame] . identifier[from_records] ( identifier[data] )
keyword[return] identifier[df] . identifier[fillna] ( literal[int] )
keyword[return] identifier[data] | def fetch_results_from_source(self, *fields, dataframe=False):
"""
Get values for specific fields in the elasticsearch index, from source
:param fields: a list of fields that have to be retrieved from the index
:param dataframe: if true, will return the data in the form of a pandas.DataFrame
:returns: a list of dicts(key_val pairs) containing the values for the applied fields
if dataframe=True, will return the a dataframe containing the data in rows
and the fields representing column names
"""
if not fields:
raise AttributeError('Please provide the fields to get from elasticsearch!') # depends on [control=['if'], data=[]]
self.reset_aggregations()
self.search = self.search.extra(_source=fields)
self.search = self.search.extra(size=self.size)
response = self.search.execute()
hits = response.to_dict()['hits']['hits']
data = [item['_source'] for item in hits]
if dataframe:
df = pd.DataFrame.from_records(data)
return df.fillna(0) # depends on [control=['if'], data=[]]
return data |
def post_process(self, stream_to_file=False):
"""
This will set error for an exception if the call isn't in the 200s.
It will also extract the raw data from the response
:stream_to_file: str of the file name to stream the data to
:return: None
"""
self.status_code = self.response.status_code
if 200 <= self.status_code < 300:
if stream_to_file:
self.stream_to_file(stream_to_file)
else:
self.extract_data()
else:
self._timestamps.setdefault('exception', time.time())
if self.status_code in HTTP_STATUS_CODES:
self.error = HTTP_STATUS_CODES[self.status_code].str_to_obj(
self.response.content)
else:
self.error = Exception('Status code = %s' % self.status_code)
raise self.error | def function[post_process, parameter[self, stream_to_file]]:
constant[
This will set error for an exception if the call isn't in the 200s.
It will also extract the raw data from the response
:stream_to_file: str of the file name to stream the data to
:return: None
]
name[self].status_code assign[=] name[self].response.status_code
if compare[constant[200] less_or_equal[<=] name[self].status_code] begin[:]
if name[stream_to_file] begin[:]
call[name[self].stream_to_file, parameter[name[stream_to_file]]] | keyword[def] identifier[post_process] ( identifier[self] , identifier[stream_to_file] = keyword[False] ):
literal[string]
identifier[self] . identifier[status_code] = identifier[self] . identifier[response] . identifier[status_code]
keyword[if] literal[int] <= identifier[self] . identifier[status_code] < literal[int] :
keyword[if] identifier[stream_to_file] :
identifier[self] . identifier[stream_to_file] ( identifier[stream_to_file] )
keyword[else] :
identifier[self] . identifier[extract_data] ()
keyword[else] :
identifier[self] . identifier[_timestamps] . identifier[setdefault] ( literal[string] , identifier[time] . identifier[time] ())
keyword[if] identifier[self] . identifier[status_code] keyword[in] identifier[HTTP_STATUS_CODES] :
identifier[self] . identifier[error] = identifier[HTTP_STATUS_CODES] [ identifier[self] . identifier[status_code] ]. identifier[str_to_obj] (
identifier[self] . identifier[response] . identifier[content] )
keyword[else] :
identifier[self] . identifier[error] = identifier[Exception] ( literal[string] % identifier[self] . identifier[status_code] )
keyword[raise] identifier[self] . identifier[error] | def post_process(self, stream_to_file=False):
"""
This will set error for an exception if the call isn't in the 200s.
It will also extract the raw data from the response
:stream_to_file: str of the file name to stream the data to
:return: None
"""
self.status_code = self.response.status_code
if 200 <= self.status_code < 300:
if stream_to_file:
self.stream_to_file(stream_to_file) # depends on [control=['if'], data=[]]
else:
self.extract_data() # depends on [control=['if'], data=[]]
else:
self._timestamps.setdefault('exception', time.time())
if self.status_code in HTTP_STATUS_CODES:
self.error = HTTP_STATUS_CODES[self.status_code].str_to_obj(self.response.content) # depends on [control=['if'], data=['HTTP_STATUS_CODES']]
else:
self.error = Exception('Status code = %s' % self.status_code)
raise self.error |
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
'''Register a Fixture class in namespace with the given properties.
Creates a Fixture class (not object) and inserts it into the provided
namespace. The properties is a dict but allows functions to reference other
properties and acts like a small DSL (domain specific language). This is
really just a declarative way to compose data about a test fixture and make
it repeatable.
Files calling this function are expected to house one or more Fixtures and
have a name that ends with a UUID without its hyphens. For example:
foo_38de9ceec5694c96ace90c9ca37e5bcb.py. This UUID is used to uniquely
track the Fixture through the test suite and allow Fixtures to scale without
concern.
**Parameters**
:``namespace``: dictionary to insert the generated class into
:``base_classes``: list of classes the new class should inherit
:``properties``: dictionary of properties with their values
Properties can have the following forms:
:functions: invoked with the Fixture as it's argument
:classes: instantiated without any arguments (unless it subclasses
``torment.fixtures.Fixture`` in which case it's passed context)
:literals: any standard python type (i.e. int, str, dict)
.. note::
function execution may error (this will be emitted as a logging event).
functions will continually be tried until they resolve or the same set
of functions is continually erroring. These functions that failed to
resolve are left in tact for later processing.
Properties by the following names also have defined behavior:
:description: added to the Fixture's description as an addendum
:error: must be a dictionary with three keys:
:class: class to instantiate (usually an exception)
:args: arguments to pass to class initialization
:kwargs: keyword arguments to pass to class initialization
:mocks: dictionary mapping mock symbols to corresponding values
Properties by the following names are reserved and should not be used:
* name
'''
# ensure we have a clean copy of the data
# and won't stomp on re-uses elsewhere in
# someone's code
props = copy.deepcopy(properties)
desc = props.pop('description', None) # type: Union[str, None]
caller_frame = inspect.stack()[1]
caller_file = caller_frame[1]
caller_module = inspect.getmodule(caller_frame[0])
my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])
class_name = _unique_class_name(namespace, my_uuid)
@property
def description(self) -> str:
_ = super(self.__class__, self).description
if desc is not None:
_ += '—' + desc
return _
def __init__(self, context: 'torment.TestContext') -> None:
super(self.__class__, self).__init__(context)
functions = {}
for name, value in props.items():
if name == 'error':
self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
continue
if inspect.isclass(value):
if issubclass(value, Fixture):
value = value(self.context)
else:
value = value()
if inspect.isfunction(value):
functions[name] = value
continue
setattr(self, name, value)
_resolve_functions(functions, self)
self.initialize()
def setup(self) -> None:
if hasattr(self, 'mocks'):
logger.debug('self.mocks: %s', self.mocks)
for mock_symbol, mock_result in self.mocks.items():
if _find_mocker(mock_symbol, self.context)():
_prepare_mock(self.context, mock_symbol, **mock_result)
super(self.__class__, self).setup()
namespace[class_name] = type(class_name, base_classes, {
'description': description,
'__init__': __init__,
'__module__': caller_module,
'setup': setup,
'uuid': my_uuid,
}) | def function[register, parameter[namespace, base_classes, properties]]:
constant[Register a Fixture class in namespace with the given properties.
Creates a Fixture class (not object) and inserts it into the provided
namespace. The properties is a dict but allows functions to reference other
properties and acts like a small DSL (domain specific language). This is
really just a declarative way to compose data about a test fixture and make
it repeatable.
Files calling this function are expected to house one or more Fixtures and
have a name that ends with a UUID without its hyphens. For example:
foo_38de9ceec5694c96ace90c9ca37e5bcb.py. This UUID is used to uniquely
track the Fixture through the test suite and allow Fixtures to scale without
concern.
**Parameters**
:``namespace``: dictionary to insert the generated class into
:``base_classes``: list of classes the new class should inherit
:``properties``: dictionary of properties with their values
Properties can have the following forms:
:functions: invoked with the Fixture as it's argument
:classes: instantiated without any arguments (unless it subclasses
``torment.fixtures.Fixture`` in which case it's passed context)
:literals: any standard python type (i.e. int, str, dict)
.. note::
function execution may error (this will be emitted as a logging event).
functions will continually be tried until they resolve or the same set
of functions is continually erroring. These functions that failed to
resolve are left in tact for later processing.
Properties by the following names also have defined behavior:
:description: added to the Fixture's description as an addendum
:error: must be a dictionary with three keys:
:class: class to instantiate (usually an exception)
:args: arguments to pass to class initialization
:kwargs: keyword arguments to pass to class initialization
:mocks: dictionary mapping mock symbols to corresponding values
Properties by the following names are reserved and should not be used:
* name
]
variable[props] assign[=] call[name[copy].deepcopy, parameter[name[properties]]]
variable[desc] assign[=] call[name[props].pop, parameter[constant[description], constant[None]]]
variable[caller_frame] assign[=] call[call[name[inspect].stack, parameter[]]][constant[1]]
variable[caller_file] assign[=] call[name[caller_frame]][constant[1]]
variable[caller_module] assign[=] call[name[inspect].getmodule, parameter[call[name[caller_frame]][constant[0]]]]
variable[my_uuid] assign[=] call[name[uuid].UUID, parameter[call[call[call[call[name[os].path.basename, parameter[name[caller_file]]].replace, parameter[constant[.py], constant[]]].rsplit, parameter[constant[_], constant[1]]]][<ast.UnaryOp object at 0x7da1b1615f60>]]]
variable[class_name] assign[=] call[name[_unique_class_name], parameter[name[namespace], name[my_uuid]]]
def function[description, parameter[self]]:
variable[_] assign[=] call[name[super], parameter[name[self].__class__, name[self]]].description
if compare[name[desc] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1616290>
return[name[_]]
def function[__init__, parameter[self, context]]:
call[call[name[super], parameter[name[self].__class__, name[self]]].__init__, parameter[name[context]]]
variable[functions] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1615bd0>, <ast.Name object at 0x7da1b1614b50>]]] in starred[call[name[props].items, parameter[]]] begin[:]
if compare[name[name] equal[==] constant[error]] begin[:]
name[self].error assign[=] call[call[name[value]][constant[class]], parameter[<ast.Starred object at 0x7da1b16155a0>]]
continue
if call[name[inspect].isclass, parameter[name[value]]] begin[:]
if call[name[issubclass], parameter[name[value], name[Fixture]]] begin[:]
variable[value] assign[=] call[name[value], parameter[name[self].context]]
if call[name[inspect].isfunction, parameter[name[value]]] begin[:]
call[name[functions]][name[name]] assign[=] name[value]
continue
call[name[setattr], parameter[name[self], name[name], name[value]]]
call[name[_resolve_functions], parameter[name[functions], name[self]]]
call[name[self].initialize, parameter[]]
def function[setup, parameter[self]]:
if call[name[hasattr], parameter[name[self], constant[mocks]]] begin[:]
call[name[logger].debug, parameter[constant[self.mocks: %s], name[self].mocks]]
for taget[tuple[[<ast.Name object at 0x7da20e962e90>, <ast.Name object at 0x7da20e9625c0>]]] in starred[call[name[self].mocks.items, parameter[]]] begin[:]
if call[call[name[_find_mocker], parameter[name[mock_symbol], name[self].context]], parameter[]] begin[:]
call[name[_prepare_mock], parameter[name[self].context, name[mock_symbol]]]
call[call[name[super], parameter[name[self].__class__, name[self]]].setup, parameter[]]
call[name[namespace]][name[class_name]] assign[=] call[name[type], parameter[name[class_name], name[base_classes], dictionary[[<ast.Constant object at 0x7da1b16e1000>, <ast.Constant object at 0x7da1b16e2350>, <ast.Constant object at 0x7da1b16e1ff0>, <ast.Constant object at 0x7da1b16e01f0>, <ast.Constant object at 0x7da1b16e2e00>], [<ast.Name object at 0x7da1b16e30a0>, <ast.Name object at 0x7da1b16e3250>, <ast.Name object at 0x7da1b16e0af0>, <ast.Name object at 0x7da1b16e1690>, <ast.Name object at 0x7da1b16e2b30>]]]] | keyword[def] identifier[register] ( identifier[namespace] , identifier[base_classes] : identifier[Tuple] [ identifier[type] ], identifier[properties] : identifier[Dict] [ identifier[str] , identifier[Any] ])-> keyword[None] :
literal[string]
identifier[props] = identifier[copy] . identifier[deepcopy] ( identifier[properties] )
identifier[desc] = identifier[props] . identifier[pop] ( literal[string] , keyword[None] )
identifier[caller_frame] = identifier[inspect] . identifier[stack] ()[ literal[int] ]
identifier[caller_file] = identifier[caller_frame] [ literal[int] ]
identifier[caller_module] = identifier[inspect] . identifier[getmodule] ( identifier[caller_frame] [ literal[int] ])
identifier[my_uuid] = identifier[uuid] . identifier[UUID] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[caller_file] ). identifier[replace] ( literal[string] , literal[string] ). identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ])
identifier[class_name] = identifier[_unique_class_name] ( identifier[namespace] , identifier[my_uuid] )
@ identifier[property]
keyword[def] identifier[description] ( identifier[self] )-> identifier[str] :
identifier[_] = identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[description]
keyword[if] identifier[desc] keyword[is] keyword[not] keyword[None] :
identifier[_] += literal[string] + identifier[desc]
keyword[return] identifier[_]
keyword[def] identifier[__init__] ( identifier[self] , identifier[context] : literal[string] )-> keyword[None] :
identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[__init__] ( identifier[context] )
identifier[functions] ={}
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[props] . identifier[items] ():
keyword[if] identifier[name] == literal[string] :
identifier[self] . identifier[error] = identifier[value] [ literal[string] ](* identifier[value] . identifier[get] ( literal[string] ,()),** identifier[value] . identifier[get] ( literal[string] ,{}))
keyword[continue]
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[value] ):
keyword[if] identifier[issubclass] ( identifier[value] , identifier[Fixture] ):
identifier[value] = identifier[value] ( identifier[self] . identifier[context] )
keyword[else] :
identifier[value] = identifier[value] ()
keyword[if] identifier[inspect] . identifier[isfunction] ( identifier[value] ):
identifier[functions] [ identifier[name] ]= identifier[value]
keyword[continue]
identifier[setattr] ( identifier[self] , identifier[name] , identifier[value] )
identifier[_resolve_functions] ( identifier[functions] , identifier[self] )
identifier[self] . identifier[initialize] ()
keyword[def] identifier[setup] ( identifier[self] )-> keyword[None] :
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[mocks] )
keyword[for] identifier[mock_symbol] , identifier[mock_result] keyword[in] identifier[self] . identifier[mocks] . identifier[items] ():
keyword[if] identifier[_find_mocker] ( identifier[mock_symbol] , identifier[self] . identifier[context] )():
identifier[_prepare_mock] ( identifier[self] . identifier[context] , identifier[mock_symbol] ,** identifier[mock_result] )
identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[setup] ()
identifier[namespace] [ identifier[class_name] ]= identifier[type] ( identifier[class_name] , identifier[base_classes] ,{
literal[string] : identifier[description] ,
literal[string] : identifier[__init__] ,
literal[string] : identifier[caller_module] ,
literal[string] : identifier[setup] ,
literal[string] : identifier[my_uuid] ,
}) | def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
"""Register a Fixture class in namespace with the given properties.
Creates a Fixture class (not object) and inserts it into the provided
namespace. The properties is a dict but allows functions to reference other
properties and acts like a small DSL (domain specific language). This is
really just a declarative way to compose data about a test fixture and make
it repeatable.
Files calling this function are expected to house one or more Fixtures and
have a name that ends with a UUID without its hyphens. For example:
foo_38de9ceec5694c96ace90c9ca37e5bcb.py. This UUID is used to uniquely
track the Fixture through the test suite and allow Fixtures to scale without
concern.
**Parameters**
:``namespace``: dictionary to insert the generated class into
:``base_classes``: list of classes the new class should inherit
:``properties``: dictionary of properties with their values
Properties can have the following forms:
:functions: invoked with the Fixture as it's argument
:classes: instantiated without any arguments (unless it subclasses
``torment.fixtures.Fixture`` in which case it's passed context)
:literals: any standard python type (i.e. int, str, dict)
.. note::
function execution may error (this will be emitted as a logging event).
functions will continually be tried until they resolve or the same set
of functions is continually erroring. These functions that failed to
resolve are left in tact for later processing.
Properties by the following names also have defined behavior:
:description: added to the Fixture's description as an addendum
:error: must be a dictionary with three keys:
:class: class to instantiate (usually an exception)
:args: arguments to pass to class initialization
:kwargs: keyword arguments to pass to class initialization
:mocks: dictionary mapping mock symbols to corresponding values
Properties by the following names are reserved and should not be used:
* name
"""
# ensure we have a clean copy of the data
# and won't stomp on re-uses elsewhere in
# someone's code
props = copy.deepcopy(properties)
desc = props.pop('description', None) # type: Union[str, None]
caller_frame = inspect.stack()[1]
caller_file = caller_frame[1]
caller_module = inspect.getmodule(caller_frame[0])
my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])
class_name = _unique_class_name(namespace, my_uuid)
@property
def description(self) -> str:
_ = super(self.__class__, self).description
if desc is not None:
_ += '—' + desc # depends on [control=['if'], data=['desc']]
return _
def __init__(self, context: 'torment.TestContext') -> None:
super(self.__class__, self).__init__(context)
functions = {}
for (name, value) in props.items():
if name == 'error':
self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
continue # depends on [control=['if'], data=[]]
if inspect.isclass(value):
if issubclass(value, Fixture):
value = value(self.context) # depends on [control=['if'], data=[]]
else:
value = value() # depends on [control=['if'], data=[]]
if inspect.isfunction(value):
functions[name] = value
continue # depends on [control=['if'], data=[]]
setattr(self, name, value) # depends on [control=['for'], data=[]]
_resolve_functions(functions, self)
self.initialize()
def setup(self) -> None:
if hasattr(self, 'mocks'):
logger.debug('self.mocks: %s', self.mocks)
for (mock_symbol, mock_result) in self.mocks.items():
if _find_mocker(mock_symbol, self.context)():
_prepare_mock(self.context, mock_symbol, **mock_result) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
super(self.__class__, self).setup()
namespace[class_name] = type(class_name, base_classes, {'description': description, '__init__': __init__, '__module__': caller_module, 'setup': setup, 'uuid': my_uuid}) |
def sync(self):
"""Retrieve areas from ElkM1"""
self.elk.send(ka_encode())
self.get_descriptions(TextDescriptions.KEYPAD.value) | def function[sync, parameter[self]]:
constant[Retrieve areas from ElkM1]
call[name[self].elk.send, parameter[call[name[ka_encode], parameter[]]]]
call[name[self].get_descriptions, parameter[name[TextDescriptions].KEYPAD.value]] | keyword[def] identifier[sync] ( identifier[self] ):
literal[string]
identifier[self] . identifier[elk] . identifier[send] ( identifier[ka_encode] ())
identifier[self] . identifier[get_descriptions] ( identifier[TextDescriptions] . identifier[KEYPAD] . identifier[value] ) | def sync(self):
"""Retrieve areas from ElkM1"""
self.elk.send(ka_encode())
self.get_descriptions(TextDescriptions.KEYPAD.value) |
def move_to(self, r):
'''Translate the molecule to a new position *r*.
'''
dx = r - self.r_array[0]
self.r_array += dx | def function[move_to, parameter[self, r]]:
constant[Translate the molecule to a new position *r*.
]
variable[dx] assign[=] binary_operation[name[r] - call[name[self].r_array][constant[0]]]
<ast.AugAssign object at 0x7da207f00430> | keyword[def] identifier[move_to] ( identifier[self] , identifier[r] ):
literal[string]
identifier[dx] = identifier[r] - identifier[self] . identifier[r_array] [ literal[int] ]
identifier[self] . identifier[r_array] += identifier[dx] | def move_to(self, r):
"""Translate the molecule to a new position *r*.
"""
dx = r - self.r_array[0]
self.r_array += dx |
def logger(self):
''' Lazy logger '''
if self.__logger is None:
self.__logger = logging.getLogger(self.__name)
return self.__logger | def function[logger, parameter[self]]:
constant[ Lazy logger ]
if compare[name[self].__logger is constant[None]] begin[:]
name[self].__logger assign[=] call[name[logging].getLogger, parameter[name[self].__name]]
return[name[self].__logger] | keyword[def] identifier[logger] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__logger] keyword[is] keyword[None] :
identifier[self] . identifier[__logger] = identifier[logging] . identifier[getLogger] ( identifier[self] . identifier[__name] )
keyword[return] identifier[self] . identifier[__logger] | def logger(self):
""" Lazy logger """
if self.__logger is None:
self.__logger = logging.getLogger(self.__name) # depends on [control=['if'], data=[]]
return self.__logger |
def video_l1_top(body_output, targets, model_hparams, vocab_size):
"""Top transformation for video."""
del targets, vocab_size # unused arg
num_channels = model_hparams.problem.num_channels
num_frames = model_hparams.video_num_target_frames
with tf.variable_scope("rgb"):
body_output_shape = common_layers.shape_list(body_output)
res = tf.layers.dense(body_output, num_channels * num_frames, name="cast")
res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames])
res = tf.transpose(res, [0, 4, 1, 2, 3]) # Move frames next to batch.
if not tf.get_variable_scope().reuse:
res_argmax = res[:, -1, :, :, :]
tf.summary.image(
"result",
common_layers.tpu_safe_image_summary(res_argmax),
max_outputs=1)
return tf.expand_dims(res, axis=-1) | def function[video_l1_top, parameter[body_output, targets, model_hparams, vocab_size]]:
constant[Top transformation for video.]
<ast.Delete object at 0x7da207f015d0>
variable[num_channels] assign[=] name[model_hparams].problem.num_channels
variable[num_frames] assign[=] name[model_hparams].video_num_target_frames
with call[name[tf].variable_scope, parameter[constant[rgb]]] begin[:]
variable[body_output_shape] assign[=] call[name[common_layers].shape_list, parameter[name[body_output]]]
variable[res] assign[=] call[name[tf].layers.dense, parameter[name[body_output], binary_operation[name[num_channels] * name[num_frames]]]]
variable[res] assign[=] call[name[tf].reshape, parameter[name[res], binary_operation[call[name[body_output_shape]][<ast.Slice object at 0x7da207f03b50>] + list[[<ast.Name object at 0x7da207f01450>, <ast.Name object at 0x7da207f013c0>]]]]]
variable[res] assign[=] call[name[tf].transpose, parameter[name[res], list[[<ast.Constant object at 0x7da207f02b00>, <ast.Constant object at 0x7da207f03310>, <ast.Constant object at 0x7da207f03cd0>, <ast.Constant object at 0x7da207f01000>, <ast.Constant object at 0x7da207f01030>]]]]
if <ast.UnaryOp object at 0x7da207f00280> begin[:]
variable[res_argmax] assign[=] call[name[res]][tuple[[<ast.Slice object at 0x7da207f02a10>, <ast.UnaryOp object at 0x7da207f039a0>, <ast.Slice object at 0x7da207f01690>, <ast.Slice object at 0x7da207f008b0>, <ast.Slice object at 0x7da207f03fa0>]]]
call[name[tf].summary.image, parameter[constant[result], call[name[common_layers].tpu_safe_image_summary, parameter[name[res_argmax]]]]]
return[call[name[tf].expand_dims, parameter[name[res]]]] | keyword[def] identifier[video_l1_top] ( identifier[body_output] , identifier[targets] , identifier[model_hparams] , identifier[vocab_size] ):
literal[string]
keyword[del] identifier[targets] , identifier[vocab_size]
identifier[num_channels] = identifier[model_hparams] . identifier[problem] . identifier[num_channels]
identifier[num_frames] = identifier[model_hparams] . identifier[video_num_target_frames]
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] ):
identifier[body_output_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[body_output] )
identifier[res] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[body_output] , identifier[num_channels] * identifier[num_frames] , identifier[name] = literal[string] )
identifier[res] = identifier[tf] . identifier[reshape] ( identifier[res] , identifier[body_output_shape] [: literal[int] ]+[ identifier[num_channels] , identifier[num_frames] ])
identifier[res] = identifier[tf] . identifier[transpose] ( identifier[res] ,[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ])
keyword[if] keyword[not] identifier[tf] . identifier[get_variable_scope] (). identifier[reuse] :
identifier[res_argmax] = identifier[res] [:,- literal[int] ,:,:,:]
identifier[tf] . identifier[summary] . identifier[image] (
literal[string] ,
identifier[common_layers] . identifier[tpu_safe_image_summary] ( identifier[res_argmax] ),
identifier[max_outputs] = literal[int] )
keyword[return] identifier[tf] . identifier[expand_dims] ( identifier[res] , identifier[axis] =- literal[int] ) | def video_l1_top(body_output, targets, model_hparams, vocab_size):
"""Top transformation for video."""
del targets, vocab_size # unused arg
num_channels = model_hparams.problem.num_channels
num_frames = model_hparams.video_num_target_frames
with tf.variable_scope('rgb'):
body_output_shape = common_layers.shape_list(body_output)
res = tf.layers.dense(body_output, num_channels * num_frames, name='cast')
res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames])
res = tf.transpose(res, [0, 4, 1, 2, 3]) # Move frames next to batch.
if not tf.get_variable_scope().reuse:
res_argmax = res[:, -1, :, :, :]
tf.summary.image('result', common_layers.tpu_safe_image_summary(res_argmax), max_outputs=1) # depends on [control=['if'], data=[]]
return tf.expand_dims(res, axis=-1) # depends on [control=['with'], data=[]] |
def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL):
"""Rotational constants for a given molecular system.
Calculates the rotational constants for the provided system with numerical
value given in the units provided in `units`. The orthnormality tolerance
`on_tol` is required in order to be passed through to the
:func:`principals` function.
If the system is linear or a single atom, the effectively-zero principal
moments of inertia will be assigned values of
:data:`opan.const.PRM.ZERO_MOMENT_TOL`
before transformation into the appropriate rotational constant units.
The moments of inertia are always sorted in increasing order as
:math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants
calculated from these will thus always be in **decreasing** order
as :math:`B_A \\geq B_B \\geq B_C`, retaining the
ordering and association with the three principal ``axes[:,i]`` generated
by :func:`principals`.
Parameters
----------
geom
length-3N |npfloat_| --
Coordinates of the atoms
masses
length-N OR length-3N |npfloat_| --
Atomic masses of the atoms. Length-3N option is to allow calculation of
a per-coordinate perturbed value.
units
:class:`~opan.const.EnumUnitsRotConst`, optional --
Enum value indicating the desired units of the output rotational
constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA`
:math:`\\left(1\\over \\mathrm{uB^2}\\right)`
on_tol
|npfloat_|, optional --
Tolerance for deviation from unity/zero for principal axis dot
products, within which axes are considered orthonormal. Default is
:data:`opan.const.DEF.ORTHONORM_TOL`
Returns
-------
rc
length-3 |npfloat_| --
Vector of rotational constants in the indicated units
"""
# Imports
import numpy as np
from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS
# Ensure units are valid
if not units in EURC:
raise ValueError("'{0}' is not a valid units value".format(units))
## end if
# Retrieve the moments, axes and top type. Geom and masses are proofed
# internally in this call.
mom, ax, top = principals(geom, masses, on_tol)
# Check for special cases
if top == ETT.ATOM:
# All moments are zero; set to zero-moment threshold
mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3)
elif top == ETT.LINEAR:
# First moment is zero; set to zero-moment threshold
mom[0] = PRM.ZERO_MOMENT_TOL
## end if
# Calculate the values in the indicated units
if units == EURC.INV_INERTIA: # 1/(amu*B^2)
rc = 1.0 / (2.0 * mom)
elif units == EURC.ANGFREQ_ATOMIC: # 1/Ta
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU)
elif units == EURC.ANGFREQ_SECS: # 1/s
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA
elif units == EURC.CYCFREQ_ATOMIC: # cyc/Ta
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU)
elif units == EURC.CYCFREQ_HZ: # cyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \
PHYS.SEC_PER_TA
elif units == EURC.CYCFREQ_MHZ: # Mcyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / \
PHYS.SEC_PER_TA / 1.0e6
elif units == EURC.WAVENUM_ATOMIC: # cyc/B
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \
(8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED)
elif units == EURC.WAVENUM_CM: # cyc/cm
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / \
(8.0 * np.pi**2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) * 1.0e8
else: # pragma: no cover -- Valid units; not implemented
raise NotImplementedError("Units conversion not yet implemented.")
## end if
# Return the result
return rc | def function[rot_consts, parameter[geom, masses, units, on_tol]]:
constant[Rotational constants for a given molecular system.
Calculates the rotational constants for the provided system with numerical
value given in the units provided in `units`. The orthnormality tolerance
`on_tol` is required in order to be passed through to the
:func:`principals` function.
If the system is linear or a single atom, the effectively-zero principal
moments of inertia will be assigned values of
:data:`opan.const.PRM.ZERO_MOMENT_TOL`
before transformation into the appropriate rotational constant units.
The moments of inertia are always sorted in increasing order as
:math:`0 \leq I_A \leq I_B \leq I_C`; the rotational constants
calculated from these will thus always be in **decreasing** order
as :math:`B_A \geq B_B \geq B_C`, retaining the
ordering and association with the three principal ``axes[:,i]`` generated
by :func:`principals`.
Parameters
----------
geom
length-3N |npfloat_| --
Coordinates of the atoms
masses
length-N OR length-3N |npfloat_| --
Atomic masses of the atoms. Length-3N option is to allow calculation of
a per-coordinate perturbed value.
units
:class:`~opan.const.EnumUnitsRotConst`, optional --
Enum value indicating the desired units of the output rotational
constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA`
:math:`\left(1\over \mathrm{uB^2}\right)`
on_tol
|npfloat_|, optional --
Tolerance for deviation from unity/zero for principal axis dot
products, within which axes are considered orthonormal. Default is
:data:`opan.const.DEF.ORTHONORM_TOL`
Returns
-------
rc
length-3 |npfloat_| --
Vector of rotational constants in the indicated units
]
import module[numpy] as alias[np]
from relative_module[const] import module[EnumTopType], module[EnumUnitsRotConst], module[PRM], module[PHYS]
if <ast.UnaryOp object at 0x7da18bc73280> begin[:]
<ast.Raise object at 0x7da18bc73730>
<ast.Tuple object at 0x7da18bc73850> assign[=] call[name[principals], parameter[name[geom], name[masses], name[on_tol]]]
if compare[name[top] equal[==] name[ETT].ATOM] begin[:]
variable[mom] assign[=] call[name[np].repeat, parameter[name[PRM].ZERO_MOMENT_TOL, constant[3]]]
if compare[name[units] equal[==] name[EURC].INV_INERTIA] begin[:]
variable[rc] assign[=] binary_operation[constant[1.0] / binary_operation[constant[2.0] * name[mom]]]
return[name[rc]] | keyword[def] identifier[rot_consts] ( identifier[geom] , identifier[masses] , identifier[units] = identifier[_EURC] . identifier[INV_INERTIA] , identifier[on_tol] = identifier[_DEF] . identifier[ORTHONORM_TOL] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[from] .. identifier[const] keyword[import] identifier[EnumTopType] keyword[as] identifier[ETT] , identifier[EnumUnitsRotConst] keyword[as] identifier[EURC] , identifier[PRM] , identifier[PHYS]
keyword[if] keyword[not] identifier[units] keyword[in] identifier[EURC] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[units] ))
identifier[mom] , identifier[ax] , identifier[top] = identifier[principals] ( identifier[geom] , identifier[masses] , identifier[on_tol] )
keyword[if] identifier[top] == identifier[ETT] . identifier[ATOM] :
identifier[mom] = identifier[np] . identifier[repeat] ( identifier[PRM] . identifier[ZERO_MOMENT_TOL] , literal[int] )
keyword[elif] identifier[top] == identifier[ETT] . identifier[LINEAR] :
identifier[mom] [ literal[int] ]= identifier[PRM] . identifier[ZERO_MOMENT_TOL]
keyword[if] identifier[units] == identifier[EURC] . identifier[INV_INERTIA] :
identifier[rc] = literal[int] /( literal[int] * identifier[mom] )
keyword[elif] identifier[units] == identifier[EURC] . identifier[ANGFREQ_ATOMIC] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK_BAR] /( literal[int] * identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )
keyword[elif] identifier[units] == identifier[EURC] . identifier[ANGFREQ_SECS] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK_BAR] /( literal[int] * identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )/ identifier[PHYS] . identifier[SEC_PER_TA]
keyword[elif] identifier[units] == identifier[EURC] . identifier[CYCFREQ_ATOMIC] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK_BAR] /( literal[int] * identifier[np] . identifier[pi] * identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )
keyword[elif] identifier[units] == identifier[EURC] . identifier[CYCFREQ_HZ] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK_BAR] /( literal[int] * identifier[np] . identifier[pi] * identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )/ identifier[PHYS] . identifier[SEC_PER_TA]
keyword[elif] identifier[units] == identifier[EURC] . identifier[CYCFREQ_MHZ] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK_BAR] /( literal[int] * identifier[np] . identifier[pi] * identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )/ identifier[PHYS] . identifier[SEC_PER_TA] / literal[int]
keyword[elif] identifier[units] == identifier[EURC] . identifier[WAVENUM_ATOMIC] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK] /( identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )/( literal[int] * identifier[np] . identifier[pi] ** literal[int] * identifier[PHYS] . identifier[LIGHT_SPEED] )
keyword[elif] identifier[units] == identifier[EURC] . identifier[WAVENUM_CM] :
identifier[rc] = identifier[PHYS] . identifier[PLANCK] /( identifier[mom] * identifier[PHYS] . identifier[ME_PER_AMU] )/( literal[int] * identifier[np] . identifier[pi] ** literal[int] * identifier[PHYS] . identifier[LIGHT_SPEED] * identifier[PHYS] . identifier[ANG_PER_BOHR] )* literal[int]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[return] identifier[rc] | def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL):
"""Rotational constants for a given molecular system.
Calculates the rotational constants for the provided system with numerical
value given in the units provided in `units`. The orthnormality tolerance
`on_tol` is required in order to be passed through to the
:func:`principals` function.
If the system is linear or a single atom, the effectively-zero principal
moments of inertia will be assigned values of
:data:`opan.const.PRM.ZERO_MOMENT_TOL`
before transformation into the appropriate rotational constant units.
The moments of inertia are always sorted in increasing order as
:math:`0 \\leq I_A \\leq I_B \\leq I_C`; the rotational constants
calculated from these will thus always be in **decreasing** order
as :math:`B_A \\geq B_B \\geq B_C`, retaining the
ordering and association with the three principal ``axes[:,i]`` generated
by :func:`principals`.
Parameters
----------
geom
length-3N |npfloat_| --
Coordinates of the atoms
masses
length-N OR length-3N |npfloat_| --
Atomic masses of the atoms. Length-3N option is to allow calculation of
a per-coordinate perturbed value.
units
:class:`~opan.const.EnumUnitsRotConst`, optional --
Enum value indicating the desired units of the output rotational
constants. Default is :data:`~opan.const.EnumUnitsRotConst.INV_INERTIA`
:math:`\\left(1\\over \\mathrm{uB^2}\\right)`
on_tol
|npfloat_|, optional --
Tolerance for deviation from unity/zero for principal axis dot
products, within which axes are considered orthonormal. Default is
:data:`opan.const.DEF.ORTHONORM_TOL`
Returns
-------
rc
length-3 |npfloat_| --
Vector of rotational constants in the indicated units
"""
# Imports
import numpy as np
from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS
# Ensure units are valid
if not units in EURC:
raise ValueError("'{0}' is not a valid units value".format(units)) # depends on [control=['if'], data=[]]
## end if
# Retrieve the moments, axes and top type. Geom and masses are proofed
# internally in this call.
(mom, ax, top) = principals(geom, masses, on_tol)
# Check for special cases
if top == ETT.ATOM:
# All moments are zero; set to zero-moment threshold
mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3) # depends on [control=['if'], data=[]]
elif top == ETT.LINEAR:
# First moment is zero; set to zero-moment threshold
mom[0] = PRM.ZERO_MOMENT_TOL # depends on [control=['if'], data=[]]
## end if
# Calculate the values in the indicated units
if units == EURC.INV_INERTIA: # 1/(amu*B^2)
rc = 1.0 / (2.0 * mom) # depends on [control=['if'], data=[]]
elif units == EURC.ANGFREQ_ATOMIC: # 1/Ta
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) # depends on [control=['if'], data=[]]
elif units == EURC.ANGFREQ_SECS: # 1/s
rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA # depends on [control=['if'], data=[]]
elif units == EURC.CYCFREQ_ATOMIC: # cyc/Ta
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) # depends on [control=['if'], data=[]]
elif units == EURC.CYCFREQ_HZ: # cyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA # depends on [control=['if'], data=[]]
elif units == EURC.CYCFREQ_MHZ: # Mcyc/s
rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA / 1000000.0 # depends on [control=['if'], data=[]]
elif units == EURC.WAVENUM_ATOMIC: # cyc/B
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / (8.0 * np.pi ** 2.0 * PHYS.LIGHT_SPEED) # depends on [control=['if'], data=[]]
elif units == EURC.WAVENUM_CM: # cyc/cm
rc = PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) / (8.0 * np.pi ** 2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) * 100000000.0 # depends on [control=['if'], data=[]]
else: # pragma: no cover -- Valid units; not implemented
raise NotImplementedError('Units conversion not yet implemented.')
## end if
# Return the result
return rc |
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n') | def function[_add_flaky_report, parameter[self, stream]]:
constant[
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
]
variable[value] assign[=] call[name[self]._stream.getvalue, parameter[]]
if <ast.BoolOp object at 0x7da1b07f6f50> begin[:]
return[None]
call[name[stream].write, parameter[constant[===Flaky Test Report===
]]]
<ast.Try object at 0x7da1b07f5630>
call[name[stream].write, parameter[constant[
===End Flaky Test Report===
]]] | keyword[def] identifier[_add_flaky_report] ( identifier[self] , identifier[stream] ):
literal[string]
identifier[value] = identifier[self] . identifier[_stream] . identifier[getvalue] ()
keyword[if] keyword[not] identifier[self] . identifier[_flaky_success_report] keyword[and] keyword[not] identifier[value] :
keyword[return]
identifier[stream] . identifier[write] ( literal[string] )
keyword[try] :
identifier[stream] . identifier[write] ( identifier[value] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[stream] . identifier[write] ( identifier[value] . identifier[encode] ( literal[string] , literal[string] ))
identifier[stream] . identifier[write] ( literal[string] ) | def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and (not value):
return # depends on [control=['if'], data=[]]
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value) # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace')) # depends on [control=['except'], data=[]]
stream.write('\n===End Flaky Test Report===\n') |
def solar_noon(self, date=None, local=True):
"""Calculates the solar noon (the time when the sun is at its highest
point.)
:param date: The date for which to calculate the noon time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:returns: The date and time at which the solar noon occurs.
:rtype: :class:`~datetime.datetime`
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
noon = self.astral.solar_noon_utc(date, self.longitude)
if local:
return noon.astimezone(self.tz)
else:
return noon | def function[solar_noon, parameter[self, date, local]]:
constant[Calculates the solar noon (the time when the sun is at its highest
point.)
:param date: The date for which to calculate the noon time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:returns: The date and time at which the solar noon occurs.
:rtype: :class:`~datetime.datetime`
]
if <ast.BoolOp object at 0x7da20c7cb760> begin[:]
<ast.Raise object at 0x7da20c7ca8f0>
if compare[name[self].astral is constant[None]] begin[:]
name[self].astral assign[=] call[name[Astral], parameter[]]
if compare[name[date] is constant[None]] begin[:]
variable[date] assign[=] call[name[datetime].date.today, parameter[]]
variable[noon] assign[=] call[name[self].astral.solar_noon_utc, parameter[name[date], name[self].longitude]]
if name[local] begin[:]
return[call[name[noon].astimezone, parameter[name[self].tz]]] | keyword[def] identifier[solar_noon] ( identifier[self] , identifier[date] = keyword[None] , identifier[local] = keyword[True] ):
literal[string]
keyword[if] identifier[local] keyword[and] identifier[self] . identifier[timezone] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[astral] keyword[is] keyword[None] :
identifier[self] . identifier[astral] = identifier[Astral] ()
keyword[if] identifier[date] keyword[is] keyword[None] :
identifier[date] = identifier[datetime] . identifier[date] . identifier[today] ()
identifier[noon] = identifier[self] . identifier[astral] . identifier[solar_noon_utc] ( identifier[date] , identifier[self] . identifier[longitude] )
keyword[if] identifier[local] :
keyword[return] identifier[noon] . identifier[astimezone] ( identifier[self] . identifier[tz] )
keyword[else] :
keyword[return] identifier[noon] | def solar_noon(self, date=None, local=True):
"""Calculates the solar noon (the time when the sun is at its highest
point.)
:param date: The date for which to calculate the noon time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:returns: The date and time at which the solar noon occurs.
:rtype: :class:`~datetime.datetime`
"""
if local and self.timezone is None:
raise ValueError('Local time requested but Location has no timezone set.') # depends on [control=['if'], data=[]]
if self.astral is None:
self.astral = Astral() # depends on [control=['if'], data=[]]
if date is None:
date = datetime.date.today() # depends on [control=['if'], data=['date']]
noon = self.astral.solar_noon_utc(date, self.longitude)
if local:
return noon.astimezone(self.tz) # depends on [control=['if'], data=[]]
else:
return noon |
def set_style(self, style):
""" Sets the style to the specified Pygments style.
"""
style = SolarizedStyle # get_style_by_name(style)
self._style = style
self._clear_caches() | def function[set_style, parameter[self, style]]:
constant[ Sets the style to the specified Pygments style.
]
variable[style] assign[=] name[SolarizedStyle]
name[self]._style assign[=] name[style]
call[name[self]._clear_caches, parameter[]] | keyword[def] identifier[set_style] ( identifier[self] , identifier[style] ):
literal[string]
identifier[style] = identifier[SolarizedStyle]
identifier[self] . identifier[_style] = identifier[style]
identifier[self] . identifier[_clear_caches] () | def set_style(self, style):
""" Sets the style to the specified Pygments style.
"""
style = SolarizedStyle # get_style_by_name(style)
self._style = style
self._clear_caches() |
def hypercube(number_of_samples, variables):
"""
This implements Latin Hypercube Sampling.
See https://mathieu.fenniak.net/latin-hypercube-sampling/ for intuitive explanation of what it is
:param number_of_samples: number of segments/samples
:param variables: initial parameters and conditions (list of ranges, i.e. (70, 110), (0.1, 0.5) ..)
:return:
"""
number_of_dimensions = len(variables)
# Split range 0-1 into `nSeg` segments of equal size
segment_ranges = []
for i in range(number_of_samples):
ratio = 1.0 / number_of_samples
segment_ranges.append((ratio * i, ratio * (i + 1)))
x = []
for i in range(number_of_dimensions):
values = []
for j, segment in enumerate(segment_ranges):
# Set values[j] to a random value within the appropriate segment
random_element = random.random()
value = (random_element * (segment[1] - segment[0])) + (segment[0])
values.append(value)
# TODO: replace the below line with random.shuffle(values) (no need values= in front)
# this breaks regression tests as the values are shuffled in different order
values = random.sample(values, len(values))
x.append(values)
# at this point x is a list of lists containing a randomly-ordered list of random values
# in each of the `possvalues` segments
samples = []
for i in range(len(segment_ranges)):
sample = [y[i] for y in x]
samples.append(sample)
# It looks like `samples` is just transposed version of `x`, i.e. `samples[i][j] = x[j][i]`
for sample in samples:
for i, variable in enumerate(variables):
# if no range given for parameter/variable
if variable[1] == variable[0]:
# just return the whatever constant was given
sample[i] = variable[1]
else:
# return the value indicated by random number in sample[i] that is within that range
sample[i] = (sample[i] * (variable[1] - variable[0])) + variable[0]
return samples | def function[hypercube, parameter[number_of_samples, variables]]:
constant[
This implements Latin Hypercube Sampling.
See https://mathieu.fenniak.net/latin-hypercube-sampling/ for intuitive explanation of what it is
:param number_of_samples: number of segments/samples
:param variables: initial parameters and conditions (list of ranges, i.e. (70, 110), (0.1, 0.5) ..)
:return:
]
variable[number_of_dimensions] assign[=] call[name[len], parameter[name[variables]]]
variable[segment_ranges] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[number_of_samples]]]] begin[:]
variable[ratio] assign[=] binary_operation[constant[1.0] / name[number_of_samples]]
call[name[segment_ranges].append, parameter[tuple[[<ast.BinOp object at 0x7da1b0a72ec0>, <ast.BinOp object at 0x7da1b0a70520>]]]]
variable[x] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[number_of_dimensions]]]] begin[:]
variable[values] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0a71630>, <ast.Name object at 0x7da1b0a70250>]]] in starred[call[name[enumerate], parameter[name[segment_ranges]]]] begin[:]
variable[random_element] assign[=] call[name[random].random, parameter[]]
variable[value] assign[=] binary_operation[binary_operation[name[random_element] * binary_operation[call[name[segment]][constant[1]] - call[name[segment]][constant[0]]]] + call[name[segment]][constant[0]]]
call[name[values].append, parameter[name[value]]]
variable[values] assign[=] call[name[random].sample, parameter[name[values], call[name[len], parameter[name[values]]]]]
call[name[x].append, parameter[name[values]]]
variable[samples] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[segment_ranges]]]]]] begin[:]
variable[sample] assign[=] <ast.ListComp object at 0x7da1b0a66140>
call[name[samples].append, parameter[name[sample]]]
for taget[name[sample]] in starred[name[samples]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b09700d0>, <ast.Name object at 0x7da1b0972560>]]] in starred[call[name[enumerate], parameter[name[variables]]]] begin[:]
if compare[call[name[variable]][constant[1]] equal[==] call[name[variable]][constant[0]]] begin[:]
call[name[sample]][name[i]] assign[=] call[name[variable]][constant[1]]
return[name[samples]] | keyword[def] identifier[hypercube] ( identifier[number_of_samples] , identifier[variables] ):
literal[string]
identifier[number_of_dimensions] = identifier[len] ( identifier[variables] )
identifier[segment_ranges] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[number_of_samples] ):
identifier[ratio] = literal[int] / identifier[number_of_samples]
identifier[segment_ranges] . identifier[append] (( identifier[ratio] * identifier[i] , identifier[ratio] *( identifier[i] + literal[int] )))
identifier[x] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[number_of_dimensions] ):
identifier[values] =[]
keyword[for] identifier[j] , identifier[segment] keyword[in] identifier[enumerate] ( identifier[segment_ranges] ):
identifier[random_element] = identifier[random] . identifier[random] ()
identifier[value] =( identifier[random_element] *( identifier[segment] [ literal[int] ]- identifier[segment] [ literal[int] ]))+( identifier[segment] [ literal[int] ])
identifier[values] . identifier[append] ( identifier[value] )
identifier[values] = identifier[random] . identifier[sample] ( identifier[values] , identifier[len] ( identifier[values] ))
identifier[x] . identifier[append] ( identifier[values] )
identifier[samples] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[segment_ranges] )):
identifier[sample] =[ identifier[y] [ identifier[i] ] keyword[for] identifier[y] keyword[in] identifier[x] ]
identifier[samples] . identifier[append] ( identifier[sample] )
keyword[for] identifier[sample] keyword[in] identifier[samples] :
keyword[for] identifier[i] , identifier[variable] keyword[in] identifier[enumerate] ( identifier[variables] ):
keyword[if] identifier[variable] [ literal[int] ]== identifier[variable] [ literal[int] ]:
identifier[sample] [ identifier[i] ]= identifier[variable] [ literal[int] ]
keyword[else] :
identifier[sample] [ identifier[i] ]=( identifier[sample] [ identifier[i] ]*( identifier[variable] [ literal[int] ]- identifier[variable] [ literal[int] ]))+ identifier[variable] [ literal[int] ]
keyword[return] identifier[samples] | def hypercube(number_of_samples, variables):
"""
This implements Latin Hypercube Sampling.
See https://mathieu.fenniak.net/latin-hypercube-sampling/ for intuitive explanation of what it is
:param number_of_samples: number of segments/samples
:param variables: initial parameters and conditions (list of ranges, i.e. (70, 110), (0.1, 0.5) ..)
:return:
"""
number_of_dimensions = len(variables)
# Split range 0-1 into `nSeg` segments of equal size
segment_ranges = []
for i in range(number_of_samples):
ratio = 1.0 / number_of_samples
segment_ranges.append((ratio * i, ratio * (i + 1))) # depends on [control=['for'], data=['i']]
x = []
for i in range(number_of_dimensions):
values = []
for (j, segment) in enumerate(segment_ranges):
# Set values[j] to a random value within the appropriate segment
random_element = random.random()
value = random_element * (segment[1] - segment[0]) + segment[0]
values.append(value) # depends on [control=['for'], data=[]]
# TODO: replace the below line with random.shuffle(values) (no need values= in front)
# this breaks regression tests as the values are shuffled in different order
values = random.sample(values, len(values))
x.append(values) # depends on [control=['for'], data=[]]
# at this point x is a list of lists containing a randomly-ordered list of random values
# in each of the `possvalues` segments
samples = []
for i in range(len(segment_ranges)):
sample = [y[i] for y in x]
samples.append(sample) # depends on [control=['for'], data=['i']]
# It looks like `samples` is just transposed version of `x`, i.e. `samples[i][j] = x[j][i]`
for sample in samples:
for (i, variable) in enumerate(variables):
# if no range given for parameter/variable
if variable[1] == variable[0]:
# just return the whatever constant was given
sample[i] = variable[1] # depends on [control=['if'], data=[]]
else:
# return the value indicated by random number in sample[i] that is within that range
sample[i] = sample[i] * (variable[1] - variable[0]) + variable[0] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['sample']]
return samples |
def structured_sgd(sgd):
r"""
Allow an SGD to accept nested sequences of Parameters to optimize.
This decorator can intepret the :code:`Parameter` objects in `btypes.py`,
and can accept nested sequences of *any* structure of these objects to
optimise!
It can also optionally evaluate *random starts* (i.e. random starting
candidates) if the parameter objects have been initialised with
distributions. For this, an additional parameter is exposed in the
:code:`minimizer` interface.
Parameters
----------
fun : callable
objective function that takes in arbitrary ndarrays, floats or nested
sequences of these.
parameters : (nested) sequences of Parameter objects
Initial guess of the parameters of the objective function
nstarts : int, optional
The number random starting candidates for optimisation to evaluate.
This will only happen for :code:`nstarts > 0` and if at least one
:code:`Parameter` object is random.
Examples
--------
>>> from ..optimize import sgd
>>> from ..btypes import Parameter, Bound
Define a cost function that returns a pair. The first element is the cost
value and the second element is the gradient represented by a sequence.
Even if the cost is a function of a single variable, the gradient must be a
sequence containing one element.
>>> def cost(w, lambda_, data):
... N = len(data)
... y, X = data[:, 0], data[:, 1:]
... y_est = X.dot(w)
... ww = w.T.dot(w)
... obj = (y - y_est).sum() / N + lambda_ * ww
... gradw = - 2 * X.T.dot(y - y_est) / N + 2 * lambda_ * w
... gradl = ww
... return obj, [gradw, gradl]
Augment the SGD optimizer to take structured inputs
>>> new_sgd = structured_sgd(sgd)
Data
>>> y = np.linspace(1, 10, 100) + np.random.randn(100) + 1
>>> X = np.array([np.ones(100), np.linspace(1, 100, 100)]).T
>>> data = np.hstack((y[:, np.newaxis], X))
Constant Initial values
>>> w_0 = Parameter(np.array([1., 1.]), Bound())
>>> lambda_0 = Parameter(.25, Bound())
>>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10,
... eval_obj=True)
>>> res_w, res_lambda = res.x
Random Initial values
>>> from scipy.stats import norm, gamma
>>> w_0 = Parameter(norm(), Bound(), shape=(2,))
>>> lambda_0 = Parameter(gamma(1.), Bound())
>>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10,
... eval_obj=True, nstarts=100)
>>> res_w, res_lambda = res.x
"""
@wraps(sgd)
def new_sgd(fun,
parameters,
data,
eval_obj=False,
batch_size=10,
args=(),
random_state=None,
nstarts=100,
**sgd_kwargs):
(array1d, fbounds), shapes = flatten(parameters,
hstack=bt.hstack,
shape=bt.shape,
ravel=bt.ravel
)
flatten_args_dec = flatten_args(shapes)
new_fun = flatten_args_dec(fun)
# Find best random starting candidate if we are doing random starts
if eval_obj and nstarts > 0:
data_gen = gen_batch(data, batch_size, random_state=random_state)
array1d = _random_starts(
fun=fun,
parameters=parameters,
jac=True,
args=args,
data_gen=data_gen,
nstarts=nstarts,
random_state=random_state
)
if bool(eval_obj):
new_fun = flatten_func_grad(new_fun)
else:
new_fun = flatten_grad(new_fun)
result = sgd(new_fun, array1d, data=data, bounds=fbounds, args=args,
eval_obj=eval_obj, random_state=random_state,
**sgd_kwargs)
result['x'] = tuple(unflatten(result['x'], shapes))
return result
return new_sgd | def function[structured_sgd, parameter[sgd]]:
constant[
Allow an SGD to accept nested sequences of Parameters to optimize.
This decorator can intepret the :code:`Parameter` objects in `btypes.py`,
and can accept nested sequences of *any* structure of these objects to
optimise!
It can also optionally evaluate *random starts* (i.e. random starting
candidates) if the parameter objects have been initialised with
distributions. For this, an additional parameter is exposed in the
:code:`minimizer` interface.
Parameters
----------
fun : callable
objective function that takes in arbitrary ndarrays, floats or nested
sequences of these.
parameters : (nested) sequences of Parameter objects
Initial guess of the parameters of the objective function
nstarts : int, optional
The number random starting candidates for optimisation to evaluate.
This will only happen for :code:`nstarts > 0` and if at least one
:code:`Parameter` object is random.
Examples
--------
>>> from ..optimize import sgd
>>> from ..btypes import Parameter, Bound
Define a cost function that returns a pair. The first element is the cost
value and the second element is the gradient represented by a sequence.
Even if the cost is a function of a single variable, the gradient must be a
sequence containing one element.
>>> def cost(w, lambda_, data):
... N = len(data)
... y, X = data[:, 0], data[:, 1:]
... y_est = X.dot(w)
... ww = w.T.dot(w)
... obj = (y - y_est).sum() / N + lambda_ * ww
... gradw = - 2 * X.T.dot(y - y_est) / N + 2 * lambda_ * w
... gradl = ww
... return obj, [gradw, gradl]
Augment the SGD optimizer to take structured inputs
>>> new_sgd = structured_sgd(sgd)
Data
>>> y = np.linspace(1, 10, 100) + np.random.randn(100) + 1
>>> X = np.array([np.ones(100), np.linspace(1, 100, 100)]).T
>>> data = np.hstack((y[:, np.newaxis], X))
Constant Initial values
>>> w_0 = Parameter(np.array([1., 1.]), Bound())
>>> lambda_0 = Parameter(.25, Bound())
>>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10,
... eval_obj=True)
>>> res_w, res_lambda = res.x
Random Initial values
>>> from scipy.stats import norm, gamma
>>> w_0 = Parameter(norm(), Bound(), shape=(2,))
>>> lambda_0 = Parameter(gamma(1.), Bound())
>>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10,
... eval_obj=True, nstarts=100)
>>> res_w, res_lambda = res.x
]
def function[new_sgd, parameter[fun, parameters, data, eval_obj, batch_size, args, random_state, nstarts]]:
<ast.Tuple object at 0x7da2054a58d0> assign[=] call[name[flatten], parameter[name[parameters]]]
variable[flatten_args_dec] assign[=] call[name[flatten_args], parameter[name[shapes]]]
variable[new_fun] assign[=] call[name[flatten_args_dec], parameter[name[fun]]]
if <ast.BoolOp object at 0x7da2054a5060> begin[:]
variable[data_gen] assign[=] call[name[gen_batch], parameter[name[data], name[batch_size]]]
variable[array1d] assign[=] call[name[_random_starts], parameter[]]
if call[name[bool], parameter[name[eval_obj]]] begin[:]
variable[new_fun] assign[=] call[name[flatten_func_grad], parameter[name[new_fun]]]
variable[result] assign[=] call[name[sgd], parameter[name[new_fun], name[array1d]]]
call[name[result]][constant[x]] assign[=] call[name[tuple], parameter[call[name[unflatten], parameter[call[name[result]][constant[x]], name[shapes]]]]]
return[name[result]]
return[name[new_sgd]] | keyword[def] identifier[structured_sgd] ( identifier[sgd] ):
literal[string]
@ identifier[wraps] ( identifier[sgd] )
keyword[def] identifier[new_sgd] ( identifier[fun] ,
identifier[parameters] ,
identifier[data] ,
identifier[eval_obj] = keyword[False] ,
identifier[batch_size] = literal[int] ,
identifier[args] =(),
identifier[random_state] = keyword[None] ,
identifier[nstarts] = literal[int] ,
** identifier[sgd_kwargs] ):
( identifier[array1d] , identifier[fbounds] ), identifier[shapes] = identifier[flatten] ( identifier[parameters] ,
identifier[hstack] = identifier[bt] . identifier[hstack] ,
identifier[shape] = identifier[bt] . identifier[shape] ,
identifier[ravel] = identifier[bt] . identifier[ravel]
)
identifier[flatten_args_dec] = identifier[flatten_args] ( identifier[shapes] )
identifier[new_fun] = identifier[flatten_args_dec] ( identifier[fun] )
keyword[if] identifier[eval_obj] keyword[and] identifier[nstarts] > literal[int] :
identifier[data_gen] = identifier[gen_batch] ( identifier[data] , identifier[batch_size] , identifier[random_state] = identifier[random_state] )
identifier[array1d] = identifier[_random_starts] (
identifier[fun] = identifier[fun] ,
identifier[parameters] = identifier[parameters] ,
identifier[jac] = keyword[True] ,
identifier[args] = identifier[args] ,
identifier[data_gen] = identifier[data_gen] ,
identifier[nstarts] = identifier[nstarts] ,
identifier[random_state] = identifier[random_state]
)
keyword[if] identifier[bool] ( identifier[eval_obj] ):
identifier[new_fun] = identifier[flatten_func_grad] ( identifier[new_fun] )
keyword[else] :
identifier[new_fun] = identifier[flatten_grad] ( identifier[new_fun] )
identifier[result] = identifier[sgd] ( identifier[new_fun] , identifier[array1d] , identifier[data] = identifier[data] , identifier[bounds] = identifier[fbounds] , identifier[args] = identifier[args] ,
identifier[eval_obj] = identifier[eval_obj] , identifier[random_state] = identifier[random_state] ,
** identifier[sgd_kwargs] )
identifier[result] [ literal[string] ]= identifier[tuple] ( identifier[unflatten] ( identifier[result] [ literal[string] ], identifier[shapes] ))
keyword[return] identifier[result]
keyword[return] identifier[new_sgd] | def structured_sgd(sgd):
"""
Allow an SGD to accept nested sequences of Parameters to optimize.
This decorator can intepret the :code:`Parameter` objects in `btypes.py`,
and can accept nested sequences of *any* structure of these objects to
optimise!
It can also optionally evaluate *random starts* (i.e. random starting
candidates) if the parameter objects have been initialised with
distributions. For this, an additional parameter is exposed in the
:code:`minimizer` interface.
Parameters
----------
fun : callable
objective function that takes in arbitrary ndarrays, floats or nested
sequences of these.
parameters : (nested) sequences of Parameter objects
Initial guess of the parameters of the objective function
nstarts : int, optional
The number random starting candidates for optimisation to evaluate.
This will only happen for :code:`nstarts > 0` and if at least one
:code:`Parameter` object is random.
Examples
--------
>>> from ..optimize import sgd
>>> from ..btypes import Parameter, Bound
Define a cost function that returns a pair. The first element is the cost
value and the second element is the gradient represented by a sequence.
Even if the cost is a function of a single variable, the gradient must be a
sequence containing one element.
>>> def cost(w, lambda_, data):
... N = len(data)
... y, X = data[:, 0], data[:, 1:]
... y_est = X.dot(w)
... ww = w.T.dot(w)
... obj = (y - y_est).sum() / N + lambda_ * ww
... gradw = - 2 * X.T.dot(y - y_est) / N + 2 * lambda_ * w
... gradl = ww
... return obj, [gradw, gradl]
Augment the SGD optimizer to take structured inputs
>>> new_sgd = structured_sgd(sgd)
Data
>>> y = np.linspace(1, 10, 100) + np.random.randn(100) + 1
>>> X = np.array([np.ones(100), np.linspace(1, 100, 100)]).T
>>> data = np.hstack((y[:, np.newaxis], X))
Constant Initial values
>>> w_0 = Parameter(np.array([1., 1.]), Bound())
>>> lambda_0 = Parameter(.25, Bound())
>>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10,
... eval_obj=True)
>>> res_w, res_lambda = res.x
Random Initial values
>>> from scipy.stats import norm, gamma
>>> w_0 = Parameter(norm(), Bound(), shape=(2,))
>>> lambda_0 = Parameter(gamma(1.), Bound())
>>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10,
... eval_obj=True, nstarts=100)
>>> res_w, res_lambda = res.x
"""
@wraps(sgd)
def new_sgd(fun, parameters, data, eval_obj=False, batch_size=10, args=(), random_state=None, nstarts=100, **sgd_kwargs):
((array1d, fbounds), shapes) = flatten(parameters, hstack=bt.hstack, shape=bt.shape, ravel=bt.ravel)
flatten_args_dec = flatten_args(shapes)
new_fun = flatten_args_dec(fun)
# Find best random starting candidate if we are doing random starts
if eval_obj and nstarts > 0:
data_gen = gen_batch(data, batch_size, random_state=random_state)
array1d = _random_starts(fun=fun, parameters=parameters, jac=True, args=args, data_gen=data_gen, nstarts=nstarts, random_state=random_state) # depends on [control=['if'], data=[]]
if bool(eval_obj):
new_fun = flatten_func_grad(new_fun) # depends on [control=['if'], data=[]]
else:
new_fun = flatten_grad(new_fun)
result = sgd(new_fun, array1d, data=data, bounds=fbounds, args=args, eval_obj=eval_obj, random_state=random_state, **sgd_kwargs)
result['x'] = tuple(unflatten(result['x'], shapes))
return result
return new_sgd |
def read_ical(self, ical_file_location): # type: (str) -> Calendar
""" Read the ical file """
with open(ical_file_location, 'r') as ical_file:
data = ical_file.read()
self.cal = Calendar.from_ical(data)
return self.cal | def function[read_ical, parameter[self, ical_file_location]]:
constant[ Read the ical file ]
with call[name[open], parameter[name[ical_file_location], constant[r]]] begin[:]
variable[data] assign[=] call[name[ical_file].read, parameter[]]
name[self].cal assign[=] call[name[Calendar].from_ical, parameter[name[data]]]
return[name[self].cal] | keyword[def] identifier[read_ical] ( identifier[self] , identifier[ical_file_location] ):
literal[string]
keyword[with] identifier[open] ( identifier[ical_file_location] , literal[string] ) keyword[as] identifier[ical_file] :
identifier[data] = identifier[ical_file] . identifier[read] ()
identifier[self] . identifier[cal] = identifier[Calendar] . identifier[from_ical] ( identifier[data] )
keyword[return] identifier[self] . identifier[cal] | def read_ical(self, ical_file_location): # type: (str) -> Calendar
' Read the ical file '
with open(ical_file_location, 'r') as ical_file:
data = ical_file.read() # depends on [control=['with'], data=['ical_file']]
self.cal = Calendar.from_ical(data)
return self.cal |
def with_objattr(name):
'''
wrap `with getattr(self, name)` out of func.
usage:
``` py
class A:
def __init__(self):
self._lock = RLock()
@with_objattr('_lock') # so easy to make a sync instance method !
def func():
pass
```
'''
def _wrap(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
with getattr(self, name):
return func(self, *args, **kwargs)
return wrapper
return _wrap | def function[with_objattr, parameter[name]]:
constant[
wrap `with getattr(self, name)` out of func.
usage:
``` py
class A:
def __init__(self):
self._lock = RLock()
@with_objattr('_lock') # so easy to make a sync instance method !
def func():
pass
```
]
def function[_wrap, parameter[func]]:
def function[wrapper, parameter[self]]:
with call[name[getattr], parameter[name[self], name[name]]] begin[:]
return[call[name[func], parameter[name[self], <ast.Starred object at 0x7da18f721a20>]]]
return[name[wrapper]]
return[name[_wrap]] | keyword[def] identifier[with_objattr] ( identifier[name] ):
literal[string]
keyword[def] identifier[_wrap] ( identifier[func] ):
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[with] identifier[getattr] ( identifier[self] , identifier[name] ):
keyword[return] identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[return] identifier[_wrap] | def with_objattr(name):
"""
wrap `with getattr(self, name)` out of func.
usage:
``` py
class A:
def __init__(self):
self._lock = RLock()
@with_objattr('_lock') # so easy to make a sync instance method !
def func():
pass
```
"""
def _wrap(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
with getattr(self, name):
return func(self, *args, **kwargs) # depends on [control=['with'], data=[]]
return wrapper
return _wrap |
def format_top(counter, top=3):
""" Format a top.
"""
items = islice(reversed(sorted(counter.iteritems(), key=lambda x: x[1])), 0, top)
return u'; '.join(u'{g} ({nb})'.format(g=g, nb=nb) for g, nb in items) | def function[format_top, parameter[counter, top]]:
constant[ Format a top.
]
variable[items] assign[=] call[name[islice], parameter[call[name[reversed], parameter[call[name[sorted], parameter[call[name[counter].iteritems, parameter[]]]]]], constant[0], name[top]]]
return[call[constant[; ].join, parameter[<ast.GeneratorExp object at 0x7da1b2369960>]]] | keyword[def] identifier[format_top] ( identifier[counter] , identifier[top] = literal[int] ):
literal[string]
identifier[items] = identifier[islice] ( identifier[reversed] ( identifier[sorted] ( identifier[counter] . identifier[iteritems] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])), literal[int] , identifier[top] )
keyword[return] literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[g] = identifier[g] , identifier[nb] = identifier[nb] ) keyword[for] identifier[g] , identifier[nb] keyword[in] identifier[items] ) | def format_top(counter, top=3):
""" Format a top.
"""
items = islice(reversed(sorted(counter.iteritems(), key=lambda x: x[1])), 0, top)
return u'; '.join((u'{g} ({nb})'.format(g=g, nb=nb) for (g, nb) in items)) |
def run(self):
# dynamic import for this command only to not need these in usual python case...
import git
import yaml
"""runner"""
repo_path = tempfile.mkdtemp(prefix='rosdevelop-' + os.path.dirname(__file__)) # TODO get actual package name ?
print("Getting ROS release repo in {0}...".format(repo_path))
rosrelease_repo = git.Repo.clone_from('https://github.com/asmodehn/pyzmp-rosrelease.git', repo_path)
# Reset our working tree to master
origin = rosrelease_repo.remotes.origin
rosrelease_repo.remotes.origin.fetch() # assure we actually have data. fetch() returns useful information
# Setup a local tracking branch of a remote branch
rosrelease_repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master).checkout()
print("Reading tracks.yaml...")
with open(os.path.join(rosrelease_repo.working_tree_dir, 'tracks.yaml'), 'r') as tracks:
try:
tracks_dict = yaml.load(tracks)
except yaml.YAMLError as exc:
raise
patch_dir = tracks_dict.get('tracks', {}).get('indigo', {}).get('patches', {})
print("Found patches for indigo in {0}".format(patch_dir))
src_files = os.listdir(os.path.join(rosrelease_repo.working_tree_dir, patch_dir))
working_repo = git.Repo(os.path.dirname(os.path.abspath(__file__)))
# adding patched files to ignore list if needed (to prevent accidental commit of patch)
# => BETTER if the patch do not erase previous file. TODO : fix problem with both .travis.yml
with open(os.path.join(working_repo.working_tree_dir, '.gitignore'), 'a+') as gitignore:
skipit = []
for line in gitignore:
if line in src_files:
skipit += line
else: # not found, we are at the eof
for f in src_files:
if f not in skipit:
gitignore.write(f+'\n') # append missing data
working_repo.git.add(['.gitignore']) # adding .gitignore to the index so git applies it (and hide new files)
for file_name in src_files:
print("Patching {0}".format(file_name))
full_file_name = os.path.join(rosrelease_repo.working_tree_dir, patch_dir, file_name)
if os.path.isfile(full_file_name):
# Special case for package.xml and version template string
if file_name == 'package.xml':
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'package.xml'), "wt") as fout:
with open(full_file_name, "rt") as fin:
for line in fin:
fout.write(line.replace(':{version}', __version__)) # TODO: proper template engine ?
else:
shutil.copy(full_file_name, os.path.dirname(os.path.abspath(__file__)))
sys.exit() | def function[run, parameter[self]]:
import module[git]
import module[yaml]
constant[runner]
variable[repo_path] assign[=] call[name[tempfile].mkdtemp, parameter[]]
call[name[print], parameter[call[constant[Getting ROS release repo in {0}...].format, parameter[name[repo_path]]]]]
variable[rosrelease_repo] assign[=] call[name[git].Repo.clone_from, parameter[constant[https://github.com/asmodehn/pyzmp-rosrelease.git], name[repo_path]]]
variable[origin] assign[=] name[rosrelease_repo].remotes.origin
call[name[rosrelease_repo].remotes.origin.fetch, parameter[]]
call[call[call[name[rosrelease_repo].create_head, parameter[constant[master], name[origin].refs.master]].set_tracking_branch, parameter[name[origin].refs.master]].checkout, parameter[]]
call[name[print], parameter[constant[Reading tracks.yaml...]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[rosrelease_repo].working_tree_dir, constant[tracks.yaml]]], constant[r]]] begin[:]
<ast.Try object at 0x7da1b23b27a0>
variable[patch_dir] assign[=] call[call[call[name[tracks_dict].get, parameter[constant[tracks], dictionary[[], []]]].get, parameter[constant[indigo], dictionary[[], []]]].get, parameter[constant[patches], dictionary[[], []]]]
call[name[print], parameter[call[constant[Found patches for indigo in {0}].format, parameter[name[patch_dir]]]]]
variable[src_files] assign[=] call[name[os].listdir, parameter[call[name[os].path.join, parameter[name[rosrelease_repo].working_tree_dir, name[patch_dir]]]]]
variable[working_repo] assign[=] call[name[git].Repo, parameter[call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[__file__]]]]]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[working_repo].working_tree_dir, constant[.gitignore]]], constant[a+]]] begin[:]
variable[skipit] assign[=] list[[]]
for taget[name[line]] in starred[name[gitignore]] begin[:]
if compare[name[line] in name[src_files]] begin[:]
<ast.AugAssign object at 0x7da1b23b0be0>
call[name[working_repo].git.add, parameter[list[[<ast.Constant object at 0x7da1b23b1810>]]]]
for taget[name[file_name]] in starred[name[src_files]] begin[:]
call[name[print], parameter[call[constant[Patching {0}].format, parameter[name[file_name]]]]]
variable[full_file_name] assign[=] call[name[os].path.join, parameter[name[rosrelease_repo].working_tree_dir, name[patch_dir], name[file_name]]]
if call[name[os].path.isfile, parameter[name[full_file_name]]] begin[:]
if compare[name[file_name] equal[==] constant[package.xml]] begin[:]
with call[name[open], parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[__file__]]]]], constant[package.xml]]], constant[wt]]] begin[:]
with call[name[open], parameter[name[full_file_name], constant[rt]]] begin[:]
for taget[name[line]] in starred[name[fin]] begin[:]
call[name[fout].write, parameter[call[name[line].replace, parameter[constant[:{version}], name[__version__]]]]]
call[name[sys].exit, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
keyword[import] identifier[git]
keyword[import] identifier[yaml]
literal[string]
identifier[repo_path] = identifier[tempfile] . identifier[mkdtemp] ( identifier[prefix] = literal[string] + identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[repo_path] ))
identifier[rosrelease_repo] = identifier[git] . identifier[Repo] . identifier[clone_from] ( literal[string] , identifier[repo_path] )
identifier[origin] = identifier[rosrelease_repo] . identifier[remotes] . identifier[origin]
identifier[rosrelease_repo] . identifier[remotes] . identifier[origin] . identifier[fetch] ()
identifier[rosrelease_repo] . identifier[create_head] ( literal[string] , identifier[origin] . identifier[refs] . identifier[master] ). identifier[set_tracking_branch] ( identifier[origin] . identifier[refs] . identifier[master] ). identifier[checkout] ()
identifier[print] ( literal[string] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[rosrelease_repo] . identifier[working_tree_dir] , literal[string] ), literal[string] ) keyword[as] identifier[tracks] :
keyword[try] :
identifier[tracks_dict] = identifier[yaml] . identifier[load] ( identifier[tracks] )
keyword[except] identifier[yaml] . identifier[YAMLError] keyword[as] identifier[exc] :
keyword[raise]
identifier[patch_dir] = identifier[tracks_dict] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,{})
identifier[print] ( literal[string] . identifier[format] ( identifier[patch_dir] ))
identifier[src_files] = identifier[os] . identifier[listdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[rosrelease_repo] . identifier[working_tree_dir] , identifier[patch_dir] ))
identifier[working_repo] = identifier[git] . identifier[Repo] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )))
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[working_repo] . identifier[working_tree_dir] , literal[string] ), literal[string] ) keyword[as] identifier[gitignore] :
identifier[skipit] =[]
keyword[for] identifier[line] keyword[in] identifier[gitignore] :
keyword[if] identifier[line] keyword[in] identifier[src_files] :
identifier[skipit] += identifier[line]
keyword[else] :
keyword[for] identifier[f] keyword[in] identifier[src_files] :
keyword[if] identifier[f] keyword[not] keyword[in] identifier[skipit] :
identifier[gitignore] . identifier[write] ( identifier[f] + literal[string] )
identifier[working_repo] . identifier[git] . identifier[add] ([ literal[string] ])
keyword[for] identifier[file_name] keyword[in] identifier[src_files] :
identifier[print] ( literal[string] . identifier[format] ( identifier[file_name] ))
identifier[full_file_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[rosrelease_repo] . identifier[working_tree_dir] , identifier[patch_dir] , identifier[file_name] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[full_file_name] ):
keyword[if] identifier[file_name] == literal[string] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )), literal[string] ), literal[string] ) keyword[as] identifier[fout] :
keyword[with] identifier[open] ( identifier[full_file_name] , literal[string] ) keyword[as] identifier[fin] :
keyword[for] identifier[line] keyword[in] identifier[fin] :
identifier[fout] . identifier[write] ( identifier[line] . identifier[replace] ( literal[string] , identifier[__version__] ))
keyword[else] :
identifier[shutil] . identifier[copy] ( identifier[full_file_name] , identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )))
identifier[sys] . identifier[exit] () | def run(self):
# dynamic import for this command only to not need these in usual python case...
import git
import yaml
'runner'
repo_path = tempfile.mkdtemp(prefix='rosdevelop-' + os.path.dirname(__file__)) # TODO get actual package name ?
print('Getting ROS release repo in {0}...'.format(repo_path))
rosrelease_repo = git.Repo.clone_from('https://github.com/asmodehn/pyzmp-rosrelease.git', repo_path)
# Reset our working tree to master
origin = rosrelease_repo.remotes.origin
rosrelease_repo.remotes.origin.fetch() # assure we actually have data. fetch() returns useful information
# Setup a local tracking branch of a remote branch
rosrelease_repo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master).checkout()
print('Reading tracks.yaml...')
with open(os.path.join(rosrelease_repo.working_tree_dir, 'tracks.yaml'), 'r') as tracks:
try:
tracks_dict = yaml.load(tracks) # depends on [control=['try'], data=[]]
except yaml.YAMLError as exc:
raise # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['tracks']]
patch_dir = tracks_dict.get('tracks', {}).get('indigo', {}).get('patches', {})
print('Found patches for indigo in {0}'.format(patch_dir))
src_files = os.listdir(os.path.join(rosrelease_repo.working_tree_dir, patch_dir))
working_repo = git.Repo(os.path.dirname(os.path.abspath(__file__)))
# adding patched files to ignore list if needed (to prevent accidental commit of patch)
# => BETTER if the patch do not erase previous file. TODO : fix problem with both .travis.yml
with open(os.path.join(working_repo.working_tree_dir, '.gitignore'), 'a+') as gitignore:
skipit = []
for line in gitignore:
if line in src_files:
skipit += line # depends on [control=['if'], data=['line']]
else: # not found, we are at the eof
for f in src_files:
if f not in skipit:
gitignore.write(f + '\n') # append missing data # depends on [control=['if'], data=['f']] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['gitignore']]
working_repo.git.add(['.gitignore']) # adding .gitignore to the index so git applies it (and hide new files)
for file_name in src_files:
print('Patching {0}'.format(file_name))
full_file_name = os.path.join(rosrelease_repo.working_tree_dir, patch_dir, file_name)
if os.path.isfile(full_file_name):
# Special case for package.xml and version template string
if file_name == 'package.xml':
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'package.xml'), 'wt') as fout:
with open(full_file_name, 'rt') as fin:
for line in fin:
fout.write(line.replace(':{version}', __version__)) # TODO: proper template engine ? # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fin']] # depends on [control=['with'], data=['open', 'fout']] # depends on [control=['if'], data=[]]
else:
shutil.copy(full_file_name, os.path.dirname(os.path.abspath(__file__))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_name']]
sys.exit() |
def setupTable_post(self):
"""Make a format 2 post table with the compiler's glyph order."""
super(OutlineTTFCompiler, self).setupTable_post()
if "post" not in self.otf:
return
post = self.otf["post"]
post.formatType = 2.0
post.extraNames = []
post.mapping = {}
post.glyphOrder = self.glyphOrder | def function[setupTable_post, parameter[self]]:
constant[Make a format 2 post table with the compiler's glyph order.]
call[call[name[super], parameter[name[OutlineTTFCompiler], name[self]]].setupTable_post, parameter[]]
if compare[constant[post] <ast.NotIn object at 0x7da2590d7190> name[self].otf] begin[:]
return[None]
variable[post] assign[=] call[name[self].otf][constant[post]]
name[post].formatType assign[=] constant[2.0]
name[post].extraNames assign[=] list[[]]
name[post].mapping assign[=] dictionary[[], []]
name[post].glyphOrder assign[=] name[self].glyphOrder | keyword[def] identifier[setupTable_post] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[OutlineTTFCompiler] , identifier[self] ). identifier[setupTable_post] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[otf] :
keyword[return]
identifier[post] = identifier[self] . identifier[otf] [ literal[string] ]
identifier[post] . identifier[formatType] = literal[int]
identifier[post] . identifier[extraNames] =[]
identifier[post] . identifier[mapping] ={}
identifier[post] . identifier[glyphOrder] = identifier[self] . identifier[glyphOrder] | def setupTable_post(self):
"""Make a format 2 post table with the compiler's glyph order."""
super(OutlineTTFCompiler, self).setupTable_post()
if 'post' not in self.otf:
return # depends on [control=['if'], data=[]]
post = self.otf['post']
post.formatType = 2.0
post.extraNames = []
post.mapping = {}
post.glyphOrder = self.glyphOrder |
def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False | def function[bury, parameter[self, priority]]:
constant[Bury this job.]
if name[self].reserved begin[:]
call[name[self].conn.bury, parameter[name[self].jid, <ast.BoolOp object at 0x7da20e9b1180>]]
name[self].reserved assign[=] constant[False] | keyword[def] identifier[bury] ( identifier[self] , identifier[priority] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[reserved] :
identifier[self] . identifier[conn] . identifier[bury] ( identifier[self] . identifier[jid] , identifier[priority] keyword[or] identifier[self] . identifier[_priority] ())
identifier[self] . identifier[reserved] = keyword[False] | def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False # depends on [control=['if'], data=[]] |
def path_generator(
path_list,
include_glob_list=None,
exclude_glob_list=None,
recursive=True,
ignore_invalid=False,
default_excludes=True,
return_dir_paths=False,
):
"""# language=rst.
Args:
path_list: list of str
List of file- and dir paths. File paths are used directly and dirs are searched
for files.
``path_list`` does not accept glob patterns, as it's more convenient to let the
shell expand glob patterns to directly specified files and dirs. E.g., to use a
glob to select all .py files in a subdir, the command may be called with
sub/dir/*.py, which the shell expands to a list of files, which are then passed
to this function. The paths should be Unicode or utf-8 strings. Tilde ("~") to
home expansion is performed on the paths.
The shell can also expand glob patterns to dir paths or a mix of file and dir
paths.
include_glob_list: list of str
exclude_glob_list: list of str
Patterns ending with "/" are matched only against dir names. All other patterns
are matched only against file names.
If the include list contains any file patterns, files must match one or more of
the patterns in order to be returned.
If the include list contains any dir patterns, dirs must match one or more of
the patterns in order for the recursive search to descend into them.
The exclude list works in the same way except that matching files and dirs are
excluded instead of included. If both include and exclude lists are specified,
files and dirs must both match the include and not match the exclude patterns in
order to be returned or descended into.
recursive: bool
- **True** (default): Search subdirectories
- **False**: Do not search subdirectories
ignore_invalid: bool
- **True**: Invalid paths in path_list are ignored.
- **False** (default): EnvironmentError is raised if any of the paths in
``path_list`` do not reference an existing file or dir.
default_excludes: bool
- **True**: A list of glob patterns for files and dirs that should typically be
ignored is added to any exclude patterns passed to the function. These
include dirs such as .git and backup files, such as files appended with "~".
- **False**: No files or dirs are excluded by default.
return_dir_paths: bool
- **False**: Only file paths are returned.
- **True**: Directory paths are also returned.
Returns:
File path iterator
Notes:
During iteration, the iterator can be prevented from descending into a directory
by sending a "skip" flag when the iterator yields the directory path. This allows
the client to determine if directories should be iterated by, for instance, which
files are present in the directory. This can be used in conjunction with the
include and exclude glob lists. Note that, in order to receive directory paths
that can be skipped, ``return_dir_paths`` must be set to True.
The regular ``for...in`` syntax does not support sending the "skip" flag back to
the iterator. Instead, use a pattern like:
.. highlight: python
::
itr = file_iterator.file_iter(..., return_dir_paths=True)
try:
path = itr.next()
while True:
skip_dir = determine_if_dir_should_be_skipped(path)
file_path = itr.send(skip_dir)
except KeyboardInterrupt:
raise StopIteration
except StopIteration:
pass
Glob patterns are matched only against file and directory names, not the full
paths.
Paths passed directly in ``path_list`` are not filtered.
The same file can be returned multiple times if ``path_list`` contains duplicated
file paths or dir paths, or dir paths that implicitly include the same subdirs.
``include_glob_list`` and ``exclude_glob_list`` are handy for filtering the files
found in dir searches.
Remember to escape the include and exclude glob patterns on the command line so
that they're not expanded by the shell.
"""
include_glob_list = include_glob_list or []
exclude_glob_list = exclude_glob_list or []
if default_excludes:
exclude_glob_list += DEFAULT_EXCLUDE_GLOB_LIST
logging.debug('file_iter():')
logging.debug(' paths: {}'.format(', '.join(path_list)))
logging.debug(' include: {}'.format(', '.join(include_glob_list)))
logging.debug(' exclude: {}'.format(', '.join(exclude_glob_list)))
logging.debug(' recursive: {}'.format(recursive))
logging.debug(' ignore_invalid: {}'.format(ignore_invalid))
logging.debug(' default_excludes: {}'.format(default_excludes))
logging.debug(' return_dir_paths: {}'.format(return_dir_paths))
logging.debug('')
include_file_glob_list = [
p for p in include_glob_list if not p.endswith(os.path.sep)
]
exclude_file_glob_list = [
p for p in exclude_glob_list if not p.endswith(os.path.sep)
]
include_dir_glob_list = [p for p in include_glob_list if p.endswith(os.path.sep)]
exclude_dir_glob_list = [p for p in exclude_glob_list if p.endswith(os.path.sep)]
for path in path_list:
path = os.path.expanduser(path)
# Return file
if os.path.isfile(path):
file_name = os.path.split(path)[1]
if not _is_filtered(
file_name, include_file_glob_list, exclude_file_glob_list
):
yield path
# Search directory
elif os.path.isdir(path):
yield from _filtered_walk(
path,
include_dir_glob_list,
exclude_dir_glob_list,
include_file_glob_list,
exclude_file_glob_list,
return_dir_paths,
recursive,
)
# else:
# # Single directory search
# file_path_iter = os.listdir(path)
#
# skip_dir = None
#
# while True:
# file_or_dir_path = file_path_iter.send(skip_dir)
# file_or_dir_name = os.path.split(file_or_dir_path)[1]
# skip_dir = False
# skip_dir = yield file_or_dir_path
else:
if not ignore_invalid:
raise EnvironmentError(0, 'Not a valid file or dir path', path) | def function[path_generator, parameter[path_list, include_glob_list, exclude_glob_list, recursive, ignore_invalid, default_excludes, return_dir_paths]]:
constant[# language=rst.
Args:
path_list: list of str
List of file- and dir paths. File paths are used directly and dirs are searched
for files.
``path_list`` does not accept glob patterns, as it's more convenient to let the
shell expand glob patterns to directly specified files and dirs. E.g., to use a
glob to select all .py files in a subdir, the command may be called with
sub/dir/*.py, which the shell expands to a list of files, which are then passed
to this function. The paths should be Unicode or utf-8 strings. Tilde ("~") to
home expansion is performed on the paths.
The shell can also expand glob patterns to dir paths or a mix of file and dir
paths.
include_glob_list: list of str
exclude_glob_list: list of str
Patterns ending with "/" are matched only against dir names. All other patterns
are matched only against file names.
If the include list contains any file patterns, files must match one or more of
the patterns in order to be returned.
If the include list contains any dir patterns, dirs must match one or more of
the patterns in order for the recursive search to descend into them.
The exclude list works in the same way except that matching files and dirs are
excluded instead of included. If both include and exclude lists are specified,
files and dirs must both match the include and not match the exclude patterns in
order to be returned or descended into.
recursive: bool
- **True** (default): Search subdirectories
- **False**: Do not search subdirectories
ignore_invalid: bool
- **True**: Invalid paths in path_list are ignored.
- **False** (default): EnvironmentError is raised if any of the paths in
``path_list`` do not reference an existing file or dir.
default_excludes: bool
- **True**: A list of glob patterns for files and dirs that should typically be
ignored is added to any exclude patterns passed to the function. These
include dirs such as .git and backup files, such as files appended with "~".
- **False**: No files or dirs are excluded by default.
return_dir_paths: bool
- **False**: Only file paths are returned.
- **True**: Directory paths are also returned.
Returns:
File path iterator
Notes:
During iteration, the iterator can be prevented from descending into a directory
by sending a "skip" flag when the iterator yields the directory path. This allows
the client to determine if directories should be iterated by, for instance, which
files are present in the directory. This can be used in conjunction with the
include and exclude glob lists. Note that, in order to receive directory paths
that can be skipped, ``return_dir_paths`` must be set to True.
The regular ``for...in`` syntax does not support sending the "skip" flag back to
the iterator. Instead, use a pattern like:
.. highlight: python
::
itr = file_iterator.file_iter(..., return_dir_paths=True)
try:
path = itr.next()
while True:
skip_dir = determine_if_dir_should_be_skipped(path)
file_path = itr.send(skip_dir)
except KeyboardInterrupt:
raise StopIteration
except StopIteration:
pass
Glob patterns are matched only against file and directory names, not the full
paths.
Paths passed directly in ``path_list`` are not filtered.
The same file can be returned multiple times if ``path_list`` contains duplicated
file paths or dir paths, or dir paths that implicitly include the same subdirs.
``include_glob_list`` and ``exclude_glob_list`` are handy for filtering the files
found in dir searches.
Remember to escape the include and exclude glob patterns on the command line so
that they're not expanded by the shell.
]
variable[include_glob_list] assign[=] <ast.BoolOp object at 0x7da1b19b5330>
variable[exclude_glob_list] assign[=] <ast.BoolOp object at 0x7da1b19b7520>
if name[default_excludes] begin[:]
<ast.AugAssign object at 0x7da1b19b4f70>
call[name[logging].debug, parameter[constant[file_iter():]]]
call[name[logging].debug, parameter[call[constant[ paths: {}].format, parameter[call[constant[, ].join, parameter[name[path_list]]]]]]]
call[name[logging].debug, parameter[call[constant[ include: {}].format, parameter[call[constant[, ].join, parameter[name[include_glob_list]]]]]]]
call[name[logging].debug, parameter[call[constant[ exclude: {}].format, parameter[call[constant[, ].join, parameter[name[exclude_glob_list]]]]]]]
call[name[logging].debug, parameter[call[constant[ recursive: {}].format, parameter[name[recursive]]]]]
call[name[logging].debug, parameter[call[constant[ ignore_invalid: {}].format, parameter[name[ignore_invalid]]]]]
call[name[logging].debug, parameter[call[constant[ default_excludes: {}].format, parameter[name[default_excludes]]]]]
call[name[logging].debug, parameter[call[constant[ return_dir_paths: {}].format, parameter[name[return_dir_paths]]]]]
call[name[logging].debug, parameter[constant[]]]
variable[include_file_glob_list] assign[=] <ast.ListComp object at 0x7da1b19b6e90>
variable[exclude_file_glob_list] assign[=] <ast.ListComp object at 0x7da1b19b6f50>
variable[include_dir_glob_list] assign[=] <ast.ListComp object at 0x7da1b19b5e10>
variable[exclude_dir_glob_list] assign[=] <ast.ListComp object at 0x7da18dc051e0>
for taget[name[path]] in starred[name[path_list]] begin[:]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[path]]]
if call[name[os].path.isfile, parameter[name[path]]] begin[:]
variable[file_name] assign[=] call[call[name[os].path.split, parameter[name[path]]]][constant[1]]
if <ast.UnaryOp object at 0x7da1b1af9270> begin[:]
<ast.Yield object at 0x7da1b1af8ca0> | keyword[def] identifier[path_generator] (
identifier[path_list] ,
identifier[include_glob_list] = keyword[None] ,
identifier[exclude_glob_list] = keyword[None] ,
identifier[recursive] = keyword[True] ,
identifier[ignore_invalid] = keyword[False] ,
identifier[default_excludes] = keyword[True] ,
identifier[return_dir_paths] = keyword[False] ,
):
literal[string]
identifier[include_glob_list] = identifier[include_glob_list] keyword[or] []
identifier[exclude_glob_list] = identifier[exclude_glob_list] keyword[or] []
keyword[if] identifier[default_excludes] :
identifier[exclude_glob_list] += identifier[DEFAULT_EXCLUDE_GLOB_LIST]
identifier[logging] . identifier[debug] ( literal[string] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[path_list] )))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[include_glob_list] )))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[exclude_glob_list] )))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[recursive] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[ignore_invalid] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[default_excludes] ))
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[return_dir_paths] ))
identifier[logging] . identifier[debug] ( literal[string] )
identifier[include_file_glob_list] =[
identifier[p] keyword[for] identifier[p] keyword[in] identifier[include_glob_list] keyword[if] keyword[not] identifier[p] . identifier[endswith] ( identifier[os] . identifier[path] . identifier[sep] )
]
identifier[exclude_file_glob_list] =[
identifier[p] keyword[for] identifier[p] keyword[in] identifier[exclude_glob_list] keyword[if] keyword[not] identifier[p] . identifier[endswith] ( identifier[os] . identifier[path] . identifier[sep] )
]
identifier[include_dir_glob_list] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[include_glob_list] keyword[if] identifier[p] . identifier[endswith] ( identifier[os] . identifier[path] . identifier[sep] )]
identifier[exclude_dir_glob_list] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[exclude_glob_list] keyword[if] identifier[p] . identifier[endswith] ( identifier[os] . identifier[path] . identifier[sep] )]
keyword[for] identifier[path] keyword[in] identifier[path_list] :
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ):
identifier[file_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[path] )[ literal[int] ]
keyword[if] keyword[not] identifier[_is_filtered] (
identifier[file_name] , identifier[include_file_glob_list] , identifier[exclude_file_glob_list]
):
keyword[yield] identifier[path]
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[yield] keyword[from] identifier[_filtered_walk] (
identifier[path] ,
identifier[include_dir_glob_list] ,
identifier[exclude_dir_glob_list] ,
identifier[include_file_glob_list] ,
identifier[exclude_file_glob_list] ,
identifier[return_dir_paths] ,
identifier[recursive] ,
)
keyword[else] :
keyword[if] keyword[not] identifier[ignore_invalid] :
keyword[raise] identifier[EnvironmentError] ( literal[int] , literal[string] , identifier[path] ) | def path_generator(path_list, include_glob_list=None, exclude_glob_list=None, recursive=True, ignore_invalid=False, default_excludes=True, return_dir_paths=False):
"""# language=rst.
Args:
path_list: list of str
List of file- and dir paths. File paths are used directly and dirs are searched
for files.
``path_list`` does not accept glob patterns, as it's more convenient to let the
shell expand glob patterns to directly specified files and dirs. E.g., to use a
glob to select all .py files in a subdir, the command may be called with
sub/dir/*.py, which the shell expands to a list of files, which are then passed
to this function. The paths should be Unicode or utf-8 strings. Tilde ("~") to
home expansion is performed on the paths.
The shell can also expand glob patterns to dir paths or a mix of file and dir
paths.
include_glob_list: list of str
exclude_glob_list: list of str
Patterns ending with "/" are matched only against dir names. All other patterns
are matched only against file names.
If the include list contains any file patterns, files must match one or more of
the patterns in order to be returned.
If the include list contains any dir patterns, dirs must match one or more of
the patterns in order for the recursive search to descend into them.
The exclude list works in the same way except that matching files and dirs are
excluded instead of included. If both include and exclude lists are specified,
files and dirs must both match the include and not match the exclude patterns in
order to be returned or descended into.
recursive: bool
- **True** (default): Search subdirectories
- **False**: Do not search subdirectories
ignore_invalid: bool
- **True**: Invalid paths in path_list are ignored.
- **False** (default): EnvironmentError is raised if any of the paths in
``path_list`` do not reference an existing file or dir.
default_excludes: bool
- **True**: A list of glob patterns for files and dirs that should typically be
ignored is added to any exclude patterns passed to the function. These
include dirs such as .git and backup files, such as files appended with "~".
- **False**: No files or dirs are excluded by default.
return_dir_paths: bool
- **False**: Only file paths are returned.
- **True**: Directory paths are also returned.
Returns:
File path iterator
Notes:
During iteration, the iterator can be prevented from descending into a directory
by sending a "skip" flag when the iterator yields the directory path. This allows
the client to determine if directories should be iterated by, for instance, which
files are present in the directory. This can be used in conjunction with the
include and exclude glob lists. Note that, in order to receive directory paths
that can be skipped, ``return_dir_paths`` must be set to True.
The regular ``for...in`` syntax does not support sending the "skip" flag back to
the iterator. Instead, use a pattern like:
.. highlight: python
::
itr = file_iterator.file_iter(..., return_dir_paths=True)
try:
path = itr.next()
while True:
skip_dir = determine_if_dir_should_be_skipped(path)
file_path = itr.send(skip_dir)
except KeyboardInterrupt:
raise StopIteration
except StopIteration:
pass
Glob patterns are matched only against file and directory names, not the full
paths.
Paths passed directly in ``path_list`` are not filtered.
The same file can be returned multiple times if ``path_list`` contains duplicated
file paths or dir paths, or dir paths that implicitly include the same subdirs.
``include_glob_list`` and ``exclude_glob_list`` are handy for filtering the files
found in dir searches.
Remember to escape the include and exclude glob patterns on the command line so
that they're not expanded by the shell.
"""
include_glob_list = include_glob_list or []
exclude_glob_list = exclude_glob_list or []
if default_excludes:
exclude_glob_list += DEFAULT_EXCLUDE_GLOB_LIST # depends on [control=['if'], data=[]]
logging.debug('file_iter():')
logging.debug(' paths: {}'.format(', '.join(path_list)))
logging.debug(' include: {}'.format(', '.join(include_glob_list)))
logging.debug(' exclude: {}'.format(', '.join(exclude_glob_list)))
logging.debug(' recursive: {}'.format(recursive))
logging.debug(' ignore_invalid: {}'.format(ignore_invalid))
logging.debug(' default_excludes: {}'.format(default_excludes))
logging.debug(' return_dir_paths: {}'.format(return_dir_paths))
logging.debug('')
include_file_glob_list = [p for p in include_glob_list if not p.endswith(os.path.sep)]
exclude_file_glob_list = [p for p in exclude_glob_list if not p.endswith(os.path.sep)]
include_dir_glob_list = [p for p in include_glob_list if p.endswith(os.path.sep)]
exclude_dir_glob_list = [p for p in exclude_glob_list if p.endswith(os.path.sep)]
for path in path_list:
path = os.path.expanduser(path)
# Return file
if os.path.isfile(path):
file_name = os.path.split(path)[1]
if not _is_filtered(file_name, include_file_glob_list, exclude_file_glob_list):
yield path # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Search directory
elif os.path.isdir(path):
yield from _filtered_walk(path, include_dir_glob_list, exclude_dir_glob_list, include_file_glob_list, exclude_file_glob_list, return_dir_paths, recursive) # depends on [control=['if'], data=[]]
# else:
# # Single directory search
# file_path_iter = os.listdir(path)
#
# skip_dir = None
#
# while True:
# file_or_dir_path = file_path_iter.send(skip_dir)
# file_or_dir_name = os.path.split(file_or_dir_path)[1]
# skip_dir = False
# skip_dir = yield file_or_dir_path
elif not ignore_invalid:
raise EnvironmentError(0, 'Not a valid file or dir path', path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']] |
def get_cwd(self):
"""
Get the working directory. This is usually relative to the directory that contains the template. If a Docker
volume location is specified, it takes preference
All Lambda function code paths are resolved relative to this working directory
:return string: Working directory
"""
cwd = os.path.dirname(os.path.abspath(self._template_file))
if self._docker_volume_basedir:
cwd = self._docker_volume_basedir
return cwd | def function[get_cwd, parameter[self]]:
constant[
Get the working directory. This is usually relative to the directory that contains the template. If a Docker
volume location is specified, it takes preference
All Lambda function code paths are resolved relative to this working directory
:return string: Working directory
]
variable[cwd] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[self]._template_file]]]]
if name[self]._docker_volume_basedir begin[:]
variable[cwd] assign[=] name[self]._docker_volume_basedir
return[name[cwd]] | keyword[def] identifier[get_cwd] ( identifier[self] ):
literal[string]
identifier[cwd] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[self] . identifier[_template_file] ))
keyword[if] identifier[self] . identifier[_docker_volume_basedir] :
identifier[cwd] = identifier[self] . identifier[_docker_volume_basedir]
keyword[return] identifier[cwd] | def get_cwd(self):
"""
Get the working directory. This is usually relative to the directory that contains the template. If a Docker
volume location is specified, it takes preference
All Lambda function code paths are resolved relative to this working directory
:return string: Working directory
"""
cwd = os.path.dirname(os.path.abspath(self._template_file))
if self._docker_volume_basedir:
cwd = self._docker_volume_basedir # depends on [control=['if'], data=[]]
return cwd |
def get_historical_base_info(event):
"""Gets the base details from the CloudWatch Event."""
data = {
'principalId': get_principal(event),
'userIdentity': get_user_identity(event),
'accountId': event['account'],
'userAgent': event['detail'].get('userAgent'),
'sourceIpAddress': event['detail'].get('sourceIPAddress'),
'requestParameters': event['detail'].get('requestParameters')
}
if event['detail'].get('eventTime'):
data['eventTime'] = event['detail']['eventTime']
if event['detail'].get('eventSource'):
data['eventSource'] = event['detail']['eventSource']
if event['detail'].get('eventName'):
data['eventName'] = event['detail']['eventName']
return data | def function[get_historical_base_info, parameter[event]]:
constant[Gets the base details from the CloudWatch Event.]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1252920>, <ast.Constant object at 0x7da1b1251ae0>, <ast.Constant object at 0x7da1b1252bc0>, <ast.Constant object at 0x7da1b1250730>, <ast.Constant object at 0x7da1b1252830>, <ast.Constant object at 0x7da1b12505b0>], [<ast.Call object at 0x7da1b1250eb0>, <ast.Call object at 0x7da1b1250340>, <ast.Subscript object at 0x7da1b1252380>, <ast.Call object at 0x7da1b1250bb0>, <ast.Call object at 0x7da1b12507c0>, <ast.Call object at 0x7da1b1251b10>]]
if call[call[name[event]][constant[detail]].get, parameter[constant[eventTime]]] begin[:]
call[name[data]][constant[eventTime]] assign[=] call[call[name[event]][constant[detail]]][constant[eventTime]]
if call[call[name[event]][constant[detail]].get, parameter[constant[eventSource]]] begin[:]
call[name[data]][constant[eventSource]] assign[=] call[call[name[event]][constant[detail]]][constant[eventSource]]
if call[call[name[event]][constant[detail]].get, parameter[constant[eventName]]] begin[:]
call[name[data]][constant[eventName]] assign[=] call[call[name[event]][constant[detail]]][constant[eventName]]
return[name[data]] | keyword[def] identifier[get_historical_base_info] ( identifier[event] ):
literal[string]
identifier[data] ={
literal[string] : identifier[get_principal] ( identifier[event] ),
literal[string] : identifier[get_user_identity] ( identifier[event] ),
literal[string] : identifier[event] [ literal[string] ],
literal[string] : identifier[event] [ literal[string] ]. identifier[get] ( literal[string] ),
literal[string] : identifier[event] [ literal[string] ]. identifier[get] ( literal[string] ),
literal[string] : identifier[event] [ literal[string] ]. identifier[get] ( literal[string] )
}
keyword[if] identifier[event] [ literal[string] ]. identifier[get] ( literal[string] ):
identifier[data] [ literal[string] ]= identifier[event] [ literal[string] ][ literal[string] ]
keyword[if] identifier[event] [ literal[string] ]. identifier[get] ( literal[string] ):
identifier[data] [ literal[string] ]= identifier[event] [ literal[string] ][ literal[string] ]
keyword[if] identifier[event] [ literal[string] ]. identifier[get] ( literal[string] ):
identifier[data] [ literal[string] ]= identifier[event] [ literal[string] ][ literal[string] ]
keyword[return] identifier[data] | def get_historical_base_info(event):
"""Gets the base details from the CloudWatch Event."""
data = {'principalId': get_principal(event), 'userIdentity': get_user_identity(event), 'accountId': event['account'], 'userAgent': event['detail'].get('userAgent'), 'sourceIpAddress': event['detail'].get('sourceIPAddress'), 'requestParameters': event['detail'].get('requestParameters')}
if event['detail'].get('eventTime'):
data['eventTime'] = event['detail']['eventTime'] # depends on [control=['if'], data=[]]
if event['detail'].get('eventSource'):
data['eventSource'] = event['detail']['eventSource'] # depends on [control=['if'], data=[]]
if event['detail'].get('eventName'):
data['eventName'] = event['detail']['eventName'] # depends on [control=['if'], data=[]]
return data |
def _combine_sets(self, sets, final_set):
"""
Given a list of set, combine them to create the final set that will be
used to make the final redis call.
"""
self.cls.get_connection().sinterstore(final_set, list(sets))
return final_set | def function[_combine_sets, parameter[self, sets, final_set]]:
constant[
Given a list of set, combine them to create the final set that will be
used to make the final redis call.
]
call[call[name[self].cls.get_connection, parameter[]].sinterstore, parameter[name[final_set], call[name[list], parameter[name[sets]]]]]
return[name[final_set]] | keyword[def] identifier[_combine_sets] ( identifier[self] , identifier[sets] , identifier[final_set] ):
literal[string]
identifier[self] . identifier[cls] . identifier[get_connection] (). identifier[sinterstore] ( identifier[final_set] , identifier[list] ( identifier[sets] ))
keyword[return] identifier[final_set] | def _combine_sets(self, sets, final_set):
"""
Given a list of set, combine them to create the final set that will be
used to make the final redis call.
"""
self.cls.get_connection().sinterstore(final_set, list(sets))
return final_set |
def get_cl_start(self, addr):
"""Return first address belonging to the same cacheline as *addr*."""
return addr >> self.backend.cl_bits << self.backend.cl_bits | def function[get_cl_start, parameter[self, addr]]:
constant[Return first address belonging to the same cacheline as *addr*.]
return[binary_operation[binary_operation[name[addr] <ast.RShift object at 0x7da2590d6a40> name[self].backend.cl_bits] <ast.LShift object at 0x7da2590d69e0> name[self].backend.cl_bits]] | keyword[def] identifier[get_cl_start] ( identifier[self] , identifier[addr] ):
literal[string]
keyword[return] identifier[addr] >> identifier[self] . identifier[backend] . identifier[cl_bits] << identifier[self] . identifier[backend] . identifier[cl_bits] | def get_cl_start(self, addr):
"""Return first address belonging to the same cacheline as *addr*."""
return addr >> self.backend.cl_bits << self.backend.cl_bits |
def role_name_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
role = ET.SubElement(config, "role", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name = ET.SubElement(role, "name")
name = ET.SubElement(name, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[role_name_name, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[role] assign[=] call[name[ET].SubElement, parameter[name[config], constant[role]]]
variable[name] assign[=] call[name[ET].SubElement, parameter[name[role], constant[name]]]
variable[name] assign[=] call[name[ET].SubElement, parameter[name[name], constant[name]]]
name[name].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[role_name_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[role] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name] = identifier[ET] . identifier[SubElement] ( identifier[role] , literal[string] )
identifier[name] = identifier[ET] . identifier[SubElement] ( identifier[name] , literal[string] )
identifier[name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def role_name_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
role = ET.SubElement(config, 'role', xmlns='urn:brocade.com:mgmt:brocade-aaa')
name = ET.SubElement(role, 'name')
name = ET.SubElement(name, 'name')
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def verify_signature(certificate, signing_pub_key=None,
signing_pub_key_passphrase=None):
'''
Verify that ``certificate`` has been signed by ``signing_pub_key``
certificate:
The certificate to verify. Can be a path or string containing a
PEM formatted certificate.
signing_pub_key:
The public key to verify, can be a string or path to a PEM formatted
certificate, csr, or private key.
signing_pub_key_passphrase:
Passphrase to the signing_pub_key if it is an encrypted private key.
CLI Example:
.. code-block:: bash
salt '*' x509.verify_signature /etc/pki/mycert.pem \\
signing_pub_key=/etc/pki/myca.crt
'''
cert = _get_certificate_obj(certificate)
if signing_pub_key:
signing_pub_key = get_public_key(signing_pub_key,
passphrase=signing_pub_key_passphrase, asObj=True)
return bool(cert.verify(pkey=signing_pub_key) == 1) | def function[verify_signature, parameter[certificate, signing_pub_key, signing_pub_key_passphrase]]:
constant[
Verify that ``certificate`` has been signed by ``signing_pub_key``
certificate:
The certificate to verify. Can be a path or string containing a
PEM formatted certificate.
signing_pub_key:
The public key to verify, can be a string or path to a PEM formatted
certificate, csr, or private key.
signing_pub_key_passphrase:
Passphrase to the signing_pub_key if it is an encrypted private key.
CLI Example:
.. code-block:: bash
salt '*' x509.verify_signature /etc/pki/mycert.pem \
signing_pub_key=/etc/pki/myca.crt
]
variable[cert] assign[=] call[name[_get_certificate_obj], parameter[name[certificate]]]
if name[signing_pub_key] begin[:]
variable[signing_pub_key] assign[=] call[name[get_public_key], parameter[name[signing_pub_key]]]
return[call[name[bool], parameter[compare[call[name[cert].verify, parameter[]] equal[==] constant[1]]]]] | keyword[def] identifier[verify_signature] ( identifier[certificate] , identifier[signing_pub_key] = keyword[None] ,
identifier[signing_pub_key_passphrase] = keyword[None] ):
literal[string]
identifier[cert] = identifier[_get_certificate_obj] ( identifier[certificate] )
keyword[if] identifier[signing_pub_key] :
identifier[signing_pub_key] = identifier[get_public_key] ( identifier[signing_pub_key] ,
identifier[passphrase] = identifier[signing_pub_key_passphrase] , identifier[asObj] = keyword[True] )
keyword[return] identifier[bool] ( identifier[cert] . identifier[verify] ( identifier[pkey] = identifier[signing_pub_key] )== literal[int] ) | def verify_signature(certificate, signing_pub_key=None, signing_pub_key_passphrase=None):
"""
Verify that ``certificate`` has been signed by ``signing_pub_key``
certificate:
The certificate to verify. Can be a path or string containing a
PEM formatted certificate.
signing_pub_key:
The public key to verify, can be a string or path to a PEM formatted
certificate, csr, or private key.
signing_pub_key_passphrase:
Passphrase to the signing_pub_key if it is an encrypted private key.
CLI Example:
.. code-block:: bash
salt '*' x509.verify_signature /etc/pki/mycert.pem \\
signing_pub_key=/etc/pki/myca.crt
"""
cert = _get_certificate_obj(certificate)
if signing_pub_key:
signing_pub_key = get_public_key(signing_pub_key, passphrase=signing_pub_key_passphrase, asObj=True) # depends on [control=['if'], data=[]]
return bool(cert.verify(pkey=signing_pub_key) == 1) |
def load_sgraph(filename, format='binary', delimiter='auto'):
"""
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format to of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those
format has default delimiter, but sometimes it is useful to
overwrite the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph')
"""
if not format in ['binary', 'snap', 'csv', 'tsv']:
raise ValueError('Invalid format: %s' % format)
with cython_context():
g = None
if format is 'binary':
proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
g = SGraph(_proxy=proxy)
elif format is 'snap':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
header=False, column_type_hints=int)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format is 'csv':
if delimiter == 'auto':
delimiter = ','
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format is 'tsv':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
g.summary() # materialize
return g | def function[load_sgraph, parameter[filename, format, delimiter]]:
constant[
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format to of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those
format has default delimiter, but sometimes it is useful to
overwrite the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph')
]
if <ast.UnaryOp object at 0x7da1b2058f40> begin[:]
<ast.Raise object at 0x7da1b2058130>
with call[name[cython_context], parameter[]] begin[:]
variable[g] assign[=] constant[None]
if compare[name[format] is constant[binary]] begin[:]
variable[proxy] assign[=] call[call[name[glconnect].get_unity, parameter[]].load_graph, parameter[call[name[_make_internal_url], parameter[name[filename]]]]]
variable[g] assign[=] call[name[SGraph], parameter[]]
call[name[g].summary, parameter[]]
return[name[g]] | keyword[def] identifier[load_sgraph] ( identifier[filename] , identifier[format] = literal[string] , identifier[delimiter] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[format] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] % identifier[format] )
keyword[with] identifier[cython_context] ():
identifier[g] = keyword[None]
keyword[if] identifier[format] keyword[is] literal[string] :
identifier[proxy] = identifier[glconnect] . identifier[get_unity] (). identifier[load_graph] ( identifier[_make_internal_url] ( identifier[filename] ))
identifier[g] = identifier[SGraph] ( identifier[_proxy] = identifier[proxy] )
keyword[elif] identifier[format] keyword[is] literal[string] :
keyword[if] identifier[delimiter] == literal[string] :
identifier[delimiter] = literal[string]
identifier[sf] = identifier[SFrame] . identifier[read_csv] ( identifier[filename] , identifier[comment_char] = literal[string] , identifier[delimiter] = identifier[delimiter] ,
identifier[header] = keyword[False] , identifier[column_type_hints] = identifier[int] )
identifier[g] = identifier[SGraph] (). identifier[add_edges] ( identifier[sf] , literal[string] , literal[string] )
keyword[elif] identifier[format] keyword[is] literal[string] :
keyword[if] identifier[delimiter] == literal[string] :
identifier[delimiter] = literal[string]
identifier[sf] = identifier[SFrame] . identifier[read_csv] ( identifier[filename] , identifier[header] = keyword[False] , identifier[delimiter] = identifier[delimiter] )
identifier[g] = identifier[SGraph] (). identifier[add_edges] ( identifier[sf] , literal[string] , literal[string] )
keyword[elif] identifier[format] keyword[is] literal[string] :
keyword[if] identifier[delimiter] == literal[string] :
identifier[delimiter] = literal[string]
identifier[sf] = identifier[SFrame] . identifier[read_csv] ( identifier[filename] , identifier[header] = keyword[False] , identifier[delimiter] = identifier[delimiter] )
identifier[g] = identifier[SGraph] (). identifier[add_edges] ( identifier[sf] , literal[string] , literal[string] )
identifier[g] . identifier[summary] ()
keyword[return] identifier[g] | def load_sgraph(filename, format='binary', delimiter='auto'):
"""
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format to of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those
format has default delimiter, but sometimes it is useful to
overwrite the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = turicreate.load_sgraph('mygraph')
"""
if not format in ['binary', 'snap', 'csv', 'tsv']:
raise ValueError('Invalid format: %s' % format) # depends on [control=['if'], data=[]]
with cython_context():
g = None
if format is 'binary':
proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
g = SGraph(_proxy=proxy) # depends on [control=['if'], data=[]]
elif format is 'snap':
if delimiter == 'auto':
delimiter = '\t' # depends on [control=['if'], data=['delimiter']]
sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter, header=False, column_type_hints=int)
g = SGraph().add_edges(sf, 'X1', 'X2') # depends on [control=['if'], data=[]]
elif format is 'csv':
if delimiter == 'auto':
delimiter = ',' # depends on [control=['if'], data=['delimiter']]
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2') # depends on [control=['if'], data=[]]
elif format is 'tsv':
if delimiter == 'auto':
delimiter = '\t' # depends on [control=['if'], data=['delimiter']]
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2') # depends on [control=['if'], data=[]]
g.summary() # materialize
return g # depends on [control=['with'], data=[]] |
def cublasSsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for real symmetric-banded matrix.
"""
status = _libcublas.cublasSsbmv_v2(handle,
_CUBLAS_FILL_MODE[uplo], n, k,
ctypes.byref(ctypes.c_float(alpha)),
int(A), lda, int(x), incx,
ctypes.byref(ctypes.c_float(beta)),
int(y), incy)
cublasCheckStatus(status) | def function[cublasSsbmv, parameter[handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy]]:
constant[
Matrix-vector product for real symmetric-banded matrix.
]
variable[status] assign[=] call[name[_libcublas].cublasSsbmv_v2, parameter[name[handle], call[name[_CUBLAS_FILL_MODE]][name[uplo]], name[n], name[k], call[name[ctypes].byref, parameter[call[name[ctypes].c_float, parameter[name[alpha]]]]], call[name[int], parameter[name[A]]], name[lda], call[name[int], parameter[name[x]]], name[incx], call[name[ctypes].byref, parameter[call[name[ctypes].c_float, parameter[name[beta]]]]], call[name[int], parameter[name[y]]], name[incy]]]
call[name[cublasCheckStatus], parameter[name[status]]] | keyword[def] identifier[cublasSsbmv] ( identifier[handle] , identifier[uplo] , identifier[n] , identifier[k] , identifier[alpha] , identifier[A] , identifier[lda] , identifier[x] , identifier[incx] , identifier[beta] , identifier[y] , identifier[incy] ):
literal[string]
identifier[status] = identifier[_libcublas] . identifier[cublasSsbmv_v2] ( identifier[handle] ,
identifier[_CUBLAS_FILL_MODE] [ identifier[uplo] ], identifier[n] , identifier[k] ,
identifier[ctypes] . identifier[byref] ( identifier[ctypes] . identifier[c_float] ( identifier[alpha] )),
identifier[int] ( identifier[A] ), identifier[lda] , identifier[int] ( identifier[x] ), identifier[incx] ,
identifier[ctypes] . identifier[byref] ( identifier[ctypes] . identifier[c_float] ( identifier[beta] )),
identifier[int] ( identifier[y] ), identifier[incy] )
identifier[cublasCheckStatus] ( identifier[status] ) | def cublasSsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for real symmetric-banded matrix.
"""
status = _libcublas.cublasSsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_float(beta)), int(y), incy)
cublasCheckStatus(status) |
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters | def function[initialize_parameters, parameter[n_x, n_h, n_y]]:
constant[
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
]
call[name[np].random.seed, parameter[constant[2]]]
variable[W1] assign[=] binary_operation[call[name[np].random.randn, parameter[name[n_h], name[n_x]]] * constant[0.01]]
variable[b1] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b2674eb0>, <ast.Constant object at 0x7da1b2674ee0>]]]]
variable[W2] assign[=] binary_operation[call[name[np].random.randn, parameter[name[n_y], name[n_h]]] * constant[0.01]]
variable[b2] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b26745b0>, <ast.Constant object at 0x7da1b26769e0>]]]]
assert[compare[name[W1].shape equal[==] tuple[[<ast.Name object at 0x7da18fe90df0>, <ast.Name object at 0x7da18fe926b0>]]]]
assert[compare[name[b1].shape equal[==] tuple[[<ast.Name object at 0x7da18fe91570>, <ast.Constant object at 0x7da18fe916c0>]]]]
assert[compare[name[W2].shape equal[==] tuple[[<ast.Name object at 0x7da18fe92650>, <ast.Name object at 0x7da18fe90580>]]]]
assert[compare[name[b2].shape equal[==] tuple[[<ast.Name object at 0x7da18fe929b0>, <ast.Constant object at 0x7da18fe92fb0>]]]]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da18fe91db0>, <ast.Constant object at 0x7da18fe90670>, <ast.Constant object at 0x7da18fe92290>, <ast.Constant object at 0x7da18fe93d30>], [<ast.Name object at 0x7da18fe90130>, <ast.Name object at 0x7da18fe90790>, <ast.Name object at 0x7da18fe934c0>, <ast.Name object at 0x7da18fe91330>]]
return[name[parameters]] | keyword[def] identifier[initialize_parameters] ( identifier[n_x] , identifier[n_h] , identifier[n_y] ):
literal[string]
identifier[np] . identifier[random] . identifier[seed] ( literal[int] )
identifier[W1] = identifier[np] . identifier[random] . identifier[randn] ( identifier[n_h] , identifier[n_x] )* literal[int]
identifier[b1] = identifier[np] . identifier[zeros] (( identifier[n_h] , literal[int] ))
identifier[W2] = identifier[np] . identifier[random] . identifier[randn] ( identifier[n_y] , identifier[n_h] )* literal[int]
identifier[b2] = identifier[np] . identifier[zeros] (( identifier[n_y] , literal[int] ))
keyword[assert] ( identifier[W1] . identifier[shape] ==( identifier[n_h] , identifier[n_x] ))
keyword[assert] ( identifier[b1] . identifier[shape] ==( identifier[n_h] , literal[int] ))
keyword[assert] ( identifier[W2] . identifier[shape] ==( identifier[n_y] , identifier[n_h] ))
keyword[assert] ( identifier[b2] . identifier[shape] ==( identifier[n_y] , literal[int] ))
identifier[parameters] ={ literal[string] : identifier[W1] ,
literal[string] : identifier[b1] ,
literal[string] : identifier[W2] ,
literal[string] : identifier[b2] }
keyword[return] identifier[parameters] | def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
assert W1.shape == (n_h, n_x)
assert b1.shape == (n_h, 1)
assert W2.shape == (n_y, n_h)
assert b2.shape == (n_y, 1)
parameters = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
return parameters |
def _propagate_param_grad(self, parray, garray):
"""
For propagating the param_array and gradient_array.
This ensures the in memory view of each subsequent array.
1.) connect param_array of children to self.param_array
2.) tell all children to propagate further
"""
#if self.param_array.size != self.size:
# self._param_array_ = np.empty(self.size, dtype=np.float64)
#if self.gradient.size != self.size:
# self._gradient_array_ = np.empty(self.size, dtype=np.float64)
pi_old_size = 0
for pi in self.parameters:
pislice = slice(pi_old_size, pi_old_size + pi.size)
self.param_array[pislice] = pi.param_array.flat # , requirements=['C', 'W']).flat
self.gradient_full[pislice] = pi.gradient_full.flat # , requirements=['C', 'W']).flat
pi.param_array.data = parray[pislice].data
pi.gradient_full.data = garray[pislice].data
pi._propagate_param_grad(parray[pislice], garray[pislice])
pi_old_size += pi.size
self._model_initialized_ = True | def function[_propagate_param_grad, parameter[self, parray, garray]]:
constant[
For propagating the param_array and gradient_array.
This ensures the in memory view of each subsequent array.
1.) connect param_array of children to self.param_array
2.) tell all children to propagate further
]
variable[pi_old_size] assign[=] constant[0]
for taget[name[pi]] in starred[name[self].parameters] begin[:]
variable[pislice] assign[=] call[name[slice], parameter[name[pi_old_size], binary_operation[name[pi_old_size] + name[pi].size]]]
call[name[self].param_array][name[pislice]] assign[=] name[pi].param_array.flat
call[name[self].gradient_full][name[pislice]] assign[=] name[pi].gradient_full.flat
name[pi].param_array.data assign[=] call[name[parray]][name[pislice]].data
name[pi].gradient_full.data assign[=] call[name[garray]][name[pislice]].data
call[name[pi]._propagate_param_grad, parameter[call[name[parray]][name[pislice]], call[name[garray]][name[pislice]]]]
<ast.AugAssign object at 0x7da1b0dc18a0>
name[self]._model_initialized_ assign[=] constant[True] | keyword[def] identifier[_propagate_param_grad] ( identifier[self] , identifier[parray] , identifier[garray] ):
literal[string]
identifier[pi_old_size] = literal[int]
keyword[for] identifier[pi] keyword[in] identifier[self] . identifier[parameters] :
identifier[pislice] = identifier[slice] ( identifier[pi_old_size] , identifier[pi_old_size] + identifier[pi] . identifier[size] )
identifier[self] . identifier[param_array] [ identifier[pislice] ]= identifier[pi] . identifier[param_array] . identifier[flat]
identifier[self] . identifier[gradient_full] [ identifier[pislice] ]= identifier[pi] . identifier[gradient_full] . identifier[flat]
identifier[pi] . identifier[param_array] . identifier[data] = identifier[parray] [ identifier[pislice] ]. identifier[data]
identifier[pi] . identifier[gradient_full] . identifier[data] = identifier[garray] [ identifier[pislice] ]. identifier[data]
identifier[pi] . identifier[_propagate_param_grad] ( identifier[parray] [ identifier[pislice] ], identifier[garray] [ identifier[pislice] ])
identifier[pi_old_size] += identifier[pi] . identifier[size]
identifier[self] . identifier[_model_initialized_] = keyword[True] | def _propagate_param_grad(self, parray, garray):
"""
For propagating the param_array and gradient_array.
This ensures the in memory view of each subsequent array.
1.) connect param_array of children to self.param_array
2.) tell all children to propagate further
"""
#if self.param_array.size != self.size:
# self._param_array_ = np.empty(self.size, dtype=np.float64)
#if self.gradient.size != self.size:
# self._gradient_array_ = np.empty(self.size, dtype=np.float64)
pi_old_size = 0
for pi in self.parameters:
pislice = slice(pi_old_size, pi_old_size + pi.size)
self.param_array[pislice] = pi.param_array.flat # , requirements=['C', 'W']).flat
self.gradient_full[pislice] = pi.gradient_full.flat # , requirements=['C', 'W']).flat
pi.param_array.data = parray[pislice].data
pi.gradient_full.data = garray[pislice].data
pi._propagate_param_grad(parray[pislice], garray[pislice])
pi_old_size += pi.size # depends on [control=['for'], data=['pi']]
self._model_initialized_ = True |
def read(varin, fname='MS2_L10.mat.txt'):
'''Read in dataset for variable var
:param varin: Variable for which to read in data.
'''
# # fname = 'MS09_L10.mat.txt'
# # fname = 'MS09_L05.mat.txt' # has PAR
# fname = 'MS2_L10.mat.txt' # empty PAR
d = np.loadtxt(fname, comments='*')
if fname == 'MS2_L10.mat.txt':
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
elif (fname == 'MS09_L05.mat.txt') or (fname == 'MS09_L10.mat.txt') or (fname == 'MS08_L12.mat.txt'):
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'voltage 4', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
# return data for variable varin
return d[:, 0], d[:, 1], d[:, 2], d[:, var.index(varin)] | def function[read, parameter[varin, fname]]:
constant[Read in dataset for variable var
:param varin: Variable for which to read in data.
]
variable[d] assign[=] call[name[np].loadtxt, parameter[name[fname]]]
if compare[name[fname] equal[==] constant[MS2_L10.mat.txt]] begin[:]
variable[var] assign[=] list[[<ast.Constant object at 0x7da2047eb7f0>, <ast.Constant object at 0x7da2047ea7a0>, <ast.Constant object at 0x7da2047e9150>, <ast.Constant object at 0x7da2047e9f60>, <ast.Constant object at 0x7da2047ebb20>, <ast.Constant object at 0x7da2047e9060>, <ast.Constant object at 0x7da2047ea4d0>, <ast.Constant object at 0x7da2047ebaf0>, <ast.Constant object at 0x7da2047e9480>, <ast.Constant object at 0x7da2047ebb50>, <ast.Constant object at 0x7da2047e8310>, <ast.Constant object at 0x7da2047e9840>, <ast.Constant object at 0x7da204566230>, <ast.Constant object at 0x7da204566d10>, <ast.Constant object at 0x7da204567a60>, <ast.Constant object at 0x7da204567f10>, <ast.Constant object at 0x7da2045675e0>, <ast.Constant object at 0x7da204567c10>, <ast.Constant object at 0x7da204565900>]]
return[tuple[[<ast.Subscript object at 0x7da18f7205e0>, <ast.Subscript object at 0x7da18fe93bb0>, <ast.Subscript object at 0x7da18fe928f0>, <ast.Subscript object at 0x7da18fe932e0>]]] | keyword[def] identifier[read] ( identifier[varin] , identifier[fname] = literal[string] ):
literal[string]
identifier[d] = identifier[np] . identifier[loadtxt] ( identifier[fname] , identifier[comments] = literal[string] )
keyword[if] identifier[fname] == literal[string] :
identifier[var] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[elif] ( identifier[fname] == literal[string] ) keyword[or] ( identifier[fname] == literal[string] ) keyword[or] ( identifier[fname] == literal[string] ):
identifier[var] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[return] identifier[d] [:, literal[int] ], identifier[d] [:, literal[int] ], identifier[d] [:, literal[int] ], identifier[d] [:, identifier[var] . identifier[index] ( identifier[varin] )] | def read(varin, fname='MS2_L10.mat.txt'):
"""Read in dataset for variable var
:param varin: Variable for which to read in data.
"""
# # fname = 'MS09_L10.mat.txt'
# # fname = 'MS09_L05.mat.txt' # has PAR
# fname = 'MS2_L10.mat.txt' # empty PAR
d = np.loadtxt(fname, comments='*')
if fname == 'MS2_L10.mat.txt':
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen', 'voltage 2', 'voltage 3', 'fluorescence-CDOM', 'fluorescence-ECO', 'turbidity', 'pressure', 'salinity', 'RINKO temperature', 'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR'] # depends on [control=['if'], data=[]]
elif fname == 'MS09_L05.mat.txt' or fname == 'MS09_L10.mat.txt' or fname == 'MS08_L12.mat.txt':
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen', 'voltage 2', 'voltage 3', 'voltage 4', 'fluorescence-CDOM', 'fluorescence-ECO', 'turbidity', 'pressure', 'salinity', 'RINKO temperature', 'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR'] # depends on [control=['if'], data=[]]
# return data for variable varin
return (d[:, 0], d[:, 1], d[:, 2], d[:, var.index(varin)]) |
def extractResponse(cls, request, data):
"""Take a C{L{SRegRequest}} and a dictionary of simple
registration values and create a C{L{SRegResponse}}
object containing that data.
@param request: The simple registration request object
@type request: SRegRequest
@param data: The simple registration data for this
response, as a dictionary from unqualified simple
registration field name to string (unicode) value. For
instance, the nickname should be stored under the key
'nickname'.
@type data: {str:str}
@returns: a simple registration response object
@rtype: SRegResponse
"""
self = cls()
self.ns_uri = request.ns_uri
for field in request.allRequestedFields():
value = data.get(field)
if value is not None:
self.data[field] = value
return self | def function[extractResponse, parameter[cls, request, data]]:
constant[Take a C{L{SRegRequest}} and a dictionary of simple
registration values and create a C{L{SRegResponse}}
object containing that data.
@param request: The simple registration request object
@type request: SRegRequest
@param data: The simple registration data for this
response, as a dictionary from unqualified simple
registration field name to string (unicode) value. For
instance, the nickname should be stored under the key
'nickname'.
@type data: {str:str}
@returns: a simple registration response object
@rtype: SRegResponse
]
variable[self] assign[=] call[name[cls], parameter[]]
name[self].ns_uri assign[=] name[request].ns_uri
for taget[name[field]] in starred[call[name[request].allRequestedFields, parameter[]]] begin[:]
variable[value] assign[=] call[name[data].get, parameter[name[field]]]
if compare[name[value] is_not constant[None]] begin[:]
call[name[self].data][name[field]] assign[=] name[value]
return[name[self]] | keyword[def] identifier[extractResponse] ( identifier[cls] , identifier[request] , identifier[data] ):
literal[string]
identifier[self] = identifier[cls] ()
identifier[self] . identifier[ns_uri] = identifier[request] . identifier[ns_uri]
keyword[for] identifier[field] keyword[in] identifier[request] . identifier[allRequestedFields] ():
identifier[value] = identifier[data] . identifier[get] ( identifier[field] )
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[data] [ identifier[field] ]= identifier[value]
keyword[return] identifier[self] | def extractResponse(cls, request, data):
"""Take a C{L{SRegRequest}} and a dictionary of simple
registration values and create a C{L{SRegResponse}}
object containing that data.
@param request: The simple registration request object
@type request: SRegRequest
@param data: The simple registration data for this
response, as a dictionary from unqualified simple
registration field name to string (unicode) value. For
instance, the nickname should be stored under the key
'nickname'.
@type data: {str:str}
@returns: a simple registration response object
@rtype: SRegResponse
"""
self = cls()
self.ns_uri = request.ns_uri
for field in request.allRequestedFields():
value = data.get(field)
if value is not None:
self.data[field] = value # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=['field']]
return self |
def ValidOptions(cls):
"""Returns a list of valid option names."""
valid_options = []
for obj_name in dir(cls):
obj = getattr(cls, obj_name)
if inspect.isclass(obj) and issubclass(obj, cls.OptionBase):
valid_options.append(obj_name)
return valid_options | def function[ValidOptions, parameter[cls]]:
constant[Returns a list of valid option names.]
variable[valid_options] assign[=] list[[]]
for taget[name[obj_name]] in starred[call[name[dir], parameter[name[cls]]]] begin[:]
variable[obj] assign[=] call[name[getattr], parameter[name[cls], name[obj_name]]]
if <ast.BoolOp object at 0x7da1b175f340> begin[:]
call[name[valid_options].append, parameter[name[obj_name]]]
return[name[valid_options]] | keyword[def] identifier[ValidOptions] ( identifier[cls] ):
literal[string]
identifier[valid_options] =[]
keyword[for] identifier[obj_name] keyword[in] identifier[dir] ( identifier[cls] ):
identifier[obj] = identifier[getattr] ( identifier[cls] , identifier[obj_name] )
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[obj] ) keyword[and] identifier[issubclass] ( identifier[obj] , identifier[cls] . identifier[OptionBase] ):
identifier[valid_options] . identifier[append] ( identifier[obj_name] )
keyword[return] identifier[valid_options] | def ValidOptions(cls):
"""Returns a list of valid option names."""
valid_options = []
for obj_name in dir(cls):
obj = getattr(cls, obj_name)
if inspect.isclass(obj) and issubclass(obj, cls.OptionBase):
valid_options.append(obj_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj_name']]
return valid_options |
def _on_progress(adapter, operation, conn_id, done, total):
"""Callback when progress is reported."""
conn_string = adapter._get_property(conn_id, 'connection_string')
if conn_string is None:
return
adapter.notify_progress(conn_string, operation, done, total) | def function[_on_progress, parameter[adapter, operation, conn_id, done, total]]:
constant[Callback when progress is reported.]
variable[conn_string] assign[=] call[name[adapter]._get_property, parameter[name[conn_id], constant[connection_string]]]
if compare[name[conn_string] is constant[None]] begin[:]
return[None]
call[name[adapter].notify_progress, parameter[name[conn_string], name[operation], name[done], name[total]]] | keyword[def] identifier[_on_progress] ( identifier[adapter] , identifier[operation] , identifier[conn_id] , identifier[done] , identifier[total] ):
literal[string]
identifier[conn_string] = identifier[adapter] . identifier[_get_property] ( identifier[conn_id] , literal[string] )
keyword[if] identifier[conn_string] keyword[is] keyword[None] :
keyword[return]
identifier[adapter] . identifier[notify_progress] ( identifier[conn_string] , identifier[operation] , identifier[done] , identifier[total] ) | def _on_progress(adapter, operation, conn_id, done, total):
"""Callback when progress is reported."""
conn_string = adapter._get_property(conn_id, 'connection_string')
if conn_string is None:
return # depends on [control=['if'], data=[]]
adapter.notify_progress(conn_string, operation, done, total) |
def get_instance_state(self, instances=None):
"""Get states of all instances on EC2 which were started by this file."""
if instances:
desc = self.client.describe_instances(InstanceIds=instances)
else:
desc = self.client.describe_instances(InstanceIds=self.instances)
# pprint.pprint(desc['Reservations'],indent=4)
for i in range(len(desc['Reservations'])):
instance = desc['Reservations'][i]['Instances'][0]
self.instance_states[instance['InstanceId']] = instance['State']['Name']
return self.instance_states | def function[get_instance_state, parameter[self, instances]]:
constant[Get states of all instances on EC2 which were started by this file.]
if name[instances] begin[:]
variable[desc] assign[=] call[name[self].client.describe_instances, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[desc]][constant[Reservations]]]]]]] begin[:]
variable[instance] assign[=] call[call[call[call[name[desc]][constant[Reservations]]][name[i]]][constant[Instances]]][constant[0]]
call[name[self].instance_states][call[name[instance]][constant[InstanceId]]] assign[=] call[call[name[instance]][constant[State]]][constant[Name]]
return[name[self].instance_states] | keyword[def] identifier[get_instance_state] ( identifier[self] , identifier[instances] = keyword[None] ):
literal[string]
keyword[if] identifier[instances] :
identifier[desc] = identifier[self] . identifier[client] . identifier[describe_instances] ( identifier[InstanceIds] = identifier[instances] )
keyword[else] :
identifier[desc] = identifier[self] . identifier[client] . identifier[describe_instances] ( identifier[InstanceIds] = identifier[self] . identifier[instances] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[desc] [ literal[string] ])):
identifier[instance] = identifier[desc] [ literal[string] ][ identifier[i] ][ literal[string] ][ literal[int] ]
identifier[self] . identifier[instance_states] [ identifier[instance] [ literal[string] ]]= identifier[instance] [ literal[string] ][ literal[string] ]
keyword[return] identifier[self] . identifier[instance_states] | def get_instance_state(self, instances=None):
"""Get states of all instances on EC2 which were started by this file."""
if instances:
desc = self.client.describe_instances(InstanceIds=instances) # depends on [control=['if'], data=[]]
else:
desc = self.client.describe_instances(InstanceIds=self.instances)
# pprint.pprint(desc['Reservations'],indent=4)
for i in range(len(desc['Reservations'])):
instance = desc['Reservations'][i]['Instances'][0]
self.instance_states[instance['InstanceId']] = instance['State']['Name'] # depends on [control=['for'], data=['i']]
return self.instance_states |
def send_to_output(master_dict, mash_output, sample_id, assembly_file):
"""Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
The name of the sample being parse to .report.json file
Returns
-------
"""
plot_dict = {}
# create a new file only if master_dict is populated
if master_dict:
out_file = open("{}.json".format(
"".join(mash_output.split(".")[0])), "w")
out_file.write(json.dumps(master_dict))
out_file.close()
# iterate through master_dict in order to make contigs the keys
for k,v in master_dict.items():
if not v[2] in plot_dict:
plot_dict[v[2]] = [k]
else:
plot_dict[v[2]].append(k)
number_hits = len(master_dict)
else:
number_hits = 0
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [{
"header": "Mash Dist",
"table": "plasmids",
"patlas_mashdist": master_dict,
"value": number_hits
}]
}],
"plotData": [{
"sample": sample_id,
"data": {
"patlasMashDistXrange": plot_dict
},
"assemblyFile": assembly_file
}]
}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":"))) | def function[send_to_output, parameter[master_dict, mash_output, sample_id, assembly_file]]:
constant[Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
The name of the sample being parse to .report.json file
Returns
-------
]
variable[plot_dict] assign[=] dictionary[[], []]
if name[master_dict] begin[:]
variable[out_file] assign[=] call[name[open], parameter[call[constant[{}.json].format, parameter[call[constant[].join, parameter[call[call[name[mash_output].split, parameter[constant[.]]]][constant[0]]]]]], constant[w]]]
call[name[out_file].write, parameter[call[name[json].dumps, parameter[name[master_dict]]]]]
call[name[out_file].close, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b03b9e70>, <ast.Name object at 0x7da1b03b9690>]]] in starred[call[name[master_dict].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b03ba260> begin[:]
call[name[plot_dict]][call[name[v]][constant[2]]] assign[=] list[[<ast.Name object at 0x7da1b0287820>]]
variable[number_hits] assign[=] call[name[len], parameter[name[master_dict]]]
variable[json_dic] assign[=] dictionary[[<ast.Constant object at 0x7da1b0287b50>, <ast.Constant object at 0x7da1b0287d60>], [<ast.List object at 0x7da1b0287df0>, <ast.List object at 0x7da1b0285810>]]
with call[name[open], parameter[constant[.report.json], constant[w]]] begin[:]
call[name[json_report].write, parameter[call[name[json].dumps, parameter[name[json_dic]]]]] | keyword[def] identifier[send_to_output] ( identifier[master_dict] , identifier[mash_output] , identifier[sample_id] , identifier[assembly_file] ):
literal[string]
identifier[plot_dict] ={}
keyword[if] identifier[master_dict] :
identifier[out_file] = identifier[open] ( literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[mash_output] . identifier[split] ( literal[string] )[ literal[int] ])), literal[string] )
identifier[out_file] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[master_dict] ))
identifier[out_file] . identifier[close] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[master_dict] . identifier[items] ():
keyword[if] keyword[not] identifier[v] [ literal[int] ] keyword[in] identifier[plot_dict] :
identifier[plot_dict] [ identifier[v] [ literal[int] ]]=[ identifier[k] ]
keyword[else] :
identifier[plot_dict] [ identifier[v] [ literal[int] ]]. identifier[append] ( identifier[k] )
identifier[number_hits] = identifier[len] ( identifier[master_dict] )
keyword[else] :
identifier[number_hits] = literal[int]
identifier[json_dic] ={
literal[string] :[{
literal[string] : identifier[sample_id] ,
literal[string] :[{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[master_dict] ,
literal[string] : identifier[number_hits]
}]
}],
literal[string] :[{
literal[string] : identifier[sample_id] ,
literal[string] :{
literal[string] : identifier[plot_dict]
},
literal[string] : identifier[assembly_file]
}]
}
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[json_report] :
identifier[json_report] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[json_dic] , identifier[separators] =( literal[string] , literal[string] ))) | def send_to_output(master_dict, mash_output, sample_id, assembly_file):
"""Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
last_seq: str
string that stores the last sequence that was parsed before writing to
file and therefore after the change of query sequence between different
rows on the input file
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
The name of the sample being parse to .report.json file
Returns
-------
"""
plot_dict = {}
# create a new file only if master_dict is populated
if master_dict:
out_file = open('{}.json'.format(''.join(mash_output.split('.')[0])), 'w')
out_file.write(json.dumps(master_dict))
out_file.close()
# iterate through master_dict in order to make contigs the keys
for (k, v) in master_dict.items():
if not v[2] in plot_dict:
plot_dict[v[2]] = [k] # depends on [control=['if'], data=[]]
else:
plot_dict[v[2]].append(k) # depends on [control=['for'], data=[]]
number_hits = len(master_dict) # depends on [control=['if'], data=[]]
else:
number_hits = 0
json_dic = {'tableRow': [{'sample': sample_id, 'data': [{'header': 'Mash Dist', 'table': 'plasmids', 'patlas_mashdist': master_dict, 'value': number_hits}]}], 'plotData': [{'sample': sample_id, 'data': {'patlasMashDistXrange': plot_dict}, 'assemblyFile': assembly_file}]}
with open('.report.json', 'w') as json_report:
json_report.write(json.dumps(json_dic, separators=(',', ':'))) # depends on [control=['with'], data=['json_report']] |
def set_metric(self, slug, value, category=None, expire=None, date=None):
"""Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
"""
keys = self._build_keys(slug, date=date)
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
# Construct a dictionary of key/values for use with mset
data = {}
for k in keys:
data[k] = value
self.r.mset(data)
# Add the category if applicable.
if category:
self._categorize(slug, category)
# Expire the Metric in ``expire`` seconds if applicable.
if expire:
for k in keys:
self.r.expire(k, expire) | def function[set_metric, parameter[self, slug, value, category, expire, date]]:
constant[Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
]
variable[keys] assign[=] call[name[self]._build_keys, parameter[name[slug]]]
call[name[self].r.sadd, parameter[name[self]._metric_slugs_key, name[slug]]]
variable[data] assign[=] dictionary[[], []]
for taget[name[k]] in starred[name[keys]] begin[:]
call[name[data]][name[k]] assign[=] name[value]
call[name[self].r.mset, parameter[name[data]]]
if name[category] begin[:]
call[name[self]._categorize, parameter[name[slug], name[category]]]
if name[expire] begin[:]
for taget[name[k]] in starred[name[keys]] begin[:]
call[name[self].r.expire, parameter[name[k], name[expire]]] | keyword[def] identifier[set_metric] ( identifier[self] , identifier[slug] , identifier[value] , identifier[category] = keyword[None] , identifier[expire] = keyword[None] , identifier[date] = keyword[None] ):
literal[string]
identifier[keys] = identifier[self] . identifier[_build_keys] ( identifier[slug] , identifier[date] = identifier[date] )
identifier[self] . identifier[r] . identifier[sadd] ( identifier[self] . identifier[_metric_slugs_key] , identifier[slug] )
identifier[data] ={}
keyword[for] identifier[k] keyword[in] identifier[keys] :
identifier[data] [ identifier[k] ]= identifier[value]
identifier[self] . identifier[r] . identifier[mset] ( identifier[data] )
keyword[if] identifier[category] :
identifier[self] . identifier[_categorize] ( identifier[slug] , identifier[category] )
keyword[if] identifier[expire] :
keyword[for] identifier[k] keyword[in] identifier[keys] :
identifier[self] . identifier[r] . identifier[expire] ( identifier[k] , identifier[expire] ) | def set_metric(self, slug, value, category=None, expire=None, date=None):
"""Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
"""
keys = self._build_keys(slug, date=date)
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
# Construct a dictionary of key/values for use with mset
data = {}
for k in keys:
data[k] = value # depends on [control=['for'], data=['k']]
self.r.mset(data)
# Add the category if applicable.
if category:
self._categorize(slug, category) # depends on [control=['if'], data=[]]
# Expire the Metric in ``expire`` seconds if applicable.
if expire:
for k in keys:
self.r.expire(k, expire) # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]] |
def zero_dataset(train=False, dev=False, test=False, train_rows=256, dev_rows=64, test_rows=64):
"""
Load the Zero dataset.
The Zero dataset is a simple task of predicting zero from zero. This dataset is useful for
integration testing. The extreme simplicity of the dataset allows for models to learn the task
quickly allowing for quick end-to-end testing.
Args:
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the development split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_rows (int, optional): Number of training rows to generate.
dev_rows (int, optional): Number of development rows to generate.
test_rows (int, optional): Number of test rows to generate.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`:
Returns between one and all dataset splits (train, dev and test) depending on if their
respective boolean argument is ``True``.
Example:
>>> from torchnlp.datasets import zero_dataset
>>> train = zero_dataset(train=True)
>>> train[0:2]
[{'source': '0', 'target': '0'}, {'source': '0', 'target': '0'}]
"""
ret = []
for is_requested, n_rows in [(train, train_rows), (dev, dev_rows), (test, test_rows)]:
if not is_requested:
continue
rows = [{'source': str(0), 'target': str(0)} for i in range(n_rows)]
ret.append(Dataset(rows))
if len(ret) == 1:
return ret[0]
else:
return tuple(ret) | def function[zero_dataset, parameter[train, dev, test, train_rows, dev_rows, test_rows]]:
constant[
Load the Zero dataset.
The Zero dataset is a simple task of predicting zero from zero. This dataset is useful for
integration testing. The extreme simplicity of the dataset allows for models to learn the task
quickly allowing for quick end-to-end testing.
Args:
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the development split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_rows (int, optional): Number of training rows to generate.
dev_rows (int, optional): Number of development rows to generate.
test_rows (int, optional): Number of test rows to generate.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`:
Returns between one and all dataset splits (train, dev and test) depending on if their
respective boolean argument is ``True``.
Example:
>>> from torchnlp.datasets import zero_dataset
>>> train = zero_dataset(train=True)
>>> train[0:2]
[{'source': '0', 'target': '0'}, {'source': '0', 'target': '0'}]
]
variable[ret] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18fe93b20>, <ast.Name object at 0x7da18fe90dc0>]]] in starred[list[[<ast.Tuple object at 0x7da18fe915a0>, <ast.Tuple object at 0x7da18fe92110>, <ast.Tuple object at 0x7da18bcc8640>]]] begin[:]
if <ast.UnaryOp object at 0x7da18bcca5c0> begin[:]
continue
variable[rows] assign[=] <ast.ListComp object at 0x7da18bccaa40>
call[name[ret].append, parameter[call[name[Dataset], parameter[name[rows]]]]]
if compare[call[name[len], parameter[name[ret]]] equal[==] constant[1]] begin[:]
return[call[name[ret]][constant[0]]] | keyword[def] identifier[zero_dataset] ( identifier[train] = keyword[False] , identifier[dev] = keyword[False] , identifier[test] = keyword[False] , identifier[train_rows] = literal[int] , identifier[dev_rows] = literal[int] , identifier[test_rows] = literal[int] ):
literal[string]
identifier[ret] =[]
keyword[for] identifier[is_requested] , identifier[n_rows] keyword[in] [( identifier[train] , identifier[train_rows] ),( identifier[dev] , identifier[dev_rows] ),( identifier[test] , identifier[test_rows] )]:
keyword[if] keyword[not] identifier[is_requested] :
keyword[continue]
identifier[rows] =[{ literal[string] : identifier[str] ( literal[int] ), literal[string] : identifier[str] ( literal[int] )} keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_rows] )]
identifier[ret] . identifier[append] ( identifier[Dataset] ( identifier[rows] ))
keyword[if] identifier[len] ( identifier[ret] )== literal[int] :
keyword[return] identifier[ret] [ literal[int] ]
keyword[else] :
keyword[return] identifier[tuple] ( identifier[ret] ) | def zero_dataset(train=False, dev=False, test=False, train_rows=256, dev_rows=64, test_rows=64):
"""
Load the Zero dataset.
The Zero dataset is a simple task of predicting zero from zero. This dataset is useful for
integration testing. The extreme simplicity of the dataset allows for models to learn the task
quickly allowing for quick end-to-end testing.
Args:
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the development split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_rows (int, optional): Number of training rows to generate.
dev_rows (int, optional): Number of development rows to generate.
test_rows (int, optional): Number of test rows to generate.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`:
Returns between one and all dataset splits (train, dev and test) depending on if their
respective boolean argument is ``True``.
Example:
>>> from torchnlp.datasets import zero_dataset
>>> train = zero_dataset(train=True)
>>> train[0:2]
[{'source': '0', 'target': '0'}, {'source': '0', 'target': '0'}]
"""
ret = []
for (is_requested, n_rows) in [(train, train_rows), (dev, dev_rows), (test, test_rows)]:
if not is_requested:
continue # depends on [control=['if'], data=[]]
rows = [{'source': str(0), 'target': str(0)} for i in range(n_rows)]
ret.append(Dataset(rows)) # depends on [control=['for'], data=[]]
if len(ret) == 1:
return ret[0] # depends on [control=['if'], data=[]]
else:
return tuple(ret) |
def bradykinesia(self, data_frame, method='fft'):
"""
This method calculates the bradykinesia amplitude of the data frame. It accepts two different methods, \
'fft' and 'welch'. First the signal gets re-sampled, dc removed and then high pass filtered.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:param method: fft or welch.
:type method: str
:return ampl: the amplitude of the Bradykinesia
:rtype ampl: float
:return freq: the frequency of the Bradykinesia
:rtype freq: float
"""
try:
data_frame_resampled = self.resample_signal(data_frame)
data_frame_dc = self.dc_remove_signal(data_frame_resampled)
data_frame_filtered = self.filter_signal(data_frame_dc, 'dc_mag_sum_acc')
if method == 'fft':
data_frame_fft = self.fft_signal(data_frame_filtered)
return self.amplitude_by_fft(data_frame_fft)
else:
return self.amplitude_by_welch(data_frame_filtered)
except ValueError as verr:
logging.error("TremorProcessor bradykinesia ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on TemorProcessor bradykinesia: %s", sys.exc_info()[0]) | def function[bradykinesia, parameter[self, data_frame, method]]:
constant[
This method calculates the bradykinesia amplitude of the data frame. It accepts two different methods, 'fft' and 'welch'. First the signal gets re-sampled, dc removed and then high pass filtered.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:param method: fft or welch.
:type method: str
:return ampl: the amplitude of the Bradykinesia
:rtype ampl: float
:return freq: the frequency of the Bradykinesia
:rtype freq: float
]
<ast.Try object at 0x7da18fe90e20> | keyword[def] identifier[bradykinesia] ( identifier[self] , identifier[data_frame] , identifier[method] = literal[string] ):
literal[string]
keyword[try] :
identifier[data_frame_resampled] = identifier[self] . identifier[resample_signal] ( identifier[data_frame] )
identifier[data_frame_dc] = identifier[self] . identifier[dc_remove_signal] ( identifier[data_frame_resampled] )
identifier[data_frame_filtered] = identifier[self] . identifier[filter_signal] ( identifier[data_frame_dc] , literal[string] )
keyword[if] identifier[method] == literal[string] :
identifier[data_frame_fft] = identifier[self] . identifier[fft_signal] ( identifier[data_frame_filtered] )
keyword[return] identifier[self] . identifier[amplitude_by_fft] ( identifier[data_frame_fft] )
keyword[else] :
keyword[return] identifier[self] . identifier[amplitude_by_welch] ( identifier[data_frame_filtered] )
keyword[except] identifier[ValueError] keyword[as] identifier[verr] :
identifier[logging] . identifier[error] ( literal[string] , identifier[verr] . identifier[message] )
keyword[except] :
identifier[logging] . identifier[error] ( literal[string] , identifier[sys] . identifier[exc_info] ()[ literal[int] ]) | def bradykinesia(self, data_frame, method='fft'):
"""
This method calculates the bradykinesia amplitude of the data frame. It accepts two different methods, 'fft' and 'welch'. First the signal gets re-sampled, dc removed and then high pass filtered.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:param method: fft or welch.
:type method: str
:return ampl: the amplitude of the Bradykinesia
:rtype ampl: float
:return freq: the frequency of the Bradykinesia
:rtype freq: float
"""
try:
data_frame_resampled = self.resample_signal(data_frame)
data_frame_dc = self.dc_remove_signal(data_frame_resampled)
data_frame_filtered = self.filter_signal(data_frame_dc, 'dc_mag_sum_acc')
if method == 'fft':
data_frame_fft = self.fft_signal(data_frame_filtered)
return self.amplitude_by_fft(data_frame_fft) # depends on [control=['if'], data=[]]
else:
return self.amplitude_by_welch(data_frame_filtered) # depends on [control=['try'], data=[]]
except ValueError as verr:
logging.error('TremorProcessor bradykinesia ValueError ->%s', verr.message) # depends on [control=['except'], data=['verr']]
except:
logging.error('Unexpected error on TemorProcessor bradykinesia: %s', sys.exc_info()[0]) # depends on [control=['except'], data=[]] |
def getEndpointSubscriptions(self,ep):
'''
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._getURL("/subscriptions/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result | def function[getEndpointSubscriptions, parameter[self, ep]]:
constant[
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
]
variable[result] assign[=] call[name[asyncResult], parameter[]]
name[result].endpoint assign[=] name[ep]
variable[data] assign[=] call[name[self]._getURL, parameter[binary_operation[constant[/subscriptions/] + name[ep]]]]
if compare[name[data].status_code equal[==] constant[200]] begin[:]
name[result].error assign[=] constant[False]
name[result].is_done assign[=] constant[True]
name[result].result assign[=] name[data].content
name[result].raw_data assign[=] name[data].content
name[result].status_code assign[=] name[data].status_code
return[name[result]] | keyword[def] identifier[getEndpointSubscriptions] ( identifier[self] , identifier[ep] ):
literal[string]
identifier[result] = identifier[asyncResult] ()
identifier[result] . identifier[endpoint] = identifier[ep]
identifier[data] = identifier[self] . identifier[_getURL] ( literal[string] + identifier[ep] )
keyword[if] identifier[data] . identifier[status_code] == literal[int] :
identifier[result] . identifier[error] = keyword[False]
identifier[result] . identifier[is_done] = keyword[True]
identifier[result] . identifier[result] = identifier[data] . identifier[content]
keyword[else] :
identifier[result] . identifier[error] = identifier[response_codes] ( literal[string] , identifier[data] . identifier[status_code] )
identifier[result] . identifier[is_done] = keyword[True]
identifier[result] . identifier[raw_data] = identifier[data] . identifier[content]
identifier[result] . identifier[status_code] = identifier[data] . identifier[status_code]
keyword[return] identifier[result] | def getEndpointSubscriptions(self, ep):
"""
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
"""
result = asyncResult()
result.endpoint = ep
data = self._getURL('/subscriptions/' + ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content # depends on [control=['if'], data=[]]
else:
result.error = response_codes('unsubscribe', data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result |
def grid_interpolate(func):
"""
Decorate a profile method that accepts a coordinate grid and returns a data grid.
If an interpolator attribute is associated with the input grid then that interpolator is used to down sample the
coordinate grid prior to calling the function and up sample the result of the function.
If no interpolator attribute is associated with the input grid then the function is called as normal.
Parameters
----------
func
Some method that accepts a grid
Returns
-------
decorated_function
The function with optional interpolation
"""
@wraps(func)
def wrapper(profile, grid, grid_radial_minimum=None, *args, **kwargs):
if hasattr(grid, "interpolator"):
interpolator = grid.interpolator
if grid.interpolator is not None:
values = func(profile, interpolator.interp_grid, grid_radial_minimum, *args, **kwargs)
if values.ndim == 1:
return interpolator.interpolated_values_from_values(values=values)
elif values.ndim == 2:
y_values = interpolator.interpolated_values_from_values(values=values[:, 0])
x_values = interpolator.interpolated_values_from_values(values=values[:, 1])
return np.asarray([y_values, x_values]).T
return func(profile, grid, grid_radial_minimum, *args, **kwargs)
return wrapper | def function[grid_interpolate, parameter[func]]:
constant[
Decorate a profile method that accepts a coordinate grid and returns a data grid.
If an interpolator attribute is associated with the input grid then that interpolator is used to down sample the
coordinate grid prior to calling the function and up sample the result of the function.
If no interpolator attribute is associated with the input grid then the function is called as normal.
Parameters
----------
func
Some method that accepts a grid
Returns
-------
decorated_function
The function with optional interpolation
]
def function[wrapper, parameter[profile, grid, grid_radial_minimum]]:
if call[name[hasattr], parameter[name[grid], constant[interpolator]]] begin[:]
variable[interpolator] assign[=] name[grid].interpolator
if compare[name[grid].interpolator is_not constant[None]] begin[:]
variable[values] assign[=] call[name[func], parameter[name[profile], name[interpolator].interp_grid, name[grid_radial_minimum], <ast.Starred object at 0x7da18f810d30>]]
if compare[name[values].ndim equal[==] constant[1]] begin[:]
return[call[name[interpolator].interpolated_values_from_values, parameter[]]]
return[call[name[func], parameter[name[profile], name[grid], name[grid_radial_minimum], <ast.Starred object at 0x7da18f812800>]]]
return[name[wrapper]] | keyword[def] identifier[grid_interpolate] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ( identifier[profile] , identifier[grid] , identifier[grid_radial_minimum] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[hasattr] ( identifier[grid] , literal[string] ):
identifier[interpolator] = identifier[grid] . identifier[interpolator]
keyword[if] identifier[grid] . identifier[interpolator] keyword[is] keyword[not] keyword[None] :
identifier[values] = identifier[func] ( identifier[profile] , identifier[interpolator] . identifier[interp_grid] , identifier[grid_radial_minimum] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[values] . identifier[ndim] == literal[int] :
keyword[return] identifier[interpolator] . identifier[interpolated_values_from_values] ( identifier[values] = identifier[values] )
keyword[elif] identifier[values] . identifier[ndim] == literal[int] :
identifier[y_values] = identifier[interpolator] . identifier[interpolated_values_from_values] ( identifier[values] = identifier[values] [:, literal[int] ])
identifier[x_values] = identifier[interpolator] . identifier[interpolated_values_from_values] ( identifier[values] = identifier[values] [:, literal[int] ])
keyword[return] identifier[np] . identifier[asarray] ([ identifier[y_values] , identifier[x_values] ]). identifier[T]
keyword[return] identifier[func] ( identifier[profile] , identifier[grid] , identifier[grid_radial_minimum] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def grid_interpolate(func):
"""
Decorate a profile method that accepts a coordinate grid and returns a data grid.
If an interpolator attribute is associated with the input grid then that interpolator is used to down sample the
coordinate grid prior to calling the function and up sample the result of the function.
If no interpolator attribute is associated with the input grid then the function is called as normal.
Parameters
----------
func
Some method that accepts a grid
Returns
-------
decorated_function
The function with optional interpolation
"""
@wraps(func)
def wrapper(profile, grid, grid_radial_minimum=None, *args, **kwargs):
if hasattr(grid, 'interpolator'):
interpolator = grid.interpolator
if grid.interpolator is not None:
values = func(profile, interpolator.interp_grid, grid_radial_minimum, *args, **kwargs)
if values.ndim == 1:
return interpolator.interpolated_values_from_values(values=values) # depends on [control=['if'], data=[]]
elif values.ndim == 2:
y_values = interpolator.interpolated_values_from_values(values=values[:, 0])
x_values = interpolator.interpolated_values_from_values(values=values[:, 1])
return np.asarray([y_values, x_values]).T # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return func(profile, grid, grid_radial_minimum, *args, **kwargs)
return wrapper |
def get_request_query(self):
"""Checks the request for known catalog indexes and converts the values
to fit the type of the catalog index.
:param catalog: The catalog to build the query for
:type catalog: ZCatalog
:returns: Catalog query
:rtype: dict
"""
query = {}
# only known indexes get observed
indexes = self.catalog.get_indexes()
for index in indexes:
# Check if the request contains a parameter named like the index
value = req.get(index)
# No value found, continue
if value is None:
continue
# Convert the found value to format understandable by the index
index_value = self.catalog.to_index_value(value, index)
# Conversion returned None, continue
if index_value is None:
continue
# Append the found value to the query
query[index] = index_value
return query | def function[get_request_query, parameter[self]]:
constant[Checks the request for known catalog indexes and converts the values
to fit the type of the catalog index.
:param catalog: The catalog to build the query for
:type catalog: ZCatalog
:returns: Catalog query
:rtype: dict
]
variable[query] assign[=] dictionary[[], []]
variable[indexes] assign[=] call[name[self].catalog.get_indexes, parameter[]]
for taget[name[index]] in starred[name[indexes]] begin[:]
variable[value] assign[=] call[name[req].get, parameter[name[index]]]
if compare[name[value] is constant[None]] begin[:]
continue
variable[index_value] assign[=] call[name[self].catalog.to_index_value, parameter[name[value], name[index]]]
if compare[name[index_value] is constant[None]] begin[:]
continue
call[name[query]][name[index]] assign[=] name[index_value]
return[name[query]] | keyword[def] identifier[get_request_query] ( identifier[self] ):
literal[string]
identifier[query] ={}
identifier[indexes] = identifier[self] . identifier[catalog] . identifier[get_indexes] ()
keyword[for] identifier[index] keyword[in] identifier[indexes] :
identifier[value] = identifier[req] . identifier[get] ( identifier[index] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[continue]
identifier[index_value] = identifier[self] . identifier[catalog] . identifier[to_index_value] ( identifier[value] , identifier[index] )
keyword[if] identifier[index_value] keyword[is] keyword[None] :
keyword[continue]
identifier[query] [ identifier[index] ]= identifier[index_value]
keyword[return] identifier[query] | def get_request_query(self):
"""Checks the request for known catalog indexes and converts the values
to fit the type of the catalog index.
:param catalog: The catalog to build the query for
:type catalog: ZCatalog
:returns: Catalog query
:rtype: dict
"""
query = {}
# only known indexes get observed
indexes = self.catalog.get_indexes()
for index in indexes:
# Check if the request contains a parameter named like the index
value = req.get(index)
# No value found, continue
if value is None:
continue # depends on [control=['if'], data=[]]
# Convert the found value to format understandable by the index
index_value = self.catalog.to_index_value(value, index)
# Conversion returned None, continue
if index_value is None:
continue # depends on [control=['if'], data=[]]
# Append the found value to the query
query[index] = index_value # depends on [control=['for'], data=['index']]
return query |
def _Backward3_v_Ps(P, s):
"""Backward equation for region 3, v=f(P,s)
Parameters
----------
P : float
Pressure, [MPa]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
v : float
Specific volume, [m³/kg]
"""
if s <= sc:
return _Backward3a_v_Ps(P, s)
else:
return _Backward3b_v_Ps(P, s) | def function[_Backward3_v_Ps, parameter[P, s]]:
constant[Backward equation for region 3, v=f(P,s)
Parameters
----------
P : float
Pressure, [MPa]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
v : float
Specific volume, [m³/kg]
]
if compare[name[s] less_or_equal[<=] name[sc]] begin[:]
return[call[name[_Backward3a_v_Ps], parameter[name[P], name[s]]]] | keyword[def] identifier[_Backward3_v_Ps] ( identifier[P] , identifier[s] ):
literal[string]
keyword[if] identifier[s] <= identifier[sc] :
keyword[return] identifier[_Backward3a_v_Ps] ( identifier[P] , identifier[s] )
keyword[else] :
keyword[return] identifier[_Backward3b_v_Ps] ( identifier[P] , identifier[s] ) | def _Backward3_v_Ps(P, s):
"""Backward equation for region 3, v=f(P,s)
Parameters
----------
P : float
Pressure, [MPa]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
v : float
Specific volume, [m³/kg]
"""
if s <= sc:
return _Backward3a_v_Ps(P, s) # depends on [control=['if'], data=['s']]
else:
return _Backward3b_v_Ps(P, s) |
def coerce(cls, key, value):
"""Convert plain list to MutationList."""
if isinstance(value, string_types):
value = value.strip()
if value[0] == '[': # It's json encoded, probably
try:
value = json.loads(value)
except ValueError:
raise ValueError("Failed to parse JSON: '{}' ".format(value))
else:
value = value.split(',')
if not value:
value = []
self = MutationList((MutationObj.coerce(key, v) for v in value))
self._key = key
return self | def function[coerce, parameter[cls, key, value]]:
constant[Convert plain list to MutationList.]
if call[name[isinstance], parameter[name[value], name[string_types]]] begin[:]
variable[value] assign[=] call[name[value].strip, parameter[]]
if compare[call[name[value]][constant[0]] equal[==] constant[[]] begin[:]
<ast.Try object at 0x7da20c993580>
if <ast.UnaryOp object at 0x7da20c9932e0> begin[:]
variable[value] assign[=] list[[]]
variable[self] assign[=] call[name[MutationList], parameter[<ast.GeneratorExp object at 0x7da20c992080>]]
name[self]._key assign[=] name[key]
return[name[self]] | keyword[def] identifier[coerce] ( identifier[cls] , identifier[key] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[string_types] ):
identifier[value] = identifier[value] . identifier[strip] ()
keyword[if] identifier[value] [ literal[int] ]== literal[string] :
keyword[try] :
identifier[value] = identifier[json] . identifier[loads] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[value] ))
keyword[else] :
identifier[value] = identifier[value] . identifier[split] ( literal[string] )
keyword[if] keyword[not] identifier[value] :
identifier[value] =[]
identifier[self] = identifier[MutationList] (( identifier[MutationObj] . identifier[coerce] ( identifier[key] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[value] ))
identifier[self] . identifier[_key] = identifier[key]
keyword[return] identifier[self] | def coerce(cls, key, value):
"""Convert plain list to MutationList."""
if isinstance(value, string_types):
value = value.strip()
if value[0] == '[': # It's json encoded, probably
try:
value = json.loads(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError("Failed to parse JSON: '{}' ".format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
value = value.split(',') # depends on [control=['if'], data=[]]
if not value:
value = [] # depends on [control=['if'], data=[]]
self = MutationList((MutationObj.coerce(key, v) for v in value))
self._key = key
return self |
def _protected_division(x1, x2):
"""Closure of division (x1/x2) for zero denominator."""
with np.errstate(divide='ignore', invalid='ignore'):
return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.) | def function[_protected_division, parameter[x1, x2]]:
constant[Closure of division (x1/x2) for zero denominator.]
with call[name[np].errstate, parameter[]] begin[:]
return[call[name[np].where, parameter[compare[call[name[np].abs, parameter[name[x2]]] greater[>] constant[0.001]], call[name[np].divide, parameter[name[x1], name[x2]]], constant[1.0]]]] | keyword[def] identifier[_protected_division] ( identifier[x1] , identifier[x2] ):
literal[string]
keyword[with] identifier[np] . identifier[errstate] ( identifier[divide] = literal[string] , identifier[invalid] = literal[string] ):
keyword[return] identifier[np] . identifier[where] ( identifier[np] . identifier[abs] ( identifier[x2] )> literal[int] , identifier[np] . identifier[divide] ( identifier[x1] , identifier[x2] ), literal[int] ) | def _protected_division(x1, x2):
"""Closure of division (x1/x2) for zero denominator."""
with np.errstate(divide='ignore', invalid='ignore'):
return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.0) # depends on [control=['with'], data=[]] |
def on_message(self, event):
'''Runs when a message event is received
Args:
event: RTM API event.
Returns:
Legobot.messge
'''
metadata = self._parse_metadata(event)
message = Message(text=metadata['text'],
metadata=metadata).__dict__
if message.get('text'):
message['text'] = self.find_and_replace_userids(message['text'])
message['text'] = self.find_and_replace_channel_refs(
message['text']
)
return message | def function[on_message, parameter[self, event]]:
constant[Runs when a message event is received
Args:
event: RTM API event.
Returns:
Legobot.messge
]
variable[metadata] assign[=] call[name[self]._parse_metadata, parameter[name[event]]]
variable[message] assign[=] call[name[Message], parameter[]].__dict__
if call[name[message].get, parameter[constant[text]]] begin[:]
call[name[message]][constant[text]] assign[=] call[name[self].find_and_replace_userids, parameter[call[name[message]][constant[text]]]]
call[name[message]][constant[text]] assign[=] call[name[self].find_and_replace_channel_refs, parameter[call[name[message]][constant[text]]]]
return[name[message]] | keyword[def] identifier[on_message] ( identifier[self] , identifier[event] ):
literal[string]
identifier[metadata] = identifier[self] . identifier[_parse_metadata] ( identifier[event] )
identifier[message] = identifier[Message] ( identifier[text] = identifier[metadata] [ literal[string] ],
identifier[metadata] = identifier[metadata] ). identifier[__dict__]
keyword[if] identifier[message] . identifier[get] ( literal[string] ):
identifier[message] [ literal[string] ]= identifier[self] . identifier[find_and_replace_userids] ( identifier[message] [ literal[string] ])
identifier[message] [ literal[string] ]= identifier[self] . identifier[find_and_replace_channel_refs] (
identifier[message] [ literal[string] ]
)
keyword[return] identifier[message] | def on_message(self, event):
"""Runs when a message event is received
Args:
event: RTM API event.
Returns:
Legobot.messge
"""
metadata = self._parse_metadata(event)
message = Message(text=metadata['text'], metadata=metadata).__dict__
if message.get('text'):
message['text'] = self.find_and_replace_userids(message['text'])
message['text'] = self.find_and_replace_channel_refs(message['text']) # depends on [control=['if'], data=[]]
return message |
def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified() | def function[remove, parameter[self, elem]]:
constant[Removes an item from the list. Similar to list.remove().]
call[name[self]._values.remove, parameter[name[elem]]]
call[name[self]._message_listener.Modified, parameter[]] | keyword[def] identifier[remove] ( identifier[self] , identifier[elem] ):
literal[string]
identifier[self] . identifier[_values] . identifier[remove] ( identifier[elem] )
identifier[self] . identifier[_message_listener] . identifier[Modified] () | def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified() |
def load_toml_path_config(filename):
"""Returns a PathConfig created by loading a TOML file from the
filesystem.
"""
if not os.path.exists(filename):
LOGGER.info(
"Skipping path loading from non-existent config file: %s",
filename)
return PathConfig()
LOGGER.info("Loading path information from config: %s", filename)
try:
with open(filename) as fd:
raw_config = fd.read()
except IOError as e:
raise LocalConfigurationError(
"Unable to load path configuration file: {}".format(str(e)))
toml_config = toml.loads(raw_config)
invalid_keys = set(toml_config.keys()).difference(
['data_dir', 'key_dir', 'log_dir', 'policy_dir'])
if invalid_keys:
raise LocalConfigurationError("Invalid keys in path config: {}".format(
", ".join(sorted(list(invalid_keys)))))
config = PathConfig(
config_dir=None,
data_dir=toml_config.get('data_dir', None),
key_dir=toml_config.get('key_dir', None),
log_dir=toml_config.get('log_dir', None),
policy_dir=toml_config.get('policy_dir', None)
)
return config | def function[load_toml_path_config, parameter[filename]]:
constant[Returns a PathConfig created by loading a TOML file from the
filesystem.
]
if <ast.UnaryOp object at 0x7da20e956290> begin[:]
call[name[LOGGER].info, parameter[constant[Skipping path loading from non-existent config file: %s], name[filename]]]
return[call[name[PathConfig], parameter[]]]
call[name[LOGGER].info, parameter[constant[Loading path information from config: %s], name[filename]]]
<ast.Try object at 0x7da20e9571c0>
variable[toml_config] assign[=] call[name[toml].loads, parameter[name[raw_config]]]
variable[invalid_keys] assign[=] call[call[name[set], parameter[call[name[toml_config].keys, parameter[]]]].difference, parameter[list[[<ast.Constant object at 0x7da18bc720b0>, <ast.Constant object at 0x7da18bc73df0>, <ast.Constant object at 0x7da18bc721d0>, <ast.Constant object at 0x7da18bc72770>]]]]
if name[invalid_keys] begin[:]
<ast.Raise object at 0x7da18bc72260>
variable[config] assign[=] call[name[PathConfig], parameter[]]
return[name[config]] | keyword[def] identifier[load_toml_path_config] ( identifier[filename] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
identifier[LOGGER] . identifier[info] (
literal[string] ,
identifier[filename] )
keyword[return] identifier[PathConfig] ()
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[filename] )
keyword[try] :
keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[fd] :
identifier[raw_config] = identifier[fd] . identifier[read] ()
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[raise] identifier[LocalConfigurationError] (
literal[string] . identifier[format] ( identifier[str] ( identifier[e] )))
identifier[toml_config] = identifier[toml] . identifier[loads] ( identifier[raw_config] )
identifier[invalid_keys] = identifier[set] ( identifier[toml_config] . identifier[keys] ()). identifier[difference] (
[ literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[if] identifier[invalid_keys] :
keyword[raise] identifier[LocalConfigurationError] ( literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[sorted] ( identifier[list] ( identifier[invalid_keys] )))))
identifier[config] = identifier[PathConfig] (
identifier[config_dir] = keyword[None] ,
identifier[data_dir] = identifier[toml_config] . identifier[get] ( literal[string] , keyword[None] ),
identifier[key_dir] = identifier[toml_config] . identifier[get] ( literal[string] , keyword[None] ),
identifier[log_dir] = identifier[toml_config] . identifier[get] ( literal[string] , keyword[None] ),
identifier[policy_dir] = identifier[toml_config] . identifier[get] ( literal[string] , keyword[None] )
)
keyword[return] identifier[config] | def load_toml_path_config(filename):
"""Returns a PathConfig created by loading a TOML file from the
filesystem.
"""
if not os.path.exists(filename):
LOGGER.info('Skipping path loading from non-existent config file: %s', filename)
return PathConfig() # depends on [control=['if'], data=[]]
LOGGER.info('Loading path information from config: %s', filename)
try:
with open(filename) as fd:
raw_config = fd.read() # depends on [control=['with'], data=['fd']] # depends on [control=['try'], data=[]]
except IOError as e:
raise LocalConfigurationError('Unable to load path configuration file: {}'.format(str(e))) # depends on [control=['except'], data=['e']]
toml_config = toml.loads(raw_config)
invalid_keys = set(toml_config.keys()).difference(['data_dir', 'key_dir', 'log_dir', 'policy_dir'])
if invalid_keys:
raise LocalConfigurationError('Invalid keys in path config: {}'.format(', '.join(sorted(list(invalid_keys))))) # depends on [control=['if'], data=[]]
config = PathConfig(config_dir=None, data_dir=toml_config.get('data_dir', None), key_dir=toml_config.get('key_dir', None), log_dir=toml_config.get('log_dir', None), policy_dir=toml_config.get('policy_dir', None))
return config |
def _safebuiltins():
"""Construct a safe builtin environment without I/O functions.
:rtype: dict"""
result = {}
objectnames = [
objectname for objectname in dir(builtins)
if objectname not in BUILTIN_IO_PROPS
]
for objectname in objectnames:
result[objectname] = getattr(builtins, objectname)
return result | def function[_safebuiltins, parameter[]]:
constant[Construct a safe builtin environment without I/O functions.
:rtype: dict]
variable[result] assign[=] dictionary[[], []]
variable[objectnames] assign[=] <ast.ListComp object at 0x7da1b13509d0>
for taget[name[objectname]] in starred[name[objectnames]] begin[:]
call[name[result]][name[objectname]] assign[=] call[name[getattr], parameter[name[builtins], name[objectname]]]
return[name[result]] | keyword[def] identifier[_safebuiltins] ():
literal[string]
identifier[result] ={}
identifier[objectnames] =[
identifier[objectname] keyword[for] identifier[objectname] keyword[in] identifier[dir] ( identifier[builtins] )
keyword[if] identifier[objectname] keyword[not] keyword[in] identifier[BUILTIN_IO_PROPS]
]
keyword[for] identifier[objectname] keyword[in] identifier[objectnames] :
identifier[result] [ identifier[objectname] ]= identifier[getattr] ( identifier[builtins] , identifier[objectname] )
keyword[return] identifier[result] | def _safebuiltins():
"""Construct a safe builtin environment without I/O functions.
:rtype: dict"""
result = {}
objectnames = [objectname for objectname in dir(builtins) if objectname not in BUILTIN_IO_PROPS]
for objectname in objectnames:
result[objectname] = getattr(builtins, objectname) # depends on [control=['for'], data=['objectname']]
return result |
async def jsk_source(self, ctx: commands.Context, *, command_name: str):
"""
Displays the source code for a command.
"""
command = self.bot.get_command(command_name)
if not command:
return await ctx.send(f"Couldn't find command `{command_name}`.")
try:
source_lines, _ = inspect.getsourcelines(command.callback)
except (TypeError, OSError):
return await ctx.send(f"Was unable to retrieve the source for `{command}` for some reason.")
# getsourcelines for some reason returns WITH line endings
source_lines = ''.join(source_lines).split('\n')
paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
for line in source_lines:
paginator.add_line(line)
interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
await interface.send_to(ctx) | <ast.AsyncFunctionDef object at 0x7da1b1ecc250> | keyword[async] keyword[def] identifier[jsk_source] ( identifier[self] , identifier[ctx] : identifier[commands] . identifier[Context] ,*, identifier[command_name] : identifier[str] ):
literal[string]
identifier[command] = identifier[self] . identifier[bot] . identifier[get_command] ( identifier[command_name] )
keyword[if] keyword[not] identifier[command] :
keyword[return] keyword[await] identifier[ctx] . identifier[send] ( literal[string] )
keyword[try] :
identifier[source_lines] , identifier[_] = identifier[inspect] . identifier[getsourcelines] ( identifier[command] . identifier[callback] )
keyword[except] ( identifier[TypeError] , identifier[OSError] ):
keyword[return] keyword[await] identifier[ctx] . identifier[send] ( literal[string] )
identifier[source_lines] = literal[string] . identifier[join] ( identifier[source_lines] ). identifier[split] ( literal[string] )
identifier[paginator] = identifier[WrappedPaginator] ( identifier[prefix] = literal[string] , identifier[suffix] = literal[string] , identifier[max_size] = literal[int] )
keyword[for] identifier[line] keyword[in] identifier[source_lines] :
identifier[paginator] . identifier[add_line] ( identifier[line] )
identifier[interface] = identifier[PaginatorInterface] ( identifier[ctx] . identifier[bot] , identifier[paginator] , identifier[owner] = identifier[ctx] . identifier[author] )
keyword[await] identifier[interface] . identifier[send_to] ( identifier[ctx] ) | async def jsk_source(self, ctx: commands.Context, *, command_name: str):
"""
Displays the source code for a command.
"""
command = self.bot.get_command(command_name)
if not command:
return await ctx.send(f"Couldn't find command `{command_name}`.") # depends on [control=['if'], data=[]]
try:
(source_lines, _) = inspect.getsourcelines(command.callback) # depends on [control=['try'], data=[]]
except (TypeError, OSError):
return await ctx.send(f'Was unable to retrieve the source for `{command}` for some reason.') # depends on [control=['except'], data=[]]
# getsourcelines for some reason returns WITH line endings
source_lines = ''.join(source_lines).split('\n')
paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
for line in source_lines:
paginator.add_line(line) # depends on [control=['for'], data=['line']]
interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
await interface.send_to(ctx) |
def load_crawler(self, crawler, url, ignore_regex):
"""
Loads the given crawler with the given url.
:param class crawler: class of the crawler to load
:param str url: url to start the crawler with
:param regex ignore_regex: to be able to ignore urls that match this
regex code
"""
self.process = CrawlerProcess(self.cfg.get_scrapy_options())
self.process.crawl(
crawler,
self.helper,
url=url,
config=self.cfg,
ignore_regex=ignore_regex) | def function[load_crawler, parameter[self, crawler, url, ignore_regex]]:
constant[
Loads the given crawler with the given url.
:param class crawler: class of the crawler to load
:param str url: url to start the crawler with
:param regex ignore_regex: to be able to ignore urls that match this
regex code
]
name[self].process assign[=] call[name[CrawlerProcess], parameter[call[name[self].cfg.get_scrapy_options, parameter[]]]]
call[name[self].process.crawl, parameter[name[crawler], name[self].helper]] | keyword[def] identifier[load_crawler] ( identifier[self] , identifier[crawler] , identifier[url] , identifier[ignore_regex] ):
literal[string]
identifier[self] . identifier[process] = identifier[CrawlerProcess] ( identifier[self] . identifier[cfg] . identifier[get_scrapy_options] ())
identifier[self] . identifier[process] . identifier[crawl] (
identifier[crawler] ,
identifier[self] . identifier[helper] ,
identifier[url] = identifier[url] ,
identifier[config] = identifier[self] . identifier[cfg] ,
identifier[ignore_regex] = identifier[ignore_regex] ) | def load_crawler(self, crawler, url, ignore_regex):
"""
Loads the given crawler with the given url.
:param class crawler: class of the crawler to load
:param str url: url to start the crawler with
:param regex ignore_regex: to be able to ignore urls that match this
regex code
"""
self.process = CrawlerProcess(self.cfg.get_scrapy_options())
self.process.crawl(crawler, self.helper, url=url, config=self.cfg, ignore_regex=ignore_regex) |
def _raise_for_status(response):
"""Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
"""
http_error_msg = ''
if 400 <= response.status_code < 500:
http_error_msg = '%s Client Error: %s for url: %s' % (
response.status_code, response.reason, response.url)
elif 500 <= response.status_code < 600:
http_error_msg = '%s Server Error: %s for url: %s' % (
response.status_code, response.reason, response.url)
if http_error_msg:
spacetrack_error_msg = None
try:
json = response.json()
if isinstance(json, Mapping):
spacetrack_error_msg = json['error']
except (ValueError, KeyError):
pass
if not spacetrack_error_msg:
spacetrack_error_msg = response.text
if spacetrack_error_msg:
http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg
raise requests.HTTPError(http_error_msg, response=response) | def function[_raise_for_status, parameter[response]]:
constant[Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
]
variable[http_error_msg] assign[=] constant[]
if compare[constant[400] less_or_equal[<=] name[response].status_code] begin[:]
variable[http_error_msg] assign[=] binary_operation[constant[%s Client Error: %s for url: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f810c40>, <ast.Attribute object at 0x7da18f8111b0>, <ast.Attribute object at 0x7da18f810d00>]]]
if name[http_error_msg] begin[:]
variable[spacetrack_error_msg] assign[=] constant[None]
<ast.Try object at 0x7da18f813d00>
if <ast.UnaryOp object at 0x7da18f811ba0> begin[:]
variable[spacetrack_error_msg] assign[=] name[response].text
if name[spacetrack_error_msg] begin[:]
<ast.AugAssign object at 0x7da18f810c70>
<ast.Raise object at 0x7da18f8136d0> | keyword[def] identifier[_raise_for_status] ( identifier[response] ):
literal[string]
identifier[http_error_msg] = literal[string]
keyword[if] literal[int] <= identifier[response] . identifier[status_code] < literal[int] :
identifier[http_error_msg] = literal[string] %(
identifier[response] . identifier[status_code] , identifier[response] . identifier[reason] , identifier[response] . identifier[url] )
keyword[elif] literal[int] <= identifier[response] . identifier[status_code] < literal[int] :
identifier[http_error_msg] = literal[string] %(
identifier[response] . identifier[status_code] , identifier[response] . identifier[reason] , identifier[response] . identifier[url] )
keyword[if] identifier[http_error_msg] :
identifier[spacetrack_error_msg] = keyword[None]
keyword[try] :
identifier[json] = identifier[response] . identifier[json] ()
keyword[if] identifier[isinstance] ( identifier[json] , identifier[Mapping] ):
identifier[spacetrack_error_msg] = identifier[json] [ literal[string] ]
keyword[except] ( identifier[ValueError] , identifier[KeyError] ):
keyword[pass]
keyword[if] keyword[not] identifier[spacetrack_error_msg] :
identifier[spacetrack_error_msg] = identifier[response] . identifier[text]
keyword[if] identifier[spacetrack_error_msg] :
identifier[http_error_msg] += literal[string] + identifier[spacetrack_error_msg]
keyword[raise] identifier[requests] . identifier[HTTPError] ( identifier[http_error_msg] , identifier[response] = identifier[response] ) | def _raise_for_status(response):
"""Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
"""
http_error_msg = ''
if 400 <= response.status_code < 500:
http_error_msg = '%s Client Error: %s for url: %s' % (response.status_code, response.reason, response.url) # depends on [control=['if'], data=[]]
elif 500 <= response.status_code < 600:
http_error_msg = '%s Server Error: %s for url: %s' % (response.status_code, response.reason, response.url) # depends on [control=['if'], data=[]]
if http_error_msg:
spacetrack_error_msg = None
try:
json = response.json()
if isinstance(json, Mapping):
spacetrack_error_msg = json['error'] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (ValueError, KeyError):
pass # depends on [control=['except'], data=[]]
if not spacetrack_error_msg:
spacetrack_error_msg = response.text # depends on [control=['if'], data=[]]
if spacetrack_error_msg:
http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg # depends on [control=['if'], data=[]]
raise requests.HTTPError(http_error_msg, response=response) # depends on [control=['if'], data=[]] |
def read_pmid_sentences(pmid_sentences, **drum_args):
"""Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
A dictonary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
all_statements : list[indra.statement.Statement]
A list of INDRA Statements resulting from the reading
"""
def _set_pmid(statements, pmid):
for stmt in statements:
for evidence in stmt.evidence:
evidence.pmid = pmid
# See if we need to start DRUM as a subprocess
run_drum = drum_args.get('run_drum', False)
drum_process = None
all_statements = {}
# Iterate over all the keys and sentences to read
for pmid, sentences in pmid_sentences.items():
logger.info('================================')
logger.info('Processing %d sentences for %s' % (len(sentences), pmid))
ts = time.time()
# Make a DrumReader instance
drum_args['name'] = 'DrumReader%s' % pmid
dr = DrumReader(**drum_args)
time.sleep(3)
# If there is no DRUM process set yet, we get the one that was
# just started by the DrumReader
if run_drum and drum_process is None:
drum_args.pop('run_drum', None)
drum_process = dr.drum_system
# By setting this, we ensuer that the reference to the
# process is passed in to all future DrumReaders
drum_args['drum_system'] = drum_process
# Now read each sentence for this key
for sentence in sentences:
dr.read_text(sentence)
# Start receiving results and exit when done
try:
dr.start()
except SystemExit:
pass
statements = []
# Process all the extractions into INDRA Statements
for extraction in dr.extractions:
# Sometimes we get nothing back
if not extraction:
continue
tp = process_xml(extraction)
statements += tp.statements
# Set the PMIDs for the evidences of the Statements
_set_pmid(statements, pmid)
te = time.time()
logger.info('Reading took %d seconds and produced %d Statements.' %
(te-ts, len(statements)))
all_statements[pmid] = statements
# If we were running a DRUM process, we should kill it
if drum_process and dr.drum_system:
dr._kill_drum()
return all_statements | def function[read_pmid_sentences, parameter[pmid_sentences]]:
constant[Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
A dictonary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
all_statements : list[indra.statement.Statement]
A list of INDRA Statements resulting from the reading
]
def function[_set_pmid, parameter[statements, pmid]]:
for taget[name[stmt]] in starred[name[statements]] begin[:]
for taget[name[evidence]] in starred[name[stmt].evidence] begin[:]
name[evidence].pmid assign[=] name[pmid]
variable[run_drum] assign[=] call[name[drum_args].get, parameter[constant[run_drum], constant[False]]]
variable[drum_process] assign[=] constant[None]
variable[all_statements] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18c4cc8e0>, <ast.Name object at 0x7da18c4cd810>]]] in starred[call[name[pmid_sentences].items, parameter[]]] begin[:]
call[name[logger].info, parameter[constant[================================]]]
call[name[logger].info, parameter[binary_operation[constant[Processing %d sentences for %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18c4cfac0>, <ast.Name object at 0x7da18c4cd8d0>]]]]]
variable[ts] assign[=] call[name[time].time, parameter[]]
call[name[drum_args]][constant[name]] assign[=] binary_operation[constant[DrumReader%s] <ast.Mod object at 0x7da2590d6920> name[pmid]]
variable[dr] assign[=] call[name[DrumReader], parameter[]]
call[name[time].sleep, parameter[constant[3]]]
if <ast.BoolOp object at 0x7da18c4cca90> begin[:]
call[name[drum_args].pop, parameter[constant[run_drum], constant[None]]]
variable[drum_process] assign[=] name[dr].drum_system
call[name[drum_args]][constant[drum_system]] assign[=] name[drum_process]
for taget[name[sentence]] in starred[name[sentences]] begin[:]
call[name[dr].read_text, parameter[name[sentence]]]
<ast.Try object at 0x7da18c4ccaf0>
variable[statements] assign[=] list[[]]
for taget[name[extraction]] in starred[name[dr].extractions] begin[:]
if <ast.UnaryOp object at 0x7da18c4cc850> begin[:]
continue
variable[tp] assign[=] call[name[process_xml], parameter[name[extraction]]]
<ast.AugAssign object at 0x7da18c4ce5c0>
call[name[_set_pmid], parameter[name[statements], name[pmid]]]
variable[te] assign[=] call[name[time].time, parameter[]]
call[name[logger].info, parameter[binary_operation[constant[Reading took %d seconds and produced %d Statements.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18c4cc130>, <ast.Call object at 0x7da20c76fd30>]]]]]
call[name[all_statements]][name[pmid]] assign[=] name[statements]
if <ast.BoolOp object at 0x7da20c76ead0> begin[:]
call[name[dr]._kill_drum, parameter[]]
return[name[all_statements]] | keyword[def] identifier[read_pmid_sentences] ( identifier[pmid_sentences] ,** identifier[drum_args] ):
literal[string]
keyword[def] identifier[_set_pmid] ( identifier[statements] , identifier[pmid] ):
keyword[for] identifier[stmt] keyword[in] identifier[statements] :
keyword[for] identifier[evidence] keyword[in] identifier[stmt] . identifier[evidence] :
identifier[evidence] . identifier[pmid] = identifier[pmid]
identifier[run_drum] = identifier[drum_args] . identifier[get] ( literal[string] , keyword[False] )
identifier[drum_process] = keyword[None]
identifier[all_statements] ={}
keyword[for] identifier[pmid] , identifier[sentences] keyword[in] identifier[pmid_sentences] . identifier[items] ():
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] %( identifier[len] ( identifier[sentences] ), identifier[pmid] ))
identifier[ts] = identifier[time] . identifier[time] ()
identifier[drum_args] [ literal[string] ]= literal[string] % identifier[pmid]
identifier[dr] = identifier[DrumReader] (** identifier[drum_args] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] identifier[run_drum] keyword[and] identifier[drum_process] keyword[is] keyword[None] :
identifier[drum_args] . identifier[pop] ( literal[string] , keyword[None] )
identifier[drum_process] = identifier[dr] . identifier[drum_system]
identifier[drum_args] [ literal[string] ]= identifier[drum_process]
keyword[for] identifier[sentence] keyword[in] identifier[sentences] :
identifier[dr] . identifier[read_text] ( identifier[sentence] )
keyword[try] :
identifier[dr] . identifier[start] ()
keyword[except] identifier[SystemExit] :
keyword[pass]
identifier[statements] =[]
keyword[for] identifier[extraction] keyword[in] identifier[dr] . identifier[extractions] :
keyword[if] keyword[not] identifier[extraction] :
keyword[continue]
identifier[tp] = identifier[process_xml] ( identifier[extraction] )
identifier[statements] += identifier[tp] . identifier[statements]
identifier[_set_pmid] ( identifier[statements] , identifier[pmid] )
identifier[te] = identifier[time] . identifier[time] ()
identifier[logger] . identifier[info] ( literal[string] %
( identifier[te] - identifier[ts] , identifier[len] ( identifier[statements] )))
identifier[all_statements] [ identifier[pmid] ]= identifier[statements]
keyword[if] identifier[drum_process] keyword[and] identifier[dr] . identifier[drum_system] :
identifier[dr] . identifier[_kill_drum] ()
keyword[return] identifier[all_statements] | def read_pmid_sentences(pmid_sentences, **drum_args):
"""Read sentences from a PMID-keyed dictonary and return all Statements
Parameters
----------
pmid_sentences : dict[str, list[str]]
A dictonary where each key is a PMID pointing to a list of sentences
to be read.
**drum_args
Keyword arguments passed directly to the DrumReader. Typical
things to specify are `host` and `port`. If `run_drum` is specified
as True, this process will internally run the DRUM reading system
as a subprocess. Otherwise, DRUM is expected to be running
independently.
Returns
-------
all_statements : list[indra.statement.Statement]
A list of INDRA Statements resulting from the reading
"""
def _set_pmid(statements, pmid):
for stmt in statements:
for evidence in stmt.evidence:
evidence.pmid = pmid # depends on [control=['for'], data=['evidence']] # depends on [control=['for'], data=['stmt']]
# See if we need to start DRUM as a subprocess
run_drum = drum_args.get('run_drum', False)
drum_process = None
all_statements = {}
# Iterate over all the keys and sentences to read
for (pmid, sentences) in pmid_sentences.items():
logger.info('================================')
logger.info('Processing %d sentences for %s' % (len(sentences), pmid))
ts = time.time()
# Make a DrumReader instance
drum_args['name'] = 'DrumReader%s' % pmid
dr = DrumReader(**drum_args)
time.sleep(3)
# If there is no DRUM process set yet, we get the one that was
# just started by the DrumReader
if run_drum and drum_process is None:
drum_args.pop('run_drum', None)
drum_process = dr.drum_system
# By setting this, we ensuer that the reference to the
# process is passed in to all future DrumReaders
drum_args['drum_system'] = drum_process # depends on [control=['if'], data=[]]
# Now read each sentence for this key
for sentence in sentences:
dr.read_text(sentence) # depends on [control=['for'], data=['sentence']]
# Start receiving results and exit when done
try:
dr.start() # depends on [control=['try'], data=[]]
except SystemExit:
pass # depends on [control=['except'], data=[]]
statements = []
# Process all the extractions into INDRA Statements
for extraction in dr.extractions:
# Sometimes we get nothing back
if not extraction:
continue # depends on [control=['if'], data=[]]
tp = process_xml(extraction)
statements += tp.statements # depends on [control=['for'], data=['extraction']]
# Set the PMIDs for the evidences of the Statements
_set_pmid(statements, pmid)
te = time.time()
logger.info('Reading took %d seconds and produced %d Statements.' % (te - ts, len(statements)))
all_statements[pmid] = statements # depends on [control=['for'], data=[]]
# If we were running a DRUM process, we should kill it
if drum_process and dr.drum_system:
dr._kill_drum() # depends on [control=['if'], data=[]]
return all_statements |
def make_windll(structs):
"""
Build the windll structure.
"""
name = 'windll_t'
var = 'windll'
struct_def = """
typedef struct _{0} {{
{1}
}}
{0};
""".strip().format(name, ''.join(structs))
x86 = reloc_var(var, 'reloc_delta', True, name)
x64 = '{0} *{1} = &_{1};\n'.format(name, var)
return struct_def, x86, x64 | def function[make_windll, parameter[structs]]:
constant[
Build the windll structure.
]
variable[name] assign[=] constant[windll_t]
variable[var] assign[=] constant[windll]
variable[struct_def] assign[=] call[call[constant[
typedef struct _{0} {{
{1}
}}
{0};
].strip, parameter[]].format, parameter[name[name], call[constant[].join, parameter[name[structs]]]]]
variable[x86] assign[=] call[name[reloc_var], parameter[name[var], constant[reloc_delta], constant[True], name[name]]]
variable[x64] assign[=] call[constant[{0} *{1} = &_{1};
].format, parameter[name[name], name[var]]]
return[tuple[[<ast.Name object at 0x7da1b1eae050>, <ast.Name object at 0x7da1b1ead7b0>, <ast.Name object at 0x7da1b1ead780>]]] | keyword[def] identifier[make_windll] ( identifier[structs] ):
literal[string]
identifier[name] = literal[string]
identifier[var] = literal[string]
identifier[struct_def] = literal[string] . identifier[strip] (). identifier[format] ( identifier[name] , literal[string] . identifier[join] ( identifier[structs] ))
identifier[x86] = identifier[reloc_var] ( identifier[var] , literal[string] , keyword[True] , identifier[name] )
identifier[x64] = literal[string] . identifier[format] ( identifier[name] , identifier[var] )
keyword[return] identifier[struct_def] , identifier[x86] , identifier[x64] | def make_windll(structs):
"""
Build the windll structure.
"""
name = 'windll_t'
var = 'windll'
struct_def = '\ntypedef struct _{0} {{\n{1}\n}}\n{0};\n'.strip().format(name, ''.join(structs))
x86 = reloc_var(var, 'reloc_delta', True, name)
x64 = '{0} *{1} = &_{1};\n'.format(name, var)
return (struct_def, x86, x64) |
def render_next_step(self, form, **kwargs):
"""
When using the NamedUrlFormWizard, we have to redirect to update the
browser's URL to match the shown step.
"""
next_step = self.get_next_step()
self.storage.current_step = next_step
return redirect(self.url_name, step=next_step) | def function[render_next_step, parameter[self, form]]:
constant[
When using the NamedUrlFormWizard, we have to redirect to update the
browser's URL to match the shown step.
]
variable[next_step] assign[=] call[name[self].get_next_step, parameter[]]
name[self].storage.current_step assign[=] name[next_step]
return[call[name[redirect], parameter[name[self].url_name]]] | keyword[def] identifier[render_next_step] ( identifier[self] , identifier[form] ,** identifier[kwargs] ):
literal[string]
identifier[next_step] = identifier[self] . identifier[get_next_step] ()
identifier[self] . identifier[storage] . identifier[current_step] = identifier[next_step]
keyword[return] identifier[redirect] ( identifier[self] . identifier[url_name] , identifier[step] = identifier[next_step] ) | def render_next_step(self, form, **kwargs):
"""
When using the NamedUrlFormWizard, we have to redirect to update the
browser's URL to match the shown step.
"""
next_step = self.get_next_step()
self.storage.current_step = next_step
return redirect(self.url_name, step=next_step) |
def get(self):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
# Note, you must be authenticated!
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
clients may instead pass the :mailheader:`X-Auth-Token` value as an URL
parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
# if you aren't authenticated, redirect to login
if not self._verify_auth():
self.redirect('/login')
return
# set the streaming headers
self.set_header('Content-Type', 'text/event-stream')
self.set_header('Cache-Control', 'no-cache')
self.set_header('Connection', 'keep-alive')
self.write('retry: {0}\n'.format(400))
self.flush()
while True:
try:
event = yield self.application.event_listener.get_event(self)
self.write('tag: {0}\n'.format(event.get('tag', '')))
self.write(str('data: {0}\n\n').format(_json_dumps(event))) # future lint: disable=blacklisted-function
self.flush()
except TimeoutException:
break | def function[get, parameter[self]]:
constant[
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
# Note, you must be authenticated!
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
clients may instead pass the :mailheader:`X-Auth-Token` value as an URL
parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
]
if <ast.UnaryOp object at 0x7da18f00e110> begin[:]
call[name[self].redirect, parameter[constant[/login]]]
return[None]
call[name[self].set_header, parameter[constant[Content-Type], constant[text/event-stream]]]
call[name[self].set_header, parameter[constant[Cache-Control], constant[no-cache]]]
call[name[self].set_header, parameter[constant[Connection], constant[keep-alive]]]
call[name[self].write, parameter[call[constant[retry: {0}
].format, parameter[constant[400]]]]]
call[name[self].flush, parameter[]]
while constant[True] begin[:]
<ast.Try object at 0x7da2044c02b0> | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_verify_auth] ():
identifier[self] . identifier[redirect] ( literal[string] )
keyword[return]
identifier[self] . identifier[set_header] ( literal[string] , literal[string] )
identifier[self] . identifier[set_header] ( literal[string] , literal[string] )
identifier[self] . identifier[set_header] ( literal[string] , literal[string] )
identifier[self] . identifier[write] ( literal[string] . identifier[format] ( literal[int] ))
identifier[self] . identifier[flush] ()
keyword[while] keyword[True] :
keyword[try] :
identifier[event] = keyword[yield] identifier[self] . identifier[application] . identifier[event_listener] . identifier[get_event] ( identifier[self] )
identifier[self] . identifier[write] ( literal[string] . identifier[format] ( identifier[event] . identifier[get] ( literal[string] , literal[string] )))
identifier[self] . identifier[write] ( identifier[str] ( literal[string] ). identifier[format] ( identifier[_json_dumps] ( identifier[event] )))
identifier[self] . identifier[flush] ()
keyword[except] identifier[TimeoutException] :
keyword[break] | def get(self):
"""
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
# Note, you must be authenticated!
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
clients may instead pass the :mailheader:`X-Auth-Token` value as an URL
parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\\
awk '
BEGIN { RS=""; FS="\\\\n" }
$1 ~ /^tag: salt\\/job\\/[0-9]+\\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
"""
# if you aren't authenticated, redirect to login
if not self._verify_auth():
self.redirect('/login')
return # depends on [control=['if'], data=[]]
# set the streaming headers
self.set_header('Content-Type', 'text/event-stream')
self.set_header('Cache-Control', 'no-cache')
self.set_header('Connection', 'keep-alive')
self.write('retry: {0}\n'.format(400))
self.flush()
while True:
try:
event = (yield self.application.event_listener.get_event(self))
self.write('tag: {0}\n'.format(event.get('tag', '')))
self.write(str('data: {0}\n\n').format(_json_dumps(event))) # future lint: disable=blacklisted-function
self.flush() # depends on [control=['try'], data=[]]
except TimeoutException:
break # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def from_filepath(cls, filepath):
'''A function to replace the old constructor call where a filename was passed in.'''
assert(os.path.exists(filepath))
return cls(read_file(filepath)) | def function[from_filepath, parameter[cls, filepath]]:
constant[A function to replace the old constructor call where a filename was passed in.]
assert[call[name[os].path.exists, parameter[name[filepath]]]]
return[call[name[cls], parameter[call[name[read_file], parameter[name[filepath]]]]]] | keyword[def] identifier[from_filepath] ( identifier[cls] , identifier[filepath] ):
literal[string]
keyword[assert] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[filepath] ))
keyword[return] identifier[cls] ( identifier[read_file] ( identifier[filepath] )) | def from_filepath(cls, filepath):
"""A function to replace the old constructor call where a filename was passed in."""
assert os.path.exists(filepath)
return cls(read_file(filepath)) |
def launch(self):
"""Launch and synchronously write metadata.
This is possible due to watchman's built-in async server startup - no double-forking required.
"""
cmd = self._construct_cmd((self._watchman_path, 'get-pid'),
state_file=self._state_file,
sock_file=self._sock_file,
pid_file=self._pid_file,
log_file=self._log_file,
log_level=str(self._log_level))
self._logger.debug('watchman cmd is: {}'.format(' '.join(cmd)))
self._maybe_init_metadata()
# Watchman is launched via its cli. By running the 'get-pid' command on the client we implicitly
# launch the Watchman daemon. This approach is somewhat error-prone - in some cases the client
# can successfully trigger the launch of the Watchman daemon, but fail to return successfully
# for the 'get-pid' result due to server <-> daemon socket issues - these can look like:
#
# 2016-04-01T17:31:23,820: [cli] unable to talk to your watchman
# on .../watchman.sock! (Permission denied)
#
# This results in a subprocess execution failure and leaves us with no pid information to write
# to the metadata directory - while in reality a Watchman daemon is actually running but now
# untracked. To safeguard against this, we retry the (idempotent) 'get-pid' command a few times
# to give the server-side socket setup a few chances to quiesce before potentially orphaning it.
get_output = functools.partial(self.get_subprocess_output, cmd)
output = retry_on_exception(get_output, 3, (ProcessManager.ExecutionError,), lambda n: n * .5)
# Parse the watchman PID from the cli output.
pid = self._parse_pid_from_output(output)
# Write the process metadata to disk.
self.write_pid(pid)
self.write_socket(self._sock_file) | def function[launch, parameter[self]]:
constant[Launch and synchronously write metadata.
This is possible due to watchman's built-in async server startup - no double-forking required.
]
variable[cmd] assign[=] call[name[self]._construct_cmd, parameter[tuple[[<ast.Attribute object at 0x7da1b224be80>, <ast.Constant object at 0x7da1b224a920>]]]]
call[name[self]._logger.debug, parameter[call[constant[watchman cmd is: {}].format, parameter[call[constant[ ].join, parameter[name[cmd]]]]]]]
call[name[self]._maybe_init_metadata, parameter[]]
variable[get_output] assign[=] call[name[functools].partial, parameter[name[self].get_subprocess_output, name[cmd]]]
variable[output] assign[=] call[name[retry_on_exception], parameter[name[get_output], constant[3], tuple[[<ast.Attribute object at 0x7da1b1e8dff0>]], <ast.Lambda object at 0x7da1b1e8ef80>]]
variable[pid] assign[=] call[name[self]._parse_pid_from_output, parameter[name[output]]]
call[name[self].write_pid, parameter[name[pid]]]
call[name[self].write_socket, parameter[name[self]._sock_file]] | keyword[def] identifier[launch] ( identifier[self] ):
literal[string]
identifier[cmd] = identifier[self] . identifier[_construct_cmd] (( identifier[self] . identifier[_watchman_path] , literal[string] ),
identifier[state_file] = identifier[self] . identifier[_state_file] ,
identifier[sock_file] = identifier[self] . identifier[_sock_file] ,
identifier[pid_file] = identifier[self] . identifier[_pid_file] ,
identifier[log_file] = identifier[self] . identifier[_log_file] ,
identifier[log_level] = identifier[str] ( identifier[self] . identifier[_log_level] ))
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[cmd] )))
identifier[self] . identifier[_maybe_init_metadata] ()
identifier[get_output] = identifier[functools] . identifier[partial] ( identifier[self] . identifier[get_subprocess_output] , identifier[cmd] )
identifier[output] = identifier[retry_on_exception] ( identifier[get_output] , literal[int] ,( identifier[ProcessManager] . identifier[ExecutionError] ,), keyword[lambda] identifier[n] : identifier[n] * literal[int] )
identifier[pid] = identifier[self] . identifier[_parse_pid_from_output] ( identifier[output] )
identifier[self] . identifier[write_pid] ( identifier[pid] )
identifier[self] . identifier[write_socket] ( identifier[self] . identifier[_sock_file] ) | def launch(self):
"""Launch and synchronously write metadata.
This is possible due to watchman's built-in async server startup - no double-forking required.
"""
cmd = self._construct_cmd((self._watchman_path, 'get-pid'), state_file=self._state_file, sock_file=self._sock_file, pid_file=self._pid_file, log_file=self._log_file, log_level=str(self._log_level))
self._logger.debug('watchman cmd is: {}'.format(' '.join(cmd)))
self._maybe_init_metadata()
# Watchman is launched via its cli. By running the 'get-pid' command on the client we implicitly
# launch the Watchman daemon. This approach is somewhat error-prone - in some cases the client
# can successfully trigger the launch of the Watchman daemon, but fail to return successfully
# for the 'get-pid' result due to server <-> daemon socket issues - these can look like:
#
# 2016-04-01T17:31:23,820: [cli] unable to talk to your watchman
# on .../watchman.sock! (Permission denied)
#
# This results in a subprocess execution failure and leaves us with no pid information to write
# to the metadata directory - while in reality a Watchman daemon is actually running but now
# untracked. To safeguard against this, we retry the (idempotent) 'get-pid' command a few times
# to give the server-side socket setup a few chances to quiesce before potentially orphaning it.
get_output = functools.partial(self.get_subprocess_output, cmd)
output = retry_on_exception(get_output, 3, (ProcessManager.ExecutionError,), lambda n: n * 0.5)
# Parse the watchman PID from the cli output.
pid = self._parse_pid_from_output(output)
# Write the process metadata to disk.
self.write_pid(pid)
self.write_socket(self._sock_file) |
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
"""
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
"""
zs_json = settings_name + ".json"
zs_yml = settings_name + ".yml"
zs_yaml = settings_name + ".yaml"
zs_toml = settings_name + ".toml"
# Must have at least one
if not os.path.isfile(zs_json) \
and not os.path.isfile(zs_yml) \
and not os.path.isfile(zs_yaml) \
and not os.path.isfile(zs_toml):
raise ClickException("Please configure a zappa_settings file or call `zappa init`.")
# Prefer JSON
if os.path.isfile(zs_json):
settings_file = zs_json
elif os.path.isfile(zs_toml):
settings_file = zs_toml
elif os.path.isfile(zs_yml):
settings_file = zs_yml
else:
settings_file = zs_yaml
return settings_file | def function[get_json_or_yaml_settings, parameter[self, settings_name]]:
constant[
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
]
variable[zs_json] assign[=] binary_operation[name[settings_name] + constant[.json]]
variable[zs_yml] assign[=] binary_operation[name[settings_name] + constant[.yml]]
variable[zs_yaml] assign[=] binary_operation[name[settings_name] + constant[.yaml]]
variable[zs_toml] assign[=] binary_operation[name[settings_name] + constant[.toml]]
if <ast.BoolOp object at 0x7da20c993c10> begin[:]
<ast.Raise object at 0x7da20c9922f0>
if call[name[os].path.isfile, parameter[name[zs_json]]] begin[:]
variable[settings_file] assign[=] name[zs_json]
return[name[settings_file]] | keyword[def] identifier[get_json_or_yaml_settings] ( identifier[self] , identifier[settings_name] = literal[string] ):
literal[string]
identifier[zs_json] = identifier[settings_name] + literal[string]
identifier[zs_yml] = identifier[settings_name] + literal[string]
identifier[zs_yaml] = identifier[settings_name] + literal[string]
identifier[zs_toml] = identifier[settings_name] + literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_json] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_yml] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_yaml] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_toml] ):
keyword[raise] identifier[ClickException] ( literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_json] ):
identifier[settings_file] = identifier[zs_json]
keyword[elif] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_toml] ):
identifier[settings_file] = identifier[zs_toml]
keyword[elif] identifier[os] . identifier[path] . identifier[isfile] ( identifier[zs_yml] ):
identifier[settings_file] = identifier[zs_yml]
keyword[else] :
identifier[settings_file] = identifier[zs_yaml]
keyword[return] identifier[settings_file] | def get_json_or_yaml_settings(self, settings_name='zappa_settings'):
"""
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
"""
zs_json = settings_name + '.json'
zs_yml = settings_name + '.yml'
zs_yaml = settings_name + '.yaml'
zs_toml = settings_name + '.toml'
# Must have at least one
if not os.path.isfile(zs_json) and (not os.path.isfile(zs_yml)) and (not os.path.isfile(zs_yaml)) and (not os.path.isfile(zs_toml)):
raise ClickException('Please configure a zappa_settings file or call `zappa init`.') # depends on [control=['if'], data=[]]
# Prefer JSON
if os.path.isfile(zs_json):
settings_file = zs_json # depends on [control=['if'], data=[]]
elif os.path.isfile(zs_toml):
settings_file = zs_toml # depends on [control=['if'], data=[]]
elif os.path.isfile(zs_yml):
settings_file = zs_yml # depends on [control=['if'], data=[]]
else:
settings_file = zs_yaml
return settings_file |
def _compute_standard_dev(self, rup, imt, C):
"""
Compute the the standard deviation in terms of magnitude
described on page 744, eq. 4
"""
sigma_mean = 0.
if imt.name in "SA PGA":
psi = -6.898E-3
else:
psi = -3.054E-5
if rup.mag <= 6.5:
sigma_mean = (C['c12'] * rup.mag) + C['c13']
elif rup.mag > 6.5:
sigma_mean = (psi * rup.mag) + C['c14']
return sigma_mean | def function[_compute_standard_dev, parameter[self, rup, imt, C]]:
constant[
Compute the the standard deviation in terms of magnitude
described on page 744, eq. 4
]
variable[sigma_mean] assign[=] constant[0.0]
if compare[name[imt].name in constant[SA PGA]] begin[:]
variable[psi] assign[=] <ast.UnaryOp object at 0x7da1b15cd1b0>
if compare[name[rup].mag less_or_equal[<=] constant[6.5]] begin[:]
variable[sigma_mean] assign[=] binary_operation[binary_operation[call[name[C]][constant[c12]] * name[rup].mag] + call[name[C]][constant[c13]]]
return[name[sigma_mean]] | keyword[def] identifier[_compute_standard_dev] ( identifier[self] , identifier[rup] , identifier[imt] , identifier[C] ):
literal[string]
identifier[sigma_mean] = literal[int]
keyword[if] identifier[imt] . identifier[name] keyword[in] literal[string] :
identifier[psi] =- literal[int]
keyword[else] :
identifier[psi] =- literal[int]
keyword[if] identifier[rup] . identifier[mag] <= literal[int] :
identifier[sigma_mean] =( identifier[C] [ literal[string] ]* identifier[rup] . identifier[mag] )+ identifier[C] [ literal[string] ]
keyword[elif] identifier[rup] . identifier[mag] > literal[int] :
identifier[sigma_mean] =( identifier[psi] * identifier[rup] . identifier[mag] )+ identifier[C] [ literal[string] ]
keyword[return] identifier[sigma_mean] | def _compute_standard_dev(self, rup, imt, C):
"""
Compute the the standard deviation in terms of magnitude
described on page 744, eq. 4
"""
sigma_mean = 0.0
if imt.name in 'SA PGA':
psi = -0.006898 # depends on [control=['if'], data=[]]
else:
psi = -3.054e-05
if rup.mag <= 6.5:
sigma_mean = C['c12'] * rup.mag + C['c13'] # depends on [control=['if'], data=[]]
elif rup.mag > 6.5:
sigma_mean = psi * rup.mag + C['c14'] # depends on [control=['if'], data=[]]
return sigma_mean |
def create(type_dict, *type_parameters):
"""
Construct a List containing type 'klazz'.
"""
assert len(type_parameters) == 1
klazz = TypeFactory.new(type_dict, *type_parameters[0])
assert isclass(klazz)
assert issubclass(klazz, Object)
return TypeMetaclass('%sList' % klazz.__name__, (ListContainer,), {'TYPE': klazz}) | def function[create, parameter[type_dict]]:
constant[
Construct a List containing type 'klazz'.
]
assert[compare[call[name[len], parameter[name[type_parameters]]] equal[==] constant[1]]]
variable[klazz] assign[=] call[name[TypeFactory].new, parameter[name[type_dict], <ast.Starred object at 0x7da1b2853b20>]]
assert[call[name[isclass], parameter[name[klazz]]]]
assert[call[name[issubclass], parameter[name[klazz], name[Object]]]]
return[call[name[TypeMetaclass], parameter[binary_operation[constant[%sList] <ast.Mod object at 0x7da2590d6920> name[klazz].__name__], tuple[[<ast.Name object at 0x7da1b264a770>]], dictionary[[<ast.Constant object at 0x7da1b2648dc0>], [<ast.Name object at 0x7da1b264a380>]]]]] | keyword[def] identifier[create] ( identifier[type_dict] ,* identifier[type_parameters] ):
literal[string]
keyword[assert] identifier[len] ( identifier[type_parameters] )== literal[int]
identifier[klazz] = identifier[TypeFactory] . identifier[new] ( identifier[type_dict] ,* identifier[type_parameters] [ literal[int] ])
keyword[assert] identifier[isclass] ( identifier[klazz] )
keyword[assert] identifier[issubclass] ( identifier[klazz] , identifier[Object] )
keyword[return] identifier[TypeMetaclass] ( literal[string] % identifier[klazz] . identifier[__name__] ,( identifier[ListContainer] ,),{ literal[string] : identifier[klazz] }) | def create(type_dict, *type_parameters):
"""
Construct a List containing type 'klazz'.
"""
assert len(type_parameters) == 1
klazz = TypeFactory.new(type_dict, *type_parameters[0])
assert isclass(klazz)
assert issubclass(klazz, Object)
return TypeMetaclass('%sList' % klazz.__name__, (ListContainer,), {'TYPE': klazz}) |
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course = self.course_factory.get_course(courseid)
scoreboards = course.get_descriptor().get('scoreboard', [])
try:
names = {i: val["name"] for i, val in enumerate(scoreboards)}
except:
raise web.notfound("Invalid configuration")
if len(names) == 0:
raise web.notfound()
return self.template_helper.get_custom_renderer('frontend/plugins/scoreboard').main(course, names) | def function[GET_AUTH, parameter[self, courseid]]:
constant[ GET request ]
variable[course] assign[=] call[name[self].course_factory.get_course, parameter[name[courseid]]]
variable[scoreboards] assign[=] call[call[name[course].get_descriptor, parameter[]].get, parameter[constant[scoreboard], list[[]]]]
<ast.Try object at 0x7da18c4cf100>
if compare[call[name[len], parameter[name[names]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18c4cf880>
return[call[call[name[self].template_helper.get_custom_renderer, parameter[constant[frontend/plugins/scoreboard]]].main, parameter[name[course], name[names]]]] | keyword[def] identifier[GET_AUTH] ( identifier[self] , identifier[courseid] ):
literal[string]
identifier[course] = identifier[self] . identifier[course_factory] . identifier[get_course] ( identifier[courseid] )
identifier[scoreboards] = identifier[course] . identifier[get_descriptor] (). identifier[get] ( literal[string] ,[])
keyword[try] :
identifier[names] ={ identifier[i] : identifier[val] [ literal[string] ] keyword[for] identifier[i] , identifier[val] keyword[in] identifier[enumerate] ( identifier[scoreboards] )}
keyword[except] :
keyword[raise] identifier[web] . identifier[notfound] ( literal[string] )
keyword[if] identifier[len] ( identifier[names] )== literal[int] :
keyword[raise] identifier[web] . identifier[notfound] ()
keyword[return] identifier[self] . identifier[template_helper] . identifier[get_custom_renderer] ( literal[string] ). identifier[main] ( identifier[course] , identifier[names] ) | def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
' GET request '
course = self.course_factory.get_course(courseid)
scoreboards = course.get_descriptor().get('scoreboard', [])
try:
names = {i: val['name'] for (i, val) in enumerate(scoreboards)} # depends on [control=['try'], data=[]]
except:
raise web.notfound('Invalid configuration') # depends on [control=['except'], data=[]]
if len(names) == 0:
raise web.notfound() # depends on [control=['if'], data=[]]
return self.template_helper.get_custom_renderer('frontend/plugins/scoreboard').main(course, names) |
def participant_names(self):
'''The names of the RTObjects participating in this context.'''
with self._mutex:
return [obj.get_component_profile().instance_name \
for obj in self._participants] | def function[participant_names, parameter[self]]:
constant[The names of the RTObjects participating in this context.]
with name[self]._mutex begin[:]
return[<ast.ListComp object at 0x7da207f03f10>] | keyword[def] identifier[participant_names] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_mutex] :
keyword[return] [ identifier[obj] . identifier[get_component_profile] (). identifier[instance_name] keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[_participants] ] | def participant_names(self):
"""The names of the RTObjects participating in this context."""
with self._mutex:
return [obj.get_component_profile().instance_name for obj in self._participants] # depends on [control=['with'], data=[]] |
def encode(something):
"""Encode something with SECRET_KEY."""
secret_key = current_app.config.get('SECRET_KEY')
s = URLSafeSerializer(secret_key)
return s.dumps(something) | def function[encode, parameter[something]]:
constant[Encode something with SECRET_KEY.]
variable[secret_key] assign[=] call[name[current_app].config.get, parameter[constant[SECRET_KEY]]]
variable[s] assign[=] call[name[URLSafeSerializer], parameter[name[secret_key]]]
return[call[name[s].dumps, parameter[name[something]]]] | keyword[def] identifier[encode] ( identifier[something] ):
literal[string]
identifier[secret_key] = identifier[current_app] . identifier[config] . identifier[get] ( literal[string] )
identifier[s] = identifier[URLSafeSerializer] ( identifier[secret_key] )
keyword[return] identifier[s] . identifier[dumps] ( identifier[something] ) | def encode(something):
"""Encode something with SECRET_KEY."""
secret_key = current_app.config.get('SECRET_KEY')
s = URLSafeSerializer(secret_key)
return s.dumps(something) |
def get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, "input")
request_type = ET.SubElement(input, "request-type")
getnext_request = ET.SubElement(request_type, "getnext-request")
last_received_interface_info = ET.SubElement(getnext_request, "last-received-interface-info")
interface_name = ET.SubElement(last_received_interface_info, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_name, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_port_profile_for_intf] assign[=] call[name[ET].Element, parameter[constant[get_port_profile_for_intf]]]
variable[config] assign[=] name[get_port_profile_for_intf]
variable[input] assign[=] call[name[ET].SubElement, parameter[name[get_port_profile_for_intf], constant[input]]]
variable[request_type] assign[=] call[name[ET].SubElement, parameter[name[input], constant[request-type]]]
variable[getnext_request] assign[=] call[name[ET].SubElement, parameter[name[request_type], constant[getnext-request]]]
variable[last_received_interface_info] assign[=] call[name[ET].SubElement, parameter[name[getnext_request], constant[last-received-interface-info]]]
variable[interface_name] assign[=] call[name[ET].SubElement, parameter[name[last_received_interface_info], constant[interface-name]]]
name[interface_name].text assign[=] call[name[kwargs].pop, parameter[constant[interface_name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_port_profile_for_intf] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_port_profile_for_intf]
identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[get_port_profile_for_intf] , literal[string] )
identifier[request_type] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] )
identifier[getnext_request] = identifier[ET] . identifier[SubElement] ( identifier[request_type] , literal[string] )
identifier[last_received_interface_info] = identifier[ET] . identifier[SubElement] ( identifier[getnext_request] , literal[string] )
identifier[interface_name] = identifier[ET] . identifier[SubElement] ( identifier[last_received_interface_info] , literal[string] )
identifier[interface_name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_port_profile_for_intf = ET.Element('get_port_profile_for_intf')
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, 'input')
request_type = ET.SubElement(input, 'request-type')
getnext_request = ET.SubElement(request_type, 'getnext-request')
last_received_interface_info = ET.SubElement(getnext_request, 'last-received-interface-info')
interface_name = ET.SubElement(last_received_interface_info, 'interface-name')
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def updateMedShockProcess(self):
'''
Constructs discrete distributions of medical preference shocks for each
period in the cycle. Distributions are saved as attribute MedShkDstn,
which is added to time_vary.
Parameters
----------
None
Returns
-------
None
'''
MedShkDstn = [] # empty list for medical shock distribution each period
for t in range(self.T_cycle):
MedShkAvgNow = self.MedShkAvg[t] # get shock distribution parameters
MedShkStdNow = self.MedShkStd[t]
MedShkDstnNow = approxLognormal(mu=np.log(MedShkAvgNow)-0.5*MedShkStdNow**2,\
sigma=MedShkStdNow,N=self.MedShkCount, tail_N=self.MedShkCountTail,
tail_bound=[0,0.9])
MedShkDstnNow = addDiscreteOutcomeConstantMean(MedShkDstnNow,0.0,0.0,sort=True) # add point at zero with no probability
MedShkDstn.append(MedShkDstnNow)
self.MedShkDstn = MedShkDstn
self.addToTimeVary('MedShkDstn') | def function[updateMedShockProcess, parameter[self]]:
constant[
Constructs discrete distributions of medical preference shocks for each
period in the cycle. Distributions are saved as attribute MedShkDstn,
which is added to time_vary.
Parameters
----------
None
Returns
-------
None
]
variable[MedShkDstn] assign[=] list[[]]
for taget[name[t]] in starred[call[name[range], parameter[name[self].T_cycle]]] begin[:]
variable[MedShkAvgNow] assign[=] call[name[self].MedShkAvg][name[t]]
variable[MedShkStdNow] assign[=] call[name[self].MedShkStd][name[t]]
variable[MedShkDstnNow] assign[=] call[name[approxLognormal], parameter[]]
variable[MedShkDstnNow] assign[=] call[name[addDiscreteOutcomeConstantMean], parameter[name[MedShkDstnNow], constant[0.0], constant[0.0]]]
call[name[MedShkDstn].append, parameter[name[MedShkDstnNow]]]
name[self].MedShkDstn assign[=] name[MedShkDstn]
call[name[self].addToTimeVary, parameter[constant[MedShkDstn]]] | keyword[def] identifier[updateMedShockProcess] ( identifier[self] ):
literal[string]
identifier[MedShkDstn] =[]
keyword[for] identifier[t] keyword[in] identifier[range] ( identifier[self] . identifier[T_cycle] ):
identifier[MedShkAvgNow] = identifier[self] . identifier[MedShkAvg] [ identifier[t] ]
identifier[MedShkStdNow] = identifier[self] . identifier[MedShkStd] [ identifier[t] ]
identifier[MedShkDstnNow] = identifier[approxLognormal] ( identifier[mu] = identifier[np] . identifier[log] ( identifier[MedShkAvgNow] )- literal[int] * identifier[MedShkStdNow] ** literal[int] , identifier[sigma] = identifier[MedShkStdNow] , identifier[N] = identifier[self] . identifier[MedShkCount] , identifier[tail_N] = identifier[self] . identifier[MedShkCountTail] ,
identifier[tail_bound] =[ literal[int] , literal[int] ])
identifier[MedShkDstnNow] = identifier[addDiscreteOutcomeConstantMean] ( identifier[MedShkDstnNow] , literal[int] , literal[int] , identifier[sort] = keyword[True] )
identifier[MedShkDstn] . identifier[append] ( identifier[MedShkDstnNow] )
identifier[self] . identifier[MedShkDstn] = identifier[MedShkDstn]
identifier[self] . identifier[addToTimeVary] ( literal[string] ) | def updateMedShockProcess(self):
"""
Constructs discrete distributions of medical preference shocks for each
period in the cycle. Distributions are saved as attribute MedShkDstn,
which is added to time_vary.
Parameters
----------
None
Returns
-------
None
"""
MedShkDstn = [] # empty list for medical shock distribution each period
for t in range(self.T_cycle):
MedShkAvgNow = self.MedShkAvg[t] # get shock distribution parameters
MedShkStdNow = self.MedShkStd[t]
MedShkDstnNow = approxLognormal(mu=np.log(MedShkAvgNow) - 0.5 * MedShkStdNow ** 2, sigma=MedShkStdNow, N=self.MedShkCount, tail_N=self.MedShkCountTail, tail_bound=[0, 0.9])
MedShkDstnNow = addDiscreteOutcomeConstantMean(MedShkDstnNow, 0.0, 0.0, sort=True) # add point at zero with no probability
MedShkDstn.append(MedShkDstnNow) # depends on [control=['for'], data=['t']]
self.MedShkDstn = MedShkDstn
self.addToTimeVary('MedShkDstn') |
def eigen_decomposition(G, n_components=8, eigen_solver='auto',
random_state=None,
drop_first=True, largest=True, solver_kwds=None):
"""
Function to compute the eigendecomposition of a square matrix.
Parameters
----------
G : array_like or sparse matrix
The square matrix for which to compute the eigen-decomposition.
n_components : integer, optional
The number of eigenvectors to return
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
attempt to choose the best method for input data (default)
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type.
This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
Algebraic Multigrid solver (requires ``pyamg`` to be installed)
It can be faster on very large, sparse problems, but may also lead
to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
lambdas, diffusion_map : eigenvalues, eigenvectors
"""
n_nodes = G.shape[0]
if drop_first:
n_components = n_components + 1
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=n_nodes,
nvec=n_components)
random_state = check_random_state(random_state)
# Convert G to best type for eigendecomposition
if sparse.issparse(G):
if G.getformat() is not 'csr':
G.tocsr()
G = G.astype(np.float)
# Check for symmetry
is_symmetric = _is_symmetric(G)
# Try Eigen Methods:
if eigen_solver == 'arpack':
# This matches the internal initial state used by ARPACK
v0 = random_state.uniform(-1, 1, G.shape[0])
if is_symmetric:
if largest:
which = 'LM'
else:
which = 'SM'
lambdas, diffusion_map = eigsh(G, k=n_components, which=which,
v0=v0,**(solver_kwds or {}))
else:
if largest:
which = 'LR'
else:
which = 'SR'
lambdas, diffusion_map = eigs(G, k=n_components, which=which,
**(solver_kwds or {}))
lambdas = np.real(lambdas)
diffusion_map = np.real(diffusion_map)
elif eigen_solver == 'amg':
# separate amg & lobpcg keywords:
if solver_kwds is not None:
amg_kwds = {}
lobpcg_kwds = solver_kwds.copy()
for kwd in AMG_KWDS:
if kwd in solver_kwds.keys():
amg_kwds[kwd] = solver_kwds[kwd]
del lobpcg_kwds[kwd]
else:
amg_kwds = None
lobpcg_kwds = None
if not is_symmetric:
raise ValueError("lobpcg requires symmetric matrices.")
if not sparse.issparse(G):
warnings.warn("AMG works better for sparse matrices")
# Use AMG to get a preconditioner and speed up the eigenvalue problem.
ml = smoothed_aggregation_solver(check_array(G, accept_sparse = ['csr']),**(amg_kwds or {}))
M = ml.aspreconditioner()
n_find = min(n_nodes, 5 + 2*n_components)
X = random_state.rand(n_nodes, n_find)
X[:, 0] = (G.diagonal()).ravel()
lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest,**(lobpcg_kwds or {}))
sort_order = np.argsort(lambdas)
if largest:
lambdas = lambdas[sort_order[::-1]]
diffusion_map = diffusion_map[:, sort_order[::-1]]
else:
lambdas = lambdas[sort_order]
diffusion_map = diffusion_map[:, sort_order]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components]
elif eigen_solver == "lobpcg":
if not is_symmetric:
raise ValueError("lobpcg requires symmetric matrices.")
n_find = min(n_nodes, 5 + 2*n_components)
X = random_state.rand(n_nodes, n_find)
lambdas, diffusion_map = lobpcg(G, X, largest=largest,**(solver_kwds or {}))
sort_order = np.argsort(lambdas)
if largest:
lambdas = lambdas[sort_order[::-1]]
diffusion_map = diffusion_map[:, sort_order[::-1]]
else:
lambdas = lambdas[sort_order]
diffusion_map = diffusion_map[:, sort_order]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components]
elif eigen_solver == 'dense':
if sparse.isspmatrix(G):
G = G.todense()
if is_symmetric:
lambdas, diffusion_map = eigh(G,**(solver_kwds or {}))
else:
lambdas, diffusion_map = eig(G,**(solver_kwds or {}))
sort_index = np.argsort(lambdas)
lambdas = lambdas[sort_index]
diffusion_map[:,sort_index]
if largest:# eigh always returns eigenvalues in ascending order
lambdas = lambdas[::-1] # reverse order the e-values
diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components]
return (lambdas, diffusion_map) | def function[eigen_decomposition, parameter[G, n_components, eigen_solver, random_state, drop_first, largest, solver_kwds]]:
constant[
Function to compute the eigendecomposition of a square matrix.
Parameters
----------
G : array_like or sparse matrix
The square matrix for which to compute the eigen-decomposition.
n_components : integer, optional
The number of eigenvectors to return
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
attempt to choose the best method for input data (default)
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type.
This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
Algebraic Multigrid solver (requires ``pyamg`` to be installed)
It can be faster on very large, sparse problems, but may also lead
to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
lambdas, diffusion_map : eigenvalues, eigenvectors
]
variable[n_nodes] assign[=] call[name[G].shape][constant[0]]
if name[drop_first] begin[:]
variable[n_components] assign[=] binary_operation[name[n_components] + constant[1]]
<ast.Tuple object at 0x7da18ede4580> assign[=] call[name[check_eigen_solver], parameter[name[eigen_solver], name[solver_kwds]]]
variable[random_state] assign[=] call[name[check_random_state], parameter[name[random_state]]]
if call[name[sparse].issparse, parameter[name[G]]] begin[:]
if compare[call[name[G].getformat, parameter[]] is_not constant[csr]] begin[:]
call[name[G].tocsr, parameter[]]
variable[G] assign[=] call[name[G].astype, parameter[name[np].float]]
variable[is_symmetric] assign[=] call[name[_is_symmetric], parameter[name[G]]]
if compare[name[eigen_solver] equal[==] constant[arpack]] begin[:]
variable[v0] assign[=] call[name[random_state].uniform, parameter[<ast.UnaryOp object at 0x7da18ede6920>, constant[1], call[name[G].shape][constant[0]]]]
if name[is_symmetric] begin[:]
if name[largest] begin[:]
variable[which] assign[=] constant[LM]
<ast.Tuple object at 0x7da18ede5bd0> assign[=] call[name[eigsh], parameter[name[G]]]
variable[lambdas] assign[=] call[name[np].real, parameter[name[lambdas]]]
variable[diffusion_map] assign[=] call[name[np].real, parameter[name[diffusion_map]]]
return[tuple[[<ast.Name object at 0x7da1b13e7fd0>, <ast.Name object at 0x7da1b13e6d70>]]] | keyword[def] identifier[eigen_decomposition] ( identifier[G] , identifier[n_components] = literal[int] , identifier[eigen_solver] = literal[string] ,
identifier[random_state] = keyword[None] ,
identifier[drop_first] = keyword[True] , identifier[largest] = keyword[True] , identifier[solver_kwds] = keyword[None] ):
literal[string]
identifier[n_nodes] = identifier[G] . identifier[shape] [ literal[int] ]
keyword[if] identifier[drop_first] :
identifier[n_components] = identifier[n_components] + literal[int]
identifier[eigen_solver] , identifier[solver_kwds] = identifier[check_eigen_solver] ( identifier[eigen_solver] , identifier[solver_kwds] ,
identifier[size] = identifier[n_nodes] ,
identifier[nvec] = identifier[n_components] )
identifier[random_state] = identifier[check_random_state] ( identifier[random_state] )
keyword[if] identifier[sparse] . identifier[issparse] ( identifier[G] ):
keyword[if] identifier[G] . identifier[getformat] () keyword[is] keyword[not] literal[string] :
identifier[G] . identifier[tocsr] ()
identifier[G] = identifier[G] . identifier[astype] ( identifier[np] . identifier[float] )
identifier[is_symmetric] = identifier[_is_symmetric] ( identifier[G] )
keyword[if] identifier[eigen_solver] == literal[string] :
identifier[v0] = identifier[random_state] . identifier[uniform] (- literal[int] , literal[int] , identifier[G] . identifier[shape] [ literal[int] ])
keyword[if] identifier[is_symmetric] :
keyword[if] identifier[largest] :
identifier[which] = literal[string]
keyword[else] :
identifier[which] = literal[string]
identifier[lambdas] , identifier[diffusion_map] = identifier[eigsh] ( identifier[G] , identifier[k] = identifier[n_components] , identifier[which] = identifier[which] ,
identifier[v0] = identifier[v0] ,**( identifier[solver_kwds] keyword[or] {}))
keyword[else] :
keyword[if] identifier[largest] :
identifier[which] = literal[string]
keyword[else] :
identifier[which] = literal[string]
identifier[lambdas] , identifier[diffusion_map] = identifier[eigs] ( identifier[G] , identifier[k] = identifier[n_components] , identifier[which] = identifier[which] ,
**( identifier[solver_kwds] keyword[or] {}))
identifier[lambdas] = identifier[np] . identifier[real] ( identifier[lambdas] )
identifier[diffusion_map] = identifier[np] . identifier[real] ( identifier[diffusion_map] )
keyword[elif] identifier[eigen_solver] == literal[string] :
keyword[if] identifier[solver_kwds] keyword[is] keyword[not] keyword[None] :
identifier[amg_kwds] ={}
identifier[lobpcg_kwds] = identifier[solver_kwds] . identifier[copy] ()
keyword[for] identifier[kwd] keyword[in] identifier[AMG_KWDS] :
keyword[if] identifier[kwd] keyword[in] identifier[solver_kwds] . identifier[keys] ():
identifier[amg_kwds] [ identifier[kwd] ]= identifier[solver_kwds] [ identifier[kwd] ]
keyword[del] identifier[lobpcg_kwds] [ identifier[kwd] ]
keyword[else] :
identifier[amg_kwds] = keyword[None]
identifier[lobpcg_kwds] = keyword[None]
keyword[if] keyword[not] identifier[is_symmetric] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[sparse] . identifier[issparse] ( identifier[G] ):
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[ml] = identifier[smoothed_aggregation_solver] ( identifier[check_array] ( identifier[G] , identifier[accept_sparse] =[ literal[string] ]),**( identifier[amg_kwds] keyword[or] {}))
identifier[M] = identifier[ml] . identifier[aspreconditioner] ()
identifier[n_find] = identifier[min] ( identifier[n_nodes] , literal[int] + literal[int] * identifier[n_components] )
identifier[X] = identifier[random_state] . identifier[rand] ( identifier[n_nodes] , identifier[n_find] )
identifier[X] [:, literal[int] ]=( identifier[G] . identifier[diagonal] ()). identifier[ravel] ()
identifier[lambdas] , identifier[diffusion_map] = identifier[lobpcg] ( identifier[G] , identifier[X] , identifier[M] = identifier[M] , identifier[largest] = identifier[largest] ,**( identifier[lobpcg_kwds] keyword[or] {}))
identifier[sort_order] = identifier[np] . identifier[argsort] ( identifier[lambdas] )
keyword[if] identifier[largest] :
identifier[lambdas] = identifier[lambdas] [ identifier[sort_order] [::- literal[int] ]]
identifier[diffusion_map] = identifier[diffusion_map] [:, identifier[sort_order] [::- literal[int] ]]
keyword[else] :
identifier[lambdas] = identifier[lambdas] [ identifier[sort_order] ]
identifier[diffusion_map] = identifier[diffusion_map] [:, identifier[sort_order] ]
identifier[lambdas] = identifier[lambdas] [: identifier[n_components] ]
identifier[diffusion_map] = identifier[diffusion_map] [:,: identifier[n_components] ]
keyword[elif] identifier[eigen_solver] == literal[string] :
keyword[if] keyword[not] identifier[is_symmetric] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[n_find] = identifier[min] ( identifier[n_nodes] , literal[int] + literal[int] * identifier[n_components] )
identifier[X] = identifier[random_state] . identifier[rand] ( identifier[n_nodes] , identifier[n_find] )
identifier[lambdas] , identifier[diffusion_map] = identifier[lobpcg] ( identifier[G] , identifier[X] , identifier[largest] = identifier[largest] ,**( identifier[solver_kwds] keyword[or] {}))
identifier[sort_order] = identifier[np] . identifier[argsort] ( identifier[lambdas] )
keyword[if] identifier[largest] :
identifier[lambdas] = identifier[lambdas] [ identifier[sort_order] [::- literal[int] ]]
identifier[diffusion_map] = identifier[diffusion_map] [:, identifier[sort_order] [::- literal[int] ]]
keyword[else] :
identifier[lambdas] = identifier[lambdas] [ identifier[sort_order] ]
identifier[diffusion_map] = identifier[diffusion_map] [:, identifier[sort_order] ]
identifier[lambdas] = identifier[lambdas] [: identifier[n_components] ]
identifier[diffusion_map] = identifier[diffusion_map] [:,: identifier[n_components] ]
keyword[elif] identifier[eigen_solver] == literal[string] :
keyword[if] identifier[sparse] . identifier[isspmatrix] ( identifier[G] ):
identifier[G] = identifier[G] . identifier[todense] ()
keyword[if] identifier[is_symmetric] :
identifier[lambdas] , identifier[diffusion_map] = identifier[eigh] ( identifier[G] ,**( identifier[solver_kwds] keyword[or] {}))
keyword[else] :
identifier[lambdas] , identifier[diffusion_map] = identifier[eig] ( identifier[G] ,**( identifier[solver_kwds] keyword[or] {}))
identifier[sort_index] = identifier[np] . identifier[argsort] ( identifier[lambdas] )
identifier[lambdas] = identifier[lambdas] [ identifier[sort_index] ]
identifier[diffusion_map] [:, identifier[sort_index] ]
keyword[if] identifier[largest] :
identifier[lambdas] = identifier[lambdas] [::- literal[int] ]
identifier[diffusion_map] = identifier[diffusion_map] [:,::- literal[int] ]
identifier[lambdas] = identifier[lambdas] [: identifier[n_components] ]
identifier[diffusion_map] = identifier[diffusion_map] [:,: identifier[n_components] ]
keyword[return] ( identifier[lambdas] , identifier[diffusion_map] ) | def eigen_decomposition(G, n_components=8, eigen_solver='auto', random_state=None, drop_first=True, largest=True, solver_kwds=None):
"""
Function to compute the eigendecomposition of a square matrix.
Parameters
----------
G : array_like or sparse matrix
The square matrix for which to compute the eigen-decomposition.
n_components : integer, optional
The number of eigenvectors to return
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
attempt to choose the best method for input data (default)
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type.
This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
Algebraic Multigrid solver (requires ``pyamg`` to be installed)
It can be faster on very large, sparse problems, but may also lead
to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
lambdas, diffusion_map : eigenvalues, eigenvectors
"""
n_nodes = G.shape[0]
if drop_first:
n_components = n_components + 1 # depends on [control=['if'], data=[]]
(eigen_solver, solver_kwds) = check_eigen_solver(eigen_solver, solver_kwds, size=n_nodes, nvec=n_components)
random_state = check_random_state(random_state)
# Convert G to best type for eigendecomposition
if sparse.issparse(G):
if G.getformat() is not 'csr':
G.tocsr() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
G = G.astype(np.float)
# Check for symmetry
is_symmetric = _is_symmetric(G)
# Try Eigen Methods:
if eigen_solver == 'arpack':
# This matches the internal initial state used by ARPACK
v0 = random_state.uniform(-1, 1, G.shape[0])
if is_symmetric:
if largest:
which = 'LM' # depends on [control=['if'], data=[]]
else:
which = 'SM'
(lambdas, diffusion_map) = eigsh(G, k=n_components, which=which, v0=v0, **solver_kwds or {}) # depends on [control=['if'], data=[]]
else:
if largest:
which = 'LR' # depends on [control=['if'], data=[]]
else:
which = 'SR'
(lambdas, diffusion_map) = eigs(G, k=n_components, which=which, **solver_kwds or {})
lambdas = np.real(lambdas)
diffusion_map = np.real(diffusion_map) # depends on [control=['if'], data=[]]
elif eigen_solver == 'amg':
# separate amg & lobpcg keywords:
if solver_kwds is not None:
amg_kwds = {}
lobpcg_kwds = solver_kwds.copy()
for kwd in AMG_KWDS:
if kwd in solver_kwds.keys():
amg_kwds[kwd] = solver_kwds[kwd]
del lobpcg_kwds[kwd] # depends on [control=['if'], data=['kwd']] # depends on [control=['for'], data=['kwd']] # depends on [control=['if'], data=['solver_kwds']]
else:
amg_kwds = None
lobpcg_kwds = None
if not is_symmetric:
raise ValueError('lobpcg requires symmetric matrices.') # depends on [control=['if'], data=[]]
if not sparse.issparse(G):
warnings.warn('AMG works better for sparse matrices') # depends on [control=['if'], data=[]]
# Use AMG to get a preconditioner and speed up the eigenvalue problem.
ml = smoothed_aggregation_solver(check_array(G, accept_sparse=['csr']), **amg_kwds or {})
M = ml.aspreconditioner()
n_find = min(n_nodes, 5 + 2 * n_components)
X = random_state.rand(n_nodes, n_find)
X[:, 0] = G.diagonal().ravel()
(lambdas, diffusion_map) = lobpcg(G, X, M=M, largest=largest, **lobpcg_kwds or {})
sort_order = np.argsort(lambdas)
if largest:
lambdas = lambdas[sort_order[::-1]]
diffusion_map = diffusion_map[:, sort_order[::-1]] # depends on [control=['if'], data=[]]
else:
lambdas = lambdas[sort_order]
diffusion_map = diffusion_map[:, sort_order]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components] # depends on [control=['if'], data=[]]
elif eigen_solver == 'lobpcg':
if not is_symmetric:
raise ValueError('lobpcg requires symmetric matrices.') # depends on [control=['if'], data=[]]
n_find = min(n_nodes, 5 + 2 * n_components)
X = random_state.rand(n_nodes, n_find)
(lambdas, diffusion_map) = lobpcg(G, X, largest=largest, **solver_kwds or {})
sort_order = np.argsort(lambdas)
if largest:
lambdas = lambdas[sort_order[::-1]]
diffusion_map = diffusion_map[:, sort_order[::-1]] # depends on [control=['if'], data=[]]
else:
lambdas = lambdas[sort_order]
diffusion_map = diffusion_map[:, sort_order]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components] # depends on [control=['if'], data=[]]
elif eigen_solver == 'dense':
if sparse.isspmatrix(G):
G = G.todense() # depends on [control=['if'], data=[]]
if is_symmetric:
(lambdas, diffusion_map) = eigh(G, **solver_kwds or {}) # depends on [control=['if'], data=[]]
else:
(lambdas, diffusion_map) = eig(G, **solver_kwds or {})
sort_index = np.argsort(lambdas)
lambdas = lambdas[sort_index]
diffusion_map[:, sort_index]
if largest: # eigh always returns eigenvalues in ascending order
lambdas = lambdas[::-1] # reverse order the e-values
diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors # depends on [control=['if'], data=[]]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components] # depends on [control=['if'], data=[]]
return (lambdas, diffusion_map) |
def bind(self, func: Callable[[Any], IO]) -> IO:
"""IO a -> (a -> IO b) -> IO b"""
g = self._value
return Get(lambda text: g(text).bind(func)) | def function[bind, parameter[self, func]]:
constant[IO a -> (a -> IO b) -> IO b]
variable[g] assign[=] name[self]._value
return[call[name[Get], parameter[<ast.Lambda object at 0x7da1b0bf0fa0>]]] | keyword[def] identifier[bind] ( identifier[self] , identifier[func] : identifier[Callable] [[ identifier[Any] ], identifier[IO] ])-> identifier[IO] :
literal[string]
identifier[g] = identifier[self] . identifier[_value]
keyword[return] identifier[Get] ( keyword[lambda] identifier[text] : identifier[g] ( identifier[text] ). identifier[bind] ( identifier[func] )) | def bind(self, func: Callable[[Any], IO]) -> IO:
"""IO a -> (a -> IO b) -> IO b"""
g = self._value
return Get(lambda text: g(text).bind(func)) |
def getFingerprintForExpression(self, body, sparsity=1.0):
"""Resolve an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
Fingerprint
Raises:
CorticalioException: if the request was not successful
"""
return self._expressions.resolveExpression(self._retina, body, sparsity) | def function[getFingerprintForExpression, parameter[self, body, sparsity]]:
constant[Resolve an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
Fingerprint
Raises:
CorticalioException: if the request was not successful
]
return[call[name[self]._expressions.resolveExpression, parameter[name[self]._retina, name[body], name[sparsity]]]] | keyword[def] identifier[getFingerprintForExpression] ( identifier[self] , identifier[body] , identifier[sparsity] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[_expressions] . identifier[resolveExpression] ( identifier[self] . identifier[_retina] , identifier[body] , identifier[sparsity] ) | def getFingerprintForExpression(self, body, sparsity=1.0):
"""Resolve an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
Fingerprint
Raises:
CorticalioException: if the request was not successful
"""
return self._expressions.resolveExpression(self._retina, body, sparsity) |
def configure_cmake(self):
"""Create CMake instance and execute configure step
"""
cmake = CMake(self)
cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False
cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared
cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.shared
cmake.configure()
return cmake | def function[configure_cmake, parameter[self]]:
constant[Create CMake instance and execute configure step
]
variable[cmake] assign[=] call[name[CMake], parameter[name[self]]]
call[name[cmake].definitions][constant[FLATBUFFERS_BUILD_TESTS]] assign[=] constant[False]
call[name[cmake].definitions][constant[FLATBUFFERS_BUILD_SHAREDLIB]] assign[=] name[self].options.shared
call[name[cmake].definitions][constant[FLATBUFFERS_BUILD_FLATLIB]] assign[=] <ast.UnaryOp object at 0x7da18fe91e40>
call[name[cmake].configure, parameter[]]
return[name[cmake]] | keyword[def] identifier[configure_cmake] ( identifier[self] ):
literal[string]
identifier[cmake] = identifier[CMake] ( identifier[self] )
identifier[cmake] . identifier[definitions] [ literal[string] ]= keyword[False]
identifier[cmake] . identifier[definitions] [ literal[string] ]= identifier[self] . identifier[options] . identifier[shared]
identifier[cmake] . identifier[definitions] [ literal[string] ]= keyword[not] identifier[self] . identifier[options] . identifier[shared]
identifier[cmake] . identifier[configure] ()
keyword[return] identifier[cmake] | def configure_cmake(self):
"""Create CMake instance and execute configure step
"""
cmake = CMake(self)
cmake.definitions['FLATBUFFERS_BUILD_TESTS'] = False
cmake.definitions['FLATBUFFERS_BUILD_SHAREDLIB'] = self.options.shared
cmake.definitions['FLATBUFFERS_BUILD_FLATLIB'] = not self.options.shared
cmake.configure()
return cmake |
def parse_docstring(thing):
"""Parse a Python docstring, or the docstring found on `thing`.
:return: a ``(title, body)`` tuple. As per docstring convention, title is
the docstring's first paragraph and body is the rest.
"""
assert not isinstance(thing, bytes)
doc = cleandoc(thing) if isinstance(thing, str) else getdoc(thing)
doc = empty if doc is None else doc
assert not isinstance(doc, bytes)
# Break the docstring into two parts: title and body.
parts = docstring_split(doc)
if len(parts) == 2:
title, body = parts[0], parts[1]
else:
title, body = parts[0], empty
# Remove line breaks from the title line.
title = remove_line_breaks(title)
# Normalise line-breaks on newline.
body = body.replace("\r\n", newline).replace("\r", newline)
return docstring(title, body) | def function[parse_docstring, parameter[thing]]:
constant[Parse a Python docstring, or the docstring found on `thing`.
:return: a ``(title, body)`` tuple. As per docstring convention, title is
the docstring's first paragraph and body is the rest.
]
assert[<ast.UnaryOp object at 0x7da204623d30>]
variable[doc] assign[=] <ast.IfExp object at 0x7da204622ef0>
variable[doc] assign[=] <ast.IfExp object at 0x7da204623910>
assert[<ast.UnaryOp object at 0x7da204621810>]
variable[parts] assign[=] call[name[docstring_split], parameter[name[doc]]]
if compare[call[name[len], parameter[name[parts]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da2046220b0> assign[=] tuple[[<ast.Subscript object at 0x7da204620bb0>, <ast.Subscript object at 0x7da204621f30>]]
variable[title] assign[=] call[name[remove_line_breaks], parameter[name[title]]]
variable[body] assign[=] call[call[name[body].replace, parameter[constant[
], name[newline]]].replace, parameter[constant[
], name[newline]]]
return[call[name[docstring], parameter[name[title], name[body]]]] | keyword[def] identifier[parse_docstring] ( identifier[thing] ):
literal[string]
keyword[assert] keyword[not] identifier[isinstance] ( identifier[thing] , identifier[bytes] )
identifier[doc] = identifier[cleandoc] ( identifier[thing] ) keyword[if] identifier[isinstance] ( identifier[thing] , identifier[str] ) keyword[else] identifier[getdoc] ( identifier[thing] )
identifier[doc] = identifier[empty] keyword[if] identifier[doc] keyword[is] keyword[None] keyword[else] identifier[doc]
keyword[assert] keyword[not] identifier[isinstance] ( identifier[doc] , identifier[bytes] )
identifier[parts] = identifier[docstring_split] ( identifier[doc] )
keyword[if] identifier[len] ( identifier[parts] )== literal[int] :
identifier[title] , identifier[body] = identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ]
keyword[else] :
identifier[title] , identifier[body] = identifier[parts] [ literal[int] ], identifier[empty]
identifier[title] = identifier[remove_line_breaks] ( identifier[title] )
identifier[body] = identifier[body] . identifier[replace] ( literal[string] , identifier[newline] ). identifier[replace] ( literal[string] , identifier[newline] )
keyword[return] identifier[docstring] ( identifier[title] , identifier[body] ) | def parse_docstring(thing):
"""Parse a Python docstring, or the docstring found on `thing`.
:return: a ``(title, body)`` tuple. As per docstring convention, title is
the docstring's first paragraph and body is the rest.
"""
assert not isinstance(thing, bytes)
doc = cleandoc(thing) if isinstance(thing, str) else getdoc(thing)
doc = empty if doc is None else doc
assert not isinstance(doc, bytes)
# Break the docstring into two parts: title and body.
parts = docstring_split(doc)
if len(parts) == 2:
(title, body) = (parts[0], parts[1]) # depends on [control=['if'], data=[]]
else:
(title, body) = (parts[0], empty)
# Remove line breaks from the title line.
title = remove_line_breaks(title)
# Normalise line-breaks on newline.
body = body.replace('\r\n', newline).replace('\r', newline)
return docstring(title, body) |
def _apply(self, f, grouper=None, *args, **kwargs):
"""
Dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object.
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, str):
return getattr(x, f)(**kwargs)
return x.apply(f, *args, **kwargs)
result = self._groupby.apply(func)
return self._wrap_result(result) | def function[_apply, parameter[self, f, grouper]]:
constant[
Dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object.
]
def function[func, parameter[x]]:
variable[x] assign[=] call[name[self]._shallow_copy, parameter[name[x]]]
if call[name[isinstance], parameter[name[f], name[str]]] begin[:]
return[call[call[name[getattr], parameter[name[x], name[f]]], parameter[]]]
return[call[name[x].apply, parameter[name[f], <ast.Starred object at 0x7da18f00c2b0>]]]
variable[result] assign[=] call[name[self]._groupby.apply, parameter[name[func]]]
return[call[name[self]._wrap_result, parameter[name[result]]]] | keyword[def] identifier[_apply] ( identifier[self] , identifier[f] , identifier[grouper] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[func] ( identifier[x] ):
identifier[x] = identifier[self] . identifier[_shallow_copy] ( identifier[x] , identifier[groupby] = identifier[self] . identifier[groupby] )
keyword[if] identifier[isinstance] ( identifier[f] , identifier[str] ):
keyword[return] identifier[getattr] ( identifier[x] , identifier[f] )(** identifier[kwargs] )
keyword[return] identifier[x] . identifier[apply] ( identifier[f] ,* identifier[args] ,** identifier[kwargs] )
identifier[result] = identifier[self] . identifier[_groupby] . identifier[apply] ( identifier[func] )
keyword[return] identifier[self] . identifier[_wrap_result] ( identifier[result] ) | def _apply(self, f, grouper=None, *args, **kwargs):
"""
Dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object.
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, str):
return getattr(x, f)(**kwargs) # depends on [control=['if'], data=[]]
return x.apply(f, *args, **kwargs)
result = self._groupby.apply(func)
return self._wrap_result(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.