text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def data(value):
"""list or KeyedList of ``Data`` : Data definitions
This defines the data being visualized. See the :class:`Data` class
for details.
"""
for i, entry in enumerate(value):
_assert_is_type('data[{0}]'.format(i), entry, Data) | [
"def",
"data",
"(",
"value",
")",
":",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"value",
")",
":",
"_assert_is_type",
"(",
"'data[{0}]'",
".",
"format",
"(",
"i",
")",
",",
"entry",
",",
"Data",
")"
] | 35.75 | 18 |
def value(self):
""" Returns the node's value. """
if self.is_multi_select():
return [opt.value()
for opt in self.xpath(".//option")
if opt["selected"]]
else:
return self._invoke("value") | [
"def",
"value",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_multi_select",
"(",
")",
":",
"return",
"[",
"opt",
".",
"value",
"(",
")",
"for",
"opt",
"in",
"self",
".",
"xpath",
"(",
"\".//option\"",
")",
"if",
"opt",
"[",
"\"selected\"",
"]",
"]",
"else",
":",
"return",
"self",
".",
"_invoke",
"(",
"\"value\"",
")"
] | 29 | 12.625 |
def btc_is_multisig_segwit(privkey_info):
"""
Does the given private key info represent
a multisig bundle?
For Bitcoin, this is true for multisig p2sh (not p2sh-p2wsh)
"""
try:
jsonschema.validate(privkey_info, PRIVKEY_MULTISIG_SCHEMA)
if len(privkey_info['private_keys']) == 1:
return False
return privkey_info.get('segwit', False)
except ValidationError as e:
return False | [
"def",
"btc_is_multisig_segwit",
"(",
"privkey_info",
")",
":",
"try",
":",
"jsonschema",
".",
"validate",
"(",
"privkey_info",
",",
"PRIVKEY_MULTISIG_SCHEMA",
")",
"if",
"len",
"(",
"privkey_info",
"[",
"'private_keys'",
"]",
")",
"==",
"1",
":",
"return",
"False",
"return",
"privkey_info",
".",
"get",
"(",
"'segwit'",
",",
"False",
")",
"except",
"ValidationError",
"as",
"e",
":",
"return",
"False"
] | 28.933333 | 16.533333 |
def pid(self):
"""The pid of the process associated to the scheduler."""
try:
return self._pid
except AttributeError:
self._pid = os.getpid()
return self._pid | [
"def",
"pid",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_pid",
"except",
"AttributeError",
":",
"self",
".",
"_pid",
"=",
"os",
".",
"getpid",
"(",
")",
"return",
"self",
".",
"_pid"
] | 30.285714 | 13.285714 |
def get(self, name=None):
"""Get initial yield value, or result of send(name) if name given."""
if name is None:
return self.init_value
elif not self.support_name:
msg = "generator does not support get-by-name: function {!r}"
raise TypeError(msg.format(self.function))
try:
value = self.generator.send(name)
except StopIteration:
msg = "generator didn't yield: function {!r}"
raise RuntimeError(msg.format(self.function))
return value | [
"def",
"get",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"init_value",
"elif",
"not",
"self",
".",
"support_name",
":",
"msg",
"=",
"\"generator does not support get-by-name: function {!r}\"",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"self",
".",
"function",
")",
")",
"try",
":",
"value",
"=",
"self",
".",
"generator",
".",
"send",
"(",
"name",
")",
"except",
"StopIteration",
":",
"msg",
"=",
"\"generator didn't yield: function {!r}\"",
"raise",
"RuntimeError",
"(",
"msg",
".",
"format",
"(",
"self",
".",
"function",
")",
")",
"return",
"value"
] | 41.692308 | 14.384615 |
def click(self, x, y):
"""
x, y can be float(percent) or int
"""
if isinstance(x, float) or isinstance(y, float):
x, y = self._percent2pos(x, y)
return self.tap(x, y) | [
"def",
"click",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"float",
")",
"or",
"isinstance",
"(",
"y",
",",
"float",
")",
":",
"x",
",",
"y",
"=",
"self",
".",
"_percent2pos",
"(",
"x",
",",
"y",
")",
"return",
"self",
".",
"tap",
"(",
"x",
",",
"y",
")"
] | 30.285714 | 6.857143 |
def main(output):
"""
Generate a c7n-org subscriptions config file
"""
client = SubscriptionClient(Session().get_credentials())
subs = [sub.serialize(True) for sub in client.subscriptions.list()]
results = []
for sub in subs:
sub_info = {
'subscription_id': sub['subscriptionId'],
'name': sub['displayName']
}
results.append(sub_info)
print(
yaml.safe_dump(
{'subscriptions': results},
default_flow_style=False),
file=output) | [
"def",
"main",
"(",
"output",
")",
":",
"client",
"=",
"SubscriptionClient",
"(",
"Session",
"(",
")",
".",
"get_credentials",
"(",
")",
")",
"subs",
"=",
"[",
"sub",
".",
"serialize",
"(",
"True",
")",
"for",
"sub",
"in",
"client",
".",
"subscriptions",
".",
"list",
"(",
")",
"]",
"results",
"=",
"[",
"]",
"for",
"sub",
"in",
"subs",
":",
"sub_info",
"=",
"{",
"'subscription_id'",
":",
"sub",
"[",
"'subscriptionId'",
"]",
",",
"'name'",
":",
"sub",
"[",
"'displayName'",
"]",
"}",
"results",
".",
"append",
"(",
"sub_info",
")",
"print",
"(",
"yaml",
".",
"safe_dump",
"(",
"{",
"'subscriptions'",
":",
"results",
"}",
",",
"default_flow_style",
"=",
"False",
")",
",",
"file",
"=",
"output",
")"
] | 26.4 | 17.5 |
def scard(incard, cell):
"""
Set the cardinality of a SPICE cell of any data type.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scard_c.html
:param incard: Cardinality of (number of elements in) the cell.
:type incard: int
:param cell: The cell.
:type cell: spiceypy.utils.support_types.SpiceCell
:return: The updated Cell.
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cell, stypes.SpiceCell)
incard = ctypes.c_int(incard)
libspice.scard_c(incard, ctypes.byref(cell))
return cell | [
"def",
"scard",
"(",
"incard",
",",
"cell",
")",
":",
"assert",
"isinstance",
"(",
"cell",
",",
"stypes",
".",
"SpiceCell",
")",
"incard",
"=",
"ctypes",
".",
"c_int",
"(",
"incard",
")",
"libspice",
".",
"scard_c",
"(",
"incard",
",",
"ctypes",
".",
"byref",
"(",
"cell",
")",
")",
"return",
"cell"
] | 32.705882 | 16.705882 |
def parse(url):
"""Parses an email URL."""
conf = {}
url = urlparse.urlparse(url)
qs = urlparse.parse_qs(url.query)
# Remove query strings
path = url.path[1:]
path = path.split('?', 2)[0]
# Update with environment configuration
conf.update({
'EMAIL_FILE_PATH': path,
'EMAIL_HOST_USER': unquote(url.username),
'EMAIL_HOST_PASSWORD': unquote(url.password),
'EMAIL_HOST': url.hostname,
'EMAIL_PORT': url.port,
'EMAIL_USE_SSL': False,
'EMAIL_USE_TLS': False,
})
if url.scheme in SCHEMES:
conf['EMAIL_BACKEND'] = SCHEMES[url.scheme]
# Set defaults for `smtp`
if url.scheme == 'smtp':
if not conf['EMAIL_HOST']:
conf['EMAIL_HOST'] = 'localhost'
if not conf['EMAIL_PORT']:
conf['EMAIL_PORT'] = 25
# Set defaults for `smtps`
if url.scheme == 'smtps':
warnings.warn(
"`smpts` scheme will be deprecated in a future version,"
" use `submission` instead",
UserWarning,
)
conf['EMAIL_USE_TLS'] = True
# Set defaults for `submission`/`submit`
if url.scheme in ('submission', 'submit'):
conf['EMAIL_USE_TLS'] = True
if not conf['EMAIL_PORT']:
conf['EMAIL_PORT'] = 587
# Query args overwrite defaults
if 'ssl' in qs and qs['ssl']:
if qs['ssl'][0] in TRUTHY:
conf['EMAIL_USE_SSL'] = True
conf['EMAIL_USE_TLS'] = False
elif 'tls' in qs and qs['tls']:
if qs['tls'][0] in TRUTHY:
conf['EMAIL_USE_SSL'] = False
conf['EMAIL_USE_TLS'] = True
# From addresses
if '_server_email' in qs:
conf['SERVER_EMAIL'] = qs['_server_email'][0]
if '_default_from_email' in qs:
conf['DEFAULT_FROM_EMAIL'] = qs['_default_from_email'][0]
return conf | [
"def",
"parse",
"(",
"url",
")",
":",
"conf",
"=",
"{",
"}",
"url",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"qs",
"=",
"urlparse",
".",
"parse_qs",
"(",
"url",
".",
"query",
")",
"# Remove query strings",
"path",
"=",
"url",
".",
"path",
"[",
"1",
":",
"]",
"path",
"=",
"path",
".",
"split",
"(",
"'?'",
",",
"2",
")",
"[",
"0",
"]",
"# Update with environment configuration",
"conf",
".",
"update",
"(",
"{",
"'EMAIL_FILE_PATH'",
":",
"path",
",",
"'EMAIL_HOST_USER'",
":",
"unquote",
"(",
"url",
".",
"username",
")",
",",
"'EMAIL_HOST_PASSWORD'",
":",
"unquote",
"(",
"url",
".",
"password",
")",
",",
"'EMAIL_HOST'",
":",
"url",
".",
"hostname",
",",
"'EMAIL_PORT'",
":",
"url",
".",
"port",
",",
"'EMAIL_USE_SSL'",
":",
"False",
",",
"'EMAIL_USE_TLS'",
":",
"False",
",",
"}",
")",
"if",
"url",
".",
"scheme",
"in",
"SCHEMES",
":",
"conf",
"[",
"'EMAIL_BACKEND'",
"]",
"=",
"SCHEMES",
"[",
"url",
".",
"scheme",
"]",
"# Set defaults for `smtp`",
"if",
"url",
".",
"scheme",
"==",
"'smtp'",
":",
"if",
"not",
"conf",
"[",
"'EMAIL_HOST'",
"]",
":",
"conf",
"[",
"'EMAIL_HOST'",
"]",
"=",
"'localhost'",
"if",
"not",
"conf",
"[",
"'EMAIL_PORT'",
"]",
":",
"conf",
"[",
"'EMAIL_PORT'",
"]",
"=",
"25",
"# Set defaults for `smtps`",
"if",
"url",
".",
"scheme",
"==",
"'smtps'",
":",
"warnings",
".",
"warn",
"(",
"\"`smpts` scheme will be deprecated in a future version,\"",
"\" use `submission` instead\"",
",",
"UserWarning",
",",
")",
"conf",
"[",
"'EMAIL_USE_TLS'",
"]",
"=",
"True",
"# Set defaults for `submission`/`submit`",
"if",
"url",
".",
"scheme",
"in",
"(",
"'submission'",
",",
"'submit'",
")",
":",
"conf",
"[",
"'EMAIL_USE_TLS'",
"]",
"=",
"True",
"if",
"not",
"conf",
"[",
"'EMAIL_PORT'",
"]",
":",
"conf",
"[",
"'EMAIL_PORT'",
"]",
"=",
"587",
"# Query args overwrite defaults",
"if",
"'ssl'",
"in",
"qs",
"and",
"qs",
"[",
"'ssl'",
"]",
":",
"if",
"qs",
"[",
"'ssl'",
"]",
"[",
"0",
"]",
"in",
"TRUTHY",
":",
"conf",
"[",
"'EMAIL_USE_SSL'",
"]",
"=",
"True",
"conf",
"[",
"'EMAIL_USE_TLS'",
"]",
"=",
"False",
"elif",
"'tls'",
"in",
"qs",
"and",
"qs",
"[",
"'tls'",
"]",
":",
"if",
"qs",
"[",
"'tls'",
"]",
"[",
"0",
"]",
"in",
"TRUTHY",
":",
"conf",
"[",
"'EMAIL_USE_SSL'",
"]",
"=",
"False",
"conf",
"[",
"'EMAIL_USE_TLS'",
"]",
"=",
"True",
"# From addresses",
"if",
"'_server_email'",
"in",
"qs",
":",
"conf",
"[",
"'SERVER_EMAIL'",
"]",
"=",
"qs",
"[",
"'_server_email'",
"]",
"[",
"0",
"]",
"if",
"'_default_from_email'",
"in",
"qs",
":",
"conf",
"[",
"'DEFAULT_FROM_EMAIL'",
"]",
"=",
"qs",
"[",
"'_default_from_email'",
"]",
"[",
"0",
"]",
"return",
"conf"
] | 28.061538 | 15.415385 |
def learn(self, grad_arr, fix_opt_flag=False):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
'''
deconvolution_layer_list = self.__deconvolution_layer_list[::-1]
for i in range(len(deconvolution_layer_list)):
try:
grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr)
except:
self.__logger.debug("Error raised in Convolution layer " + str(i + 1))
raise
if fix_opt_flag is False:
self.__optimize(self.__learning_rate, 1)
return grad_arr | [
"def",
"learn",
"(",
"self",
",",
"grad_arr",
",",
"fix_opt_flag",
"=",
"False",
")",
":",
"deconvolution_layer_list",
"=",
"self",
".",
"__deconvolution_layer_list",
"[",
":",
":",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"deconvolution_layer_list",
")",
")",
":",
"try",
":",
"grad_arr",
"=",
"deconvolution_layer_list",
"[",
"i",
"]",
".",
"back_propagate",
"(",
"grad_arr",
")",
"except",
":",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Error raised in Convolution layer \"",
"+",
"str",
"(",
"i",
"+",
"1",
")",
")",
"raise",
"if",
"fix_opt_flag",
"is",
"False",
":",
"self",
".",
"__optimize",
"(",
"self",
".",
"__learning_rate",
",",
"1",
")",
"return",
"grad_arr"
] | 36.73913 | 25.347826 |
def add_file_handler(log_file_level, log_filename, str_format=None,
date_format=None, formatter=None, log_filter=None):
"""
:param log_filename:
:param log_file_level str of the log level to use on this file
:param str_format: str of the logging format
:param date_format: str of the date format
:param log_restart: bool if True the log file will be deleted first
:param log_history: bool if True will save another log file in a folder
called history with the datetime
:param formatter: logging.Format instance to use
:param log_filter: logging.filter instance to add to handler
:return: None
"""
formatter = formatter or SeabornFormatter(str_format=str_format,
date_format=date_format)
mkdir_for_file(log_filename)
handler = logging.FileHandler(log_filename)
add_handler(log_file_level, handler, formatter, log_filter=log_filter) | [
"def",
"add_file_handler",
"(",
"log_file_level",
",",
"log_filename",
",",
"str_format",
"=",
"None",
",",
"date_format",
"=",
"None",
",",
"formatter",
"=",
"None",
",",
"log_filter",
"=",
"None",
")",
":",
"formatter",
"=",
"formatter",
"or",
"SeabornFormatter",
"(",
"str_format",
"=",
"str_format",
",",
"date_format",
"=",
"date_format",
")",
"mkdir_for_file",
"(",
"log_filename",
")",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"log_filename",
")",
"add_handler",
"(",
"log_file_level",
",",
"handler",
",",
"formatter",
",",
"log_filter",
"=",
"log_filter",
")"
] | 50.6 | 21.1 |
def get_availabilities_for_duration(duration, availabilities):
"""
Helper function to return the windows of availability of the given duration, when provided a set of 30 minute windows.
"""
duration_availabilities = []
start_time = '10:00'
while start_time != '17:00':
if start_time in availabilities:
if duration == 30:
duration_availabilities.append(start_time)
elif increment_time_by_thirty_mins(start_time) in availabilities:
duration_availabilities.append(start_time)
start_time = increment_time_by_thirty_mins(start_time)
return duration_availabilities | [
"def",
"get_availabilities_for_duration",
"(",
"duration",
",",
"availabilities",
")",
":",
"duration_availabilities",
"=",
"[",
"]",
"start_time",
"=",
"'10:00'",
"while",
"start_time",
"!=",
"'17:00'",
":",
"if",
"start_time",
"in",
"availabilities",
":",
"if",
"duration",
"==",
"30",
":",
"duration_availabilities",
".",
"append",
"(",
"start_time",
")",
"elif",
"increment_time_by_thirty_mins",
"(",
"start_time",
")",
"in",
"availabilities",
":",
"duration_availabilities",
".",
"append",
"(",
"start_time",
")",
"start_time",
"=",
"increment_time_by_thirty_mins",
"(",
"start_time",
")",
"return",
"duration_availabilities"
] | 40.3125 | 20.4375 |
def set_code(self, key, code):
"""Sets code of cell key, marks grid as changed"""
old_code = self.grid.code_array(key)
try:
old_code = unicode(old_code, encoding="utf-8")
except TypeError:
pass
if code == old_code:
return
if not (old_code is None and not code) and code != old_code:
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
# Set cell code
self.grid.code_array.__setitem__(key, code) | [
"def",
"set_code",
"(",
"self",
",",
"key",
",",
"code",
")",
":",
"old_code",
"=",
"self",
".",
"grid",
".",
"code_array",
"(",
"key",
")",
"try",
":",
"old_code",
"=",
"unicode",
"(",
"old_code",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"except",
"TypeError",
":",
"pass",
"if",
"code",
"==",
"old_code",
":",
"return",
"if",
"not",
"(",
"old_code",
"is",
"None",
"and",
"not",
"code",
")",
"and",
"code",
"!=",
"old_code",
":",
"# Mark content as changed",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"ContentChangedMsg",
")",
"# Set cell code",
"self",
".",
"grid",
".",
"code_array",
".",
"__setitem__",
"(",
"key",
",",
"code",
")"
] | 27 | 23.2 |
def rsl_dump_stream_next(self, output_format):
"""Dump the next reading from the output stream."""
timestamp = 0
stream_id = 0
value = 0
reading_id = 0
error = Error.NO_ERROR
reading = self.sensor_log.dump_next()
if reading is not None:
timestamp = reading.raw_time
stream_id = reading.stream
value = reading.value
reading_id = reading.reading_id
else:
error = pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.NO_MORE_READINGS)
if output_format == 0:
return [struct.pack("<LLL", error, timestamp, value)]
elif output_format != 1:
raise ValueError("Output format other than 1 not yet supported")
return [struct.pack("<LLLLH2x", error, timestamp, value, reading_id, stream_id)] | [
"def",
"rsl_dump_stream_next",
"(",
"self",
",",
"output_format",
")",
":",
"timestamp",
"=",
"0",
"stream_id",
"=",
"0",
"value",
"=",
"0",
"reading_id",
"=",
"0",
"error",
"=",
"Error",
".",
"NO_ERROR",
"reading",
"=",
"self",
".",
"sensor_log",
".",
"dump_next",
"(",
")",
"if",
"reading",
"is",
"not",
"None",
":",
"timestamp",
"=",
"reading",
".",
"raw_time",
"stream_id",
"=",
"reading",
".",
"stream",
"value",
"=",
"reading",
".",
"value",
"reading_id",
"=",
"reading",
".",
"reading_id",
"else",
":",
"error",
"=",
"pack_error",
"(",
"ControllerSubsystem",
".",
"SENSOR_LOG",
",",
"SensorLogError",
".",
"NO_MORE_READINGS",
")",
"if",
"output_format",
"==",
"0",
":",
"return",
"[",
"struct",
".",
"pack",
"(",
"\"<LLL\"",
",",
"error",
",",
"timestamp",
",",
"value",
")",
"]",
"elif",
"output_format",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Output format other than 1 not yet supported\"",
")",
"return",
"[",
"struct",
".",
"pack",
"(",
"\"<LLLLH2x\"",
",",
"error",
",",
"timestamp",
",",
"value",
",",
"reading_id",
",",
"stream_id",
")",
"]"
] | 35.208333 | 20.416667 |
def jhk_to_rmag(jmag,hmag,kmag):
'''Converts given J, H, Ks mags to an R magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted R band magnitude.
'''
return convert_constants(jmag,hmag,kmag,
RJHK,
RJH, RJK, RHK,
RJ, RH, RK) | [
"def",
"jhk_to_rmag",
"(",
"jmag",
",",
"hmag",
",",
"kmag",
")",
":",
"return",
"convert_constants",
"(",
"jmag",
",",
"hmag",
",",
"kmag",
",",
"RJHK",
",",
"RJH",
",",
"RJK",
",",
"RHK",
",",
"RJ",
",",
"RH",
",",
"RK",
")"
] | 20.285714 | 22.47619 |
def close(self):
"""
Close this handle. If not called explicitely, will be called by
destructor.
This method cancels any in-flight transfer when it is called. As
cancellation is not immediate, this method needs to let libusb handle
events until transfers are actually cancelled.
In multi-threaded programs, this can lead to stalls. To avoid this,
do not close nor let GC collect a USBDeviceHandle which has in-flight
transfers.
"""
handle = self.__handle
if handle is None:
return
# Build a strong set from weak self.__transfer_set so we can doom
# and close all contained transfers.
# Because of backward compatibility, self.__transfer_set might be a
# wrapper around WeakKeyDictionary. As it might be modified by gc,
# we must pop until there is not key left instead of iterating over
# it.
weak_transfer_set = self.__transfer_set
transfer_set = self.__set()
while True:
try:
transfer = weak_transfer_set.pop()
except self.__KeyError:
break
transfer_set.add(transfer)
transfer.doom()
inflight = self.__inflight
for transfer in inflight:
try:
transfer.cancel()
except (self.__USBErrorNotFound, self.__USBErrorNoDevice):
pass
while inflight:
try:
self.__context.handleEvents()
except self.__USBErrorInterrupted:
pass
for transfer in transfer_set:
transfer.close()
self.__libusb_close(handle)
self.__handle = None | [
"def",
"close",
"(",
"self",
")",
":",
"handle",
"=",
"self",
".",
"__handle",
"if",
"handle",
"is",
"None",
":",
"return",
"# Build a strong set from weak self.__transfer_set so we can doom",
"# and close all contained transfers.",
"# Because of backward compatibility, self.__transfer_set might be a",
"# wrapper around WeakKeyDictionary. As it might be modified by gc,",
"# we must pop until there is not key left instead of iterating over",
"# it.",
"weak_transfer_set",
"=",
"self",
".",
"__transfer_set",
"transfer_set",
"=",
"self",
".",
"__set",
"(",
")",
"while",
"True",
":",
"try",
":",
"transfer",
"=",
"weak_transfer_set",
".",
"pop",
"(",
")",
"except",
"self",
".",
"__KeyError",
":",
"break",
"transfer_set",
".",
"add",
"(",
"transfer",
")",
"transfer",
".",
"doom",
"(",
")",
"inflight",
"=",
"self",
".",
"__inflight",
"for",
"transfer",
"in",
"inflight",
":",
"try",
":",
"transfer",
".",
"cancel",
"(",
")",
"except",
"(",
"self",
".",
"__USBErrorNotFound",
",",
"self",
".",
"__USBErrorNoDevice",
")",
":",
"pass",
"while",
"inflight",
":",
"try",
":",
"self",
".",
"__context",
".",
"handleEvents",
"(",
")",
"except",
"self",
".",
"__USBErrorInterrupted",
":",
"pass",
"for",
"transfer",
"in",
"transfer_set",
":",
"transfer",
".",
"close",
"(",
")",
"self",
".",
"__libusb_close",
"(",
"handle",
")",
"self",
".",
"__handle",
"=",
"None"
] | 37.8 | 18.022222 |
def _move_to_desired_location(self):
"""Animate movement to desired location on map."""
self._next_update = 100000
x_start = self._convert_longitude(self._longitude)
y_start = self._convert_latitude(self._latitude)
x_end = self._convert_longitude(self._desired_longitude)
y_end = self._convert_latitude(self._desired_latitude)
if sqrt((x_end - x_start) ** 2 + (y_end - y_start) ** 2) > _START_SIZE // 4:
self._zoom_map(True)
elif self._zoom != self._desired_zoom:
self._zoom_map(self._desired_zoom < self._zoom)
if self._longitude != self._desired_longitude:
self._next_update = 1
if self._desired_longitude < self._longitude:
self._longitude = max(self._longitude - 360 / 2 ** self._zoom / self._size * 2,
self._desired_longitude)
else:
self._longitude = min(self._longitude + 360 / 2 ** self._zoom / self._size * 2,
self._desired_longitude)
if self._latitude != self._desired_latitude:
self._next_update = 1
if self._desired_latitude < self._latitude:
self._latitude = max(self._inc_lat(self._latitude, 2), self._desired_latitude)
else:
self._latitude = min(self._inc_lat(self._latitude, -2), self._desired_latitude)
if self._next_update == 1:
self._updated.set() | [
"def",
"_move_to_desired_location",
"(",
"self",
")",
":",
"self",
".",
"_next_update",
"=",
"100000",
"x_start",
"=",
"self",
".",
"_convert_longitude",
"(",
"self",
".",
"_longitude",
")",
"y_start",
"=",
"self",
".",
"_convert_latitude",
"(",
"self",
".",
"_latitude",
")",
"x_end",
"=",
"self",
".",
"_convert_longitude",
"(",
"self",
".",
"_desired_longitude",
")",
"y_end",
"=",
"self",
".",
"_convert_latitude",
"(",
"self",
".",
"_desired_latitude",
")",
"if",
"sqrt",
"(",
"(",
"x_end",
"-",
"x_start",
")",
"**",
"2",
"+",
"(",
"y_end",
"-",
"y_start",
")",
"**",
"2",
")",
">",
"_START_SIZE",
"//",
"4",
":",
"self",
".",
"_zoom_map",
"(",
"True",
")",
"elif",
"self",
".",
"_zoom",
"!=",
"self",
".",
"_desired_zoom",
":",
"self",
".",
"_zoom_map",
"(",
"self",
".",
"_desired_zoom",
"<",
"self",
".",
"_zoom",
")",
"if",
"self",
".",
"_longitude",
"!=",
"self",
".",
"_desired_longitude",
":",
"self",
".",
"_next_update",
"=",
"1",
"if",
"self",
".",
"_desired_longitude",
"<",
"self",
".",
"_longitude",
":",
"self",
".",
"_longitude",
"=",
"max",
"(",
"self",
".",
"_longitude",
"-",
"360",
"/",
"2",
"**",
"self",
".",
"_zoom",
"/",
"self",
".",
"_size",
"*",
"2",
",",
"self",
".",
"_desired_longitude",
")",
"else",
":",
"self",
".",
"_longitude",
"=",
"min",
"(",
"self",
".",
"_longitude",
"+",
"360",
"/",
"2",
"**",
"self",
".",
"_zoom",
"/",
"self",
".",
"_size",
"*",
"2",
",",
"self",
".",
"_desired_longitude",
")",
"if",
"self",
".",
"_latitude",
"!=",
"self",
".",
"_desired_latitude",
":",
"self",
".",
"_next_update",
"=",
"1",
"if",
"self",
".",
"_desired_latitude",
"<",
"self",
".",
"_latitude",
":",
"self",
".",
"_latitude",
"=",
"max",
"(",
"self",
".",
"_inc_lat",
"(",
"self",
".",
"_latitude",
",",
"2",
")",
",",
"self",
".",
"_desired_latitude",
")",
"else",
":",
"self",
".",
"_latitude",
"=",
"min",
"(",
"self",
".",
"_inc_lat",
"(",
"self",
".",
"_latitude",
",",
"-",
"2",
")",
",",
"self",
".",
"_desired_latitude",
")",
"if",
"self",
".",
"_next_update",
"==",
"1",
":",
"self",
".",
"_updated",
".",
"set",
"(",
")"
] | 54.62963 | 20.851852 |
def first_or_fail(self, columns=None):
"""
Execute the query and get the first result or raise an exception.
:type columns: list
:raises: ModelNotFound
"""
model = self.first(columns)
if model is not None:
return model
raise ModelNotFound(self._parent.__class__) | [
"def",
"first_or_fail",
"(",
"self",
",",
"columns",
"=",
"None",
")",
":",
"model",
"=",
"self",
".",
"first",
"(",
"columns",
")",
"if",
"model",
"is",
"not",
"None",
":",
"return",
"model",
"raise",
"ModelNotFound",
"(",
"self",
".",
"_parent",
".",
"__class__",
")"
] | 25.307692 | 17 |
def buy(self, price, pause=0):
""" Attempts to purchase a main shop item, returns result
Uses the item's stock id and brr to navigate to the haggle page. Auotmatically downloads
the OCR image from the haggle page and attempts to crack it. Submits the haggle form with
the given price and returns if the item was successfully bought or not.
Parameters:
price (str) -- The price to buy the item for
pause (int) -- The time in seconds to pause before submitting the haggle form
Returns
bool - True if successful, false otherwise
Raises:
failedOCR
"""
pg = self.usr.getPage("http://www.neopets.com/haggle.phtml?obj_info_id=%s&stock_id=%s&brr=%s" % (self.id, self.stockid, self.brr))
form = pg.form(name="haggleform")
form['x'], form['y'] = self.crackOCR(StringIO.StringIO(self.usr.getPage("http://www.neopets.com" + form.image).content))
form['current_offer'] = price
if pause != 0:
time.sleep(pause)
pg = form.submit()
if "I accept" in pg.content:
return True
elif "You must select the correct pet" in pg.content:
logging.getLogger("neolib.item").exception("Failed to crack OCR")
raise failedOCR
else:
return False | [
"def",
"buy",
"(",
"self",
",",
"price",
",",
"pause",
"=",
"0",
")",
":",
"pg",
"=",
"self",
".",
"usr",
".",
"getPage",
"(",
"\"http://www.neopets.com/haggle.phtml?obj_info_id=%s&stock_id=%s&brr=%s\"",
"%",
"(",
"self",
".",
"id",
",",
"self",
".",
"stockid",
",",
"self",
".",
"brr",
")",
")",
"form",
"=",
"pg",
".",
"form",
"(",
"name",
"=",
"\"haggleform\"",
")",
"form",
"[",
"'x'",
"]",
",",
"form",
"[",
"'y'",
"]",
"=",
"self",
".",
"crackOCR",
"(",
"StringIO",
".",
"StringIO",
"(",
"self",
".",
"usr",
".",
"getPage",
"(",
"\"http://www.neopets.com\"",
"+",
"form",
".",
"image",
")",
".",
"content",
")",
")",
"form",
"[",
"'current_offer'",
"]",
"=",
"price",
"if",
"pause",
"!=",
"0",
":",
"time",
".",
"sleep",
"(",
"pause",
")",
"pg",
"=",
"form",
".",
"submit",
"(",
")",
"if",
"\"I accept\"",
"in",
"pg",
".",
"content",
":",
"return",
"True",
"elif",
"\"You must select the correct pet\"",
"in",
"pg",
".",
"content",
":",
"logging",
".",
"getLogger",
"(",
"\"neolib.item\"",
")",
".",
"exception",
"(",
"\"Failed to crack OCR\"",
")",
"raise",
"failedOCR",
"else",
":",
"return",
"False"
] | 39.942857 | 26.971429 |
def GetLabelFromFleetspeak(client_id):
"""Returns the primary GRR label to use for a fleetspeak client."""
res = fleetspeak_connector.CONN.outgoing.ListClients(
admin_pb2.ListClientsRequest(client_ids=[GRRIDToFleetspeakID(client_id)]))
if not res.clients or not res.clients[0].labels:
return fleetspeak_connector.unknown_label
for label in res.clients[0].labels:
if label.service_name != "client":
continue
if label.label in fleetspeak_connector.label_map:
return fleetspeak_connector.label_map[label.label]
return fleetspeak_connector.unknown_label | [
"def",
"GetLabelFromFleetspeak",
"(",
"client_id",
")",
":",
"res",
"=",
"fleetspeak_connector",
".",
"CONN",
".",
"outgoing",
".",
"ListClients",
"(",
"admin_pb2",
".",
"ListClientsRequest",
"(",
"client_ids",
"=",
"[",
"GRRIDToFleetspeakID",
"(",
"client_id",
")",
"]",
")",
")",
"if",
"not",
"res",
".",
"clients",
"or",
"not",
"res",
".",
"clients",
"[",
"0",
"]",
".",
"labels",
":",
"return",
"fleetspeak_connector",
".",
"unknown_label",
"for",
"label",
"in",
"res",
".",
"clients",
"[",
"0",
"]",
".",
"labels",
":",
"if",
"label",
".",
"service_name",
"!=",
"\"client\"",
":",
"continue",
"if",
"label",
".",
"label",
"in",
"fleetspeak_connector",
".",
"label_map",
":",
"return",
"fleetspeak_connector",
".",
"label_map",
"[",
"label",
".",
"label",
"]",
"return",
"fleetspeak_connector",
".",
"unknown_label"
] | 41.285714 | 15.357143 |
def arrow_(self, xloc, yloc, text, orientation="v", arrowstyle='->'):
"""
Returns an arrow for a chart. Params: the text, xloc and yloc are
coordinates to position the arrow. Orientation is the way to display
the arrow: possible values are ``[<, ^, >, v]``. Arrow style is the
graphic style of the arrow:
possible values: ``[-, ->, -[, -|>, <->, <|-|>]``
"""
try:
arrow = hv.Arrow(
xloc,
yloc,
text,
orientation,
arrowstyle=arrowstyle)
return arrow
except Exception as e:
self.err(e, self.arrow_, "Can not draw arrow chart") | [
"def",
"arrow_",
"(",
"self",
",",
"xloc",
",",
"yloc",
",",
"text",
",",
"orientation",
"=",
"\"v\"",
",",
"arrowstyle",
"=",
"'->'",
")",
":",
"try",
":",
"arrow",
"=",
"hv",
".",
"Arrow",
"(",
"xloc",
",",
"yloc",
",",
"text",
",",
"orientation",
",",
"arrowstyle",
"=",
"arrowstyle",
")",
"return",
"arrow",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"self",
".",
"arrow_",
",",
"\"Can not draw arrow chart\"",
")"
] | 30.833333 | 20.944444 |
def distances(a, b, shape, squared=False, axis=1):
'''
distances(a, b, (n,d)) yields a potential function whose output is equivalent to the row-norms
of reshape(a(x), (n,d)) - reshape(b(x), (n,d)).
The shape argument (n,m) may alternately be a matrix of parameter indices, as can be passed to
row_norms and col_norms.
The following optional arguments are accepted:
* squared (default: False) specifies whether the output should be the square distance or the
distance.
* axis (default: 1) specifies whether the rows (axis = 1) or columns (axis = 0) are treated
as the vectors between which the distances should be calculated.
'''
a = to_potential(a)
b = to_potential(b)
if axis == 1: return row_norms(shape, a - b, squared=squared)
else: return col_norms(shape, a - b, squared=squared) | [
"def",
"distances",
"(",
"a",
",",
"b",
",",
"shape",
",",
"squared",
"=",
"False",
",",
"axis",
"=",
"1",
")",
":",
"a",
"=",
"to_potential",
"(",
"a",
")",
"b",
"=",
"to_potential",
"(",
"b",
")",
"if",
"axis",
"==",
"1",
":",
"return",
"row_norms",
"(",
"shape",
",",
"a",
"-",
"b",
",",
"squared",
"=",
"squared",
")",
"else",
":",
"return",
"col_norms",
"(",
"shape",
",",
"a",
"-",
"b",
",",
"squared",
"=",
"squared",
")"
] | 47.5 | 30.944444 |
def profit_money(self):
"""盈利额
Returns:
[type] -- [description]
"""
return float(round(self.assets.iloc[-1] - self.assets.iloc[0], 2)) | [
"def",
"profit_money",
"(",
"self",
")",
":",
"return",
"float",
"(",
"round",
"(",
"self",
".",
"assets",
".",
"iloc",
"[",
"-",
"1",
"]",
"-",
"self",
".",
"assets",
".",
"iloc",
"[",
"0",
"]",
",",
"2",
")",
")"
] | 21.625 | 20 |
def __exhaustive_diff(self, check_list:List[dict]) -> List[List[dict]]:
''' Helper for exhaustive checks to see if there any matches at all besides the anchor
OUTPUT:
[
{
'external_ontology_row' : {},
'interlex_row' : {},
'same': {},
},
...
],
'''
def compare_rows(external_row:dict, ilx_row:dict) -> List[dict]:
''' dictionary comparator '''
def compare_values(string1:Union[str, None], string2:Union[str, None]) -> bool:
''' string comparator '''
if string1 is None or string2 is None:
return False
elif not isinstance(string1, str) or not isinstance(string2, str):
return False
elif string1.lower().strip() != string2.lower().strip():
return False
else:
return True
accepted_ilx_keys = ['label', 'definition']
local_diff = set()
for external_key, external_value in external_row.items():
if not external_value:
continue
if isinstance(external_value, list):
external_values = external_value
for external_value in external_values:
for ilx_key, ilx_value in ilx_row.items():
if ilx_key not in accepted_ilx_keys:
continue
if compare_values(external_value, ilx_value):
local_diff.add(
#((external_key, external_value), (ilx_key, ilx_value))
ilx_key # best to just have what you need and infer the rest :)
)
else:
for ilx_key, ilx_value in ilx_row.items():
if ilx_key not in accepted_ilx_keys:
continue
if compare_values(external_value, ilx_value):
local_diff.add(
#((external_key, external_value), (ilx_key, ilx_value))
ilx_key # best to just have what you need and infer the rest :)
)
local_diff = list(local_diff)
diff = {
'external_ontology_row': external_row,
'ilx_row': ilx_row,
'same': local_diff,
}
return diff
diff = []
for check_dict in check_list:
external_ontology_row = check_dict['external_ontology_row']
diff.append(
[compare_rows(external_ontology_row, ilx_row) for ilx_row in check_dict['ilx_rows']]
)
return diff | [
"def",
"__exhaustive_diff",
"(",
"self",
",",
"check_list",
":",
"List",
"[",
"dict",
"]",
")",
"->",
"List",
"[",
"List",
"[",
"dict",
"]",
"]",
":",
"def",
"compare_rows",
"(",
"external_row",
":",
"dict",
",",
"ilx_row",
":",
"dict",
")",
"->",
"List",
"[",
"dict",
"]",
":",
"''' dictionary comparator '''",
"def",
"compare_values",
"(",
"string1",
":",
"Union",
"[",
"str",
",",
"None",
"]",
",",
"string2",
":",
"Union",
"[",
"str",
",",
"None",
"]",
")",
"->",
"bool",
":",
"''' string comparator '''",
"if",
"string1",
"is",
"None",
"or",
"string2",
"is",
"None",
":",
"return",
"False",
"elif",
"not",
"isinstance",
"(",
"string1",
",",
"str",
")",
"or",
"not",
"isinstance",
"(",
"string2",
",",
"str",
")",
":",
"return",
"False",
"elif",
"string1",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"!=",
"string2",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
":",
"return",
"False",
"else",
":",
"return",
"True",
"accepted_ilx_keys",
"=",
"[",
"'label'",
",",
"'definition'",
"]",
"local_diff",
"=",
"set",
"(",
")",
"for",
"external_key",
",",
"external_value",
"in",
"external_row",
".",
"items",
"(",
")",
":",
"if",
"not",
"external_value",
":",
"continue",
"if",
"isinstance",
"(",
"external_value",
",",
"list",
")",
":",
"external_values",
"=",
"external_value",
"for",
"external_value",
"in",
"external_values",
":",
"for",
"ilx_key",
",",
"ilx_value",
"in",
"ilx_row",
".",
"items",
"(",
")",
":",
"if",
"ilx_key",
"not",
"in",
"accepted_ilx_keys",
":",
"continue",
"if",
"compare_values",
"(",
"external_value",
",",
"ilx_value",
")",
":",
"local_diff",
".",
"add",
"(",
"#((external_key, external_value), (ilx_key, ilx_value))",
"ilx_key",
"# best to just have what you need and infer the rest :)",
")",
"else",
":",
"for",
"ilx_key",
",",
"ilx_value",
"in",
"ilx_row",
".",
"items",
"(",
")",
":",
"if",
"ilx_key",
"not",
"in",
"accepted_ilx_keys",
":",
"continue",
"if",
"compare_values",
"(",
"external_value",
",",
"ilx_value",
")",
":",
"local_diff",
".",
"add",
"(",
"#((external_key, external_value), (ilx_key, ilx_value))",
"ilx_key",
"# best to just have what you need and infer the rest :)",
")",
"local_diff",
"=",
"list",
"(",
"local_diff",
")",
"diff",
"=",
"{",
"'external_ontology_row'",
":",
"external_row",
",",
"'ilx_row'",
":",
"ilx_row",
",",
"'same'",
":",
"local_diff",
",",
"}",
"return",
"diff",
"diff",
"=",
"[",
"]",
"for",
"check_dict",
"in",
"check_list",
":",
"external_ontology_row",
"=",
"check_dict",
"[",
"'external_ontology_row'",
"]",
"diff",
".",
"append",
"(",
"[",
"compare_rows",
"(",
"external_ontology_row",
",",
"ilx_row",
")",
"for",
"ilx_row",
"in",
"check_dict",
"[",
"'ilx_rows'",
"]",
"]",
")",
"return",
"diff"
] | 43.058824 | 21.735294 |
def status(self):
"""Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, exprimed in Sievert per house (uSv/h);
uSvhError -- the incertitude for the radiation dose."""
minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
cpm = self.count / minutes if minutes > 0 else 0
return dict(
duration=round(self.duration / 1000.0, 2),
cpm=round(cpm, 2),
uSvh=round(cpm / K_ALPHA, 3),
uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3)
if minutes > 0
else 0,
) | [
"def",
"status",
"(",
"self",
")",
":",
"minutes",
"=",
"min",
"(",
"self",
".",
"duration",
",",
"MAX_CPM_TIME",
")",
"/",
"1000",
"/",
"60.0",
"cpm",
"=",
"self",
".",
"count",
"/",
"minutes",
"if",
"minutes",
">",
"0",
"else",
"0",
"return",
"dict",
"(",
"duration",
"=",
"round",
"(",
"self",
".",
"duration",
"/",
"1000.0",
",",
"2",
")",
",",
"cpm",
"=",
"round",
"(",
"cpm",
",",
"2",
")",
",",
"uSvh",
"=",
"round",
"(",
"cpm",
"/",
"K_ALPHA",
",",
"3",
")",
",",
"uSvhError",
"=",
"round",
"(",
"math",
".",
"sqrt",
"(",
"self",
".",
"count",
")",
"/",
"minutes",
"/",
"K_ALPHA",
",",
"3",
")",
"if",
"minutes",
">",
"0",
"else",
"0",
",",
")"
] | 45.5625 | 17.6875 |
def _handle_object(self, name, obj):
"""
Process an object using twitter_object_handlers_lookup.
Doesn't currently do anything (as of 2017-06-16).
:param str name: Object name
:param obj: An object to be processed
:return: A dictionary of attributes
"""
if type(obj) in twitter_object_handlers_lookup:
return twitter_object_handlers_lookup[type(obj)](name, obj)
else:
return {name: obj} | [
"def",
"_handle_object",
"(",
"self",
",",
"name",
",",
"obj",
")",
":",
"if",
"type",
"(",
"obj",
")",
"in",
"twitter_object_handlers_lookup",
":",
"return",
"twitter_object_handlers_lookup",
"[",
"type",
"(",
"obj",
")",
"]",
"(",
"name",
",",
"obj",
")",
"else",
":",
"return",
"{",
"name",
":",
"obj",
"}"
] | 36.230769 | 13.769231 |
async def trio_open_connection(host, port, *, ssl=False, **kwargs):
'''
Allows connections to be made that may or may not require ssl.
Somewhat surprisingly trio doesn't have an abstraction for this like
curio even though it's fairly trivial to write. Down the line hopefully.
Args:
host (str): Network location, either by domain or IP.
port (int): The requested port.
ssl (bool or SSLContext): If False or None, SSL is not required. If
True, the context returned by trio.ssl.create_default_context will
be used. Otherwise, this may be an SSLContext object.
kwargs: A catch all to soak up curio's additional kwargs and
ignore them.
'''
import trio
if not ssl:
sock = await trio.open_tcp_stream(host, port)
else:
if isinstance(ssl, bool):
ssl_context = None
else:
ssl_context = ssl
sock = await trio.open_ssl_over_tcp_stream(host, port, ssl_context=ssl_context)
await sock.do_handshake()
sock.close = sock.aclose
return sock | [
"async",
"def",
"trio_open_connection",
"(",
"host",
",",
"port",
",",
"*",
",",
"ssl",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"trio",
"if",
"not",
"ssl",
":",
"sock",
"=",
"await",
"trio",
".",
"open_tcp_stream",
"(",
"host",
",",
"port",
")",
"else",
":",
"if",
"isinstance",
"(",
"ssl",
",",
"bool",
")",
":",
"ssl_context",
"=",
"None",
"else",
":",
"ssl_context",
"=",
"ssl",
"sock",
"=",
"await",
"trio",
".",
"open_ssl_over_tcp_stream",
"(",
"host",
",",
"port",
",",
"ssl_context",
"=",
"ssl_context",
")",
"await",
"sock",
".",
"do_handshake",
"(",
")",
"sock",
".",
"close",
"=",
"sock",
".",
"aclose",
"return",
"sock"
] | 38.357143 | 25.071429 |
def _operator(attr):
"""Defers an operator overload to `attr`.
Args:
attr: Operator attribute to use.
Returns:
Function calling operator attribute.
"""
@functools.wraps(attr)
def func(a, *args):
return attr(a.value, *args)
return func | [
"def",
"_operator",
"(",
"attr",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"attr",
")",
"def",
"func",
"(",
"a",
",",
"*",
"args",
")",
":",
"return",
"attr",
"(",
"a",
".",
"value",
",",
"*",
"args",
")",
"return",
"func"
] | 19.230769 | 18.307692 |
def assembly_plus_protons(input_file, path=True, pdb_name=None,
save_output=False, force_save=False):
"""Returns an Assembly with protons added by Reduce.
Notes
-----
Looks for a pre-existing Reduce output in the standard location before
running Reduce. If the protein contains oligosaccharides or glycans,
use reduce_correct_carbohydrates.
Parameters
----------
input_file : str or pathlib.Path
Location of file to be converted to Assembly or PDB file as string.
path : bool
Whether we are looking at a file or a pdb string. Defaults to file.
pdb_name : str
PDB ID of protein. Required if providing string not path.
save_output : bool
If True will save the generated assembly.
force_save : bool
If True will overwrite existing reduced assembly.
Returns
-------
reduced_assembly : AMPAL Assembly
Assembly of protein with protons added by Reduce.
"""
from ampal.pdb_parser import convert_pdb_to_ampal
if path:
input_path = Path(input_file)
if not pdb_name:
pdb_name = input_path.stem[:4]
reduced_path = reduce_output_path(path=input_path)
if reduced_path.exists() and not save_output and not force_save:
reduced_assembly = convert_pdb_to_ampal(
str(reduced_path), pdb_id=pdb_name)
return reduced_assembly
if save_output:
reduced_path = output_reduce(
input_file, path=path, pdb_name=pdb_name, force=force_save)
reduced_assembly = convert_pdb_to_ampal(str(reduced_path), path=True)
else:
reduce_mmol, reduce_message = run_reduce(input_file, path=path)
if not reduce_mmol:
return None
reduced_assembly = convert_pdb_to_ampal(
reduce_mmol, path=False, pdb_id=pdb_name)
return reduced_assembly | [
"def",
"assembly_plus_protons",
"(",
"input_file",
",",
"path",
"=",
"True",
",",
"pdb_name",
"=",
"None",
",",
"save_output",
"=",
"False",
",",
"force_save",
"=",
"False",
")",
":",
"from",
"ampal",
".",
"pdb_parser",
"import",
"convert_pdb_to_ampal",
"if",
"path",
":",
"input_path",
"=",
"Path",
"(",
"input_file",
")",
"if",
"not",
"pdb_name",
":",
"pdb_name",
"=",
"input_path",
".",
"stem",
"[",
":",
"4",
"]",
"reduced_path",
"=",
"reduce_output_path",
"(",
"path",
"=",
"input_path",
")",
"if",
"reduced_path",
".",
"exists",
"(",
")",
"and",
"not",
"save_output",
"and",
"not",
"force_save",
":",
"reduced_assembly",
"=",
"convert_pdb_to_ampal",
"(",
"str",
"(",
"reduced_path",
")",
",",
"pdb_id",
"=",
"pdb_name",
")",
"return",
"reduced_assembly",
"if",
"save_output",
":",
"reduced_path",
"=",
"output_reduce",
"(",
"input_file",
",",
"path",
"=",
"path",
",",
"pdb_name",
"=",
"pdb_name",
",",
"force",
"=",
"force_save",
")",
"reduced_assembly",
"=",
"convert_pdb_to_ampal",
"(",
"str",
"(",
"reduced_path",
")",
",",
"path",
"=",
"True",
")",
"else",
":",
"reduce_mmol",
",",
"reduce_message",
"=",
"run_reduce",
"(",
"input_file",
",",
"path",
"=",
"path",
")",
"if",
"not",
"reduce_mmol",
":",
"return",
"None",
"reduced_assembly",
"=",
"convert_pdb_to_ampal",
"(",
"reduce_mmol",
",",
"path",
"=",
"False",
",",
"pdb_id",
"=",
"pdb_name",
")",
"return",
"reduced_assembly"
] | 37.3 | 20.68 |
def ReadGRRUser(self, username, cursor=None):
"""Reads a user object corresponding to a given name."""
cursor.execute(
"SELECT username, password, ui_mode, canary_mode, user_type "
"FROM grr_users WHERE username_hash = %s", [mysql_utils.Hash(username)])
row = cursor.fetchone()
if row is None:
raise db.UnknownGRRUserError(username)
return self._RowToGRRUser(row) | [
"def",
"ReadGRRUser",
"(",
"self",
",",
"username",
",",
"cursor",
"=",
"None",
")",
":",
"cursor",
".",
"execute",
"(",
"\"SELECT username, password, ui_mode, canary_mode, user_type \"",
"\"FROM grr_users WHERE username_hash = %s\"",
",",
"[",
"mysql_utils",
".",
"Hash",
"(",
"username",
")",
"]",
")",
"row",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"None",
":",
"raise",
"db",
".",
"UnknownGRRUserError",
"(",
"username",
")",
"return",
"self",
".",
"_RowToGRRUser",
"(",
"row",
")"
] | 36.090909 | 19.909091 |
def merge_pot1_files(self, delete_source=True):
"""
This method is called when all the q-points have been computed.
It runs `mrgdvdb` in sequential on the local machine to produce
the final DVDB file in the outdir of the `Work`.
Args:
delete_source: True if POT1 files should be removed after (successful) merge.
Returns:
path to the output DVDB file. None if not DFPT POT file is found.
"""
natom = len(self[0].input.structure)
max_pertcase = 3 * natom
pot1_files = []
for task in self:
if not isinstance(task, DfptTask): continue
paths = task.outdir.list_filepaths(wildcard="*_POT*")
for path in paths:
# Include only atomic perturbations i.e. files whose ext <= 3 * natom
i = path.rindex("_POT")
pertcase = int(path[i+4:].replace(".nc", ""))
if pertcase <= max_pertcase:
pot1_files.append(path)
# prtpot = 0 disables the output of the DFPT POT files so an empty list is not fatal here.
if not pot1_files: return None
self.history.info("Will call mrgdvdb to merge %s files:" % len(pot1_files))
# Final DDB file will be produced in the outdir of the work.
out_dvdb = self.outdir.path_in("out_DVDB")
if len(pot1_files) == 1:
# Avoid the merge. Just move the DDB file to the outdir of the work
shutil.copy(pot1_files[0], out_dvdb)
else:
# FIXME: The merge may require a non-negligible amount of memory if lots of qpts.
# Besides there are machines such as lemaitre3 that are problematic when
# running MPI applications on the front-end
mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)
return out_dvdb | [
"def",
"merge_pot1_files",
"(",
"self",
",",
"delete_source",
"=",
"True",
")",
":",
"natom",
"=",
"len",
"(",
"self",
"[",
"0",
"]",
".",
"input",
".",
"structure",
")",
"max_pertcase",
"=",
"3",
"*",
"natom",
"pot1_files",
"=",
"[",
"]",
"for",
"task",
"in",
"self",
":",
"if",
"not",
"isinstance",
"(",
"task",
",",
"DfptTask",
")",
":",
"continue",
"paths",
"=",
"task",
".",
"outdir",
".",
"list_filepaths",
"(",
"wildcard",
"=",
"\"*_POT*\"",
")",
"for",
"path",
"in",
"paths",
":",
"# Include only atomic perturbations i.e. files whose ext <= 3 * natom",
"i",
"=",
"path",
".",
"rindex",
"(",
"\"_POT\"",
")",
"pertcase",
"=",
"int",
"(",
"path",
"[",
"i",
"+",
"4",
":",
"]",
".",
"replace",
"(",
"\".nc\"",
",",
"\"\"",
")",
")",
"if",
"pertcase",
"<=",
"max_pertcase",
":",
"pot1_files",
".",
"append",
"(",
"path",
")",
"# prtpot = 0 disables the output of the DFPT POT files so an empty list is not fatal here.",
"if",
"not",
"pot1_files",
":",
"return",
"None",
"self",
".",
"history",
".",
"info",
"(",
"\"Will call mrgdvdb to merge %s files:\"",
"%",
"len",
"(",
"pot1_files",
")",
")",
"# Final DDB file will be produced in the outdir of the work.",
"out_dvdb",
"=",
"self",
".",
"outdir",
".",
"path_in",
"(",
"\"out_DVDB\"",
")",
"if",
"len",
"(",
"pot1_files",
")",
"==",
"1",
":",
"# Avoid the merge. Just move the DDB file to the outdir of the work",
"shutil",
".",
"copy",
"(",
"pot1_files",
"[",
"0",
"]",
",",
"out_dvdb",
")",
"else",
":",
"# FIXME: The merge may require a non-negligible amount of memory if lots of qpts.",
"# Besides there are machines such as lemaitre3 that are problematic when",
"# running MPI applications on the front-end",
"mrgdvdb",
"=",
"wrappers",
".",
"Mrgdvdb",
"(",
"manager",
"=",
"self",
"[",
"0",
"]",
".",
"manager",
",",
"verbose",
"=",
"0",
")",
"mrgdvdb",
".",
"merge",
"(",
"self",
".",
"outdir",
".",
"path",
",",
"pot1_files",
",",
"out_dvdb",
",",
"delete_source",
"=",
"delete_source",
")",
"return",
"out_dvdb"
] | 43.111111 | 25.555556 |
def trim(self, start, end):
'''Removes first 'start'/'end' bases off the start/end of the sequence'''
super().trim(start, end)
self.qual = self.qual[start:len(self.qual) - end] | [
"def",
"trim",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"super",
"(",
")",
".",
"trim",
"(",
"start",
",",
"end",
")",
"self",
".",
"qual",
"=",
"self",
".",
"qual",
"[",
"start",
":",
"len",
"(",
"self",
".",
"qual",
")",
"-",
"end",
"]"
] | 49.25 | 19.75 |
def verify_id_token(id_token, audience, http=None,
cert_uri=ID_TOKEN_VERIFICATION_CERTS):
"""Verifies a signed JWT id_token.
This function requires PyOpenSSL and because of that it does not work on
App Engine.
Args:
id_token: string, A Signed JWT.
audience: string, The audience 'aud' that the token should be for.
http: httplib2.Http, instance to use to make the HTTP request. Callers
should supply an instance that has caching enabled.
cert_uri: string, URI of the certificates in JSON format to
verify the JWT against.
Returns:
The deserialized JSON in the JWT.
Raises:
oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
CryptoUnavailableError: if no crypto library is available.
"""
_require_crypto_or_die()
if http is None:
http = transport.get_cached_http()
resp, content = transport.request(http, cert_uri)
if resp.status == http_client.OK:
certs = json.loads(_helpers._from_bytes(content))
return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)
else:
raise VerifyJwtTokenError('Status code: {0}'.format(resp.status)) | [
"def",
"verify_id_token",
"(",
"id_token",
",",
"audience",
",",
"http",
"=",
"None",
",",
"cert_uri",
"=",
"ID_TOKEN_VERIFICATION_CERTS",
")",
":",
"_require_crypto_or_die",
"(",
")",
"if",
"http",
"is",
"None",
":",
"http",
"=",
"transport",
".",
"get_cached_http",
"(",
")",
"resp",
",",
"content",
"=",
"transport",
".",
"request",
"(",
"http",
",",
"cert_uri",
")",
"if",
"resp",
".",
"status",
"==",
"http_client",
".",
"OK",
":",
"certs",
"=",
"json",
".",
"loads",
"(",
"_helpers",
".",
"_from_bytes",
"(",
"content",
")",
")",
"return",
"crypt",
".",
"verify_signed_jwt_with_certs",
"(",
"id_token",
",",
"certs",
",",
"audience",
")",
"else",
":",
"raise",
"VerifyJwtTokenError",
"(",
"'Status code: {0}'",
".",
"format",
"(",
"resp",
".",
"status",
")",
")"
] | 37.9375 | 22.78125 |
def is_probably_prime(self):
"""Tests with miller-rabin
:return: True iff prime
"""
if self.is_naive_prime():
return True
# check if multiple pf low primes
for prime in LOW_PRIMES:
if self.to_int % prime == 0:
return False
# if all else fails, call rabin to determine if to_int is prime
return self.test_miller_rabin(5) | [
"def",
"is_probably_prime",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_naive_prime",
"(",
")",
":",
"return",
"True",
"# check if multiple pf low primes",
"for",
"prime",
"in",
"LOW_PRIMES",
":",
"if",
"self",
".",
"to_int",
"%",
"prime",
"==",
"0",
":",
"return",
"False",
"# if all else fails, call rabin to determine if to_int is prime",
"return",
"self",
".",
"test_miller_rabin",
"(",
"5",
")"
] | 27.466667 | 14.466667 |
def get_type_item(self, value):
"""
Converts the given value to a ``UsedVolume`` or ``SharedVolume`` tuple for attached volumes. It
accepts strings, lists, tuples, and dicts as input.
For strings and collections of a single element, the first item is considered to be an alias for lookup on the map.
It is converted to a ``SharedVolume`` tuple.
For two-element collections, the first item defines a new volume alias that can be re-used by other instances and
the second item is considered to be the mount point for the volume.
All attached volumes are considered as read-write access.
:param value: Input value for conversion.
:return: UsedVolume or SharedVolume tuple.
:rtype: UsedVolume | SharedVolume
"""
if isinstance(value, (UsedVolume, SharedVolume)):
if value.readonly:
raise ValueError("Attached volumes should not be read-only.")
return value
elif isinstance(value, six.string_types):
return SharedVolume(value)
elif isinstance(value, (list, tuple)):
v_len = len(value)
if v_len == 2:
if value[1]:
return UsedVolume(value[0], value[1])
return SharedVolume(value[0])
elif v_len == 1:
return SharedVolume(value[0])
raise ValueError("Invalid element length; only tuples and lists of length 1-2 can be converted to a "
"UsedVolume or SharedVolume tuple; found length {0}.".format(v_len))
elif isinstance(value, dict):
v_len = len(value)
if v_len == 1:
k, v = list(value.items())[0]
if k == 'name':
return SharedVolume(v)
return UsedVolume(k, v)
elif 'path' in value:
return UsedVolume(**value)
return SharedVolume(**value)
raise ValueError(
"Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__)) | [
"def",
"get_type_item",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"UsedVolume",
",",
"SharedVolume",
")",
")",
":",
"if",
"value",
".",
"readonly",
":",
"raise",
"ValueError",
"(",
"\"Attached volumes should not be read-only.\"",
")",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"return",
"SharedVolume",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"v_len",
"=",
"len",
"(",
"value",
")",
"if",
"v_len",
"==",
"2",
":",
"if",
"value",
"[",
"1",
"]",
":",
"return",
"UsedVolume",
"(",
"value",
"[",
"0",
"]",
",",
"value",
"[",
"1",
"]",
")",
"return",
"SharedVolume",
"(",
"value",
"[",
"0",
"]",
")",
"elif",
"v_len",
"==",
"1",
":",
"return",
"SharedVolume",
"(",
"value",
"[",
"0",
"]",
")",
"raise",
"ValueError",
"(",
"\"Invalid element length; only tuples and lists of length 1-2 can be converted to a \"",
"\"UsedVolume or SharedVolume tuple; found length {0}.\"",
".",
"format",
"(",
"v_len",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"v_len",
"=",
"len",
"(",
"value",
")",
"if",
"v_len",
"==",
"1",
":",
"k",
",",
"v",
"=",
"list",
"(",
"value",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"if",
"k",
"==",
"'name'",
":",
"return",
"SharedVolume",
"(",
"v",
")",
"return",
"UsedVolume",
"(",
"k",
",",
"v",
")",
"elif",
"'path'",
"in",
"value",
":",
"return",
"UsedVolume",
"(",
"*",
"*",
"value",
")",
"return",
"SharedVolume",
"(",
"*",
"*",
"value",
")",
"raise",
"ValueError",
"(",
"\"Invalid type; expected a list, tuple, dict, or string type, found {0}.\"",
".",
"format",
"(",
"type",
"(",
"value",
")",
".",
"__name__",
")",
")"
] | 48.488372 | 20.255814 |
def execute(self, command, data={}):
"""Format the endpoint url by data and then request the remote server.
Args:
command(Command): WebDriver command to be executed.
data(dict): Data fulfill the uri template and json body.
Returns:
A dict represent the json body from server response.
Raises:
KeyError: Data cannot fulfill the variable which command needed.
ConnectionError: Meet network problem (e.g. DNS failure,
refused connection, etc).
Timeout: A request times out.
HTTPError: HTTP request returned an unsuccessful status code.
"""
method, uri = command
try:
path = self._formatter.format_map(uri, data)
body = self._formatter.get_unused_kwargs()
url = "{0}{1}".format(self._url, path)
return self._request(method, url, body)
except KeyError as err:
LOGGER.debug(
'Endpoint {0} is missing argument {1}'.format(uri, err))
raise | [
"def",
"execute",
"(",
"self",
",",
"command",
",",
"data",
"=",
"{",
"}",
")",
":",
"method",
",",
"uri",
"=",
"command",
"try",
":",
"path",
"=",
"self",
".",
"_formatter",
".",
"format_map",
"(",
"uri",
",",
"data",
")",
"body",
"=",
"self",
".",
"_formatter",
".",
"get_unused_kwargs",
"(",
")",
"url",
"=",
"\"{0}{1}\"",
".",
"format",
"(",
"self",
".",
"_url",
",",
"path",
")",
"return",
"self",
".",
"_request",
"(",
"method",
",",
"url",
",",
"body",
")",
"except",
"KeyError",
"as",
"err",
":",
"LOGGER",
".",
"debug",
"(",
"'Endpoint {0} is missing argument {1}'",
".",
"format",
"(",
"uri",
",",
"err",
")",
")",
"raise"
] | 39.259259 | 20.111111 |
def print_progress(self, i, current_params):
"""
Prints the current ELBO at every decile of total iterations
"""
for split in range(1,11):
if i == (round(self.iterations/10*split)-1):
post = -self.full_neg_posterior(current_params)
approx = self.create_normal_logq(current_params)
diff = post - approx
if not self.quiet_progress:
print(str(split) + "0% done : ELBO is " + str(diff) + ", p(y,z) is " + str(post) + ", q(z) is " + str(approx)) | [
"def",
"print_progress",
"(",
"self",
",",
"i",
",",
"current_params",
")",
":",
"for",
"split",
"in",
"range",
"(",
"1",
",",
"11",
")",
":",
"if",
"i",
"==",
"(",
"round",
"(",
"self",
".",
"iterations",
"/",
"10",
"*",
"split",
")",
"-",
"1",
")",
":",
"post",
"=",
"-",
"self",
".",
"full_neg_posterior",
"(",
"current_params",
")",
"approx",
"=",
"self",
".",
"create_normal_logq",
"(",
"current_params",
")",
"diff",
"=",
"post",
"-",
"approx",
"if",
"not",
"self",
".",
"quiet_progress",
":",
"print",
"(",
"str",
"(",
"split",
")",
"+",
"\"0% done : ELBO is \"",
"+",
"str",
"(",
"diff",
")",
"+",
"\", p(y,z) is \"",
"+",
"str",
"(",
"post",
")",
"+",
"\", q(z) is \"",
"+",
"str",
"(",
"approx",
")",
")"
] | 50.727273 | 18 |
def integer_partition(size: int, nparts: int) -> Iterator[List[List[int]]]:
""" Partition a list of integers into a list of partitions """
for part in algorithm_u(range(size), nparts):
yield part | [
"def",
"integer_partition",
"(",
"size",
":",
"int",
",",
"nparts",
":",
"int",
")",
"->",
"Iterator",
"[",
"List",
"[",
"List",
"[",
"int",
"]",
"]",
"]",
":",
"for",
"part",
"in",
"algorithm_u",
"(",
"range",
"(",
"size",
")",
",",
"nparts",
")",
":",
"yield",
"part"
] | 52 | 16.5 |
def console_get_height_rect(
con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str
) -> int:
"""Return the height of this text once word-wrapped into this rectangle.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.get_height_rect` instead.
"""
return int(
lib.TCOD_console_get_height_rect_fmt(
_console(con), x, y, w, h, _fmt(fmt)
)
) | [
"def",
"console_get_height_rect",
"(",
"con",
":",
"tcod",
".",
"console",
".",
"Console",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
",",
"w",
":",
"int",
",",
"h",
":",
"int",
",",
"fmt",
":",
"str",
")",
"->",
"int",
":",
"return",
"int",
"(",
"lib",
".",
"TCOD_console_get_height_rect_fmt",
"(",
"_console",
"(",
"con",
")",
",",
"x",
",",
"y",
",",
"w",
",",
"h",
",",
"_fmt",
"(",
"fmt",
")",
")",
")"
] | 28.625 | 20.8125 |
def send(self, msg):
"""Send `data` to `handle`, and tell the broker we have output. May
be called from any thread."""
self._router.broker.defer(self._send, msg) | [
"def",
"send",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"_router",
".",
"broker",
".",
"defer",
"(",
"self",
".",
"_send",
",",
"msg",
")"
] | 45.5 | 7.5 |
def _burstColumn(cls, connections, random, lastUsedIterationForSegment,
column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, cellsForColumn,
numActivePotentialSynapsesForSegment, iteration,
maxNewSynapseCount, initialPermanence, permanenceIncrement,
permanenceDecrement, maxSegmentsPerCell,
maxSynapsesPerSegment, learn):
"""
:param connections: (Object)
Connections for the TM. Gets mutated.
:param random: (Object)
Random number generator. Gets mutated.
:param lastUsedIterationForSegment: (list)
Last used iteration for each segment, indexed by the segment's flatIdx.
Gets mutated.
:param column: (int)
Index of bursting column.
:param columnMatchingSegments: (iter)
Matching segments in this column.
:param prevActiveCells: (list)
Active cells in `t-1`.
:param prevWinnerCells: (list)
Winner cells in `t-1`.
:param cellsForColumn: (sequence)
Range of cell indices on which to operate.
:param numActivePotentialSynapsesForSegment: (list)
Number of active potential synapses per segment, indexed by the segment's
flatIdx.
:param iteration: (int)
The current timestep.
:param maxNewSynapseCount: (int)
The maximum number of synapses added to a segment during learning.
:param initialPermanence: (float)
Initial permanence of a new synapse.
:param permanenceIncrement: (float)
Amount by which permanences of synapses are incremented during learning.
:param permanenceDecrement: (float)
Amount by which permanences of synapses are decremented during learning.
:param maxSegmentsPerCell: (int)
The maximum number of segments per cell.
:param maxSynapsesPerSegment: (int)
The maximum number of synapses per segment.
:param learn: (bool)
Whether or not learning is enabled.
:returns: (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
Pseudocode:
mark all cells as active
if there are any matching distal dendrite segments
find the most active matching segment
mark its cell as a winner cell
(learning)
grow and reinforce synapses to previous winner cells
else
find the cell with the least segments, mark it as a winner cell
(learning)
(optimization) if there are prev winner cells
add a segment to this winner cell
grow synapses to previous winner cells
"""
if columnMatchingSegments is not None:
numActive = lambda s: numActivePotentialSynapsesForSegment[s.flatIdx]
bestMatchingSegment = max(columnMatchingSegments, key=numActive)
winnerCell = bestMatchingSegment.cell
if learn:
cls._adaptSegment(connections, bestMatchingSegment, prevActiveCells,
permanenceIncrement, permanenceDecrement)
nGrowDesired = maxNewSynapseCount - numActive(bestMatchingSegment)
if nGrowDesired > 0:
cls._growSynapses(connections, random, bestMatchingSegment,
nGrowDesired, prevWinnerCells, initialPermanence,
maxSynapsesPerSegment)
else:
winnerCell = cls._leastUsedCell(random, cellsForColumn, connections)
if learn:
nGrowExact = min(maxNewSynapseCount, len(prevWinnerCells))
if nGrowExact > 0:
segment = cls._createSegment(connections,
lastUsedIterationForSegment, winnerCell,
iteration, maxSegmentsPerCell)
cls._growSynapses(connections, random, segment, nGrowExact,
prevWinnerCells, initialPermanence,
maxSynapsesPerSegment)
return cellsForColumn, winnerCell | [
"def",
"_burstColumn",
"(",
"cls",
",",
"connections",
",",
"random",
",",
"lastUsedIterationForSegment",
",",
"column",
",",
"columnMatchingSegments",
",",
"prevActiveCells",
",",
"prevWinnerCells",
",",
"cellsForColumn",
",",
"numActivePotentialSynapsesForSegment",
",",
"iteration",
",",
"maxNewSynapseCount",
",",
"initialPermanence",
",",
"permanenceIncrement",
",",
"permanenceDecrement",
",",
"maxSegmentsPerCell",
",",
"maxSynapsesPerSegment",
",",
"learn",
")",
":",
"if",
"columnMatchingSegments",
"is",
"not",
"None",
":",
"numActive",
"=",
"lambda",
"s",
":",
"numActivePotentialSynapsesForSegment",
"[",
"s",
".",
"flatIdx",
"]",
"bestMatchingSegment",
"=",
"max",
"(",
"columnMatchingSegments",
",",
"key",
"=",
"numActive",
")",
"winnerCell",
"=",
"bestMatchingSegment",
".",
"cell",
"if",
"learn",
":",
"cls",
".",
"_adaptSegment",
"(",
"connections",
",",
"bestMatchingSegment",
",",
"prevActiveCells",
",",
"permanenceIncrement",
",",
"permanenceDecrement",
")",
"nGrowDesired",
"=",
"maxNewSynapseCount",
"-",
"numActive",
"(",
"bestMatchingSegment",
")",
"if",
"nGrowDesired",
">",
"0",
":",
"cls",
".",
"_growSynapses",
"(",
"connections",
",",
"random",
",",
"bestMatchingSegment",
",",
"nGrowDesired",
",",
"prevWinnerCells",
",",
"initialPermanence",
",",
"maxSynapsesPerSegment",
")",
"else",
":",
"winnerCell",
"=",
"cls",
".",
"_leastUsedCell",
"(",
"random",
",",
"cellsForColumn",
",",
"connections",
")",
"if",
"learn",
":",
"nGrowExact",
"=",
"min",
"(",
"maxNewSynapseCount",
",",
"len",
"(",
"prevWinnerCells",
")",
")",
"if",
"nGrowExact",
">",
"0",
":",
"segment",
"=",
"cls",
".",
"_createSegment",
"(",
"connections",
",",
"lastUsedIterationForSegment",
",",
"winnerCell",
",",
"iteration",
",",
"maxSegmentsPerCell",
")",
"cls",
".",
"_growSynapses",
"(",
"connections",
",",
"random",
",",
"segment",
",",
"nGrowExact",
",",
"prevWinnerCells",
",",
"initialPermanence",
",",
"maxSynapsesPerSegment",
")",
"return",
"cellsForColumn",
",",
"winnerCell"
] | 35.616822 | 20.981308 |
def backward_delete_char(self, e): # (Rubout)
u"""Delete the character behind the cursor. A numeric argument means
to kill the characters instead of deleting them."""
self.l_buffer.backward_delete_char(self.argument_reset)
self.finalize() | [
"def",
"backward_delete_char",
"(",
"self",
",",
"e",
")",
":",
"# (Rubout)\r",
"self",
".",
"l_buffer",
".",
"backward_delete_char",
"(",
"self",
".",
"argument_reset",
")",
"self",
".",
"finalize",
"(",
")"
] | 54 | 9.4 |
def shutdown(self):
"""
Unconditionally shuts the TendrilManager down, killing all
threads and closing all tendrils.
"""
super(UDPTendrilManager, self).shutdown()
# Reset the socket and socket event
self._sock = None
self._sock_event.clear() | [
"def",
"shutdown",
"(",
"self",
")",
":",
"super",
"(",
"UDPTendrilManager",
",",
"self",
")",
".",
"shutdown",
"(",
")",
"# Reset the socket and socket event",
"self",
".",
"_sock",
"=",
"None",
"self",
".",
"_sock_event",
".",
"clear",
"(",
")"
] | 27 | 14.818182 |
def fit(self, features, labels, validation_split, epochs=50):
"""
Trains the neural net on the data provided.
:param features: Numpy array of training data.
:param labels: Numpy array of target (label) data.
:param validation_split: Float between 0 and 1. Percentage of training data to use for validation
:param epochs: Max number of times to train over dataset.
"""
self.model.fit(x=features, y=labels, epochs=epochs, verbose=1,
callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)], validation_split=validation_split,
shuffle=True)
for layer in self.model.layers[:self._NUM_BOTTOM_LAYERS_TO_RETRAIN]:
layer.trainable = False
for layer in self.model.layers[self._NUM_BOTTOM_LAYERS_TO_RETRAIN:]:
layer.trainable = True
self.model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
self.model.fit(x=features, y=labels, epochs=50, verbose=1,
callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)], validation_split=validation_split,
shuffle=True) | [
"def",
"fit",
"(",
"self",
",",
"features",
",",
"labels",
",",
"validation_split",
",",
"epochs",
"=",
"50",
")",
":",
"self",
".",
"model",
".",
"fit",
"(",
"x",
"=",
"features",
",",
"y",
"=",
"labels",
",",
"epochs",
"=",
"epochs",
",",
"verbose",
"=",
"1",
",",
"callbacks",
"=",
"[",
"ReduceLROnPlateau",
"(",
")",
",",
"EarlyStopping",
"(",
"patience",
"=",
"3",
")",
"]",
",",
"validation_split",
"=",
"validation_split",
",",
"shuffle",
"=",
"True",
")",
"for",
"layer",
"in",
"self",
".",
"model",
".",
"layers",
"[",
":",
"self",
".",
"_NUM_BOTTOM_LAYERS_TO_RETRAIN",
"]",
":",
"layer",
".",
"trainable",
"=",
"False",
"for",
"layer",
"in",
"self",
".",
"model",
".",
"layers",
"[",
"self",
".",
"_NUM_BOTTOM_LAYERS_TO_RETRAIN",
":",
"]",
":",
"layer",
".",
"trainable",
"=",
"True",
"self",
".",
"model",
".",
"compile",
"(",
"optimizer",
"=",
"'sgd'",
",",
"loss",
"=",
"'categorical_crossentropy'",
",",
"metrics",
"=",
"[",
"'accuracy'",
"]",
")",
"self",
".",
"model",
".",
"fit",
"(",
"x",
"=",
"features",
",",
"y",
"=",
"labels",
",",
"epochs",
"=",
"50",
",",
"verbose",
"=",
"1",
",",
"callbacks",
"=",
"[",
"ReduceLROnPlateau",
"(",
")",
",",
"EarlyStopping",
"(",
"patience",
"=",
"3",
")",
"]",
",",
"validation_split",
"=",
"validation_split",
",",
"shuffle",
"=",
"True",
")"
] | 53.5 | 28.772727 |
def read_count_plot (self):
""" Stacked bar plot showing counts of reads """
pconfig = {
'id': 'fastqc_sequence_counts_plot',
'title': 'FastQC: Sequence Counts',
'ylab': 'Number of reads',
'cpswitch_counts_label': 'Number of reads',
'hide_zero_cats': False
}
pdata = dict()
has_dups = False
has_total = False
for s_name in self.fastqc_data:
pd = self.fastqc_data[s_name]['basic_statistics']
pdata[s_name] = dict()
try:
pdata[s_name]['Duplicate Reads'] = int(((100.0 - float(pd['total_deduplicated_percentage']))/100.0) * pd['Total Sequences'])
pdata[s_name]['Unique Reads'] = pd['Total Sequences'] - pdata[s_name]['Duplicate Reads']
has_dups = True
except KeyError:
# Older versions of FastQC don't have duplicate reads
pdata[s_name] = { 'Total Sequences': pd['Total Sequences'] }
has_total = True
pcats = list()
duptext = ''
if has_total:
pcats.append('Total Sequences')
if has_dups:
pcats.extend(['Unique Reads', 'Duplicate Reads'])
duptext = ' Duplicate read counts are an estimate only.'
if has_total and not has_dups:
pconfig['use_legend'] = False
pconfig['cpswitch'] = False
self.add_section (
name = 'Sequence Counts',
anchor = 'fastqc_sequence_counts',
description = 'Sequence counts for each sample.'+duptext,
helptext = '''
This plot show the total number of reads, broken down into unique and duplicate
if possible (only more recent versions of FastQC give duplicate info).
You can read more about duplicate calculation in the
[FastQC documentation](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/8%20Duplicate%20Sequences.html).
A small part has been copied here for convenience:
_Only sequences which first appear in the first 100,000 sequences
in each file are analysed. This should be enough to get a good impression
for the duplication levels in the whole file. Each sequence is tracked to
the end of the file to give a representative count of the overall duplication level._
_The duplication detection requires an exact sequence match over the whole length of
the sequence. Any reads over 75bp in length are truncated to 50bp for this analysis._
''',
plot = bargraph.plot(pdata, pcats, pconfig)
) | [
"def",
"read_count_plot",
"(",
"self",
")",
":",
"pconfig",
"=",
"{",
"'id'",
":",
"'fastqc_sequence_counts_plot'",
",",
"'title'",
":",
"'FastQC: Sequence Counts'",
",",
"'ylab'",
":",
"'Number of reads'",
",",
"'cpswitch_counts_label'",
":",
"'Number of reads'",
",",
"'hide_zero_cats'",
":",
"False",
"}",
"pdata",
"=",
"dict",
"(",
")",
"has_dups",
"=",
"False",
"has_total",
"=",
"False",
"for",
"s_name",
"in",
"self",
".",
"fastqc_data",
":",
"pd",
"=",
"self",
".",
"fastqc_data",
"[",
"s_name",
"]",
"[",
"'basic_statistics'",
"]",
"pdata",
"[",
"s_name",
"]",
"=",
"dict",
"(",
")",
"try",
":",
"pdata",
"[",
"s_name",
"]",
"[",
"'Duplicate Reads'",
"]",
"=",
"int",
"(",
"(",
"(",
"100.0",
"-",
"float",
"(",
"pd",
"[",
"'total_deduplicated_percentage'",
"]",
")",
")",
"/",
"100.0",
")",
"*",
"pd",
"[",
"'Total Sequences'",
"]",
")",
"pdata",
"[",
"s_name",
"]",
"[",
"'Unique Reads'",
"]",
"=",
"pd",
"[",
"'Total Sequences'",
"]",
"-",
"pdata",
"[",
"s_name",
"]",
"[",
"'Duplicate Reads'",
"]",
"has_dups",
"=",
"True",
"except",
"KeyError",
":",
"# Older versions of FastQC don't have duplicate reads",
"pdata",
"[",
"s_name",
"]",
"=",
"{",
"'Total Sequences'",
":",
"pd",
"[",
"'Total Sequences'",
"]",
"}",
"has_total",
"=",
"True",
"pcats",
"=",
"list",
"(",
")",
"duptext",
"=",
"''",
"if",
"has_total",
":",
"pcats",
".",
"append",
"(",
"'Total Sequences'",
")",
"if",
"has_dups",
":",
"pcats",
".",
"extend",
"(",
"[",
"'Unique Reads'",
",",
"'Duplicate Reads'",
"]",
")",
"duptext",
"=",
"' Duplicate read counts are an estimate only.'",
"if",
"has_total",
"and",
"not",
"has_dups",
":",
"pconfig",
"[",
"'use_legend'",
"]",
"=",
"False",
"pconfig",
"[",
"'cpswitch'",
"]",
"=",
"False",
"self",
".",
"add_section",
"(",
"name",
"=",
"'Sequence Counts'",
",",
"anchor",
"=",
"'fastqc_sequence_counts'",
",",
"description",
"=",
"'Sequence counts for each sample.'",
"+",
"duptext",
",",
"helptext",
"=",
"'''\n This plot show the total number of reads, broken down into unique and duplicate\n if possible (only more recent versions of FastQC give duplicate info).\n\n You can read more about duplicate calculation in the\n [FastQC documentation](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/8%20Duplicate%20Sequences.html).\n A small part has been copied here for convenience:\n\n _Only sequences which first appear in the first 100,000 sequences\n in each file are analysed. This should be enough to get a good impression\n for the duplication levels in the whole file. Each sequence is tracked to\n the end of the file to give a representative count of the overall duplication level._\n\n _The duplication detection requires an exact sequence match over the whole length of\n the sequence. Any reads over 75bp in length are truncated to 50bp for this analysis._\n '''",
",",
"plot",
"=",
"bargraph",
".",
"plot",
"(",
"pdata",
",",
"pcats",
",",
"pconfig",
")",
")"
] | 48.745455 | 25.436364 |
def stack1d(*points):
"""Fill out the columns of matrix with a series of points.
This is because ``np.hstack()`` will just make another 1D vector
out of them and ``np.vstack()`` will put them in the rows.
Args:
points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.
arrays with shape ``(2,)``.
Returns:
numpy.ndarray: The array with each point in ``points`` as its
columns.
"""
result = np.empty((2, len(points)), order="F")
for index, point in enumerate(points):
result[:, index] = point
return result | [
"def",
"stack1d",
"(",
"*",
"points",
")",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"(",
"2",
",",
"len",
"(",
"points",
")",
")",
",",
"order",
"=",
"\"F\"",
")",
"for",
"index",
",",
"point",
"in",
"enumerate",
"(",
"points",
")",
":",
"result",
"[",
":",
",",
"index",
"]",
"=",
"point",
"return",
"result"
] | 31.888889 | 20.722222 |
def add_contact_to_group(self, contact, group):
"""Add contact to group
:param contact: name or contact object
:param group: name or group object
:type contact: ``str``, ``unicode``, ``dict``
:type group: ``str``, ``unicode``, ``dict``
:rtype: ``bool``
"""
if isinstance(contact, basestring):
contact = self.get_contact(contact)
if isinstance(group, basestring):
group = self.get_group(group)
method, url = get_URL('contacts_add_to_group')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'contactid': contact['contactid'],
'contactgroupid': group['contactgroupid']
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res) | [
"def",
"add_contact_to_group",
"(",
"self",
",",
"contact",
",",
"group",
")",
":",
"if",
"isinstance",
"(",
"contact",
",",
"basestring",
")",
":",
"contact",
"=",
"self",
".",
"get_contact",
"(",
"contact",
")",
"if",
"isinstance",
"(",
"group",
",",
"basestring",
")",
":",
"group",
"=",
"self",
".",
"get_group",
"(",
"group",
")",
"method",
",",
"url",
"=",
"get_URL",
"(",
"'contacts_add_to_group'",
")",
"payload",
"=",
"{",
"'apikey'",
":",
"self",
".",
"config",
".",
"get",
"(",
"'apikey'",
")",
",",
"'logintoken'",
":",
"self",
".",
"session",
".",
"cookies",
".",
"get",
"(",
"'logintoken'",
")",
",",
"'contactid'",
":",
"contact",
"[",
"'contactid'",
"]",
",",
"'contactgroupid'",
":",
"group",
"[",
"'contactgroupid'",
"]",
"}",
"res",
"=",
"getattr",
"(",
"self",
".",
"session",
",",
"method",
")",
"(",
"url",
",",
"params",
"=",
"payload",
")",
"if",
"res",
".",
"status_code",
"==",
"200",
":",
"return",
"True",
"hellraiser",
"(",
"res",
")"
] | 29.645161 | 18.225806 |
def execute(self, *args, **kwargs):
"""
See :py:func:`silverberg.client.CQLClient.execute`
"""
num_clients = len(self._seed_clients)
start_client = (self._client_idx + 1) % num_clients
def _client_error(failure, client_i):
failure.trap(ConnectError)
client_i = (client_i + 1) % num_clients
if client_i == start_client:
return failure
else:
return _try_execute(client_i)
def _try_execute(client_i):
self._client_idx = client_i
d = self._seed_clients[client_i].execute(*args, **kwargs)
return d.addErrback(_client_error, client_i)
return _try_execute(start_client) | [
"def",
"execute",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"num_clients",
"=",
"len",
"(",
"self",
".",
"_seed_clients",
")",
"start_client",
"=",
"(",
"self",
".",
"_client_idx",
"+",
"1",
")",
"%",
"num_clients",
"def",
"_client_error",
"(",
"failure",
",",
"client_i",
")",
":",
"failure",
".",
"trap",
"(",
"ConnectError",
")",
"client_i",
"=",
"(",
"client_i",
"+",
"1",
")",
"%",
"num_clients",
"if",
"client_i",
"==",
"start_client",
":",
"return",
"failure",
"else",
":",
"return",
"_try_execute",
"(",
"client_i",
")",
"def",
"_try_execute",
"(",
"client_i",
")",
":",
"self",
".",
"_client_idx",
"=",
"client_i",
"d",
"=",
"self",
".",
"_seed_clients",
"[",
"client_i",
"]",
".",
"execute",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"d",
".",
"addErrback",
"(",
"_client_error",
",",
"client_i",
")",
"return",
"_try_execute",
"(",
"start_client",
")"
] | 34.52381 | 13.095238 |
def clear(self):
"""Remove all elements from this set.
>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
"""
super(NGram, self).clear()
self._grams = {}
self.length = {} | [
"def",
"clear",
"(",
"self",
")",
":",
"super",
"(",
"NGram",
",",
"self",
")",
".",
"clear",
"(",
")",
"self",
".",
"_grams",
"=",
"{",
"}",
"self",
".",
"length",
"=",
"{",
"}"
] | 23.5 | 14.785714 |
def render_string(self, template_name: str, **kwargs: Any) -> bytes:
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
assert frame.f_code.co_filename is not None
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace) | [
"def",
"render_string",
"(",
"self",
",",
"template_name",
":",
"str",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"bytes",
":",
"# If no template_path is specified, use the path of the calling file",
"template_path",
"=",
"self",
".",
"get_template_path",
"(",
")",
"if",
"not",
"template_path",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"0",
")",
"web_file",
"=",
"frame",
".",
"f_code",
".",
"co_filename",
"while",
"frame",
".",
"f_code",
".",
"co_filename",
"==",
"web_file",
":",
"frame",
"=",
"frame",
".",
"f_back",
"assert",
"frame",
".",
"f_code",
".",
"co_filename",
"is",
"not",
"None",
"template_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"frame",
".",
"f_code",
".",
"co_filename",
")",
"with",
"RequestHandler",
".",
"_template_loader_lock",
":",
"if",
"template_path",
"not",
"in",
"RequestHandler",
".",
"_template_loaders",
":",
"loader",
"=",
"self",
".",
"create_template_loader",
"(",
"template_path",
")",
"RequestHandler",
".",
"_template_loaders",
"[",
"template_path",
"]",
"=",
"loader",
"else",
":",
"loader",
"=",
"RequestHandler",
".",
"_template_loaders",
"[",
"template_path",
"]",
"t",
"=",
"loader",
".",
"load",
"(",
"template_name",
")",
"namespace",
"=",
"self",
".",
"get_template_namespace",
"(",
")",
"namespace",
".",
"update",
"(",
"kwargs",
")",
"return",
"t",
".",
"generate",
"(",
"*",
"*",
"namespace",
")"
] | 49.08 | 16.8 |
def set_weights(params, new_params):
"""
Copies parameters from new_params to params
:param params: dst parameters
:param new_params: src parameters
"""
for param, new_param in zip(params, new_params):
param.data.copy_(new_param.data) | [
"def",
"set_weights",
"(",
"params",
",",
"new_params",
")",
":",
"for",
"param",
",",
"new_param",
"in",
"zip",
"(",
"params",
",",
"new_params",
")",
":",
"param",
".",
"data",
".",
"copy_",
"(",
"new_param",
".",
"data",
")"
] | 31.888889 | 8.777778 |
def extract_jwt_token(self, token):
"""
Extracts a data dictionary from a jwt token
"""
# Note: we disable exp verification because we will do it ourselves
with InvalidTokenHeader.handle_errors('failed to decode JWT token'):
data = jwt.decode(
token,
self.encode_key,
algorithms=self.allowed_algorithms,
options={'verify_exp': False},
)
self._validate_jwt_data(data, access_type=AccessType.access)
return data | [
"def",
"extract_jwt_token",
"(",
"self",
",",
"token",
")",
":",
"# Note: we disable exp verification because we will do it ourselves",
"with",
"InvalidTokenHeader",
".",
"handle_errors",
"(",
"'failed to decode JWT token'",
")",
":",
"data",
"=",
"jwt",
".",
"decode",
"(",
"token",
",",
"self",
".",
"encode_key",
",",
"algorithms",
"=",
"self",
".",
"allowed_algorithms",
",",
"options",
"=",
"{",
"'verify_exp'",
":",
"False",
"}",
",",
")",
"self",
".",
"_validate_jwt_data",
"(",
"data",
",",
"access_type",
"=",
"AccessType",
".",
"access",
")",
"return",
"data"
] | 38.571429 | 15.428571 |
def create_command(self, name, operation, **kwargs):
""" Constructs the command object that can then be added to the command table """
if not isinstance(operation, six.string_types):
raise ValueError("Operation must be a string. Got '{}'".format(operation))
name = ' '.join(name.split())
client_factory = kwargs.get('client_factory', None)
def _command_handler(command_args):
op = CLICommandsLoader._get_op_handler(operation)
client = client_factory(command_args) if client_factory else None
result = op(client, **command_args) if client else op(**command_args)
return result
def arguments_loader():
return list(extract_args_from_signature(CLICommandsLoader._get_op_handler(operation),
excluded_params=self.excluded_command_handler_args))
def description_loader():
return extract_full_summary_from_signature(CLICommandsLoader._get_op_handler(operation))
kwargs['arguments_loader'] = arguments_loader
kwargs['description_loader'] = description_loader
cmd = self.command_cls(self.cli_ctx, name, _command_handler, **kwargs)
return cmd | [
"def",
"create_command",
"(",
"self",
",",
"name",
",",
"operation",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"operation",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"Operation must be a string. Got '{}'\"",
".",
"format",
"(",
"operation",
")",
")",
"name",
"=",
"' '",
".",
"join",
"(",
"name",
".",
"split",
"(",
")",
")",
"client_factory",
"=",
"kwargs",
".",
"get",
"(",
"'client_factory'",
",",
"None",
")",
"def",
"_command_handler",
"(",
"command_args",
")",
":",
"op",
"=",
"CLICommandsLoader",
".",
"_get_op_handler",
"(",
"operation",
")",
"client",
"=",
"client_factory",
"(",
"command_args",
")",
"if",
"client_factory",
"else",
"None",
"result",
"=",
"op",
"(",
"client",
",",
"*",
"*",
"command_args",
")",
"if",
"client",
"else",
"op",
"(",
"*",
"*",
"command_args",
")",
"return",
"result",
"def",
"arguments_loader",
"(",
")",
":",
"return",
"list",
"(",
"extract_args_from_signature",
"(",
"CLICommandsLoader",
".",
"_get_op_handler",
"(",
"operation",
")",
",",
"excluded_params",
"=",
"self",
".",
"excluded_command_handler_args",
")",
")",
"def",
"description_loader",
"(",
")",
":",
"return",
"extract_full_summary_from_signature",
"(",
"CLICommandsLoader",
".",
"_get_op_handler",
"(",
"operation",
")",
")",
"kwargs",
"[",
"'arguments_loader'",
"]",
"=",
"arguments_loader",
"kwargs",
"[",
"'description_loader'",
"]",
"=",
"description_loader",
"cmd",
"=",
"self",
".",
"command_cls",
"(",
"self",
".",
"cli_ctx",
",",
"name",
",",
"_command_handler",
",",
"*",
"*",
"kwargs",
")",
"return",
"cmd"
] | 45.777778 | 28.851852 |
def depth_january_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_january_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_january_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_january_average_ground_temperature`'.format(value))
self._depth_january_average_ground_temperature = value | [
"def",
"depth_january_average_ground_temperature",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'value {} need to be of type float '",
"'for field `depth_january_average_ground_temperature`'",
".",
"format",
"(",
"value",
")",
")",
"self",
".",
"_depth_january_average_ground_temperature",
"=",
"value"
] | 37.863636 | 23.363636 |
def create_historical_stream(directory, listener=None):
"""
Uses streaming listener/cache to parse betfair
historical data:
https://historicdata.betfair.com/#/home
:param str directory: Directory of betfair data
:param BaseListener listener: Listener object
:rtype: HistoricalStream
"""
listener = listener if listener else BaseListener()
listener.register_stream('HISTORICAL', 'marketSubscription')
return HistoricalStream(directory, listener) | [
"def",
"create_historical_stream",
"(",
"directory",
",",
"listener",
"=",
"None",
")",
":",
"listener",
"=",
"listener",
"if",
"listener",
"else",
"BaseListener",
"(",
")",
"listener",
".",
"register_stream",
"(",
"'HISTORICAL'",
",",
"'marketSubscription'",
")",
"return",
"HistoricalStream",
"(",
"directory",
",",
"listener",
")"
] | 37.5 | 16.5 |
def get_subparsers(parser):
'''get_subparser will get a dictionary of subparsers, to help with printing help
'''
actions = [action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
subparsers = dict()
for action in actions:
# get all subparsers and print help
for choice, subparser in action.choices.items():
subparsers[choice] = subparser
return subparsers | [
"def",
"get_subparsers",
"(",
"parser",
")",
":",
"actions",
"=",
"[",
"action",
"for",
"action",
"in",
"parser",
".",
"_actions",
"if",
"isinstance",
"(",
"action",
",",
"argparse",
".",
"_SubParsersAction",
")",
"]",
"subparsers",
"=",
"dict",
"(",
")",
"for",
"action",
"in",
"actions",
":",
"# get all subparsers and print help",
"for",
"choice",
",",
"subparser",
"in",
"action",
".",
"choices",
".",
"items",
"(",
")",
":",
"subparsers",
"[",
"choice",
"]",
"=",
"subparser",
"return",
"subparsers"
] | 31.857143 | 22.714286 |
def xmatch_external_catalogs(checkplotdict,
xmatchinfo,
xmatchradiusarcsec=2.0,
returndirect=False,
updatexmatch=True,
savepickle=None):
'''This matches the current object in the checkplotdict to all of the
external match catalogs specified.
Parameters
----------
checkplotdict : dict
This is a checkplotdict, generated by either the `checkplot_dict`
function, or read in from a `_read_checkplot_picklefile` function. This
must have a structure somewhat like the following, where the indicated
keys below are required::
{'objectid': the ID assigned to this object
'objectinfo': {'objectid': ID assigned to this object,
'ra': right ascension of the object in decimal deg,
'decl': declination of the object in decimal deg}}
xmatchinfo : str or dict
This is either the xmatch dict produced by the function
:py:func:`astrobase.checkplot.pkl_xmatch.load_xmatch_external_catalogs`
above, or the path to the xmatch info pickle file produced by that
function.
xmatchradiusarcsec : float
This is the cross-matching radius to use in arcseconds.
returndirect : bool
If this is True, will only return the xmatch results as a dict. If this
False, will return the checkplotdict with the xmatch results added in as
a key-val pair.
updatexmatch : bool
This function will look for an existing 'xmatch' key in the input
checkplotdict indicating that an xmatch has been performed before. If
`updatexmatch` is set to True, the xmatch results will be added onto
(e.g. when xmatching to additional catalogs after the first run). If
this is set to False, the xmatch key-val pair will be completely
overwritten.
savepickle : str or None
If this is None, it must be a path to where the updated checkplotdict
will be written to as a new checkplot pickle. If this is False, only the
updated checkplotdict is returned.
Returns
-------
dict or str
If `savepickle` is False, this returns a checkplotdict, with the xmatch
results added in. An 'xmatch' key will be added to this dict, with
something like the following dict as the value::
{'xmatchradiusarcsec':xmatchradiusarcsec,
'catalog1':{'name':'Catalog of interesting things',
'found':True,
'distarcsec':0.7,
'info':{'objectid':...,'ra':...,'decl':...,'desc':...}},
'catalog2':{'name':'Catalog of more interesting things',
'found':False,
'distarcsec':nan,
'info':None},
.
.
.
....}
This will contain the matches of the object in the input checkplotdict
to all of the catalogs provided in `xmatchinfo`.
If `savepickle` is True, will return the path to the saved checkplot
pickle file.
'''
# load the xmatch info
if isinstance(xmatchinfo, str) and os.path.exists(xmatchinfo):
with open(xmatchinfo,'rb') as infd:
xmatchdict = pickle.load(infd)
elif isinstance(xmatchinfo, dict):
xmatchdict = xmatchinfo
else:
LOGERROR("can't figure out xmatch info, can't xmatch, skipping...")
return checkplotdict
#
# generate the xmatch spec
#
# get our ra, decl
objra = checkplotdict['objectinfo']['ra']
objdecl = checkplotdict['objectinfo']['decl']
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
objxyz = np.column_stack((cosra*cosdecl,
sinra*cosdecl,
sindecl))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(xmatchradiusarcsec/3600.0)/2.0)
#
# now search in each external catalog
#
xmatchresults = {}
extcats = sorted(list(xmatchdict.keys()))
for ecat in extcats:
# get the kdtree
kdt = xmatchdict[ecat]['kdtree']
# look up the coordinates
kdt_dist, kdt_ind = kdt.query(objxyz,
k=1,
distance_upper_bound=xyzdist)
# sort by matchdist
mdsorted = np.argsort(kdt_dist)
matchdists = kdt_dist[mdsorted]
matchinds = kdt_ind[mdsorted]
if matchdists[np.isfinite(matchdists)].size == 0:
xmatchresults[ecat] = {'name':xmatchdict[ecat]['name'],
'desc':xmatchdict[ecat]['desc'],
'found':False,
'distarcsec':None,
'info':None}
else:
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md < xyzdist:
infodict = {}
distarcsec = _xyzdist_to_distarcsec(md)
for col in xmatchdict[ecat]['columns']:
coldata = xmatchdict[ecat]['data'][col][mi]
if isinstance(coldata, str):
coldata = coldata.strip()
infodict[col] = coldata
xmatchresults[ecat] = {
'name':xmatchdict[ecat]['name'],
'desc':xmatchdict[ecat]['desc'],
'found':True,
'distarcsec':distarcsec,
'info':infodict,
'colkeys':xmatchdict[ecat]['columns'],
'colnames':xmatchdict[ecat]['colnames'],
'colunit':xmatchdict[ecat]['colunits'],
}
break
#
# should now have match results for all external catalogs
#
if returndirect:
return xmatchresults
else:
if updatexmatch and 'xmatch' in checkplotdict:
checkplotdict['xmatch'].update(xmatchresults)
else:
checkplotdict['xmatch'] = xmatchresults
if savepickle:
cpf = _write_checkplot_picklefile(checkplotdict,
outfile=savepickle,
protocol=4)
return cpf
else:
return checkplotdict | [
"def",
"xmatch_external_catalogs",
"(",
"checkplotdict",
",",
"xmatchinfo",
",",
"xmatchradiusarcsec",
"=",
"2.0",
",",
"returndirect",
"=",
"False",
",",
"updatexmatch",
"=",
"True",
",",
"savepickle",
"=",
"None",
")",
":",
"# load the xmatch info",
"if",
"isinstance",
"(",
"xmatchinfo",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"xmatchinfo",
")",
":",
"with",
"open",
"(",
"xmatchinfo",
",",
"'rb'",
")",
"as",
"infd",
":",
"xmatchdict",
"=",
"pickle",
".",
"load",
"(",
"infd",
")",
"elif",
"isinstance",
"(",
"xmatchinfo",
",",
"dict",
")",
":",
"xmatchdict",
"=",
"xmatchinfo",
"else",
":",
"LOGERROR",
"(",
"\"can't figure out xmatch info, can't xmatch, skipping...\"",
")",
"return",
"checkplotdict",
"#",
"# generate the xmatch spec",
"#",
"# get our ra, decl",
"objra",
"=",
"checkplotdict",
"[",
"'objectinfo'",
"]",
"[",
"'ra'",
"]",
"objdecl",
"=",
"checkplotdict",
"[",
"'objectinfo'",
"]",
"[",
"'decl'",
"]",
"cosdecl",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"objdecl",
")",
")",
"sindecl",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"objdecl",
")",
")",
"cosra",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"objra",
")",
")",
"sinra",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"objra",
")",
")",
"objxyz",
"=",
"np",
".",
"column_stack",
"(",
"(",
"cosra",
"*",
"cosdecl",
",",
"sinra",
"*",
"cosdecl",
",",
"sindecl",
")",
")",
"# this is the search distance in xyz unit vectors",
"xyzdist",
"=",
"2.0",
"*",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"xmatchradiusarcsec",
"/",
"3600.0",
")",
"/",
"2.0",
")",
"#",
"# now search in each external catalog",
"#",
"xmatchresults",
"=",
"{",
"}",
"extcats",
"=",
"sorted",
"(",
"list",
"(",
"xmatchdict",
".",
"keys",
"(",
")",
")",
")",
"for",
"ecat",
"in",
"extcats",
":",
"# get the kdtree",
"kdt",
"=",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'kdtree'",
"]",
"# look up the coordinates",
"kdt_dist",
",",
"kdt_ind",
"=",
"kdt",
".",
"query",
"(",
"objxyz",
",",
"k",
"=",
"1",
",",
"distance_upper_bound",
"=",
"xyzdist",
")",
"# sort by matchdist",
"mdsorted",
"=",
"np",
".",
"argsort",
"(",
"kdt_dist",
")",
"matchdists",
"=",
"kdt_dist",
"[",
"mdsorted",
"]",
"matchinds",
"=",
"kdt_ind",
"[",
"mdsorted",
"]",
"if",
"matchdists",
"[",
"np",
".",
"isfinite",
"(",
"matchdists",
")",
"]",
".",
"size",
"==",
"0",
":",
"xmatchresults",
"[",
"ecat",
"]",
"=",
"{",
"'name'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'name'",
"]",
",",
"'desc'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'desc'",
"]",
",",
"'found'",
":",
"False",
",",
"'distarcsec'",
":",
"None",
",",
"'info'",
":",
"None",
"}",
"else",
":",
"for",
"md",
",",
"mi",
"in",
"zip",
"(",
"matchdists",
",",
"matchinds",
")",
":",
"if",
"np",
".",
"isfinite",
"(",
"md",
")",
"and",
"md",
"<",
"xyzdist",
":",
"infodict",
"=",
"{",
"}",
"distarcsec",
"=",
"_xyzdist_to_distarcsec",
"(",
"md",
")",
"for",
"col",
"in",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'columns'",
"]",
":",
"coldata",
"=",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'data'",
"]",
"[",
"col",
"]",
"[",
"mi",
"]",
"if",
"isinstance",
"(",
"coldata",
",",
"str",
")",
":",
"coldata",
"=",
"coldata",
".",
"strip",
"(",
")",
"infodict",
"[",
"col",
"]",
"=",
"coldata",
"xmatchresults",
"[",
"ecat",
"]",
"=",
"{",
"'name'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'name'",
"]",
",",
"'desc'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'desc'",
"]",
",",
"'found'",
":",
"True",
",",
"'distarcsec'",
":",
"distarcsec",
",",
"'info'",
":",
"infodict",
",",
"'colkeys'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'columns'",
"]",
",",
"'colnames'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'colnames'",
"]",
",",
"'colunit'",
":",
"xmatchdict",
"[",
"ecat",
"]",
"[",
"'colunits'",
"]",
",",
"}",
"break",
"#",
"# should now have match results for all external catalogs",
"#",
"if",
"returndirect",
":",
"return",
"xmatchresults",
"else",
":",
"if",
"updatexmatch",
"and",
"'xmatch'",
"in",
"checkplotdict",
":",
"checkplotdict",
"[",
"'xmatch'",
"]",
".",
"update",
"(",
"xmatchresults",
")",
"else",
":",
"checkplotdict",
"[",
"'xmatch'",
"]",
"=",
"xmatchresults",
"if",
"savepickle",
":",
"cpf",
"=",
"_write_checkplot_picklefile",
"(",
"checkplotdict",
",",
"outfile",
"=",
"savepickle",
",",
"protocol",
"=",
"4",
")",
"return",
"cpf",
"else",
":",
"return",
"checkplotdict"
] | 33.284264 | 23.649746 |
def _extract_options(line):
r'''Given a line as it would appear in the authorized_keys file,
return an OrderedDict of options, and the remainder of a line as a
string.
>>> Key._extract_options(r'no-pty,command="sh" ssh-rsa AAAAB3NzaC1yc2EAAA...OFy5Lwc8Lo+Jk=')
(OrderedDict([('no-pty', True), ('command', 'sh')]), 'ssh-rsa AAAAB3NzaC1yc2EAAA...OFy5Lwc8Lo+Jk=')
>>> Key._extract_options(r'ssh-rsa AAAAB3NzaC1yc...Lwc8OFy5Lo+kU=')
(OrderedDict(), 'ssh-rsa AAAAB3NzaC1yc...Lwc8OFy5Lo+kU=')
'''
options = OrderedDict({})
quoted = False
escaped = False
option_name = ''
option_val = None
key_without_options = ''
in_options = True
in_option_name = True
for letter in line.strip():
if in_options:
if quoted:
if letter == "\\":
escaped = True
elif letter == '"':
if escaped:
option_val += letter
escaped = False
else:
quoted = False
else:
if escaped:
option_val += "\\"
escaped = False
option_val += letter
else: # not quoted
if letter == ' ':
# end of options
in_options = False
if (option_name in ['ssh-rsa', 'ssh-dss'] or
option_name.startswith('ecdsa-')):
# what we thought was an option name was really the
# key type, and there are no options
key_without_options = option_name + " "
option_name = ''
else:
if option_val is None:
options[option_name] = True
else:
options[option_name] = option_val
elif letter == '"':
quoted = True
elif letter == '=':
# '=' separated option name from value
in_option_name = False
if option_val is None:
option_val = ''
elif letter == ',':
# next option_name
if option_val is None:
options[option_name] = True
else:
options[option_name] = option_val
in_option_name = True
option_name = ''
option_val = None
else: # general unquoted letter
if in_option_name:
option_name += letter
else:
option_val += letter
else:
key_without_options += letter
if key_without_options == '':
# certain mal-formed keys (e.g. a line not containing any spaces)
# will be completely swallowed up by the above parser. It's
# better to follow the principle of least surprize and return the
# original line, allowing the error to be handled later.
return OrderedDict({}), line.strip()
else:
return options, key_without_options | [
"def",
"_extract_options",
"(",
"line",
")",
":",
"options",
"=",
"OrderedDict",
"(",
"{",
"}",
")",
"quoted",
"=",
"False",
"escaped",
"=",
"False",
"option_name",
"=",
"''",
"option_val",
"=",
"None",
"key_without_options",
"=",
"''",
"in_options",
"=",
"True",
"in_option_name",
"=",
"True",
"for",
"letter",
"in",
"line",
".",
"strip",
"(",
")",
":",
"if",
"in_options",
":",
"if",
"quoted",
":",
"if",
"letter",
"==",
"\"\\\\\"",
":",
"escaped",
"=",
"True",
"elif",
"letter",
"==",
"'\"'",
":",
"if",
"escaped",
":",
"option_val",
"+=",
"letter",
"escaped",
"=",
"False",
"else",
":",
"quoted",
"=",
"False",
"else",
":",
"if",
"escaped",
":",
"option_val",
"+=",
"\"\\\\\"",
"escaped",
"=",
"False",
"option_val",
"+=",
"letter",
"else",
":",
"# not quoted",
"if",
"letter",
"==",
"' '",
":",
"# end of options",
"in_options",
"=",
"False",
"if",
"(",
"option_name",
"in",
"[",
"'ssh-rsa'",
",",
"'ssh-dss'",
"]",
"or",
"option_name",
".",
"startswith",
"(",
"'ecdsa-'",
")",
")",
":",
"# what we thought was an option name was really the",
"# key type, and there are no options",
"key_without_options",
"=",
"option_name",
"+",
"\" \"",
"option_name",
"=",
"''",
"else",
":",
"if",
"option_val",
"is",
"None",
":",
"options",
"[",
"option_name",
"]",
"=",
"True",
"else",
":",
"options",
"[",
"option_name",
"]",
"=",
"option_val",
"elif",
"letter",
"==",
"'\"'",
":",
"quoted",
"=",
"True",
"elif",
"letter",
"==",
"'='",
":",
"# '=' separated option name from value",
"in_option_name",
"=",
"False",
"if",
"option_val",
"is",
"None",
":",
"option_val",
"=",
"''",
"elif",
"letter",
"==",
"','",
":",
"# next option_name",
"if",
"option_val",
"is",
"None",
":",
"options",
"[",
"option_name",
"]",
"=",
"True",
"else",
":",
"options",
"[",
"option_name",
"]",
"=",
"option_val",
"in_option_name",
"=",
"True",
"option_name",
"=",
"''",
"option_val",
"=",
"None",
"else",
":",
"# general unquoted letter",
"if",
"in_option_name",
":",
"option_name",
"+=",
"letter",
"else",
":",
"option_val",
"+=",
"letter",
"else",
":",
"key_without_options",
"+=",
"letter",
"if",
"key_without_options",
"==",
"''",
":",
"# certain mal-formed keys (e.g. a line not containing any spaces)",
"# will be completely swallowed up by the above parser. It's",
"# better to follow the principle of least surprize and return the",
"# original line, allowing the error to be handled later.",
"return",
"OrderedDict",
"(",
"{",
"}",
")",
",",
"line",
".",
"strip",
"(",
")",
"else",
":",
"return",
"options",
",",
"key_without_options"
] | 44.4375 | 14.3125 |
def print_functions(self, d):
"""
Export all the functions to dot files
"""
for c in self.contracts:
for f in c.functions:
f.cfg_to_dot(os.path.join(d, '{}.{}.dot'.format(c.name, f.name))) | [
"def",
"print_functions",
"(",
"self",
",",
"d",
")",
":",
"for",
"c",
"in",
"self",
".",
"contracts",
":",
"for",
"f",
"in",
"c",
".",
"functions",
":",
"f",
".",
"cfg_to_dot",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'{}.{}.dot'",
".",
"format",
"(",
"c",
".",
"name",
",",
"f",
".",
"name",
")",
")",
")"
] | 35.142857 | 10.857143 |
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
"""
volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
# The virtual root file has not corresponding volume index but
# should have a location.
if volume_index is None:
location = getattr(path_spec, 'location', None)
return location is not None and location == self.LOCATION_ROOT
return (
0 <= volume_index < self._vslvm_volume_group.number_of_logical_volumes) | [
"def",
"FileEntryExistsByPathSpec",
"(",
"self",
",",
"path_spec",
")",
":",
"volume_index",
"=",
"lvm",
".",
"LVMPathSpecGetVolumeIndex",
"(",
"path_spec",
")",
"# The virtual root file has not corresponding volume index but",
"# should have a location.",
"if",
"volume_index",
"is",
"None",
":",
"location",
"=",
"getattr",
"(",
"path_spec",
",",
"'location'",
",",
"None",
")",
"return",
"location",
"is",
"not",
"None",
"and",
"location",
"==",
"self",
".",
"LOCATION_ROOT",
"return",
"(",
"0",
"<=",
"volume_index",
"<",
"self",
".",
"_vslvm_volume_group",
".",
"number_of_logical_volumes",
")"
] | 32.842105 | 21.631579 |
def linkcode_resolve(domain, info): # NOQA: C901
"""
Determine the URL corresponding to Python object
Notes
-----
From https://github.com/numpy/numpy/blob/v1.15.1/doc/source/conf.py, 7c49cfa
on Jul 31. License BSD-3. https://github.com/numpy/numpy/blob/v1.15.1/LICENSE.txt
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(cihai.__file__))
if 'dev' in about['__version__']:
return "%s/blob/master/%s/%s%s" % (
about['__github__'],
about['__package_name__'],
fn,
linespec,
)
else:
return "%s/blob/v%s/%s/%s%s" % (
about['__github__'],
about['__version__'],
about['__package_name__'],
fn,
linespec,
) | [
"def",
"linkcode_resolve",
"(",
"domain",
",",
"info",
")",
":",
"# NOQA: C901",
"if",
"domain",
"!=",
"'py'",
":",
"return",
"None",
"modname",
"=",
"info",
"[",
"'module'",
"]",
"fullname",
"=",
"info",
"[",
"'fullname'",
"]",
"submod",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"modname",
")",
"if",
"submod",
"is",
"None",
":",
"return",
"None",
"obj",
"=",
"submod",
"for",
"part",
"in",
"fullname",
".",
"split",
"(",
"'.'",
")",
":",
"try",
":",
"obj",
"=",
"getattr",
"(",
"obj",
",",
"part",
")",
"except",
"Exception",
":",
"return",
"None",
"# strip decorators, which would resolve to the source of the decorator",
"# possibly an upstream bug in getsourcefile, bpo-1764286",
"try",
":",
"unwrap",
"=",
"inspect",
".",
"unwrap",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"obj",
"=",
"unwrap",
"(",
"obj",
")",
"try",
":",
"fn",
"=",
"inspect",
".",
"getsourcefile",
"(",
"obj",
")",
"except",
"Exception",
":",
"fn",
"=",
"None",
"if",
"not",
"fn",
":",
"return",
"None",
"try",
":",
"source",
",",
"lineno",
"=",
"inspect",
".",
"getsourcelines",
"(",
"obj",
")",
"except",
"Exception",
":",
"lineno",
"=",
"None",
"if",
"lineno",
":",
"linespec",
"=",
"\"#L%d-L%d\"",
"%",
"(",
"lineno",
",",
"lineno",
"+",
"len",
"(",
"source",
")",
"-",
"1",
")",
"else",
":",
"linespec",
"=",
"\"\"",
"fn",
"=",
"relpath",
"(",
"fn",
",",
"start",
"=",
"dirname",
"(",
"cihai",
".",
"__file__",
")",
")",
"if",
"'dev'",
"in",
"about",
"[",
"'__version__'",
"]",
":",
"return",
"\"%s/blob/master/%s/%s%s\"",
"%",
"(",
"about",
"[",
"'__github__'",
"]",
",",
"about",
"[",
"'__package_name__'",
"]",
",",
"fn",
",",
"linespec",
",",
")",
"else",
":",
"return",
"\"%s/blob/v%s/%s/%s%s\"",
"%",
"(",
"about",
"[",
"'__github__'",
"]",
",",
"about",
"[",
"'__version__'",
"]",
",",
"about",
"[",
"'__package_name__'",
"]",
",",
"fn",
",",
"linespec",
",",
")"
] | 23.797101 | 21.391304 |
def serialize(self):
'''
Return a JSON string of the serialized topology
'''
return json.dumps(json_graph.node_link_data(self.__nxgraph), cls=Encoder) | [
"def",
"serialize",
"(",
"self",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"json_graph",
".",
"node_link_data",
"(",
"self",
".",
"__nxgraph",
")",
",",
"cls",
"=",
"Encoder",
")"
] | 35.6 | 26.8 |
def find_datacenter_by_name(self, si, path, name):
"""
Finds datacenter in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datacenter name to return
"""
return self.find_obj_by_path(si, path, name, self.Datacenter) | [
"def",
"find_datacenter_by_name",
"(",
"self",
",",
"si",
",",
"path",
",",
"name",
")",
":",
"return",
"self",
".",
"find_obj_by_path",
"(",
"si",
",",
"path",
",",
"name",
",",
"self",
".",
"Datacenter",
")"
] | 45.666667 | 21 |
def unique(series: pd.Series) -> pd.Series:
"""Test that the data items do not repeat."""
return ~series.duplicated(keep=False) | [
"def",
"unique",
"(",
"series",
":",
"pd",
".",
"Series",
")",
"->",
"pd",
".",
"Series",
":",
"return",
"~",
"series",
".",
"duplicated",
"(",
"keep",
"=",
"False",
")"
] | 44.333333 | 1.333333 |
def _get_dynamic_attr(self, attname, obj, default=None):
"""
Copied from django.contrib.syndication.views.Feed (v1.7.1)
"""
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr | [
"def",
"_get_dynamic_attr",
"(",
"self",
",",
"attname",
",",
"obj",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"attr",
"=",
"getattr",
"(",
"self",
",",
"attname",
")",
"except",
"AttributeError",
":",
"return",
"default",
"if",
"callable",
"(",
"attr",
")",
":",
"# Check co_argcount rather than try/excepting the function and",
"# catching the TypeError, because something inside the function",
"# may raise the TypeError. This technique is more accurate.",
"try",
":",
"code",
"=",
"six",
".",
"get_function_code",
"(",
"attr",
")",
"except",
"AttributeError",
":",
"code",
"=",
"six",
".",
"get_function_code",
"(",
"attr",
".",
"__call__",
")",
"if",
"code",
".",
"co_argcount",
"==",
"2",
":",
"# one argument is 'self'",
"return",
"attr",
"(",
"obj",
")",
"else",
":",
"return",
"attr",
"(",
")",
"return",
"attr"
] | 39.190476 | 17.095238 |
def rpush(self, key, *args):
"""Emulate rpush."""
redis_list = self._get_list(key, 'RPUSH', create=True)
# Creates the list at this key if it doesn't exist, and appends args to it
redis_list.extend(map(self._encode, args))
# Return the length of the list after the push operation
return len(redis_list) | [
"def",
"rpush",
"(",
"self",
",",
"key",
",",
"*",
"args",
")",
":",
"redis_list",
"=",
"self",
".",
"_get_list",
"(",
"key",
",",
"'RPUSH'",
",",
"create",
"=",
"True",
")",
"# Creates the list at this key if it doesn't exist, and appends args to it",
"redis_list",
".",
"extend",
"(",
"map",
"(",
"self",
".",
"_encode",
",",
"args",
")",
")",
"# Return the length of the list after the push operation",
"return",
"len",
"(",
"redis_list",
")"
] | 38.222222 | 22.222222 |
def _split_into_symbol_words(sym):
"""Split a technical looking word into a set of symbols.
This handles cases where technical words are separated by dots or
arrows, as is the convention in many programming languages.
"""
punc = r"[\s\-\*/\+\.,:\;=\)\(\[\]\{\}<>\|\?&\^\$@]"
words = [w.strip() for w in re.split(punc, sym)]
return words | [
"def",
"_split_into_symbol_words",
"(",
"sym",
")",
":",
"punc",
"=",
"r\"[\\s\\-\\*/\\+\\.,:\\;=\\)\\(\\[\\]\\{\\}<>\\|\\?&\\^\\$@]\"",
"words",
"=",
"[",
"w",
".",
"strip",
"(",
")",
"for",
"w",
"in",
"re",
".",
"split",
"(",
"punc",
",",
"sym",
")",
"]",
"return",
"words"
] | 39.666667 | 16.666667 |
def check_dependencies():
"""Make sure virtualenv is in the path."""
print 'Checking dependencies...'
if not HAS_VIRTUALENV:
print 'Virtual environment not found.'
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
run_command(['easy_install', 'virtualenv'],
die_message='easy_install failed to install virtualenv'
'\ndevelopment requires virtualenv, please'
' install it using your favorite tool')
if not run_command(['which', 'virtualenv']):
die('ERROR: virtualenv not found in path.\n\ndevelopment '
' requires virtualenv, please install it using your'
' favorite package management tool and ensure'
' virtualenv is in your path')
print 'virtualenv installation done.'
else:
die('easy_install not found.\n\nInstall easy_install'
' (python-setuptools in ubuntu) or virtualenv by hand,'
' then rerun.')
print 'dependency check done.' | [
"def",
"check_dependencies",
"(",
")",
":",
"print",
"'Checking dependencies...'",
"if",
"not",
"HAS_VIRTUALENV",
":",
"print",
"'Virtual environment not found.'",
"# Try installing it via easy_install...",
"if",
"HAS_EASY_INSTALL",
":",
"print",
"'Installing virtualenv via easy_install...'",
",",
"run_command",
"(",
"[",
"'easy_install'",
",",
"'virtualenv'",
"]",
",",
"die_message",
"=",
"'easy_install failed to install virtualenv'",
"'\\ndevelopment requires virtualenv, please'",
"' install it using your favorite tool'",
")",
"if",
"not",
"run_command",
"(",
"[",
"'which'",
",",
"'virtualenv'",
"]",
")",
":",
"die",
"(",
"'ERROR: virtualenv not found in path.\\n\\ndevelopment '",
"' requires virtualenv, please install it using your'",
"' favorite package management tool and ensure'",
"' virtualenv is in your path'",
")",
"print",
"'virtualenv installation done.'",
"else",
":",
"die",
"(",
"'easy_install not found.\\n\\nInstall easy_install'",
"' (python-setuptools in ubuntu) or virtualenv by hand,'",
"' then rerun.'",
")",
"print",
"'dependency check done.'"
] | 49.375 | 19.708333 |
def image_transformer2d_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 1
hparams.max_length = 256
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 0.2
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.bottom["targets"] = modalities.make_targets_bottom(
modalities.image_channel_embeddings_bottom)
hparams.top["targets"] = modalities.identity_top
hparams.norm_type = "layer"
hparams.layer_prepostprocess_dropout = 0.0
hparams.add_hparam("filter_size", 512) # Add new ones like this.
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("num_output_layers", 3)
hparams.add_hparam("block_size", 1)
# image size related flags
# assuming that the image has same height and width
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
# Local attention params
hparams.add_hparam("local_and_global_att", False)
hparams.add_hparam("block_length", 256)
hparams.add_hparam("block_width", 128)
# Local 2D attention params
hparams.add_hparam("query_shape", (16, 16))
hparams.add_hparam("memory_flange", (16, 32))
hparams.add_hparam("num_encoder_layers", 4)
hparams.add_hparam("num_decoder_layers", 8)
# attention type related params
hparams.add_hparam("enc_attention_type", cia.AttentionType.GLOBAL)
hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_2D)
hparams.add_hparam("block_raster_scan", False)
# multipos attention params
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("unconditional", False) # unconditional generation
# relative embedding hparams
hparams.add_hparam("shared_rel", False)
return hparams | [
"def",
"image_transformer2d_base",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"hparams",
".",
"hidden_size",
"=",
"512",
"hparams",
".",
"batch_size",
"=",
"1",
"hparams",
".",
"max_length",
"=",
"256",
"hparams",
".",
"dropout",
"=",
"0.0",
"hparams",
".",
"clip_grad_norm",
"=",
"0.",
"# i.e. no gradient clipping",
"hparams",
".",
"optimizer_adam_epsilon",
"=",
"1e-9",
"hparams",
".",
"learning_rate_decay_scheme",
"=",
"\"noam\"",
"hparams",
".",
"learning_rate",
"=",
"0.1",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"4000",
"hparams",
".",
"initializer_gain",
"=",
"0.2",
"hparams",
".",
"initializer",
"=",
"\"uniform_unit_scaling\"",
"hparams",
".",
"weight_decay",
"=",
"0.0",
"hparams",
".",
"optimizer_adam_beta1",
"=",
"0.9",
"hparams",
".",
"optimizer_adam_beta2",
"=",
"0.98",
"hparams",
".",
"label_smoothing",
"=",
"0.0",
"hparams",
".",
"bottom",
"[",
"\"targets\"",
"]",
"=",
"modalities",
".",
"make_targets_bottom",
"(",
"modalities",
".",
"image_channel_embeddings_bottom",
")",
"hparams",
".",
"top",
"[",
"\"targets\"",
"]",
"=",
"modalities",
".",
"identity_top",
"hparams",
".",
"norm_type",
"=",
"\"layer\"",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.0",
"hparams",
".",
"add_hparam",
"(",
"\"filter_size\"",
",",
"512",
")",
"# Add new ones like this.",
"# attention-related flags",
"hparams",
".",
"add_hparam",
"(",
"\"num_heads\"",
",",
"8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attention_key_channels\"",
",",
"0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attention_value_channels\"",
",",
"0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"ffn_layer\"",
",",
"\"conv_hidden_relu\"",
")",
"# All hyperparameters ending in \"dropout\" are automatically set to 0.0",
"# when not in training mode.",
"hparams",
".",
"add_hparam",
"(",
"\"attention_dropout\"",
",",
"0.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"relu_dropout\"",
",",
"0.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"pos\"",
",",
"\"timing\"",
")",
"# timing, none",
"hparams",
".",
"add_hparam",
"(",
"\"nbr_decoder_problems\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_output_layers\"",
",",
"3",
")",
"hparams",
".",
"add_hparam",
"(",
"\"block_size\"",
",",
"1",
")",
"# image size related flags",
"# assuming that the image has same height and width",
"hparams",
".",
"add_hparam",
"(",
"\"img_len\"",
",",
"32",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_channels\"",
",",
"3",
")",
"# Local attention params",
"hparams",
".",
"add_hparam",
"(",
"\"local_and_global_att\"",
",",
"False",
")",
"hparams",
".",
"add_hparam",
"(",
"\"block_length\"",
",",
"256",
")",
"hparams",
".",
"add_hparam",
"(",
"\"block_width\"",
",",
"128",
")",
"# Local 2D attention params",
"hparams",
".",
"add_hparam",
"(",
"\"query_shape\"",
",",
"(",
"16",
",",
"16",
")",
")",
"hparams",
".",
"add_hparam",
"(",
"\"memory_flange\"",
",",
"(",
"16",
",",
"32",
")",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_encoder_layers\"",
",",
"4",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_decoder_layers\"",
",",
"8",
")",
"# attention type related params",
"hparams",
".",
"add_hparam",
"(",
"\"enc_attention_type\"",
",",
"cia",
".",
"AttentionType",
".",
"GLOBAL",
")",
"hparams",
".",
"add_hparam",
"(",
"\"dec_attention_type\"",
",",
"cia",
".",
"AttentionType",
".",
"LOCAL_2D",
")",
"hparams",
".",
"add_hparam",
"(",
"\"block_raster_scan\"",
",",
"False",
")",
"# multipos attention params",
"hparams",
".",
"add_hparam",
"(",
"\"q_filter_width\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"kv_filter_width\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"unconditional\"",
",",
"False",
")",
"# unconditional generation",
"# relative embedding hparams",
"hparams",
".",
"add_hparam",
"(",
"\"shared_rel\"",
",",
"False",
")",
"return",
"hparams"
] | 38.30303 | 12.166667 |
def debug(method):
"""Decorator to debug the given method"""
def new_method(*args, **kwargs):
import pdb
try:
import pudb
except ImportError:
pudb = pdb
try:
pudb.runcall(method, *args, **kwargs)
except pdb.bdb.BdbQuit:
sys.exit('Normal quit from debugger')
new_method.__doc__ = method.__doc__
new_method.__name__ = 'debug(%s)' % method.__name__
return new_method | [
"def",
"debug",
"(",
"method",
")",
":",
"def",
"new_method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"pdb",
"try",
":",
"import",
"pudb",
"except",
"ImportError",
":",
"pudb",
"=",
"pdb",
"try",
":",
"pudb",
".",
"runcall",
"(",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"pdb",
".",
"bdb",
".",
"BdbQuit",
":",
"sys",
".",
"exit",
"(",
"'Normal quit from debugger'",
")",
"new_method",
".",
"__doc__",
"=",
"method",
".",
"__doc__",
"new_method",
".",
"__name__",
"=",
"'debug(%s)'",
"%",
"method",
".",
"__name__",
"return",
"new_method"
] | 30.466667 | 14.266667 |
def set_inteface_up(ifindex, auth, url, devid=None, devip=None):
"""
function takest devid and ifindex of specific device and interface and issues a RESTFUL call
to "undo shut" the specified interface on the target device.
:param devid: int or str value of the target device
:param devip: ipv4 address of the target devices
:param ifindex: int or str value of the target interface
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: HTTP status code 204 with no values.
:rype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10')
204
>>> int_up_response = set_inteface_up( '9', auth.creds, auth.url, devid = '10')
>>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10')
204
>>> int_up_response = set_inteface_up('9', auth.creds, auth.url, devip = '10.101.0.221')
>>> assert type(int_up_response) is int
>>> assert int_up_response is 204
"""
if devip is not None:
devid = get_dev_details(devip, auth, url)['id']
set_int_up_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/up"
f_url = url + set_int_up_url
try:
response = requests.put(f_url, auth=auth, headers=HEADERS)
if response.status_code == 204:
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " set_inteface_up: An Error has occured" | [
"def",
"set_inteface_up",
"(",
"ifindex",
",",
"auth",
",",
"url",
",",
"devid",
"=",
"None",
",",
"devip",
"=",
"None",
")",
":",
"if",
"devip",
"is",
"not",
"None",
":",
"devid",
"=",
"get_dev_details",
"(",
"devip",
",",
"auth",
",",
"url",
")",
"[",
"'id'",
"]",
"set_int_up_url",
"=",
"\"/imcrs/plat/res/device/\"",
"+",
"str",
"(",
"devid",
")",
"+",
"\"/interface/\"",
"+",
"str",
"(",
"ifindex",
")",
"+",
"\"/up\"",
"f_url",
"=",
"url",
"+",
"set_int_up_url",
"try",
":",
"response",
"=",
"requests",
".",
"put",
"(",
"f_url",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"HEADERS",
")",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"return",
"response",
".",
"status_code",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"error",
":",
"return",
"\"Error:\\n\"",
"+",
"str",
"(",
"error",
")",
"+",
"\" set_inteface_up: An Error has occured\""
] | 34.86 | 29.66 |
def derive_toctree_rst(self, current_file):
"""
Generate the rst content::
.. toctree::
args ...
example.rst
...
:param current_file:
:return:
"""
TAB = " " * 4
lines = list()
lines.append(".. toctree::")
for opt in TocTree.option_spec:
value = self.options.get(opt)
if value is not None:
lines.append(("{}:{}: {}".format(TAB, opt, value)).rstrip())
lines.append("")
append_ahead = "append_ahead" in self.options
if append_ahead:
for line in list(self.content):
lines.append(TAB + line)
article_folder = ArticleFolder(dir_path=Path(current_file).parent.abspath)
for af in article_folder.sub_article_folders:
line = "{}{} <{}>".format(TAB, af.title, af.rel_path)
lines.append(line)
append_behind = not append_ahead
if append_behind:
for line in list(self.content):
lines.append(TAB + line)
lines.append("")
return "\n".join(lines) | [
"def",
"derive_toctree_rst",
"(",
"self",
",",
"current_file",
")",
":",
"TAB",
"=",
"\" \"",
"*",
"4",
"lines",
"=",
"list",
"(",
")",
"lines",
".",
"append",
"(",
"\".. toctree::\"",
")",
"for",
"opt",
"in",
"TocTree",
".",
"option_spec",
":",
"value",
"=",
"self",
".",
"options",
".",
"get",
"(",
"opt",
")",
"if",
"value",
"is",
"not",
"None",
":",
"lines",
".",
"append",
"(",
"(",
"\"{}:{}: {}\"",
".",
"format",
"(",
"TAB",
",",
"opt",
",",
"value",
")",
")",
".",
"rstrip",
"(",
")",
")",
"lines",
".",
"append",
"(",
"\"\"",
")",
"append_ahead",
"=",
"\"append_ahead\"",
"in",
"self",
".",
"options",
"if",
"append_ahead",
":",
"for",
"line",
"in",
"list",
"(",
"self",
".",
"content",
")",
":",
"lines",
".",
"append",
"(",
"TAB",
"+",
"line",
")",
"article_folder",
"=",
"ArticleFolder",
"(",
"dir_path",
"=",
"Path",
"(",
"current_file",
")",
".",
"parent",
".",
"abspath",
")",
"for",
"af",
"in",
"article_folder",
".",
"sub_article_folders",
":",
"line",
"=",
"\"{}{} <{}>\"",
".",
"format",
"(",
"TAB",
",",
"af",
".",
"title",
",",
"af",
".",
"rel_path",
")",
"lines",
".",
"append",
"(",
"line",
")",
"append_behind",
"=",
"not",
"append_ahead",
"if",
"append_behind",
":",
"for",
"line",
"in",
"list",
"(",
"self",
".",
"content",
")",
":",
"lines",
".",
"append",
"(",
"TAB",
"+",
"line",
")",
"lines",
".",
"append",
"(",
"\"\"",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")"
] | 28.769231 | 16.871795 |
def query(starttime, endtime, output=None, *filenames):
'''Given a time range and input file, query creates a new file with only
that subset of data. If no outfile name is given, the new file name is the
old file name with the time range appended.
Args:
starttime:
The datetime of the beginning time range to be extracted from the files.
endtime:
The datetime of the end of the time range to be extracted from the files.
output:
Optional: The output file name. Defaults to
[first filename in filenames][starttime]-[endtime].pcap
filenames:
A tuple of one or more file names to extract data from.
'''
if not output:
output = (filenames[0].replace('.pcap','') + starttime.isoformat() + '-' + endtime.isoformat() + '.pcap')
else:
output = output
with open(output,'w') as outfile:
for filename in filenames:
log.info("pcap.query: processing %s..." % filename)
with open(filename, 'r') as stream:
for header, packet in stream:
if packet is not None:
if header.timestamp >= starttime and header.timestamp <= endtime:
outfile.write(packet, header=header) | [
"def",
"query",
"(",
"starttime",
",",
"endtime",
",",
"output",
"=",
"None",
",",
"*",
"filenames",
")",
":",
"if",
"not",
"output",
":",
"output",
"=",
"(",
"filenames",
"[",
"0",
"]",
".",
"replace",
"(",
"'.pcap'",
",",
"''",
")",
"+",
"starttime",
".",
"isoformat",
"(",
")",
"+",
"'-'",
"+",
"endtime",
".",
"isoformat",
"(",
")",
"+",
"'.pcap'",
")",
"else",
":",
"output",
"=",
"output",
"with",
"open",
"(",
"output",
",",
"'w'",
")",
"as",
"outfile",
":",
"for",
"filename",
"in",
"filenames",
":",
"log",
".",
"info",
"(",
"\"pcap.query: processing %s...\"",
"%",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"stream",
":",
"for",
"header",
",",
"packet",
"in",
"stream",
":",
"if",
"packet",
"is",
"not",
"None",
":",
"if",
"header",
".",
"timestamp",
">=",
"starttime",
"and",
"header",
".",
"timestamp",
"<=",
"endtime",
":",
"outfile",
".",
"write",
"(",
"packet",
",",
"header",
"=",
"header",
")"
] | 42.7 | 26.433333 |
def format_original_error(self):
"""Return the typical "TypeError: blah blah" for the original wrapped
error.
"""
# TODO eventually we'll have sass-specific errors that will want nicer
# "names" in browser display and stderr
return "".join((
type(self.exc).__name__, ": ", six.text_type(self.exc), "\n",
)) | [
"def",
"format_original_error",
"(",
"self",
")",
":",
"# TODO eventually we'll have sass-specific errors that will want nicer",
"# \"names\" in browser display and stderr",
"return",
"\"\"",
".",
"join",
"(",
"(",
"type",
"(",
"self",
".",
"exc",
")",
".",
"__name__",
",",
"\": \"",
",",
"six",
".",
"text_type",
"(",
"self",
".",
"exc",
")",
",",
"\"\\n\"",
",",
")",
")"
] | 40.666667 | 17.555556 |
def calc_requiredremoterelease_v1(self):
"""Guess the required release necessary to not fall below the threshold
value at a cross section far downstream with a certain level of certainty.
Required control parameter:
|RemoteDischargeSafety|
Required derived parameters:
|RemoteDischargeSmoothPar|
|dam_derived.TOY|
Required flux sequence:
|RemoteDemand|
|RemoteFailure|
Calculated flux sequence:
|RequiredRemoteRelease|
Basic equation:
:math:`RequiredRemoteRelease = RemoteDemand + RemoteDischargeSafety
\\cdot smooth_{logistic1}(RemoteFailure, RemoteDischargeSmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
As in the examples above, define a short simulation time period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Prepare the dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> derived.toy.update()
Define a safety factor of 0.5 m³/s for the summer months and
no safety factor at all for the winter months:
>>> remotedischargesafety(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.remotedischargesmoothpar.update()
Assume the actual demand at the cross section downsstream has actually
been estimated to be 2 m³/s:
>>> fluxes.remotedemand = 2.0
Prepare a test function, that calculates the required discharge
based on the parameter values defined above and for a "remote
failure" values ranging between -4 and 4 m³/s:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_requiredremoterelease_v1,
... last_example=9,
... parseqs=(fluxes.remotefailure,
... fluxes.requiredremoterelease))
>>> test.nexts.remotefailure = range(-4, 5)
On May 31, the safety factor is 0 m³/s. Hence no discharge is
added to the estimated remote demand of 2 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.0 |
| 3 | -2.0 | 2.0 |
| 4 | -1.0 | 2.0 |
| 5 | 0.0 | 2.0 |
| 6 | 1.0 | 2.0 |
| 7 | 2.0 | 2.0 |
| 8 | 3.0 | 2.0 |
| 9 | 4.0 | 2.0 |
On April 1, the safety factor is 1 m³/s. If the remote failure was
exactly zero in the past, meaning the control of the dam was perfect,
only 0.5 m³/s are added to the estimated remote demand of 2 m³/s.
If the actual recharge did actually fall below the threshold value,
up to 1 m³/s is added. If the the actual discharge exceeded the
threshold value by 2 or 3 m³/s, virtually nothing is added:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | remotefailure | requiredremoterelease |
-----------------------------------------------
| 1 | -4.0 | 2.0 |
| 2 | -3.0 | 2.000001 |
| 3 | -2.0 | 2.000102 |
| 4 | -1.0 | 2.01 |
| 5 | 0.0 | 2.5 |
| 6 | 1.0 | 2.99 |
| 7 | 2.0 | 2.999898 |
| 8 | 3.0 | 2.999999 |
| 9 | 4.0 | 3.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
flu.requiredremoterelease = (
flu.remotedemand+con.remotedischargesafety[der.toy[self.idx_sim]] *
smoothutils.smooth_logistic1(
flu.remotefailure,
der.remotedischargesmoothpar[der.toy[self.idx_sim]])) | [
"def",
"calc_requiredremoterelease_v1",
"(",
"self",
")",
":",
"con",
"=",
"self",
".",
"parameters",
".",
"control",
".",
"fastaccess",
"der",
"=",
"self",
".",
"parameters",
".",
"derived",
".",
"fastaccess",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"flu",
".",
"requiredremoterelease",
"=",
"(",
"flu",
".",
"remotedemand",
"+",
"con",
".",
"remotedischargesafety",
"[",
"der",
".",
"toy",
"[",
"self",
".",
"idx_sim",
"]",
"]",
"*",
"smoothutils",
".",
"smooth_logistic1",
"(",
"flu",
".",
"remotefailure",
",",
"der",
".",
"remotedischargesmoothpar",
"[",
"der",
".",
"toy",
"[",
"self",
".",
"idx_sim",
"]",
"]",
")",
")"
] | 39.574074 | 21.703704 |
def _normalize_key(self, key, unknown_ok=True):
"""Return the normalized version of KEY.
KEY may be a frameid (a string), or a Frame class object.
If KEY corresponds to a registered frameid, then that frameid is returned.
Otherwise, either KeyError is raised, or KEY is returned verbatim,
depending on the value of UNKNOWN_OK.
"""
if Frames.is_frame_class(key):
key = key.frameid
if isinstance(key, str):
if not self._is_frame_id(key):
raise KeyError("{0}: Invalid frame id".format(key))
if key not in self.known_frames:
if unknown_ok:
warn("{0}: Unknown frame id".format(key), UnknownFrameWarning)
else:
raise KeyError("{0}: Unknown frame id".format(key))
return key | [
"def",
"_normalize_key",
"(",
"self",
",",
"key",
",",
"unknown_ok",
"=",
"True",
")",
":",
"if",
"Frames",
".",
"is_frame_class",
"(",
"key",
")",
":",
"key",
"=",
"key",
".",
"frameid",
"if",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"if",
"not",
"self",
".",
"_is_frame_id",
"(",
"key",
")",
":",
"raise",
"KeyError",
"(",
"\"{0}: Invalid frame id\"",
".",
"format",
"(",
"key",
")",
")",
"if",
"key",
"not",
"in",
"self",
".",
"known_frames",
":",
"if",
"unknown_ok",
":",
"warn",
"(",
"\"{0}: Unknown frame id\"",
".",
"format",
"(",
"key",
")",
",",
"UnknownFrameWarning",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"{0}: Unknown frame id\"",
".",
"format",
"(",
"key",
")",
")",
"return",
"key"
] | 47.055556 | 16.222222 |
def best_hits(self):
"""
returns a dict with query => best mapped position
"""
self.quality_sort()
best_hits = dict((query, next(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
self.ref_sort()
return best_hits | [
"def",
"best_hits",
"(",
"self",
")",
":",
"self",
".",
"quality_sort",
"(",
")",
"best_hits",
"=",
"dict",
"(",
"(",
"query",
",",
"next",
"(",
"blines",
")",
")",
"for",
"(",
"query",
",",
"blines",
")",
"in",
"groupby",
"(",
"self",
",",
"lambda",
"x",
":",
"x",
".",
"query",
")",
")",
"self",
".",
"ref_sort",
"(",
")",
"return",
"best_hits"
] | 25.083333 | 20.916667 |
def random_state(self):
"""
Generates a random state of the Markov Chain.
Return Type:
------------
List of namedtuples, representing a random assignment to all variables of the model.
Examples:
---------
>>> from pgmpy.models import MarkovChain as MC
>>> model = MC(['intel', 'diff'], [2, 3])
>>> model.random_state()
[State('diff', 2), State('intel', 1)]
"""
return [State(var, np.random.randint(self.cardinalities[var])) for var in self.variables] | [
"def",
"random_state",
"(",
"self",
")",
":",
"return",
"[",
"State",
"(",
"var",
",",
"np",
".",
"random",
".",
"randint",
"(",
"self",
".",
"cardinalities",
"[",
"var",
"]",
")",
")",
"for",
"var",
"in",
"self",
".",
"variables",
"]"
] | 33.8125 | 21.3125 |
def convert_invalid_url(url):
"""Convert invalid url with adding extra 'http://' schema into it
:param url:
:return:
"""
regex_valid_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return url if regex_valid_url.match(url) else 'http://{}'.format(url) | [
"def",
"convert_invalid_url",
"(",
"url",
")",
":",
"regex_valid_url",
"=",
"re",
".",
"compile",
"(",
"r'^(?:http|ftp)s?://'",
"# http:// or https://",
"r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'",
"#domain...",
"r'localhost|'",
"#localhost...",
"r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'",
"# ...or ip",
"r'(?::\\d+)?'",
"# optional port",
"r'(?:/?|[/?]\\S+)$'",
",",
"re",
".",
"IGNORECASE",
")",
"return",
"url",
"if",
"regex_valid_url",
".",
"match",
"(",
"url",
")",
"else",
"'http://{}'",
".",
"format",
"(",
"url",
")"
] | 37.466667 | 19.066667 |
def _to_dict(self, include=None, exclude=None):
"""Return a dict containing the entity's property values.
Args:
include: Optional set of property names to include, default all.
exclude: Optional set of property names to skip, default none.
A name contained in both include and exclude is excluded.
"""
if (include is not None and
not isinstance(include, (list, tuple, set, frozenset))):
raise TypeError('include should be a list, tuple or set')
if (exclude is not None and
not isinstance(exclude, (list, tuple, set, frozenset))):
raise TypeError('exclude should be a list, tuple or set')
values = {}
for prop in self._properties.itervalues():
name = prop._code_name
if include is not None and name not in include:
continue
if exclude is not None and name in exclude:
continue
try:
values[name] = prop._get_for_dict(self)
except UnprojectedPropertyError:
pass # Ignore unprojected properties rather than failing.
return values | [
"def",
"_to_dict",
"(",
"self",
",",
"include",
"=",
"None",
",",
"exclude",
"=",
"None",
")",
":",
"if",
"(",
"include",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"include",
",",
"(",
"list",
",",
"tuple",
",",
"set",
",",
"frozenset",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'include should be a list, tuple or set'",
")",
"if",
"(",
"exclude",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"exclude",
",",
"(",
"list",
",",
"tuple",
",",
"set",
",",
"frozenset",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'exclude should be a list, tuple or set'",
")",
"values",
"=",
"{",
"}",
"for",
"prop",
"in",
"self",
".",
"_properties",
".",
"itervalues",
"(",
")",
":",
"name",
"=",
"prop",
".",
"_code_name",
"if",
"include",
"is",
"not",
"None",
"and",
"name",
"not",
"in",
"include",
":",
"continue",
"if",
"exclude",
"is",
"not",
"None",
"and",
"name",
"in",
"exclude",
":",
"continue",
"try",
":",
"values",
"[",
"name",
"]",
"=",
"prop",
".",
"_get_for_dict",
"(",
"self",
")",
"except",
"UnprojectedPropertyError",
":",
"pass",
"# Ignore unprojected properties rather than failing.",
"return",
"values"
] | 40.153846 | 18.230769 |
def visit_importfrom(self, node):
'''triggered when a from statement is seen'''
if self.process_module:
if not node.modname.startswith('salt'):
return
# Store salt imported modules
for module, import_as in node.names:
if import_as and import_as not in self.imported_salt_modules:
self.imported_salt_modules[import_as] = import_as
continue
if module not in self.imported_salt_modules:
self.imported_salt_modules[module] = module | [
"def",
"visit_importfrom",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"process_module",
":",
"if",
"not",
"node",
".",
"modname",
".",
"startswith",
"(",
"'salt'",
")",
":",
"return",
"# Store salt imported modules",
"for",
"module",
",",
"import_as",
"in",
"node",
".",
"names",
":",
"if",
"import_as",
"and",
"import_as",
"not",
"in",
"self",
".",
"imported_salt_modules",
":",
"self",
".",
"imported_salt_modules",
"[",
"import_as",
"]",
"=",
"import_as",
"continue",
"if",
"module",
"not",
"in",
"self",
".",
"imported_salt_modules",
":",
"self",
".",
"imported_salt_modules",
"[",
"module",
"]",
"=",
"module"
] | 48 | 15.666667 |
def ref(self, ref):
"""Get a reference pointed to by ``ref``.
The most common will be branches and tags. For a branch, you must
specify 'heads/branchname' and for a tag, 'tags/tagname'. Essentially,
the system should return any reference you provide it in the namespace,
including notes and stashes (provided they exist on the server).
:param str ref: (required)
:returns: :class:`Reference <github3.git.Reference>`
"""
json = None
if ref:
url = self._build_url('git', 'refs', ref, base_url=self._api)
json = self._json(self._get(url), 200)
return Reference(json, self) if json else None | [
"def",
"ref",
"(",
"self",
",",
"ref",
")",
":",
"json",
"=",
"None",
"if",
"ref",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'git'",
",",
"'refs'",
",",
"ref",
",",
"base_url",
"=",
"self",
".",
"_api",
")",
"json",
"=",
"self",
".",
"_json",
"(",
"self",
".",
"_get",
"(",
"url",
")",
",",
"200",
")",
"return",
"Reference",
"(",
"json",
",",
"self",
")",
"if",
"json",
"else",
"None"
] | 42.875 | 23.25 |
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
**kwargs):
"""Pause a system service.
Stop it, and prevent it from starting again at boot.
:param service_name: the name of the service to pause
:param init_dir: path to the upstart init directory
:param initd_dir: path to the sysv init directory
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems which do not support
key=value arguments via the commandline.
"""
stopped = True
if service_running(service_name, **kwargs):
stopped = service_stop(service_name, **kwargs)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('disable', service_name)
service('mask', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped | [
"def",
"service_pause",
"(",
"service_name",
",",
"init_dir",
"=",
"\"/etc/init\"",
",",
"initd_dir",
"=",
"\"/etc/init.d\"",
",",
"*",
"*",
"kwargs",
")",
":",
"stopped",
"=",
"True",
"if",
"service_running",
"(",
"service_name",
",",
"*",
"*",
"kwargs",
")",
":",
"stopped",
"=",
"service_stop",
"(",
"service_name",
",",
"*",
"*",
"kwargs",
")",
"upstart_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"init_dir",
",",
"\"{}.conf\"",
".",
"format",
"(",
"service_name",
")",
")",
"sysv_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"initd_dir",
",",
"service_name",
")",
"if",
"init_is_systemd",
"(",
")",
":",
"service",
"(",
"'disable'",
",",
"service_name",
")",
"service",
"(",
"'mask'",
",",
"service_name",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"upstart_file",
")",
":",
"override_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"init_dir",
",",
"'{}.override'",
".",
"format",
"(",
"service_name",
")",
")",
"with",
"open",
"(",
"override_path",
",",
"'w'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"\"manual\\n\"",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"sysv_file",
")",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"\"update-rc.d\"",
",",
"service_name",
",",
"\"disable\"",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unable to detect {0} as SystemD, Upstart {1} or\"",
"\" SysV {2}\"",
".",
"format",
"(",
"service_name",
",",
"upstart_file",
",",
"sysv_file",
")",
")",
"return",
"stopped"
] | 43.722222 | 17.527778 |
def _validate_jp2c(self, boxes):
"""Validate the codestream box in relation to other boxes."""
# jp2c must be preceeded by jp2h
jp2h_lst = [idx for (idx, box) in enumerate(boxes)
if box.box_id == 'jp2h']
jp2h_idx = jp2h_lst[0]
jp2c_lst = [idx for (idx, box) in enumerate(boxes)
if box.box_id == 'jp2c']
if len(jp2c_lst) == 0:
msg = ("A codestream box must be defined in the outermost "
"list of boxes.")
raise IOError(msg)
jp2c_idx = jp2c_lst[0]
if jp2h_idx >= jp2c_idx:
msg = "The codestream box must be preceeded by a jp2 header box."
raise IOError(msg) | [
"def",
"_validate_jp2c",
"(",
"self",
",",
"boxes",
")",
":",
"# jp2c must be preceeded by jp2h",
"jp2h_lst",
"=",
"[",
"idx",
"for",
"(",
"idx",
",",
"box",
")",
"in",
"enumerate",
"(",
"boxes",
")",
"if",
"box",
".",
"box_id",
"==",
"'jp2h'",
"]",
"jp2h_idx",
"=",
"jp2h_lst",
"[",
"0",
"]",
"jp2c_lst",
"=",
"[",
"idx",
"for",
"(",
"idx",
",",
"box",
")",
"in",
"enumerate",
"(",
"boxes",
")",
"if",
"box",
".",
"box_id",
"==",
"'jp2c'",
"]",
"if",
"len",
"(",
"jp2c_lst",
")",
"==",
"0",
":",
"msg",
"=",
"(",
"\"A codestream box must be defined in the outermost \"",
"\"list of boxes.\"",
")",
"raise",
"IOError",
"(",
"msg",
")",
"jp2c_idx",
"=",
"jp2c_lst",
"[",
"0",
"]",
"if",
"jp2h_idx",
">=",
"jp2c_idx",
":",
"msg",
"=",
"\"The codestream box must be preceeded by a jp2 header box.\"",
"raise",
"IOError",
"(",
"msg",
")"
] | 41.823529 | 13.058824 |
def get_model_indexes(model):
"""Return list of all indexes in which a model is configured.
A model may be configured to appear in multiple indexes. This function
will return the names of the indexes as a list of strings. This is
useful if you want to know which indexes need updating when a model
is saved.
Args:
model: a Django model class.
"""
indexes = []
for index in get_index_names():
for app_model in get_index_models(index):
if app_model == model:
indexes.append(index)
return indexes | [
"def",
"get_model_indexes",
"(",
"model",
")",
":",
"indexes",
"=",
"[",
"]",
"for",
"index",
"in",
"get_index_names",
"(",
")",
":",
"for",
"app_model",
"in",
"get_index_models",
"(",
"index",
")",
":",
"if",
"app_model",
"==",
"model",
":",
"indexes",
".",
"append",
"(",
"index",
")",
"return",
"indexes"
] | 31.277778 | 19.833333 |
def founditem_modify_view(request, item_id=None):
"""Modify a founditem.
id: founditem id
"""
if request.method == "POST":
founditem = get_object_or_404(FoundItem, id=item_id)
form = FoundItemForm(request.POST, instance=founditem)
if form.is_valid():
obj = form.save()
logger.debug(form.cleaned_data)
# SAFE HTML
obj.description = safe_html(obj.description)
obj.save()
messages.success(request, "Successfully modified found item.")
return redirect("founditem_view", obj.id)
else:
messages.error(request, "Error adding found item.")
else:
founditem = get_object_or_404(FoundItem, id=item_id)
form = FoundItemForm(instance=founditem)
context = {"form": form, "action": "modify", "id": item_id, "founditem": founditem}
return render(request, "lostfound/founditem_form.html", context) | [
"def",
"founditem_modify_view",
"(",
"request",
",",
"item_id",
"=",
"None",
")",
":",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"founditem",
"=",
"get_object_or_404",
"(",
"FoundItem",
",",
"id",
"=",
"item_id",
")",
"form",
"=",
"FoundItemForm",
"(",
"request",
".",
"POST",
",",
"instance",
"=",
"founditem",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"obj",
"=",
"form",
".",
"save",
"(",
")",
"logger",
".",
"debug",
"(",
"form",
".",
"cleaned_data",
")",
"# SAFE HTML",
"obj",
".",
"description",
"=",
"safe_html",
"(",
"obj",
".",
"description",
")",
"obj",
".",
"save",
"(",
")",
"messages",
".",
"success",
"(",
"request",
",",
"\"Successfully modified found item.\"",
")",
"return",
"redirect",
"(",
"\"founditem_view\"",
",",
"obj",
".",
"id",
")",
"else",
":",
"messages",
".",
"error",
"(",
"request",
",",
"\"Error adding found item.\"",
")",
"else",
":",
"founditem",
"=",
"get_object_or_404",
"(",
"FoundItem",
",",
"id",
"=",
"item_id",
")",
"form",
"=",
"FoundItemForm",
"(",
"instance",
"=",
"founditem",
")",
"context",
"=",
"{",
"\"form\"",
":",
"form",
",",
"\"action\"",
":",
"\"modify\"",
",",
"\"id\"",
":",
"item_id",
",",
"\"founditem\"",
":",
"founditem",
"}",
"return",
"render",
"(",
"request",
",",
"\"lostfound/founditem_form.html\"",
",",
"context",
")"
] | 37.24 | 20.32 |
def get_converted_image_name(image):
"""Return the name of the image after it has been converted to png format.
Strips off the old extension.
:param: image (string): The fullpath of the image before conversion
:return: converted_image (string): the fullpath of the image after convert
"""
png_extension = '.png'
if image[(0 - len(png_extension)):] == png_extension:
# it already ends in png! we're golden
return image
img_dir = os.path.split(image)[0]
image = os.path.split(image)[-1]
# cut off the old extension
if len(image.split('.')) > 1:
old_extension = '.' + image.split('.')[-1]
converted_image = image[:(0 - len(old_extension))] + png_extension
else:
# no extension... damn
converted_image = image + png_extension
return os.path.join(img_dir, converted_image) | [
"def",
"get_converted_image_name",
"(",
"image",
")",
":",
"png_extension",
"=",
"'.png'",
"if",
"image",
"[",
"(",
"0",
"-",
"len",
"(",
"png_extension",
")",
")",
":",
"]",
"==",
"png_extension",
":",
"# it already ends in png! we're golden",
"return",
"image",
"img_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"image",
")",
"[",
"0",
"]",
"image",
"=",
"os",
".",
"path",
".",
"split",
"(",
"image",
")",
"[",
"-",
"1",
"]",
"# cut off the old extension",
"if",
"len",
"(",
"image",
".",
"split",
"(",
"'.'",
")",
")",
">",
"1",
":",
"old_extension",
"=",
"'.'",
"+",
"image",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"converted_image",
"=",
"image",
"[",
":",
"(",
"0",
"-",
"len",
"(",
"old_extension",
")",
")",
"]",
"+",
"png_extension",
"else",
":",
"# no extension... damn",
"converted_image",
"=",
"image",
"+",
"png_extension",
"return",
"os",
".",
"path",
".",
"join",
"(",
"img_dir",
",",
"converted_image",
")"
] | 31.444444 | 20.074074 |
def is_applicable_selector(selector, filter):
""" Given a Selector and Filter, return True if the Selector is
compatible with the given Filter, and False if they contradict.
"""
for test in selector.allTests():
if not test.isCompatible(filter.tests):
return False
return True | [
"def",
"is_applicable_selector",
"(",
"selector",
",",
"filter",
")",
":",
"for",
"test",
"in",
"selector",
".",
"allTests",
"(",
")",
":",
"if",
"not",
"test",
".",
"isCompatible",
"(",
"filter",
".",
"tests",
")",
":",
"return",
"False",
"return",
"True"
] | 35.111111 | 13.777778 |
def clear_url(self):
"""Removes the url.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.clear_url_template
if (self.get_url_metadata().is_read_only() or
self.get_url_metadata().is_required()):
raise errors.NoAccess()
self._my_map['url'] = self._url_default | [
"def",
"clear_url",
"(",
"self",
")",
":",
"# Implemented from template for osid.repository.AssetContentForm.clear_url_template",
"if",
"(",
"self",
".",
"get_url_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
"or",
"self",
".",
"get_url_metadata",
"(",
")",
".",
"is_required",
"(",
")",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"self",
".",
"_my_map",
"[",
"'url'",
"]",
"=",
"self",
".",
"_url_default"
] | 40.461538 | 19.846154 |
def get_runs(self, sort_by=None, sort_direction=None, start=0, limit=None, query={"type": "and", "filters": []}):
"""
Return all runs in the file store.
If a run is corrupt, e.g. missing files, it is skipped.
:param sort_by: NotImplemented
:param sort_direction: NotImplemented
:param start: NotImplemented
:param limit: NotImplemented
:param query: NotImplemented
:return: FileStoreCursor
"""
all_run_ids = os.listdir(self.directory)
def run_iterator():
blacklist = set(["_sources"])
for id in all_run_ids:
if id in blacklist:
continue
try:
yield self.get(id)
except FileNotFoundError:
# An incomplete experiment is a corrupt experiment.
# Skip it for now.
# TODO
pass
count = len(all_run_ids)
return FileStoreCursor(count, run_iterator()) | [
"def",
"get_runs",
"(",
"self",
",",
"sort_by",
"=",
"None",
",",
"sort_direction",
"=",
"None",
",",
"start",
"=",
"0",
",",
"limit",
"=",
"None",
",",
"query",
"=",
"{",
"\"type\"",
":",
"\"and\"",
",",
"\"filters\"",
":",
"[",
"]",
"}",
")",
":",
"all_run_ids",
"=",
"os",
".",
"listdir",
"(",
"self",
".",
"directory",
")",
"def",
"run_iterator",
"(",
")",
":",
"blacklist",
"=",
"set",
"(",
"[",
"\"_sources\"",
"]",
")",
"for",
"id",
"in",
"all_run_ids",
":",
"if",
"id",
"in",
"blacklist",
":",
"continue",
"try",
":",
"yield",
"self",
".",
"get",
"(",
"id",
")",
"except",
"FileNotFoundError",
":",
"# An incomplete experiment is a corrupt experiment.",
"# Skip it for now.",
"# TODO",
"pass",
"count",
"=",
"len",
"(",
"all_run_ids",
")",
"return",
"FileStoreCursor",
"(",
"count",
",",
"run_iterator",
"(",
")",
")"
] | 34 | 14.6 |
def _import_PREFIXCC(keyword=""):
"""
List models from web catalog (prefix.cc) and ask which one to import
2015-10-10: originally part of main ontospy; now standalone only
2016-06-19: eliminated dependency on extras.import_web
"""
SOURCE = "http://prefix.cc/popular/all.file.vann"
options = []
printDebug("----------\nReading source...")
g = Ontospy(SOURCE, verbose=False)
for x in g.all_ontologies:
if keyword:
if keyword in unicode(x.prefix).lower() or keyword in unicode(
x.uri).lower():
options += [(unicode(x.prefix), unicode(x.uri))]
else:
options += [(unicode(x.prefix), unicode(x.uri))]
printDebug("----------\n%d results found." % len(options))
counter = 1
for x in options:
print(Fore.BLUE + Style.BRIGHT + "[%d]" % counter,
Style.RESET_ALL + x[0] + " ==> ", Fore.RED + x[1],
Style.RESET_ALL)
# print(Fore.BLUE + x[0], " ==> ", x[1])
counter += 1
while True:
var = input(Style.BRIGHT + "=====\nSelect ID to import: (q=quit)\n" +
Style.RESET_ALL)
if var == "q":
break
else:
try:
_id = int(var)
ontouri = options[_id - 1][1]
print(Fore.RED + "\n---------\n" + ontouri + "\n---------" +
Style.RESET_ALL)
action_analyze([ontouri])
if click.confirm(
'=====\nDo you want to save to your local library?'):
action_import(ontouri)
return
except:
print("Error retrieving file. Import failed.")
continue | [
"def",
"_import_PREFIXCC",
"(",
"keyword",
"=",
"\"\"",
")",
":",
"SOURCE",
"=",
"\"http://prefix.cc/popular/all.file.vann\"",
"options",
"=",
"[",
"]",
"printDebug",
"(",
"\"----------\\nReading source...\"",
")",
"g",
"=",
"Ontospy",
"(",
"SOURCE",
",",
"verbose",
"=",
"False",
")",
"for",
"x",
"in",
"g",
".",
"all_ontologies",
":",
"if",
"keyword",
":",
"if",
"keyword",
"in",
"unicode",
"(",
"x",
".",
"prefix",
")",
".",
"lower",
"(",
")",
"or",
"keyword",
"in",
"unicode",
"(",
"x",
".",
"uri",
")",
".",
"lower",
"(",
")",
":",
"options",
"+=",
"[",
"(",
"unicode",
"(",
"x",
".",
"prefix",
")",
",",
"unicode",
"(",
"x",
".",
"uri",
")",
")",
"]",
"else",
":",
"options",
"+=",
"[",
"(",
"unicode",
"(",
"x",
".",
"prefix",
")",
",",
"unicode",
"(",
"x",
".",
"uri",
")",
")",
"]",
"printDebug",
"(",
"\"----------\\n%d results found.\"",
"%",
"len",
"(",
"options",
")",
")",
"counter",
"=",
"1",
"for",
"x",
"in",
"options",
":",
"print",
"(",
"Fore",
".",
"BLUE",
"+",
"Style",
".",
"BRIGHT",
"+",
"\"[%d]\"",
"%",
"counter",
",",
"Style",
".",
"RESET_ALL",
"+",
"x",
"[",
"0",
"]",
"+",
"\" ==> \"",
",",
"Fore",
".",
"RED",
"+",
"x",
"[",
"1",
"]",
",",
"Style",
".",
"RESET_ALL",
")",
"# print(Fore.BLUE + x[0], \" ==> \", x[1])\r",
"counter",
"+=",
"1",
"while",
"True",
":",
"var",
"=",
"input",
"(",
"Style",
".",
"BRIGHT",
"+",
"\"=====\\nSelect ID to import: (q=quit)\\n\"",
"+",
"Style",
".",
"RESET_ALL",
")",
"if",
"var",
"==",
"\"q\"",
":",
"break",
"else",
":",
"try",
":",
"_id",
"=",
"int",
"(",
"var",
")",
"ontouri",
"=",
"options",
"[",
"_id",
"-",
"1",
"]",
"[",
"1",
"]",
"print",
"(",
"Fore",
".",
"RED",
"+",
"\"\\n---------\\n\"",
"+",
"ontouri",
"+",
"\"\\n---------\"",
"+",
"Style",
".",
"RESET_ALL",
")",
"action_analyze",
"(",
"[",
"ontouri",
"]",
")",
"if",
"click",
".",
"confirm",
"(",
"'=====\\nDo you want to save to your local library?'",
")",
":",
"action_import",
"(",
"ontouri",
")",
"return",
"except",
":",
"print",
"(",
"\"Error retrieving file. Import failed.\"",
")",
"continue"
] | 36.020408 | 19.285714 |
def create_app(self, app_id, app, minimal=True):
"""Create and start an app.
:param str app_id: application ID
:param :class:`marathon.models.app.MarathonApp` app: the application to create
:param bool minimal: ignore nulls and empty collections
:returns: the created app (on success)
:rtype: :class:`marathon.models.app.MarathonApp` or False
"""
app.id = app_id
data = app.to_json(minimal=minimal)
response = self._do_request('POST', '/v2/apps', data=data)
if response.status_code == 201:
return self._parse_response(response, MarathonApp)
else:
return False | [
"def",
"create_app",
"(",
"self",
",",
"app_id",
",",
"app",
",",
"minimal",
"=",
"True",
")",
":",
"app",
".",
"id",
"=",
"app_id",
"data",
"=",
"app",
".",
"to_json",
"(",
"minimal",
"=",
"minimal",
")",
"response",
"=",
"self",
".",
"_do_request",
"(",
"'POST'",
",",
"'/v2/apps'",
",",
"data",
"=",
"data",
")",
"if",
"response",
".",
"status_code",
"==",
"201",
":",
"return",
"self",
".",
"_parse_response",
"(",
"response",
",",
"MarathonApp",
")",
"else",
":",
"return",
"False"
] | 39.117647 | 17.705882 |
def form(**kwargs: Question):
"""Create a form with multiple questions.
The parameter name of a question will be the key for the answer in
the returned dict."""
return Form(*(FormField(k, q) for k, q in kwargs.items())) | [
"def",
"form",
"(",
"*",
"*",
"kwargs",
":",
"Question",
")",
":",
"return",
"Form",
"(",
"*",
"(",
"FormField",
"(",
"k",
",",
"q",
")",
"for",
"k",
",",
"q",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
")"
] | 38.5 | 17.166667 |
def encrypt(self, plaintext_data_key, encryption_context):
"""Encrypts a data key using a direct wrapping key.
:param bytes plaintext_data_key: Data key to encrypt
:param dict encryption_context: Encryption context to use in encryption
:returns: Deserialized object containing encrypted key
:rtype: aws_encryption_sdk.internal.structures.EncryptedData
"""
if self.wrapping_algorithm.encryption_type is EncryptionType.ASYMMETRIC:
if self.wrapping_key_type is EncryptionKeyType.PRIVATE:
encrypted_key = self._wrapping_key.public_key().encrypt(
plaintext=plaintext_data_key, padding=self.wrapping_algorithm.padding
)
else:
encrypted_key = self._wrapping_key.encrypt(
plaintext=plaintext_data_key, padding=self.wrapping_algorithm.padding
)
return EncryptedData(iv=None, ciphertext=encrypted_key, tag=None)
serialized_encryption_context = serialize_encryption_context(encryption_context=encryption_context)
iv = os.urandom(self.wrapping_algorithm.algorithm.iv_len)
return encrypt(
algorithm=self.wrapping_algorithm.algorithm,
key=self._derived_wrapping_key,
plaintext=plaintext_data_key,
associated_data=serialized_encryption_context,
iv=iv,
) | [
"def",
"encrypt",
"(",
"self",
",",
"plaintext_data_key",
",",
"encryption_context",
")",
":",
"if",
"self",
".",
"wrapping_algorithm",
".",
"encryption_type",
"is",
"EncryptionType",
".",
"ASYMMETRIC",
":",
"if",
"self",
".",
"wrapping_key_type",
"is",
"EncryptionKeyType",
".",
"PRIVATE",
":",
"encrypted_key",
"=",
"self",
".",
"_wrapping_key",
".",
"public_key",
"(",
")",
".",
"encrypt",
"(",
"plaintext",
"=",
"plaintext_data_key",
",",
"padding",
"=",
"self",
".",
"wrapping_algorithm",
".",
"padding",
")",
"else",
":",
"encrypted_key",
"=",
"self",
".",
"_wrapping_key",
".",
"encrypt",
"(",
"plaintext",
"=",
"plaintext_data_key",
",",
"padding",
"=",
"self",
".",
"wrapping_algorithm",
".",
"padding",
")",
"return",
"EncryptedData",
"(",
"iv",
"=",
"None",
",",
"ciphertext",
"=",
"encrypted_key",
",",
"tag",
"=",
"None",
")",
"serialized_encryption_context",
"=",
"serialize_encryption_context",
"(",
"encryption_context",
"=",
"encryption_context",
")",
"iv",
"=",
"os",
".",
"urandom",
"(",
"self",
".",
"wrapping_algorithm",
".",
"algorithm",
".",
"iv_len",
")",
"return",
"encrypt",
"(",
"algorithm",
"=",
"self",
".",
"wrapping_algorithm",
".",
"algorithm",
",",
"key",
"=",
"self",
".",
"_derived_wrapping_key",
",",
"plaintext",
"=",
"plaintext_data_key",
",",
"associated_data",
"=",
"serialized_encryption_context",
",",
"iv",
"=",
"iv",
",",
")"
] | 51.888889 | 25.518519 |
def from_table(self, table=None, fields='*', schema=None, **kwargs):
    """
    Registers a ``Table`` (and optionally a set of fields) as a source in
    this query's SELECT clause.
    :type table: str or dict or :class:`Table <querybuilder.tables.Table>`
        or :class:`Query <querybuilder.query.Query>` or
        :class:`ModelBase <django:django.db.models.base.ModelBase>`
    :param table: Source to select from: a table name string, a dict of
        {'alias': table}, a ``Table`` instance, a ``Query`` instance, or a
        django Model
    :type fields: str or tuple or list or Field
    :param fields: Field(s) to select from ``table``; defaults to '*'.
        Accepts a single field, a tuple of fields, or a list of fields,
        where each entry is a string or a ``Field`` instance
    :type schema: str
    :param schema: Placeholder for a db schema name (not implemented yet)
    :param kwargs: Extra parameters forwarded to the ``TableFactory``
        constructor
    :return: self
    :rtype: :class:`Query <querybuilder.query.Query>`
    """
    # self.mark_dirty()
    new_table = TableFactory(
        table=table,
        fields=fields,
        schema=schema,
        owner=self,
        **kwargs
    )
    self.tables.append(new_table)
    return self
"def",
"from_table",
"(",
"self",
",",
"table",
"=",
"None",
",",
"fields",
"=",
"'*'",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# self.mark_dirty()",
"self",
".",
"tables",
".",
"append",
"(",
"TableFactory",
"(",
"table",
"=",
"table",
",",
"fields",
"=",
"fields",
",",
"schema",
"=",
"schema",
",",
"owner",
"=",
"self",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"self"
] | 38.75 | 26.805556 |
def _new_type(cls, args):
    """Build a namedtuple-like subclass of *cls*.

    *args* is a sequence of field names; ``None`` entries receive no
    named accessor, only their positional slot.

    >>> x = ResultTuple._new_type([None, "bar"])
    >>> x((1, 3))
    ResultTuple(1, bar=3)
    """
    pieces = ["%r" if name is None else "%s=%%r" % name for name in args]
    # Class namespace assembled up front and handed to type(): __slots__
    # keeps instances lean, _fformat drives the repr of the new type.
    namespace = {
        "__slots__": (),
        "_fformat": "(%s)" % ", ".join(pieces),
    }
    for index, name in enumerate(args):
        if name is not None:
            namespace[name] = property(itemgetter(index))
    return type("_ResultTuple", (cls,), namespace)
"def",
"_new_type",
"(",
"cls",
",",
"args",
")",
":",
"fformat",
"=",
"[",
"\"%r\"",
"if",
"f",
"is",
"None",
"else",
"\"%s=%%r\"",
"%",
"f",
"for",
"f",
"in",
"args",
"]",
"fformat",
"=",
"\"(%s)\"",
"%",
"\", \"",
".",
"join",
"(",
"fformat",
")",
"class",
"_ResultTuple",
"(",
"cls",
")",
":",
"__slots__",
"=",
"(",
")",
"_fformat",
"=",
"fformat",
"if",
"args",
":",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"a",
"is",
"not",
"None",
":",
"vars",
"(",
")",
"[",
"a",
"]",
"=",
"property",
"(",
"itemgetter",
"(",
"i",
")",
")",
"del",
"i",
",",
"a",
"return",
"_ResultTuple"
] | 28.913043 | 18.217391 |
def patch_is_interactive():
    """Patch matplotlib's 'is_interactive' to read rcParams['interactive'].

    Replaces ``matplotlib.is_interactive`` with a shim that reports the
    current ``rcParams['interactive']`` flag, keeping the original
    implementation reachable as ``matplotlib.real_is_interactive``.
    Safe to call more than once.
    """
    matplotlib = sys.modules['matplotlib']
    def patched_is_interactive():
        return matplotlib.rcParams['interactive']
    # Save the original only on the first call; repeated patching must not
    # overwrite real_is_interactive with a previously installed shim.
    if not hasattr(matplotlib, 'real_is_interactive'):
        matplotlib.real_is_interactive = matplotlib.is_interactive
    matplotlib.is_interactive = patched_is_interactive
"def",
"patch_is_interactive",
"(",
")",
":",
"matplotlib",
"=",
"sys",
".",
"modules",
"[",
"'matplotlib'",
"]",
"def",
"patched_is_interactive",
"(",
")",
":",
"return",
"matplotlib",
".",
"rcParams",
"[",
"'interactive'",
"]",
"matplotlib",
".",
"real_is_interactive",
"=",
"matplotlib",
".",
"is_interactive",
"matplotlib",
".",
"is_interactive",
"=",
"patched_is_interactive"
] | 34.444444 | 16.333333 |
def make_label_compatible(cls, label):
    """
    Conditionally mutate and/or quote a sql column/expression label. If
    force_column_alias_quotes is set to True, return the label as a
    sqlalchemy.sql.elements.quoted_name object so the select query and the
    query results share the same case; otherwise return the mutated label
    as a regular string. When the maximum supported column name length is
    exceeded, a truncated label is produced via truncate_label().
    """
    mutated = cls.mutate_label(label)
    limit = cls.max_column_name_length
    if limit and len(mutated) > limit:
        mutated = cls.truncate_label(label)
    if not cls.force_column_alias_quotes:
        return mutated
    return quoted_name(mutated, True)
"def",
"make_label_compatible",
"(",
"cls",
",",
"label",
")",
":",
"label_mutated",
"=",
"cls",
".",
"mutate_label",
"(",
"label",
")",
"if",
"cls",
".",
"max_column_name_length",
"and",
"len",
"(",
"label_mutated",
")",
">",
"cls",
".",
"max_column_name_length",
":",
"label_mutated",
"=",
"cls",
".",
"truncate_label",
"(",
"label",
")",
"if",
"cls",
".",
"force_column_alias_quotes",
":",
"label_mutated",
"=",
"quoted_name",
"(",
"label_mutated",
",",
"True",
")",
"return",
"label_mutated"
] | 55.2 | 20.933333 |
def __find_node_by_rule(self, point, search_rule, cur_node):
    """!
    @brief Search for a node that satisfies the search rule.
    @details If no node with the specified parameters exists then None is
              returned, otherwise the matching node is returned.
    @param[in] point (list): Coordinates of the point whose node should be found.
    @param[in] search_rule (lambda): Predicate invoked to decide whether a node matches.
    @param[in] cur_node (node): Node the search starts from (tree root when None).
    @return (node) The matching node, or None when nothing satisfies the rule.
    """
    if cur_node is None:
        cur_node = self.__root
    node = cur_node
    while node:
        if node.data[node.disc] > point[node.disc]:
            # Point lies strictly to the left along this discriminator.
            node = node.left
            continue
        # Right-hand subtree: test the current node before descending.
        if search_rule(node):
            return node
        node = node.right
    return None
"def",
"__find_node_by_rule",
"(",
"self",
",",
"point",
",",
"search_rule",
",",
"cur_node",
")",
":",
"req_node",
"=",
"None",
"if",
"cur_node",
"is",
"None",
":",
"cur_node",
"=",
"self",
".",
"__root",
"while",
"cur_node",
":",
"if",
"cur_node",
".",
"data",
"[",
"cur_node",
".",
"disc",
"]",
"<=",
"point",
"[",
"cur_node",
".",
"disc",
"]",
":",
"# Check if it's required node\r",
"if",
"search_rule",
"(",
"cur_node",
")",
":",
"req_node",
"=",
"cur_node",
"break",
"cur_node",
"=",
"cur_node",
".",
"right",
"else",
":",
"cur_node",
"=",
"cur_node",
".",
"left",
"return",
"req_node"
] | 38.5 | 23.21875 |
def _generate_username(self):
    """ Generate a unique username """
    while True:
        # Candidate: a dash-stripped UUID trimmed of its final 2 chars so
        # it fits the 30-char User.username field.
        candidate = str(uuid.uuid4()).replace('-', '')[:-2]
        # Accept the candidate only when no existing user claims it;
        # collisions are extremely unlikely but handled gracefully.
        try:
            User.objects.get(username=candidate)
        except User.DoesNotExist:
            return candidate
"def",
"_generate_username",
"(",
"self",
")",
":",
"while",
"True",
":",
"# Generate a UUID username, removing dashes and the last 2 chars",
"# to make it fit into the 30 char User.username field. Gracefully",
"# handle any unlikely, but possible duplicate usernames.",
"username",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"username",
"=",
"username",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"username",
"=",
"username",
"[",
":",
"-",
"2",
"]",
"try",
":",
"User",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"username",
")",
"except",
"User",
".",
"DoesNotExist",
":",
"return",
"username"
] | 40.714286 | 16.571429 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.