text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def patch_certificate_signing_request(self, name, body, **kwargs):
"""
partially update the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_certificate_signing_request(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_certificate_signing_request_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_certificate_signing_request_with_http_info(name, body, **kwargs)
return data | [
"def",
"patch_certificate_signing_request",
"(",
"self",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"patch_certificate_signing_request_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"patch_certificate_signing_request_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 78.8 | 49.84 |
def guard_submit(obj):
"""Returns if 'submit' transition can be applied to the worksheet passed in.
By default, the target state for the 'submit' transition for a worksheet is
'to_be_verified', so this guard returns true if all the analyses assigned
to the worksheet have already been submitted. Those analyses that are in a
non-valid state (cancelled, inactive) are dismissed in the evaluation, but
at least one analysis must be in an active state (and submitted) for this
guard to return True. Otherwise, always returns False.
Note this guard depends entirely on the current status of the children.
"""
analyses = obj.getAnalyses()
if not analyses:
# An empty worksheet cannot be submitted
return False
can_submit = False
for analysis in obj.getAnalyses():
# Dismiss analyses that are not active
if not api.is_active(analysis):
continue
# Dismiss analyses that have been rejected or retracted
if api.get_workflow_status_of(analysis) in ["rejected", "retracted"]:
continue
# Worksheet cannot be submitted if there is one analysis not submitted
can_submit = ISubmitted.providedBy(analysis)
if not can_submit:
# No need to look further
return False
# This prevents the submission of the worksheet if all its analyses are in
# a detached status (rejected, retracted or cancelled)
return can_submit | [
"def",
"guard_submit",
"(",
"obj",
")",
":",
"analyses",
"=",
"obj",
".",
"getAnalyses",
"(",
")",
"if",
"not",
"analyses",
":",
"# An empty worksheet cannot be submitted",
"return",
"False",
"can_submit",
"=",
"False",
"for",
"analysis",
"in",
"obj",
".",
"getAnalyses",
"(",
")",
":",
"# Dismiss analyses that are not active",
"if",
"not",
"api",
".",
"is_active",
"(",
"analysis",
")",
":",
"continue",
"# Dismiss analyses that have been rejected or retracted",
"if",
"api",
".",
"get_workflow_status_of",
"(",
"analysis",
")",
"in",
"[",
"\"rejected\"",
",",
"\"retracted\"",
"]",
":",
"continue",
"# Worksheet cannot be submitted if there is one analysis not submitted",
"can_submit",
"=",
"ISubmitted",
".",
"providedBy",
"(",
"analysis",
")",
"if",
"not",
"can_submit",
":",
"# No need to look further",
"return",
"False",
"# This prevents the submission of the worksheet if all its analyses are in",
"# a detached status (rejected, retracted or cancelled)",
"return",
"can_submit"
] | 45.3125 | 21.28125 |
def configure_profile(msg_type, profile_name, data, auth):
"""
Create the profile entry.
Args:
:msg_type: (str) message type to create config entry.
:profile_name: (str) name of the profile entry
:data: (dict) dict values for the 'settings'
:auth: (dict) auth parameters
"""
with jsonconfig.Config("messages", indent=4) as cfg:
write_data(msg_type, profile_name, data, cfg)
write_auth(msg_type, profile_name, auth, cfg)
print("[+] Configuration entry for <" + profile_name + "> created.")
print("[+] Configuration file location: " + cfg.filename) | [
"def",
"configure_profile",
"(",
"msg_type",
",",
"profile_name",
",",
"data",
",",
"auth",
")",
":",
"with",
"jsonconfig",
".",
"Config",
"(",
"\"messages\"",
",",
"indent",
"=",
"4",
")",
"as",
"cfg",
":",
"write_data",
"(",
"msg_type",
",",
"profile_name",
",",
"data",
",",
"cfg",
")",
"write_auth",
"(",
"msg_type",
",",
"profile_name",
",",
"auth",
",",
"cfg",
")",
"print",
"(",
"\"[+] Configuration entry for <\"",
"+",
"profile_name",
"+",
"\"> created.\"",
")",
"print",
"(",
"\"[+] Configuration file location: \"",
"+",
"cfg",
".",
"filename",
")"
] | 38.0625 | 17.8125 |
def get_config(cfg, number, name, connection_name):
"""Initialize a new consumer thread, setting defaults and config values
:param dict cfg: Consumer config section from YAML File
:param int number: The identification number for the consumer
:param str name: The name of the consumer
:param str connection_name: The name of the connection):
:rtype: dict
"""
return {
'connection': cfg['Connections'][connection_name],
'consumer_name': name,
'process_name': '%s_%i_tag_%i' % (name, os.getpid(), number)
} | [
"def",
"get_config",
"(",
"cfg",
",",
"number",
",",
"name",
",",
"connection_name",
")",
":",
"return",
"{",
"'connection'",
":",
"cfg",
"[",
"'Connections'",
"]",
"[",
"connection_name",
"]",
",",
"'consumer_name'",
":",
"name",
",",
"'process_name'",
":",
"'%s_%i_tag_%i'",
"%",
"(",
"name",
",",
"os",
".",
"getpid",
"(",
")",
",",
"number",
")",
"}"
] | 39.933333 | 20.733333 |
def db(self, db_alias, shard_key=None):
"""
Получить экземпляр работы с БД
:type db_alias: basestring Альяс БД из меты
:type shard_key: Любой тип. Некоторый идентификатор, который поможет мете найти нужную шарду. Тип зависи от принимающей стороны
:rtype: DbQueryService
"""
if shard_key is None:
shard_key = ''
db_key = db_alias + '__' + str(shard_key)
if db_key not in self.__db_list:
self.__db_list[db_key] = DbQueryService(self, self.__default_headers, {"db_alias": db_alias, "dbAlias": db_alias, "shard_find_key": shard_key, "shardKey": shard_key})
return self.__db_list[db_key] | [
"def",
"db",
"(",
"self",
",",
"db_alias",
",",
"shard_key",
"=",
"None",
")",
":",
"if",
"shard_key",
"is",
"None",
":",
"shard_key",
"=",
"''",
"db_key",
"=",
"db_alias",
"+",
"'__'",
"+",
"str",
"(",
"shard_key",
")",
"if",
"db_key",
"not",
"in",
"self",
".",
"__db_list",
":",
"self",
".",
"__db_list",
"[",
"db_key",
"]",
"=",
"DbQueryService",
"(",
"self",
",",
"self",
".",
"__default_headers",
",",
"{",
"\"db_alias\"",
":",
"db_alias",
",",
"\"dbAlias\"",
":",
"db_alias",
",",
"\"shard_find_key\"",
":",
"shard_key",
",",
"\"shardKey\"",
":",
"shard_key",
"}",
")",
"return",
"self",
".",
"__db_list",
"[",
"db_key",
"]"
] | 48.142857 | 23.857143 |
def make_plot(time, config, step):
"""
create a three color and all composite images for a given time
NOTE: channel mins and maxes are currently hardcoded since this is a very specific script
:param i: the index to save the file as
:param time:
:param config:
:return:
"""
fig, ax = plt.subplots()
try:
result = Fetcher(time, products=config.products,
suvi_composite_path=config.suvi_composite_path).fetch(multithread=False)
if result:
arr = make_three_color(result, time, step, config, upper_val=(2.4, 2.4, 2.4))
else:
arr = np.zeros((1280, 1280, 3))
except ValueError:
arr = np.zeros((1280, 1280, 3))
ax.imshow(arr, origin='lower')
timestr = time.strftime("%Y-%m-%d %H:%M:%S")
fnextend = time.strftime("%Y%m%d%H%M%S")
ax.set_title(timestr)
ax.set_axis_off()
fig.savefig("three_{}.png".format(fnextend), bbox_inches='tight', dpi=300)
plt.close(fig)
channel_min = {'suvi-l2-ci094': 0,
'suvi-l2-ci131': 0,
'suvi-l2-ci171': 0,
'suvi-l2-ci195': 0,
'suvi-l2-ci284': 0,
'suvi-l2-ci304': 0}
channel_max = {'suvi-l2-ci094': 1,
'suvi-l2-ci131': 1,
'suvi-l2-ci171': 1.8,
'suvi-l2-ci195': 1.8,
'suvi-l2-ci284': 1.8,
'suvi-l2-ci304': 2.5}
for channel in channel_min:
fig, ax = plt.subplots()
if result[channel][1] is not None and \
abs((time - date_parser.parse(result[channel][0]['date-end'])).total_seconds()) < step.total_seconds()/2.0:
dat = np.power(result[channel][1], 0.25)
ax.set_title(date_parser.parse(result[channel][0]['date-obs']).strftime("%Y-%m-%d %H:%M:%S"))
dat[np.isnan(dat)] = 0
else:
dat = np.zeros((1280, 1280))
ax.set_title(timestr)
ax.imshow(dat, vmin=channel_min[channel], vmax=channel_max[channel], cmap='gray', origin='lower')
ax.set_axis_off()
fig.savefig("{}_{}.png".format(channel, fnextend), bbox_inches='tight', dpi=300)
plt.close(fig) | [
"def",
"make_plot",
"(",
"time",
",",
"config",
",",
"step",
")",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"try",
":",
"result",
"=",
"Fetcher",
"(",
"time",
",",
"products",
"=",
"config",
".",
"products",
",",
"suvi_composite_path",
"=",
"config",
".",
"suvi_composite_path",
")",
".",
"fetch",
"(",
"multithread",
"=",
"False",
")",
"if",
"result",
":",
"arr",
"=",
"make_three_color",
"(",
"result",
",",
"time",
",",
"step",
",",
"config",
",",
"upper_val",
"=",
"(",
"2.4",
",",
"2.4",
",",
"2.4",
")",
")",
"else",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"1280",
",",
"1280",
",",
"3",
")",
")",
"except",
"ValueError",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"1280",
",",
"1280",
",",
"3",
")",
")",
"ax",
".",
"imshow",
"(",
"arr",
",",
"origin",
"=",
"'lower'",
")",
"timestr",
"=",
"time",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"fnextend",
"=",
"time",
".",
"strftime",
"(",
"\"%Y%m%d%H%M%S\"",
")",
"ax",
".",
"set_title",
"(",
"timestr",
")",
"ax",
".",
"set_axis_off",
"(",
")",
"fig",
".",
"savefig",
"(",
"\"three_{}.png\"",
".",
"format",
"(",
"fnextend",
")",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"300",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"channel_min",
"=",
"{",
"'suvi-l2-ci094'",
":",
"0",
",",
"'suvi-l2-ci131'",
":",
"0",
",",
"'suvi-l2-ci171'",
":",
"0",
",",
"'suvi-l2-ci195'",
":",
"0",
",",
"'suvi-l2-ci284'",
":",
"0",
",",
"'suvi-l2-ci304'",
":",
"0",
"}",
"channel_max",
"=",
"{",
"'suvi-l2-ci094'",
":",
"1",
",",
"'suvi-l2-ci131'",
":",
"1",
",",
"'suvi-l2-ci171'",
":",
"1.8",
",",
"'suvi-l2-ci195'",
":",
"1.8",
",",
"'suvi-l2-ci284'",
":",
"1.8",
",",
"'suvi-l2-ci304'",
":",
"2.5",
"}",
"for",
"channel",
"in",
"channel_min",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"if",
"result",
"[",
"channel",
"]",
"[",
"1",
"]",
"is",
"not",
"None",
"and",
"abs",
"(",
"(",
"time",
"-",
"date_parser",
".",
"parse",
"(",
"result",
"[",
"channel",
"]",
"[",
"0",
"]",
"[",
"'date-end'",
"]",
")",
")",
".",
"total_seconds",
"(",
")",
")",
"<",
"step",
".",
"total_seconds",
"(",
")",
"/",
"2.0",
":",
"dat",
"=",
"np",
".",
"power",
"(",
"result",
"[",
"channel",
"]",
"[",
"1",
"]",
",",
"0.25",
")",
"ax",
".",
"set_title",
"(",
"date_parser",
".",
"parse",
"(",
"result",
"[",
"channel",
"]",
"[",
"0",
"]",
"[",
"'date-obs'",
"]",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")",
"dat",
"[",
"np",
".",
"isnan",
"(",
"dat",
")",
"]",
"=",
"0",
"else",
":",
"dat",
"=",
"np",
".",
"zeros",
"(",
"(",
"1280",
",",
"1280",
")",
")",
"ax",
".",
"set_title",
"(",
"timestr",
")",
"ax",
".",
"imshow",
"(",
"dat",
",",
"vmin",
"=",
"channel_min",
"[",
"channel",
"]",
",",
"vmax",
"=",
"channel_max",
"[",
"channel",
"]",
",",
"cmap",
"=",
"'gray'",
",",
"origin",
"=",
"'lower'",
")",
"ax",
".",
"set_axis_off",
"(",
")",
"fig",
".",
"savefig",
"(",
"\"{}_{}.png\"",
".",
"format",
"(",
"channel",
",",
"fnextend",
")",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"300",
")",
"plt",
".",
"close",
"(",
"fig",
")"
] | 39.125 | 18.875 |
def kill_definitions(self, atom, code_loc, data=None, dummy=True):
"""
Overwrite existing definitions w.r.t 'atom' with a dummy definition instance. A dummy definition will not be
removed during simplification.
:param Atom atom:
:param CodeLocation code_loc:
:param object data:
:return: None
"""
if data is None:
data = DataSet(Undefined(atom.size), atom.size)
self.kill_and_add_definition(atom, code_loc, data, dummy=dummy) | [
"def",
"kill_definitions",
"(",
"self",
",",
"atom",
",",
"code_loc",
",",
"data",
"=",
"None",
",",
"dummy",
"=",
"True",
")",
":",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"DataSet",
"(",
"Undefined",
"(",
"atom",
".",
"size",
")",
",",
"atom",
".",
"size",
")",
"self",
".",
"kill_and_add_definition",
"(",
"atom",
",",
"code_loc",
",",
"data",
",",
"dummy",
"=",
"dummy",
")"
] | 33.733333 | 22.666667 |
def remove(self, items):
"""Remove messages from lease management."""
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug("Item %s was not managed.", item.ack_id)
if self._bytes < 0:
_LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes)
self._bytes = 0 | [
"def",
"remove",
"(",
"self",
",",
"items",
")",
":",
"# Remove the ack ID from lease management, and decrement the",
"# byte counter.",
"for",
"item",
"in",
"items",
":",
"if",
"self",
".",
"_leased_messages",
".",
"pop",
"(",
"item",
".",
"ack_id",
",",
"None",
")",
"is",
"not",
"None",
":",
"self",
".",
"_bytes",
"-=",
"item",
".",
"byte_size",
"else",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Item %s was not managed.\"",
",",
"item",
".",
"ack_id",
")",
"if",
"self",
".",
"_bytes",
"<",
"0",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Bytes was unexpectedly negative: %d\"",
",",
"self",
".",
"_bytes",
")",
"self",
".",
"_bytes",
"=",
"0"
] | 40.615385 | 20.615385 |
def QA_SU_save_stock_xdxr(client=DATABASE, ui_log=None, ui_progress=None):
"""[summary]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
stock_list = QA_fetch_get_stock_list().code.unique().tolist()
# client.drop_collection('stock_xdxr')
try:
coll = client.stock_xdxr
coll.create_index(
[('code',
pymongo.ASCENDING),
('date',
pymongo.ASCENDING)],
unique=True
)
except:
client.drop_collection('stock_xdxr')
coll = client.stock_xdxr
coll.create_index(
[('code',
pymongo.ASCENDING),
('date',
pymongo.ASCENDING)],
unique=True
)
err = []
def __saving_work(code, coll):
QA_util_log_info(
'##JOB02 Now Saving XDXR INFO ==== {}'.format(str(code)),
ui_log=ui_log
)
try:
coll.insert_many(
QA_util_to_json_from_pandas(QA_fetch_get_stock_xdxr(str(code))),
ordered=False
)
except:
err.append(str(code))
for i_ in range(len(stock_list)):
QA_util_log_info(
'The {} of Total {}'.format(i_,
len(stock_list)),
ui_log=ui_log
)
strLogInfo = 'DOWNLOAD PROGRESS {} '.format(
str(float(i_ / len(stock_list) * 100))[0:4] + '%'
)
intLogProgress = int(float(i_ / len(stock_list) * 100))
QA_util_log_info(
strLogInfo,
ui_log=ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intLogProgress
)
__saving_work(stock_list[i_], coll) | [
"def",
"QA_SU_save_stock_xdxr",
"(",
"client",
"=",
"DATABASE",
",",
"ui_log",
"=",
"None",
",",
"ui_progress",
"=",
"None",
")",
":",
"stock_list",
"=",
"QA_fetch_get_stock_list",
"(",
")",
".",
"code",
".",
"unique",
"(",
")",
".",
"tolist",
"(",
")",
"# client.drop_collection('stock_xdxr')",
"try",
":",
"coll",
"=",
"client",
".",
"stock_xdxr",
"coll",
".",
"create_index",
"(",
"[",
"(",
"'code'",
",",
"pymongo",
".",
"ASCENDING",
")",
",",
"(",
"'date'",
",",
"pymongo",
".",
"ASCENDING",
")",
"]",
",",
"unique",
"=",
"True",
")",
"except",
":",
"client",
".",
"drop_collection",
"(",
"'stock_xdxr'",
")",
"coll",
"=",
"client",
".",
"stock_xdxr",
"coll",
".",
"create_index",
"(",
"[",
"(",
"'code'",
",",
"pymongo",
".",
"ASCENDING",
")",
",",
"(",
"'date'",
",",
"pymongo",
".",
"ASCENDING",
")",
"]",
",",
"unique",
"=",
"True",
")",
"err",
"=",
"[",
"]",
"def",
"__saving_work",
"(",
"code",
",",
"coll",
")",
":",
"QA_util_log_info",
"(",
"'##JOB02 Now Saving XDXR INFO ==== {}'",
".",
"format",
"(",
"str",
"(",
"code",
")",
")",
",",
"ui_log",
"=",
"ui_log",
")",
"try",
":",
"coll",
".",
"insert_many",
"(",
"QA_util_to_json_from_pandas",
"(",
"QA_fetch_get_stock_xdxr",
"(",
"str",
"(",
"code",
")",
")",
")",
",",
"ordered",
"=",
"False",
")",
"except",
":",
"err",
".",
"append",
"(",
"str",
"(",
"code",
")",
")",
"for",
"i_",
"in",
"range",
"(",
"len",
"(",
"stock_list",
")",
")",
":",
"QA_util_log_info",
"(",
"'The {} of Total {}'",
".",
"format",
"(",
"i_",
",",
"len",
"(",
"stock_list",
")",
")",
",",
"ui_log",
"=",
"ui_log",
")",
"strLogInfo",
"=",
"'DOWNLOAD PROGRESS {} '",
".",
"format",
"(",
"str",
"(",
"float",
"(",
"i_",
"/",
"len",
"(",
"stock_list",
")",
"*",
"100",
")",
")",
"[",
"0",
":",
"4",
"]",
"+",
"'%'",
")",
"intLogProgress",
"=",
"int",
"(",
"float",
"(",
"i_",
"/",
"len",
"(",
"stock_list",
")",
"*",
"100",
")",
")",
"QA_util_log_info",
"(",
"strLogInfo",
",",
"ui_log",
"=",
"ui_log",
",",
"ui_progress",
"=",
"ui_progress",
",",
"ui_progress_int_value",
"=",
"intLogProgress",
")",
"__saving_work",
"(",
"stock_list",
"[",
"i_",
"]",
",",
"coll",
")"
] | 27.822581 | 19.096774 |
def handle_heartbeat(queue_name):
"""Updates the heartbeat message for a task."""
task_id = request.form.get('task_id', type=str)
message = request.form.get('message', type=str)
index = request.form.get('index', type=int)
try:
work_queue.heartbeat(
queue_name,
task_id,
request.form.get('owner', request.remote_addr, type=str),
message,
index)
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.debug('Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d',
queue_name, task_id, message, index)
return flask.jsonify(success=True) | [
"def",
"handle_heartbeat",
"(",
"queue_name",
")",
":",
"task_id",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'task_id'",
",",
"type",
"=",
"str",
")",
"message",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'message'",
",",
"type",
"=",
"str",
")",
"index",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'index'",
",",
"type",
"=",
"int",
")",
"try",
":",
"work_queue",
".",
"heartbeat",
"(",
"queue_name",
",",
"task_id",
",",
"request",
".",
"form",
".",
"get",
"(",
"'owner'",
",",
"request",
".",
"remote_addr",
",",
"type",
"=",
"str",
")",
",",
"message",
",",
"index",
")",
"except",
"work_queue",
".",
"Error",
",",
"e",
":",
"return",
"utils",
".",
"jsonify_error",
"(",
"e",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"logging",
".",
"debug",
"(",
"'Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d'",
",",
"queue_name",
",",
"task_id",
",",
"message",
",",
"index",
")",
"return",
"flask",
".",
"jsonify",
"(",
"success",
"=",
"True",
")"
] | 35.894737 | 16.368421 |
def _handle_report(self, report):
"""Try to emit a report and possibly keep a copy of it"""
keep_report = True
if self.report_callback is not None:
keep_report = self.report_callback(report, self.context)
if keep_report:
self.reports.append(report) | [
"def",
"_handle_report",
"(",
"self",
",",
"report",
")",
":",
"keep_report",
"=",
"True",
"if",
"self",
".",
"report_callback",
"is",
"not",
"None",
":",
"keep_report",
"=",
"self",
".",
"report_callback",
"(",
"report",
",",
"self",
".",
"context",
")",
"if",
"keep_report",
":",
"self",
".",
"reports",
".",
"append",
"(",
"report",
")"
] | 29.8 | 19.1 |
def GetHasherNamesFromString(cls, hasher_names_string):
"""Retrieves a list of a hasher names from a comma separated string.
Takes a string of comma separated hasher names transforms it to a list of
hasher names.
Args:
hasher_names_string (str): comma separated names of hashers to enable,
the string 'all' to enable all hashers or 'none' to disable all
hashers.
Returns:
list[str]: names of valid hashers from the string, or an empty list if no
valid names are found.
"""
hasher_names = []
if not hasher_names_string or hasher_names_string.strip() == 'none':
return hasher_names
if hasher_names_string.strip() == 'all':
return cls.GetHasherNames()
for hasher_name in hasher_names_string.split(','):
hasher_name = hasher_name.strip()
if not hasher_name:
continue
hasher_name = hasher_name.lower()
if hasher_name in cls._hasher_classes:
hasher_names.append(hasher_name)
return hasher_names | [
"def",
"GetHasherNamesFromString",
"(",
"cls",
",",
"hasher_names_string",
")",
":",
"hasher_names",
"=",
"[",
"]",
"if",
"not",
"hasher_names_string",
"or",
"hasher_names_string",
".",
"strip",
"(",
")",
"==",
"'none'",
":",
"return",
"hasher_names",
"if",
"hasher_names_string",
".",
"strip",
"(",
")",
"==",
"'all'",
":",
"return",
"cls",
".",
"GetHasherNames",
"(",
")",
"for",
"hasher_name",
"in",
"hasher_names_string",
".",
"split",
"(",
"','",
")",
":",
"hasher_name",
"=",
"hasher_name",
".",
"strip",
"(",
")",
"if",
"not",
"hasher_name",
":",
"continue",
"hasher_name",
"=",
"hasher_name",
".",
"lower",
"(",
")",
"if",
"hasher_name",
"in",
"cls",
".",
"_hasher_classes",
":",
"hasher_names",
".",
"append",
"(",
"hasher_name",
")",
"return",
"hasher_names"
] | 30.363636 | 22.575758 |
def file_delete(context, id, file_id):
"""file_delete(context, id, path)
Delete a job file
>>> dcictl job-delete-file [OPTIONS]
:param string id: ID of the job to delete file [required]
:param string file_id: ID for the file to delete [required]
"""
dci_file.delete(context, id=file_id)
result = dci_file.delete(context, id=file_id)
utils.format_output(result, context.format) | [
"def",
"file_delete",
"(",
"context",
",",
"id",
",",
"file_id",
")",
":",
"dci_file",
".",
"delete",
"(",
"context",
",",
"id",
"=",
"file_id",
")",
"result",
"=",
"dci_file",
".",
"delete",
"(",
"context",
",",
"id",
"=",
"file_id",
")",
"utils",
".",
"format_output",
"(",
"result",
",",
"context",
".",
"format",
")"
] | 31 | 15.461538 |
def _build_matrix_non_uniform(p, q, coords, k):
"""Constructs the equation matrix for the finite difference coefficients of non-uniform grids at location k"""
A = [[1] * (p+q+1)]
for i in range(1, p + q + 1):
line = [(coords[k+j] - coords[k])**i for j in range(-p, q+1)]
A.append(line)
return np.array(A) | [
"def",
"_build_matrix_non_uniform",
"(",
"p",
",",
"q",
",",
"coords",
",",
"k",
")",
":",
"A",
"=",
"[",
"[",
"1",
"]",
"*",
"(",
"p",
"+",
"q",
"+",
"1",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"p",
"+",
"q",
"+",
"1",
")",
":",
"line",
"=",
"[",
"(",
"coords",
"[",
"k",
"+",
"j",
"]",
"-",
"coords",
"[",
"k",
"]",
")",
"**",
"i",
"for",
"j",
"in",
"range",
"(",
"-",
"p",
",",
"q",
"+",
"1",
")",
"]",
"A",
".",
"append",
"(",
"line",
")",
"return",
"np",
".",
"array",
"(",
"A",
")"
] | 47.142857 | 13.714286 |
def aptknt(tau, order):
"""Create an acceptable knot vector.
Minimal emulation of MATLAB's ``aptknt``.
The returned knot vector can be used to generate splines of desired `order`
that are suitable for interpolation to the collocation sites `tau`.
Note that this is only possible when ``len(tau)`` >= `order` + 1.
When this condition does not hold, a valid knot vector is returned,
but using it to generate a spline basis will not have the desired effect
(the spline will return a length-zero array upon evaluation).
Parameters:
tau:
Python list or rank-1 array, collocation sites
order:
int, >= 0, order of spline
Returns:
rank-1 array, `k` copies of ``tau[0]``, then ``aveknt(tau[1:-1], k-1)``,
and finally `k` copies of ``tau[-1]``, where ``k = min(order+1, len(tau))``.
"""
tau = np.atleast_1d(tau)
k = order + 1
if tau.ndim > 1:
raise ValueError("tau must be a list or a rank-1 array")
# emulate MATLAB behavior for the "k" parameter
#
# See
# https://se.mathworks.com/help/curvefit/aptknt.html
#
if len(tau) < k:
k = len(tau)
if not (tau == sorted(tau)).all():
raise ValueError("tau must be nondecreasing")
# last processed element needs to be:
# i + k - 1 = len(tau)- 1
# => i + k = len(tau)
# => i = len(tau) - k
#
u = len(tau) - k
for i in range(u):
if tau[i+k-1] == tau[i]:
raise ValueError("k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d" % (i,k))
# form the output sequence
#
prefix = [ tau[0] ] * k
suffix = [ tau[-1] ] * k
# https://se.mathworks.com/help/curvefit/aveknt.html
# MATLAB's aveknt():
# - averages successive k-1 entries, but ours averages k
# - seems to ignore the endpoints
#
tmp = aveknt(tau[1:-1], k-1)
middle = tmp.tolist()
return np.array( prefix + middle + suffix, dtype=tmp.dtype ) | [
"def",
"aptknt",
"(",
"tau",
",",
"order",
")",
":",
"tau",
"=",
"np",
".",
"atleast_1d",
"(",
"tau",
")",
"k",
"=",
"order",
"+",
"1",
"if",
"tau",
".",
"ndim",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"tau must be a list or a rank-1 array\"",
")",
"# emulate MATLAB behavior for the \"k\" parameter",
"#",
"# See",
"# https://se.mathworks.com/help/curvefit/aptknt.html",
"#",
"if",
"len",
"(",
"tau",
")",
"<",
"k",
":",
"k",
"=",
"len",
"(",
"tau",
")",
"if",
"not",
"(",
"tau",
"==",
"sorted",
"(",
"tau",
")",
")",
".",
"all",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"tau must be nondecreasing\"",
")",
"# last processed element needs to be:",
"# i + k - 1 = len(tau)- 1",
"# => i + k = len(tau)",
"# => i = len(tau) - k",
"#",
"u",
"=",
"len",
"(",
"tau",
")",
"-",
"k",
"for",
"i",
"in",
"range",
"(",
"u",
")",
":",
"if",
"tau",
"[",
"i",
"+",
"k",
"-",
"1",
"]",
"==",
"tau",
"[",
"i",
"]",
":",
"raise",
"ValueError",
"(",
"\"k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d\"",
"%",
"(",
"i",
",",
"k",
")",
")",
"# form the output sequence",
"#",
"prefix",
"=",
"[",
"tau",
"[",
"0",
"]",
"]",
"*",
"k",
"suffix",
"=",
"[",
"tau",
"[",
"-",
"1",
"]",
"]",
"*",
"k",
"# https://se.mathworks.com/help/curvefit/aveknt.html",
"# MATLAB's aveknt():",
"# - averages successive k-1 entries, but ours averages k",
"# - seems to ignore the endpoints",
"#",
"tmp",
"=",
"aveknt",
"(",
"tau",
"[",
"1",
":",
"-",
"1",
"]",
",",
"k",
"-",
"1",
")",
"middle",
"=",
"tmp",
".",
"tolist",
"(",
")",
"return",
"np",
".",
"array",
"(",
"prefix",
"+",
"middle",
"+",
"suffix",
",",
"dtype",
"=",
"tmp",
".",
"dtype",
")"
] | 29.523077 | 24.553846 |
def obtain(self, dest):
# type: (str) -> None
"""
Install or update in editable mode the package represented by this
VersionControl object.
Args:
dest: the repository directory in which to install or update.
"""
url, rev_options = self.get_url_rev_options(self.url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
# https://github.com/python/mypy/issues/1174
prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore
('i', 'w', 'b'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
# Do nothing if the response is "i".
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options) | [
"def",
"obtain",
"(",
"self",
",",
"dest",
")",
":",
"# type: (str) -> None",
"url",
",",
"rev_options",
"=",
"self",
".",
"get_url_rev_options",
"(",
"self",
".",
"url",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"self",
".",
"fetch_new",
"(",
"dest",
",",
"url",
",",
"rev_options",
")",
"return",
"rev_display",
"=",
"rev_options",
".",
"to_display",
"(",
")",
"if",
"self",
".",
"is_repository_directory",
"(",
"dest",
")",
":",
"existing_url",
"=",
"self",
".",
"get_remote_url",
"(",
"dest",
")",
"if",
"self",
".",
"compare_urls",
"(",
"existing_url",
",",
"url",
")",
":",
"logger",
".",
"debug",
"(",
"'%s in %s exists, and has correct URL (%s)'",
",",
"self",
".",
"repo_name",
".",
"title",
"(",
")",
",",
"display_path",
"(",
"dest",
")",
",",
"url",
",",
")",
"if",
"not",
"self",
".",
"is_commit_id_equal",
"(",
"dest",
",",
"rev_options",
".",
"rev",
")",
":",
"logger",
".",
"info",
"(",
"'Updating %s %s%s'",
",",
"display_path",
"(",
"dest",
")",
",",
"self",
".",
"repo_name",
",",
"rev_display",
",",
")",
"self",
".",
"update",
"(",
"dest",
",",
"url",
",",
"rev_options",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Skipping because already up-to-date.'",
")",
"return",
"logger",
".",
"warning",
"(",
"'%s %s in %s exists with URL %s'",
",",
"self",
".",
"name",
",",
"self",
".",
"repo_name",
",",
"display_path",
"(",
"dest",
")",
",",
"existing_url",
",",
")",
"prompt",
"=",
"(",
"'(s)witch, (i)gnore, (w)ipe, (b)ackup '",
",",
"(",
"'s'",
",",
"'i'",
",",
"'w'",
",",
"'b'",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Directory %s already exists, and is not a %s %s.'",
",",
"dest",
",",
"self",
".",
"name",
",",
"self",
".",
"repo_name",
",",
")",
"# https://github.com/python/mypy/issues/1174",
"prompt",
"=",
"(",
"'(i)gnore, (w)ipe, (b)ackup '",
",",
"# type: ignore",
"(",
"'i'",
",",
"'w'",
",",
"'b'",
")",
")",
"logger",
".",
"warning",
"(",
"'The plan is to install the %s repository %s'",
",",
"self",
".",
"name",
",",
"url",
",",
")",
"response",
"=",
"ask_path_exists",
"(",
"'What to do? %s'",
"%",
"prompt",
"[",
"0",
"]",
",",
"prompt",
"[",
"1",
"]",
")",
"if",
"response",
"==",
"'a'",
":",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"if",
"response",
"==",
"'w'",
":",
"logger",
".",
"warning",
"(",
"'Deleting %s'",
",",
"display_path",
"(",
"dest",
")",
")",
"rmtree",
"(",
"dest",
")",
"self",
".",
"fetch_new",
"(",
"dest",
",",
"url",
",",
"rev_options",
")",
"return",
"if",
"response",
"==",
"'b'",
":",
"dest_dir",
"=",
"backup_dir",
"(",
"dest",
")",
"logger",
".",
"warning",
"(",
"'Backing up %s to %s'",
",",
"display_path",
"(",
"dest",
")",
",",
"dest_dir",
",",
")",
"shutil",
".",
"move",
"(",
"dest",
",",
"dest_dir",
")",
"self",
".",
"fetch_new",
"(",
"dest",
",",
"url",
",",
"rev_options",
")",
"return",
"# Do nothing if the response is \"i\".",
"if",
"response",
"==",
"'s'",
":",
"logger",
".",
"info",
"(",
"'Switching %s %s to %s%s'",
",",
"self",
".",
"repo_name",
",",
"display_path",
"(",
"dest",
")",
",",
"url",
",",
"rev_display",
",",
")",
"self",
".",
"switch",
"(",
"dest",
",",
"url",
",",
"rev_options",
")"
] | 32.543478 | 17.304348 |
def sampleLocationFromFeature(self, feature):
    """
    Samples a location from one specific feature.
    This is only supported with three dimensions.

    ``feature`` must be one of ``"face"``, ``"edge"``, ``"vertex"`` or
    ``"random"``; any other value raises ``NameError``.
    """
    samplers = {
        "face": self._sampleFromFaces,
        "edge": self._sampleFromEdges,
        "vertex": self._sampleFromVertices,
        "random": self.sampleLocation,
    }
    if feature not in samplers:
        raise NameError("No such feature in {}: {}".format(self, feature))
    return samplers[feature]()
"def",
"sampleLocationFromFeature",
"(",
"self",
",",
"feature",
")",
":",
"if",
"feature",
"==",
"\"face\"",
":",
"return",
"self",
".",
"_sampleFromFaces",
"(",
")",
"elif",
"feature",
"==",
"\"edge\"",
":",
"return",
"self",
".",
"_sampleFromEdges",
"(",
")",
"elif",
"feature",
"==",
"\"vertex\"",
":",
"return",
"self",
".",
"_sampleFromVertices",
"(",
")",
"elif",
"feature",
"==",
"\"random\"",
":",
"return",
"self",
".",
"sampleLocation",
"(",
")",
"else",
":",
"raise",
"NameError",
"(",
"\"No such feature in {}: {}\"",
".",
"format",
"(",
"self",
",",
"feature",
")",
")"
] | 30.8125 | 11.9375 |
def translate_to_arpabet(self):
    """Render the phoneme sequence as a space-separated ARPAbet string.

    Vowel phonemes get this object's stress marker appended; all other
    phonemes are emitted unchanged.

    :return: the ARPAbet transcription as a single string
    """
    parts = []
    for phoneme in self._phoneme_list:
        suffix = self.stress.mark_arpabet() if phoneme.is_vowel else ""
        parts.append(phoneme.arpabet + suffix)
    return " ".join(parts)
"def",
"translate_to_arpabet",
"(",
"self",
")",
":",
"translations",
"=",
"[",
"]",
"for",
"phoneme",
"in",
"self",
".",
"_phoneme_list",
":",
"if",
"phoneme",
".",
"is_vowel",
":",
"translations",
".",
"append",
"(",
"phoneme",
".",
"arpabet",
"+",
"self",
".",
"stress",
".",
"mark_arpabet",
"(",
")",
")",
"else",
":",
"translations",
".",
"append",
"(",
"phoneme",
".",
"arpabet",
")",
"return",
"\" \"",
".",
"join",
"(",
"translations",
")"
] | 24.866667 | 22.466667 |
def breaks_from_binwidth(x_range, binwidth=None, center=None,
                         boundary=None):
    """
    Calculate breaks given binwidth

    Parameters
    ----------
    x_range : array_like
        Range over which to calculate the breaks. Must be
        of size 2.
    binwidth : float
        Separation between the breaks. Required; must be a
        positive number.
    center : float
        The center of one of the bins
    boundary : float
        A boundary between two bins

    Returns
    -------
    out : array_like
        Sequence of break points.

    Raises
    ------
    PlotnineError
        If ``binwidth`` is missing or not positive, or if both
        ``center`` and ``boundary`` are specified.
    """
    # Explicit None check: previously the default (None) hit
    # `binwidth <= 0` and raised a bare TypeError instead of a
    # meaningful PlotnineError.
    if binwidth is None or binwidth <= 0:
        raise PlotnineError("The 'binwidth' must be positive.")

    if boundary is not None and center is not None:
        raise PlotnineError("Only one of 'boundary' and 'center' "
                            "may be specified.")
    elif boundary is None:
        if center is None:
            # This puts min and max of data in outer half
            # of their bins
            boundary = binwidth/2
        else:
            boundary = center - binwidth/2

    # Shift the boundary to the bin edge at or below x_range[0]
    epsilon = np.finfo(float).eps
    shift = np.floor((x_range[0]-boundary)/binwidth)
    origin = boundary + shift * binwidth

    # The (1-epsilon) factor prevents numerical roundoff in the
    # binwidth from creating an extra break beyond the one that
    # includes x_range[1].
    max_x = x_range[1]+binwidth*(1-epsilon)
    breaks = np.arange(origin, max_x, binwidth)
    return breaks
"def",
"breaks_from_binwidth",
"(",
"x_range",
",",
"binwidth",
"=",
"None",
",",
"center",
"=",
"None",
",",
"boundary",
"=",
"None",
")",
":",
"if",
"binwidth",
"<=",
"0",
":",
"raise",
"PlotnineError",
"(",
"\"The 'binwidth' must be positive.\"",
")",
"if",
"boundary",
"is",
"not",
"None",
"and",
"center",
"is",
"not",
"None",
":",
"raise",
"PlotnineError",
"(",
"\"Only one of 'boundary' and 'center' \"",
"\"may be specified.\"",
")",
"elif",
"boundary",
"is",
"None",
":",
"if",
"center",
"is",
"None",
":",
"# This puts min and max of data in outer half",
"# of their bins",
"boundary",
"=",
"binwidth",
"/",
"2",
"else",
":",
"boundary",
"=",
"center",
"-",
"binwidth",
"/",
"2",
"epsilon",
"=",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"eps",
"shift",
"=",
"np",
".",
"floor",
"(",
"(",
"x_range",
"[",
"0",
"]",
"-",
"boundary",
")",
"/",
"binwidth",
")",
"origin",
"=",
"boundary",
"+",
"shift",
"*",
"binwidth",
"# The (1-epsilon) factor prevents numerical roundoff in the",
"# binwidth from creating an extra break beyond the one that",
"# includes x_range[1].",
"max_x",
"=",
"x_range",
"[",
"1",
"]",
"+",
"binwidth",
"*",
"(",
"1",
"-",
"epsilon",
")",
"breaks",
"=",
"np",
".",
"arange",
"(",
"origin",
",",
"max_x",
",",
"binwidth",
")",
"return",
"breaks"
] | 30.555556 | 16.511111 |
def start_process(parser_args):
    """ Start up specific daemon

    Looks up the PID of the process named by ``parser_args.process_name``;
    if that PID exists, writes an error to stderr and exits with status 1.
    Otherwise starts the process either via ``process_helper`` (default)
    or in the foreground via ``process_starter`` when
    ``parser_args.console`` is truthy.  Any exception is reported to
    stderr together with a traceback.
    """
    # imports kept local to the function (presumably to avoid pulling in
    # daemon-control dependencies at module import time -- confirm)
    import psutil
    import process_starter
    from synergy.system import process_helper
    try:
        pid = process_helper.get_process_pid(parser_args.process_name)
        if pid is not None:
            # a stale pid record may remain after a crash; only bail out
            # if the OS still knows the pid
            if psutil.pid_exists(pid):
                message = 'ERROR: Process {0} is already running with pid {1}\n'.format(parser_args.process_name, pid)
                sys.stderr.write(message)
                sys.exit(1)
        if not parser_args.console:
            # this block triggers if the options.console is not defined or is False
            process_helper.start_process(parser_args.process_name, parser_args.extra_parameters)
        else:
            process_starter.start_by_process_name(parser_args.process_name, parser_args.extra_parameters)
    except Exception as e:
        sys.stderr.write('Exception on starting {0} : {1}\n'.format(parser_args.process_name, e))
        traceback.print_exc(file=sys.stderr)
"def",
"start_process",
"(",
"parser_args",
")",
":",
"import",
"psutil",
"import",
"process_starter",
"from",
"synergy",
".",
"system",
"import",
"process_helper",
"try",
":",
"pid",
"=",
"process_helper",
".",
"get_process_pid",
"(",
"parser_args",
".",
"process_name",
")",
"if",
"pid",
"is",
"not",
"None",
":",
"if",
"psutil",
".",
"pid_exists",
"(",
"pid",
")",
":",
"message",
"=",
"'ERROR: Process {0} is already running with pid {1}\\n'",
".",
"format",
"(",
"parser_args",
".",
"process_name",
",",
"pid",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"message",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"not",
"parser_args",
".",
"console",
":",
"# this block triggers if the options.console is not defined or is False",
"process_helper",
".",
"start_process",
"(",
"parser_args",
".",
"process_name",
",",
"parser_args",
".",
"extra_parameters",
")",
"else",
":",
"process_starter",
".",
"start_by_process_name",
"(",
"parser_args",
".",
"process_name",
",",
"parser_args",
".",
"extra_parameters",
")",
"except",
"Exception",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Exception on starting {0} : {1}\\n'",
".",
"format",
"(",
"parser_args",
".",
"process_name",
",",
"e",
")",
")",
"traceback",
".",
"print_exc",
"(",
"file",
"=",
"sys",
".",
"stderr",
")"
] | 44.681818 | 25.954545 |
def readiterinit(d):
    """ Prepare to read data with ms.iter*

    Computes the requested time range from the pipeline state dict ``d``
    (keys used: starttime_mjd, inttime, nskip, nints, spwlist, ants,
    pols, iterint, nbl, nspw, npol, filename, nthread), opens the
    measurement set and initializes TIME-ordered iteration via
    ``ms.iterinit``/``ms.iterorigin``.
    """
    # set requested time range based on given parameters
    starttime_mjd = d['starttime_mjd']
    timeskip = d['inttime']*d['nskip']
    starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(starttime_mjd+timeskip/(24.*60*60),'d'),form=['ymd'], prec=9)[0], 's'))[0]
    stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(starttime_mjd+(timeskip+(d['nints']+1)*d['inttime'])/(24.*60*60), 'd'), form=['ymd'], prec=9)[0], 's'))[0]  # nints+1 to be avoid buffer running out and stalling iteration
    # BUGFIX: the message previously had no %s placeholder for the extra
    # argument, which breaks logging's lazy %-formatting (assumes a
    # stdlib-style logger -- confirm).
    logger.debug('Time of first integration: %s', qa.time(qa.quantity(starttime_mjd,'d'),form=['ymd'],prec=9)[0])
    logger.info('Reading times %s to %s in %d iterations' % (qa.time(qa.quantity(starttime_mjd+timeskip/(24.*60*60),'d'),form=['hms'], prec=9)[0], qa.time(qa.quantity(starttime_mjd+(timeskip+(d['nints']+1)*d['inttime'])/(24.*60*60), 'd'), form=['hms'], prec=9)[0], d['nthread']))
    # read data into data structure
    ms.open(d['filename'])
    if len(d['spwlist']) == 1:
        ms.selectinit(datadescid=d['spwlist'][0])
    else:
        ms.selectinit(datadescid=0, reset=True)  # reset includes spw in iteration over time
    selection = {'time': [starttime, stoptime], 'uvdist': [1., 1e10], 'antenna1': d['ants'], 'antenna2': d['ants']}  # exclude auto-corrs
    ms.select(items = selection)
    ms.selectpolarization(d['pols'])
    ms.iterinit(['TIME'], 0, d['iterint']*d['nbl']*d['nspw']*d['npol'], adddefaultsortcolumns=False)
    # NOTE(review): iterstatus is assigned but never returned or stored --
    # confirm whether callers rely on ms's internal iteration state only.
    iterstatus = ms.iterorigin()
"def",
"readiterinit",
"(",
"d",
")",
":",
"# set requested time range based on given parameters",
"starttime_mjd",
"=",
"d",
"[",
"'starttime_mjd'",
"]",
"timeskip",
"=",
"d",
"[",
"'inttime'",
"]",
"*",
"d",
"[",
"'nskip'",
"]",
"starttime",
"=",
"qa",
".",
"getvalue",
"(",
"qa",
".",
"convert",
"(",
"qa",
".",
"time",
"(",
"qa",
".",
"quantity",
"(",
"starttime_mjd",
"+",
"timeskip",
"/",
"(",
"24.",
"*",
"60",
"*",
"60",
")",
",",
"'d'",
")",
",",
"form",
"=",
"[",
"'ymd'",
"]",
",",
"prec",
"=",
"9",
")",
"[",
"0",
"]",
",",
"'s'",
")",
")",
"[",
"0",
"]",
"stoptime",
"=",
"qa",
".",
"getvalue",
"(",
"qa",
".",
"convert",
"(",
"qa",
".",
"time",
"(",
"qa",
".",
"quantity",
"(",
"starttime_mjd",
"+",
"(",
"timeskip",
"+",
"(",
"d",
"[",
"'nints'",
"]",
"+",
"1",
")",
"*",
"d",
"[",
"'inttime'",
"]",
")",
"/",
"(",
"24.",
"*",
"60",
"*",
"60",
")",
",",
"'d'",
")",
",",
"form",
"=",
"[",
"'ymd'",
"]",
",",
"prec",
"=",
"9",
")",
"[",
"0",
"]",
",",
"'s'",
")",
")",
"[",
"0",
"]",
"# nints+1 to be avoid buffer running out and stalling iteration",
"logger",
".",
"debug",
"(",
"'Time of first integration:'",
",",
"qa",
".",
"time",
"(",
"qa",
".",
"quantity",
"(",
"starttime_mjd",
",",
"'d'",
")",
",",
"form",
"=",
"[",
"'ymd'",
"]",
",",
"prec",
"=",
"9",
")",
"[",
"0",
"]",
")",
"logger",
".",
"info",
"(",
"'Reading times %s to %s in %d iterations'",
"%",
"(",
"qa",
".",
"time",
"(",
"qa",
".",
"quantity",
"(",
"starttime_mjd",
"+",
"timeskip",
"/",
"(",
"24.",
"*",
"60",
"*",
"60",
")",
",",
"'d'",
")",
",",
"form",
"=",
"[",
"'hms'",
"]",
",",
"prec",
"=",
"9",
")",
"[",
"0",
"]",
",",
"qa",
".",
"time",
"(",
"qa",
".",
"quantity",
"(",
"starttime_mjd",
"+",
"(",
"timeskip",
"+",
"(",
"d",
"[",
"'nints'",
"]",
"+",
"1",
")",
"*",
"d",
"[",
"'inttime'",
"]",
")",
"/",
"(",
"24.",
"*",
"60",
"*",
"60",
")",
",",
"'d'",
")",
",",
"form",
"=",
"[",
"'hms'",
"]",
",",
"prec",
"=",
"9",
")",
"[",
"0",
"]",
",",
"d",
"[",
"'nthread'",
"]",
")",
")",
"# read data into data structure",
"ms",
".",
"open",
"(",
"d",
"[",
"'filename'",
"]",
")",
"if",
"len",
"(",
"d",
"[",
"'spwlist'",
"]",
")",
"==",
"1",
":",
"ms",
".",
"selectinit",
"(",
"datadescid",
"=",
"d",
"[",
"'spwlist'",
"]",
"[",
"0",
"]",
")",
"else",
":",
"ms",
".",
"selectinit",
"(",
"datadescid",
"=",
"0",
",",
"reset",
"=",
"True",
")",
"# reset includes spw in iteration over time",
"selection",
"=",
"{",
"'time'",
":",
"[",
"starttime",
",",
"stoptime",
"]",
",",
"'uvdist'",
":",
"[",
"1.",
",",
"1e10",
"]",
",",
"'antenna1'",
":",
"d",
"[",
"'ants'",
"]",
",",
"'antenna2'",
":",
"d",
"[",
"'ants'",
"]",
"}",
"# exclude auto-corrs",
"ms",
".",
"select",
"(",
"items",
"=",
"selection",
")",
"ms",
".",
"selectpolarization",
"(",
"d",
"[",
"'pols'",
"]",
")",
"ms",
".",
"iterinit",
"(",
"[",
"'TIME'",
"]",
",",
"0",
",",
"d",
"[",
"'iterint'",
"]",
"*",
"d",
"[",
"'nbl'",
"]",
"*",
"d",
"[",
"'nspw'",
"]",
"*",
"d",
"[",
"'npol'",
"]",
",",
"adddefaultsortcolumns",
"=",
"False",
")",
"iterstatus",
"=",
"ms",
".",
"iterorigin",
"(",
")"
] | 66.695652 | 44.043478 |
def unlink_reference(self, source, target):
    """Unlink the target from the source

    Removes the UID of ``target`` from the persistent backreference
    storage of ``source``.

    :param source: the object holding the backreferences
    :param target: the object whose UID should be removed
    :returns: True when the backreference was removed, False when the
        storage key or the target UID was not present
    """
    target_uid = api.get_uid(target)
    # get the storage key
    key = self.get_relationship_key(target)
    # get all backreferences from the source
    # N.B. only like this we get the persistent mapping!
    backrefs = get_backreferences(source, relationship=None)
    if key not in backrefs:
        # `logger.warn` is a deprecated alias of `warning` in stdlib logging
        logger.warning(
            "Referenced object {} has no backreferences for the key {}"
            .format(repr(source), key))
        return False
    if target_uid not in backrefs[key]:
        logger.warning("Target {} was not linked by {}"
                       .format(repr(target), repr(source)))
        return False
    backrefs[key].remove(target_uid)
    return True
"def",
"unlink_reference",
"(",
"self",
",",
"source",
",",
"target",
")",
":",
"target_uid",
"=",
"api",
".",
"get_uid",
"(",
"target",
")",
"# get the storage key",
"key",
"=",
"self",
".",
"get_relationship_key",
"(",
"target",
")",
"# get all backreferences from the source",
"# N.B. only like this we get the persistent mapping!",
"backrefs",
"=",
"get_backreferences",
"(",
"source",
",",
"relationship",
"=",
"None",
")",
"if",
"key",
"not",
"in",
"backrefs",
":",
"logger",
".",
"warn",
"(",
"\"Referenced object {} has no backreferences for the key {}\"",
".",
"format",
"(",
"repr",
"(",
"source",
")",
",",
"key",
")",
")",
"return",
"False",
"if",
"target_uid",
"not",
"in",
"backrefs",
"[",
"key",
"]",
":",
"logger",
".",
"warn",
"(",
"\"Target {} was not linked by {}\"",
".",
"format",
"(",
"repr",
"(",
"target",
")",
",",
"repr",
"(",
"source",
")",
")",
")",
"return",
"False",
"backrefs",
"[",
"key",
"]",
".",
"remove",
"(",
"target_uid",
")",
"return",
"True"
] | 41.25 | 11.4 |
def post_optimization_step(self, batch_info, device, model, rollout):
    """Steps to take after optimization has been done.

    Performs an exponentially-weighted moving average update of the
    target network towards the freshly optimized model:
    ``target = (1 - tau) * target + tau * model``.
    """
    tau = self.tau
    param_pairs = zip(model.parameters(), self.target_model.parameters())
    for source_param, target_param in param_pairs:
        # EWMA average model update, performed in place on the target
        target_param.data.mul_(1 - tau)
        target_param.data.add_(source_param.data * tau)
"def",
"post_optimization_step",
"(",
"self",
",",
"batch_info",
",",
"device",
",",
"model",
",",
"rollout",
")",
":",
"# Update target model",
"for",
"model_param",
",",
"target_param",
"in",
"zip",
"(",
"model",
".",
"parameters",
"(",
")",
",",
"self",
".",
"target_model",
".",
"parameters",
"(",
")",
")",
":",
"# EWMA average model update",
"target_param",
".",
"data",
".",
"mul_",
"(",
"1",
"-",
"self",
".",
"tau",
")",
".",
"add_",
"(",
"model_param",
".",
"data",
"*",
"self",
".",
"tau",
")"
] | 62.833333 | 23.333333 |
def new_histogram(name, reservoir=None):
    """
    Build a new histogram metric backed by the given reservoir.

    When ``reservoir`` is ``None``, a uniform reservoir with the
    default size is created.
    """
    if reservoir is None:
        default_size = histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE
        reservoir = histogram.UniformReservoir(default_size)
    return new_metric(name, histogram.Histogram, reservoir)
"def",
"new_histogram",
"(",
"name",
",",
"reservoir",
"=",
"None",
")",
":",
"if",
"reservoir",
"is",
"None",
":",
"reservoir",
"=",
"histogram",
".",
"UniformReservoir",
"(",
"histogram",
".",
"DEFAULT_UNIFORM_RESERVOIR_SIZE",
")",
"return",
"new_metric",
"(",
"name",
",",
"histogram",
".",
"Histogram",
",",
"reservoir",
")"
] | 37.5 | 23.1 |
def _find_day_section_from_indices(indices, split_interval):
"""
Returns a list with [weekday, section] identifiers found
using a list of indices.
"""
cells_day = 24 * 60 // split_interval
rv = [[int(math.floor(i / cells_day)), i % cells_day] for i in indices]
return rv | [
"def",
"_find_day_section_from_indices",
"(",
"indices",
",",
"split_interval",
")",
":",
"cells_day",
"=",
"24",
"*",
"60",
"//",
"split_interval",
"rv",
"=",
"[",
"[",
"int",
"(",
"math",
".",
"floor",
"(",
"i",
"/",
"cells_day",
")",
")",
",",
"i",
"%",
"cells_day",
"]",
"for",
"i",
"in",
"indices",
"]",
"return",
"rv"
] | 32.333333 | 17.222222 |
def check_elements(self):
    """
    Checks element definitions.

    Verifies that every element type used in ``self.elements`` is a
    known type (i.e. a key of the module-level ``ELEMENTS`` mapping)
    and raises ``ValueError`` listing the offending types otherwise.
    """
    # ELEMENT TYPE CHECKING
    existing_types = set(self.elements.type.argiope.values.flatten())
    allowed_types = set(ELEMENTS.keys())
    # `not (a <= b)` instead of the `== False` anti-idiom; also fixes
    # the "know elements" typo in the error message.
    if not existing_types <= allowed_types:
        raise ValueError("Element types {0} not in known elements {1}".format(
            existing_types - allowed_types, allowed_types))
    print("<Elements: OK>")
"def",
"check_elements",
"(",
"self",
")",
":",
"# ELEMENT TYPE CHECKING",
"existing_types",
"=",
"set",
"(",
"self",
".",
"elements",
".",
"type",
".",
"argiope",
".",
"values",
".",
"flatten",
"(",
")",
")",
"allowed_types",
"=",
"set",
"(",
"ELEMENTS",
".",
"keys",
"(",
")",
")",
"if",
"(",
"existing_types",
"<=",
"allowed_types",
")",
"==",
"False",
":",
"raise",
"ValueError",
"(",
"\"Element types {0} not in know elements {1}\"",
".",
"format",
"(",
"existing_types",
"-",
"allowed_types",
",",
"allowed_types",
")",
")",
"print",
"(",
"\"<Elements: OK>\"",
")"
] | 38.909091 | 14 |
def get_surface_boundaries(self):
    """
    Returns the boundaries in the same format as a multiplanar
    surface, with two one-element lists of lons and lats.

    The perimeter is traced as: top row, right column (below the top
    row), bottom row reversed (excluding the last column), left column
    reversed (excluding the bottom row).
    """
    def perimeter(values):
        # Walk the outer ring of the 2D grid.
        return numpy.concatenate((values[0, :],
                                  values[1:, -1],
                                  values[-1, :-1][::-1],
                                  values[:-1, 0][::-1]))

    mesh = self.mesh
    return [perimeter(mesh.lons)], [perimeter(mesh.lats)]
"def",
"get_surface_boundaries",
"(",
"self",
")",
":",
"mesh",
"=",
"self",
".",
"mesh",
"lons",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"mesh",
".",
"lons",
"[",
"0",
",",
":",
"]",
",",
"mesh",
".",
"lons",
"[",
"1",
":",
",",
"-",
"1",
"]",
",",
"mesh",
".",
"lons",
"[",
"-",
"1",
",",
":",
"-",
"1",
"]",
"[",
":",
":",
"-",
"1",
"]",
",",
"mesh",
".",
"lons",
"[",
":",
"-",
"1",
",",
"0",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"lats",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"mesh",
".",
"lats",
"[",
"0",
",",
":",
"]",
",",
"mesh",
".",
"lats",
"[",
"1",
":",
",",
"-",
"1",
"]",
",",
"mesh",
".",
"lats",
"[",
"-",
"1",
",",
":",
"-",
"1",
"]",
"[",
":",
":",
"-",
"1",
"]",
",",
"mesh",
".",
"lats",
"[",
":",
"-",
"1",
",",
"0",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"return",
"[",
"lons",
"]",
",",
"[",
"lats",
"]"
] | 44.933333 | 13.333333 |
def run(self):
        """Runs server.

        Request/response loop.  Each request is a DELIMITER-separated
        string ``msg|name|client_id|request_id``.  Supported commands:
        LOCK, UNLOCK, PING (answered with ``self.PONG``) and DONE
        (answered with ``self.CLOSED``, which also terminates the loop).
        Anything else produces an MSG_ERROR response.
        ``_pre_respond_hook`` may veto sending the reply.  Unexpected
        exceptions are logged and re-raised.
        """
        try:
            self._start()
            running = True
            while running:
                # defaults in case the request does not split into 4 parts
                msg = ''
                name = ''
                client_id = ''
                request_id = ''
                request = self._socket.recv_string()
                self._logger.log(1, 'Recevied REQ `%s`', request)
                split_msg = request.split(self.DELIMITER)
                if len(split_msg) == 4:
                    msg, name, client_id, request_id = split_msg
                if msg == self.LOCK:
                    response = self._lock(name, client_id, request_id)
                elif msg == self.UNLOCK:
                    response = self._unlock(name, client_id, request_id)
                elif msg == self.PING:
                    response = self.PONG
                elif msg == self.DONE:
                    # stop the loop after acknowledging shutdown
                    response = self.CLOSED
                    running = False
                else:
                    response = (self.MSG_ERROR + self.DELIMITER +
                                'Request `%s` not understood '
                                '(or wrong number of delimiters)' % request)
                    self._logger.error(response)
                respond = self._pre_respond_hook(response)
                if respond:
                    self._logger.log(1, 'Sending REP `%s` to `%s` (request id `%s`)',
                                     response, client_id, request_id)
                    self._socket.send_string(response)
            # Close everything in the end
            self._close()
        except Exception:
            self._logger.exception('Crashed Lock Server!')
            raise
"def",
"run",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_start",
"(",
")",
"running",
"=",
"True",
"while",
"running",
":",
"msg",
"=",
"''",
"name",
"=",
"''",
"client_id",
"=",
"''",
"request_id",
"=",
"''",
"request",
"=",
"self",
".",
"_socket",
".",
"recv_string",
"(",
")",
"self",
".",
"_logger",
".",
"log",
"(",
"1",
",",
"'Recevied REQ `%s`'",
",",
"request",
")",
"split_msg",
"=",
"request",
".",
"split",
"(",
"self",
".",
"DELIMITER",
")",
"if",
"len",
"(",
"split_msg",
")",
"==",
"4",
":",
"msg",
",",
"name",
",",
"client_id",
",",
"request_id",
"=",
"split_msg",
"if",
"msg",
"==",
"self",
".",
"LOCK",
":",
"response",
"=",
"self",
".",
"_lock",
"(",
"name",
",",
"client_id",
",",
"request_id",
")",
"elif",
"msg",
"==",
"self",
".",
"UNLOCK",
":",
"response",
"=",
"self",
".",
"_unlock",
"(",
"name",
",",
"client_id",
",",
"request_id",
")",
"elif",
"msg",
"==",
"self",
".",
"PING",
":",
"response",
"=",
"self",
".",
"PONG",
"elif",
"msg",
"==",
"self",
".",
"DONE",
":",
"response",
"=",
"self",
".",
"CLOSED",
"running",
"=",
"False",
"else",
":",
"response",
"=",
"(",
"self",
".",
"MSG_ERROR",
"+",
"self",
".",
"DELIMITER",
"+",
"'Request `%s` not understood '",
"'(or wrong number of delimiters)'",
"%",
"request",
")",
"self",
".",
"_logger",
".",
"error",
"(",
"response",
")",
"respond",
"=",
"self",
".",
"_pre_respond_hook",
"(",
"response",
")",
"if",
"respond",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"1",
",",
"'Sending REP `%s` to `%s` (request id `%s`)'",
",",
"response",
",",
"client_id",
",",
"request_id",
")",
"self",
".",
"_socket",
".",
"send_string",
"(",
"response",
")",
"# Close everything in the end",
"self",
".",
"_close",
"(",
")",
"except",
"Exception",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"'Crashed Lock Server!'",
")",
"raise"
] | 33.14 | 20.88 |
def newChannelOpened_channel_(self, notif, newChannel):
    """
    Handle when a client connects to the server channel.
    (This method is called for both RFCOMM and L2CAP channels.)

    :param notif: the notification object delivered by the Bluetooth
        callback (unused here beyond the callback signature)
    :param newChannel: the newly opened channel object; only incoming
        channels are processed
    """
    if newChannel is not None and newChannel.isIncoming():
        # not sure if delegate really needs to be set
        newChannel.setDelegate_(self)
        # forward to the wrapped callback object, if it handles this event
        if hasattr(self.__cb_obj, '_handle_channelopened'):
            self.__cb_obj._handle_channelopened(newChannel)
"def",
"newChannelOpened_channel_",
"(",
"self",
",",
"notif",
",",
"newChannel",
")",
":",
"if",
"newChannel",
"is",
"not",
"None",
"and",
"newChannel",
".",
"isIncoming",
"(",
")",
":",
"# not sure if delegate really needs to be set",
"newChannel",
".",
"setDelegate_",
"(",
"self",
")",
"if",
"hasattr",
"(",
"self",
".",
"__cb_obj",
",",
"'_handle_channelopened'",
")",
":",
"self",
".",
"__cb_obj",
".",
"_handle_channelopened",
"(",
"newChannel",
")"
] | 44.545455 | 17.090909 |
def get_hierarchy_traversal_session(self, proxy):
    """Gets the ``OsidSession`` associated with the hierarchy traversal service.

    arg: proxy (osid.proxy.Proxy): a proxy
    return: (osid.hierarchy.HierarchyTraversalSession) - a
            ``HierarchyTraversalSession``
    raise: NullArgument - ``proxy`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_hierarchy_traversal()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_hierarchy_traversal()`` is ``true``.*
    """
    if self.supports_hierarchy_traversal():
        # pylint: disable=no-member
        return sessions.HierarchyTraversalSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
"def",
"get_hierarchy_traversal_session",
"(",
"self",
",",
"proxy",
")",
":",
"if",
"not",
"self",
".",
"supports_hierarchy_traversal",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"HierarchyTraversalSession",
"(",
"proxy",
"=",
"proxy",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")"
] | 46.666667 | 16.944444 |
async def get_reviews(self, **params):
        """Receives all reviews by cid

        Accepts:
        - cid
        - coinid

        Returns a list of dicts, one per unconfirmed review document
        (each reduced to its "confirmed" field), or an error dict with
        "error" and "reason" keys on bad input.
        """
        if params.get("message"):
            # payload may arrive wrapped as a JSON string under "message"
            params = json.loads(params.get("message", "{}"))
        if not params:
            return {"error":400, "reason":"Missed required fields"}
        cid = params.get("cid", 0)
        coinid = params.get("coinid")
        # NOTE(review): this only errors when BOTH cid and coinid are
        # missing; a present cid with a missing coinid falls through to
        # `client[coinid]` with coinid=None -- confirm whether `or` was
        # intended here.
        if not cid and not coinid:
            return {"error":400, "reason":"Missed cid"}
        reviews = []
        database = client[coinid]
        collection = database[settings.REVIEW]
        # only unconfirmed reviews (confirmed == None) for this cid
        async for document in collection.find({"confirmed":None, "cid":int(cid)}):
            reviews.append({i:document[i] for i in document if i == "confirmed"})
        return reviews
"async",
"def",
"get_reviews",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"if",
"params",
".",
"get",
"(",
"\"message\"",
")",
":",
"params",
"=",
"json",
".",
"loads",
"(",
"params",
".",
"get",
"(",
"\"message\"",
",",
"\"{}\"",
")",
")",
"if",
"not",
"params",
":",
"return",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Missed required fields\"",
"}",
"cid",
"=",
"params",
".",
"get",
"(",
"\"cid\"",
",",
"0",
")",
"coinid",
"=",
"params",
".",
"get",
"(",
"\"coinid\"",
")",
"if",
"not",
"cid",
"and",
"not",
"coinid",
":",
"return",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Missed cid\"",
"}",
"reviews",
"=",
"[",
"]",
"database",
"=",
"client",
"[",
"coinid",
"]",
"collection",
"=",
"database",
"[",
"settings",
".",
"REVIEW",
"]",
"async",
"for",
"document",
"in",
"collection",
".",
"find",
"(",
"{",
"\"confirmed\"",
":",
"None",
",",
"\"cid\"",
":",
"int",
"(",
"cid",
")",
"}",
")",
":",
"reviews",
".",
"append",
"(",
"{",
"i",
":",
"document",
"[",
"i",
"]",
"for",
"i",
"in",
"document",
"if",
"i",
"==",
"\"confirmed\"",
"}",
")",
"return",
"reviews"
] | 26.416667 | 20.375 |
def get_shapes_intersecting_geometry(
    feed: "Feed", geometry, geo_shapes=None, *, geometrized: bool = False
) -> DataFrame:
    """
    Return the slice of ``feed.shapes`` that contains all shapes that
    intersect the given Shapely geometry, e.g. a Polygon or LineString.

    Parameters
    ----------
    feed : Feed
    geometry : Shapely geometry, e.g. a Polygon
        Specified in WGS84 coordinates
    geo_shapes : GeoPandas GeoDataFrame
        The output of :func:`geometrize_shapes`
    geometrized : boolean
        If ``True``, then return the shapes DataFrame as a GeoDataFrame
        of the form output by :func:`geometrize_shapes`

    Returns
    -------
    DataFrame or GeoDataFrame

    Notes
    -----
    - Requires GeoPandas
    - Specifying ``geo_shapes`` will skip the first step of the
      algorithm, namely, geometrizing ``feed.shapes``
    - Assume the following feed attributes are not ``None``:

        * ``feed.shapes``, if ``geo_shapes`` is not given

    """
    if geo_shapes is None:
        frame = geometrize_shapes(feed.shapes)
    else:
        frame = geo_shapes.copy()

    # Boolean mask instead of materializing a temporary "hit" column
    keep = frame["geometry"].intersects(geometry)
    frame = frame.loc[keep]

    if geometrized:
        return frame
    return ungeometrize_shapes(frame)
"def",
"get_shapes_intersecting_geometry",
"(",
"feed",
":",
"\"Feed\"",
",",
"geometry",
",",
"geo_shapes",
"=",
"None",
",",
"*",
",",
"geometrized",
":",
"bool",
"=",
"False",
")",
"->",
"DataFrame",
":",
"if",
"geo_shapes",
"is",
"not",
"None",
":",
"f",
"=",
"geo_shapes",
".",
"copy",
"(",
")",
"else",
":",
"f",
"=",
"geometrize_shapes",
"(",
"feed",
".",
"shapes",
")",
"cols",
"=",
"f",
".",
"columns",
"f",
"[",
"\"hit\"",
"]",
"=",
"f",
"[",
"\"geometry\"",
"]",
".",
"intersects",
"(",
"geometry",
")",
"f",
"=",
"f",
"[",
"f",
"[",
"\"hit\"",
"]",
"]",
"[",
"cols",
"]",
"if",
"geometrized",
":",
"return",
"f",
"else",
":",
"return",
"ungeometrize_shapes",
"(",
"f",
")"
] | 27.866667 | 21.2 |
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    Populates the ``item`` dict for one analysis-service listing row:
    category grouping, linked Category/Calculation/Methods, formatted
    MaxTimeAllowed/Price/DuplicateVariation, and status icons.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    cat = obj.getCategoryTitle()
    cat_order = self.an_cats_order.get(cat)
    if self.do_cats:
        # category groups entries
        item["category"] = cat
        if (cat, cat_order) not in self.categories:
            self.categories.append((cat, cat_order))
    # Category
    category = obj.getCategory()
    if category:
        title = category.Title()
        url = category.absolute_url()
        item["Category"] = title
        item["replace"]["Category"] = get_link(url, value=title)
    # Calculation
    calculation = obj.getCalculation()
    if calculation:
        title = calculation.Title()
        url = calculation.absolute_url()
        item["Calculation"] = title
        item["replace"]["Calculation"] = get_link(url, value=title)
    # Methods (rendered as a comma-separated list of links)
    methods = obj.getMethods()
    if methods:
        links = map(
            lambda m: get_link(
                m.absolute_url(), value=m.Title(), css_class="link"),
            methods)
        item["replace"]["Methods"] = ", ".join(links)
    # Max time allowed
    maxtime = obj.MaxTimeAllowed
    if maxtime:
        item["MaxTimeAllowed"] = self.format_maxtime(maxtime)
    # Price
    item["Price"] = self.format_price(obj.Price)
    # Duplicate Variation
    dup_variation = obj.DuplicateVariation
    if dup_variation:
        item["DuplicateVariation"] = self.format_duplication_variation(
            dup_variation)
    # Icons appended after the title: accreditation and attachment policy
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image(
            "accredited.png", title=_("Accredited"))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image(
            "attach_reqd.png", title=_("Attachment required"))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image(
            "attach_no.png", title=_("Attachment not permitted"))
    if after_icons:
        item["after"]["Title"] = after_icons
    return item
"def",
"folderitem",
"(",
"self",
",",
"obj",
",",
"item",
",",
"index",
")",
":",
"cat",
"=",
"obj",
".",
"getCategoryTitle",
"(",
")",
"cat_order",
"=",
"self",
".",
"an_cats_order",
".",
"get",
"(",
"cat",
")",
"if",
"self",
".",
"do_cats",
":",
"# category groups entries",
"item",
"[",
"\"category\"",
"]",
"=",
"cat",
"if",
"(",
"cat",
",",
"cat_order",
")",
"not",
"in",
"self",
".",
"categories",
":",
"self",
".",
"categories",
".",
"append",
"(",
"(",
"cat",
",",
"cat_order",
")",
")",
"# Category",
"category",
"=",
"obj",
".",
"getCategory",
"(",
")",
"if",
"category",
":",
"title",
"=",
"category",
".",
"Title",
"(",
")",
"url",
"=",
"category",
".",
"absolute_url",
"(",
")",
"item",
"[",
"\"Category\"",
"]",
"=",
"title",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Category\"",
"]",
"=",
"get_link",
"(",
"url",
",",
"value",
"=",
"title",
")",
"# Calculation",
"calculation",
"=",
"obj",
".",
"getCalculation",
"(",
")",
"if",
"calculation",
":",
"title",
"=",
"calculation",
".",
"Title",
"(",
")",
"url",
"=",
"calculation",
".",
"absolute_url",
"(",
")",
"item",
"[",
"\"Calculation\"",
"]",
"=",
"title",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Calculation\"",
"]",
"=",
"get_link",
"(",
"url",
",",
"value",
"=",
"title",
")",
"# Methods",
"methods",
"=",
"obj",
".",
"getMethods",
"(",
")",
"if",
"methods",
":",
"links",
"=",
"map",
"(",
"lambda",
"m",
":",
"get_link",
"(",
"m",
".",
"absolute_url",
"(",
")",
",",
"value",
"=",
"m",
".",
"Title",
"(",
")",
",",
"css_class",
"=",
"\"link\"",
")",
",",
"methods",
")",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"Methods\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"links",
")",
"# Max time allowed",
"maxtime",
"=",
"obj",
".",
"MaxTimeAllowed",
"if",
"maxtime",
":",
"item",
"[",
"\"MaxTimeAllowed\"",
"]",
"=",
"self",
".",
"format_maxtime",
"(",
"maxtime",
")",
"# Price",
"item",
"[",
"\"Price\"",
"]",
"=",
"self",
".",
"format_price",
"(",
"obj",
".",
"Price",
")",
"# Duplicate Variation",
"dup_variation",
"=",
"obj",
".",
"DuplicateVariation",
"if",
"dup_variation",
":",
"item",
"[",
"\"DuplicateVariation\"",
"]",
"=",
"self",
".",
"format_duplication_variation",
"(",
"dup_variation",
")",
"# Icons",
"after_icons",
"=",
"\"\"",
"if",
"obj",
".",
"getAccredited",
"(",
")",
":",
"after_icons",
"+=",
"get_image",
"(",
"\"accredited.png\"",
",",
"title",
"=",
"_",
"(",
"\"Accredited\"",
")",
")",
"if",
"obj",
".",
"getAttachmentOption",
"(",
")",
"==",
"\"r\"",
":",
"after_icons",
"+=",
"get_image",
"(",
"\"attach_reqd.png\"",
",",
"title",
"=",
"_",
"(",
"\"Attachment required\"",
")",
")",
"if",
"obj",
".",
"getAttachmentOption",
"(",
")",
"==",
"\"n\"",
":",
"after_icons",
"+=",
"get_image",
"(",
"\"attach_no.png\"",
",",
"title",
"=",
"_",
"(",
"\"Attachment not permitted\"",
")",
")",
"if",
"after_icons",
":",
"item",
"[",
"\"after\"",
"]",
"[",
"\"Title\"",
"]",
"=",
"after_icons",
"return",
"item"
] | 34.704225 | 16.661972 |
def push(self, request):
    """Push a request.

    The request is serialized with ``_encode_request`` and published on
    the default exchange, using this queue's key as the routing key.
    """
    payload = self._encode_request(request)
    self.server.basic_publish(exchange='',
                              routing_key=self.key,
                              body=payload)
"def",
"push",
"(",
"self",
",",
"request",
")",
":",
"self",
".",
"server",
".",
"basic_publish",
"(",
"exchange",
"=",
"''",
",",
"routing_key",
"=",
"self",
".",
"key",
",",
"body",
"=",
"self",
".",
"_encode_request",
"(",
"request",
")",
")"
] | 28.285714 | 11.714286 |
async def get_vm(self, vm_id):
    """ Dummy get_vm func

    Returns the stored dummy VM for ``vm_id``; raises
    ``DummyIaasVmNotFound`` when no such VM exists.
    """
    if vm_id in self._vms:
        return self._vms[vm_id]
    raise DummyIaasVmNotFound()
"async",
"def",
"get_vm",
"(",
"self",
",",
"vm_id",
")",
":",
"if",
"vm_id",
"not",
"in",
"self",
".",
"_vms",
":",
"raise",
"DummyIaasVmNotFound",
"(",
")",
"return",
"self",
".",
"_vms",
"[",
"vm_id",
"]"
] | 33.4 | 5.2 |
def add_base_type_dynamically(error_type, additional_type):
    """
    Utility method to create a new type dynamically, inheriting from both error_type (first) and additional_type
    (second). The ``__name__``/``__qualname__`` of the resulting class combine both names in the
    bracketed form ``error_type[additional_type]``.

    For example

    ```
    > new_type = add_base_type_dynamically(ValidationError, ValueError)
    > repr(new_type)
    "<class 'valid8.entry_points.ValidationError[ValueError]'>"
    ```

    (NOTE(review): the exact repr also depends on ``MetaReprForValidator`` --
    confirm against that metaclass.)

    :param error_type: the base type placed first in the MRO
    :param additional_type: the extra base type placed second in the MRO
    :return: the newly created type
    """
    # the new type created dynamically, with the same name
    class new_error_type(with_metaclass(MetaReprForValidator, error_type, additional_type, object)):
        pass
    new_error_type.__name__ = error_type.__name__ + '[' + additional_type.__name__ + ']'
    if sys.version_info >= (3, 0):
        new_error_type.__qualname__ = error_type.__qualname__ + '[' + additional_type.__qualname__+ ']'
    new_error_type.__module__ = error_type.__module__
    return new_error_type
"def",
"add_base_type_dynamically",
"(",
"error_type",
",",
"additional_type",
")",
":",
"# the new type created dynamically, with the same name",
"class",
"new_error_type",
"(",
"with_metaclass",
"(",
"MetaReprForValidator",
",",
"error_type",
",",
"additional_type",
",",
"object",
")",
")",
":",
"pass",
"new_error_type",
".",
"__name__",
"=",
"error_type",
".",
"__name__",
"+",
"'['",
"+",
"additional_type",
".",
"__name__",
"+",
"']'",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"0",
")",
":",
"new_error_type",
".",
"__qualname__",
"=",
"error_type",
".",
"__qualname__",
"+",
"'['",
"+",
"additional_type",
".",
"__qualname__",
"+",
"']'",
"new_error_type",
".",
"__module__",
"=",
"error_type",
".",
"__module__",
"return",
"new_error_type"
] | 42.875 | 31.291667 |
def status(self):
"""
The status of the container. For example, ``running``, or ``exited``.
"""
if isinstance(self.attrs['State'], dict):
return self.attrs['State']['Status']
return self.attrs['State'] | [
"def",
"status",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"attrs",
"[",
"'State'",
"]",
",",
"dict",
")",
":",
"return",
"self",
".",
"attrs",
"[",
"'State'",
"]",
"[",
"'Status'",
"]",
"return",
"self",
".",
"attrs",
"[",
"'State'",
"]"
] | 35.285714 | 11.857143 |
def patText(s0):
'''make text pattern'''
arr = np.zeros((s0,s0), dtype=np.uint8)
s = int(round(s0/100.))
p1 = 0
pp1 = int(round(s0/10.))
for pos0 in np.linspace(0,s0,10):
cv2.putText(arr, 'helloworld', (p1,int(round(pos0))),
cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=s,
color=255, thickness=s,
lineType=cv2.LINE_AA )
if p1:
p1 = 0
else:
p1 = pp1
return arr.astype(float) | [
"def",
"patText",
"(",
"s0",
")",
":",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"s0",
",",
"s0",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"s",
"=",
"int",
"(",
"round",
"(",
"s0",
"/",
"100.",
")",
")",
"p1",
"=",
"0",
"pp1",
"=",
"int",
"(",
"round",
"(",
"s0",
"/",
"10.",
")",
")",
"for",
"pos0",
"in",
"np",
".",
"linspace",
"(",
"0",
",",
"s0",
",",
"10",
")",
":",
"cv2",
".",
"putText",
"(",
"arr",
",",
"'helloworld'",
",",
"(",
"p1",
",",
"int",
"(",
"round",
"(",
"pos0",
")",
")",
")",
",",
"cv2",
".",
"FONT_HERSHEY_COMPLEX_SMALL",
",",
"fontScale",
"=",
"s",
",",
"color",
"=",
"255",
",",
"thickness",
"=",
"s",
",",
"lineType",
"=",
"cv2",
".",
"LINE_AA",
")",
"if",
"p1",
":",
"p1",
"=",
"0",
"else",
":",
"p1",
"=",
"pp1",
"return",
"arr",
".",
"astype",
"(",
"float",
")"
] | 31.6875 | 15.6875 |
def _generate_values_with_variability_and_constraints(self, symbols, starting_values, variable_parameters):
"""
Generates the `values_with_variability` formatted list
from the provided symbols, starting values and variable parameters
:param symbols: The symbols defining each of the values in the starting values list
:param starting_values: the actual starting values
:param variable_parameters: a dictionary/set/list of variables that are variable
if dictionary provided, the contents should be `symbol: range` where range is
a tuple ``(min_val, max_val)`` of allowed parameter values or ``None`` for no limit.
if set/list provided, the ranges will be assumed to be ``None`` for each of
the parameters
:type variable_parameters: dict|iterable
:return:
"""
values_with_variability = []
constraints = []
if not isinstance(variable_parameters, dict):
# Convert non/dict representations to Dict with nones
variable_parameters = {p: None for p in variable_parameters}
for parameter, parameter_value in zip(symbols, starting_values):
try:
constraint = variable_parameters[parameter]
variable = True
except KeyError:
try:
constraint = variable_parameters[str(parameter)]
variable = True
except KeyError:
constraint = None
variable = False
values_with_variability.append((parameter_value, variable))
if variable:
constraints.append(constraint)
return values_with_variability, constraints | [
"def",
"_generate_values_with_variability_and_constraints",
"(",
"self",
",",
"symbols",
",",
"starting_values",
",",
"variable_parameters",
")",
":",
"values_with_variability",
"=",
"[",
"]",
"constraints",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"variable_parameters",
",",
"dict",
")",
":",
"# Convert non/dict representations to Dict with nones",
"variable_parameters",
"=",
"{",
"p",
":",
"None",
"for",
"p",
"in",
"variable_parameters",
"}",
"for",
"parameter",
",",
"parameter_value",
"in",
"zip",
"(",
"symbols",
",",
"starting_values",
")",
":",
"try",
":",
"constraint",
"=",
"variable_parameters",
"[",
"parameter",
"]",
"variable",
"=",
"True",
"except",
"KeyError",
":",
"try",
":",
"constraint",
"=",
"variable_parameters",
"[",
"str",
"(",
"parameter",
")",
"]",
"variable",
"=",
"True",
"except",
"KeyError",
":",
"constraint",
"=",
"None",
"variable",
"=",
"False",
"values_with_variability",
".",
"append",
"(",
"(",
"parameter_value",
",",
"variable",
")",
")",
"if",
"variable",
":",
"constraints",
".",
"append",
"(",
"constraint",
")",
"return",
"values_with_variability",
",",
"constraints"
] | 47.076923 | 26.25641 |
def decompile(f):
"""
Decompile a function.
Parameters
----------
f : function
The function to decompile.
Returns
-------
ast : ast.FunctionDef
A FunctionDef node that compiles to f.
"""
co = f.__code__
args, kwonly, varargs, varkwargs = paramnames(co)
annotations = f.__annotations__ or {}
defaults = list(f.__defaults__ or ())
kw_defaults = f.__kwdefaults__ or {}
if f.__name__ == '<lambda>':
node = ast.Lambda
body = pycode_to_body(co, DecompilationContext(in_lambda=True))[0]
extra_kwargs = {}
else:
node = ast.FunctionDef
body = pycode_to_body(co, DecompilationContext(in_function_block=True))
extra_kwargs = {
'decorator_list': [],
'returns': annotations.get('return')
}
return node(
name=f.__name__,
args=make_function_arguments(
args=args,
kwonly=kwonly,
varargs=varargs,
varkwargs=varkwargs,
defaults=defaults,
kw_defaults=kw_defaults,
annotations=annotations,
),
body=body,
**extra_kwargs
) | [
"def",
"decompile",
"(",
"f",
")",
":",
"co",
"=",
"f",
".",
"__code__",
"args",
",",
"kwonly",
",",
"varargs",
",",
"varkwargs",
"=",
"paramnames",
"(",
"co",
")",
"annotations",
"=",
"f",
".",
"__annotations__",
"or",
"{",
"}",
"defaults",
"=",
"list",
"(",
"f",
".",
"__defaults__",
"or",
"(",
")",
")",
"kw_defaults",
"=",
"f",
".",
"__kwdefaults__",
"or",
"{",
"}",
"if",
"f",
".",
"__name__",
"==",
"'<lambda>'",
":",
"node",
"=",
"ast",
".",
"Lambda",
"body",
"=",
"pycode_to_body",
"(",
"co",
",",
"DecompilationContext",
"(",
"in_lambda",
"=",
"True",
")",
")",
"[",
"0",
"]",
"extra_kwargs",
"=",
"{",
"}",
"else",
":",
"node",
"=",
"ast",
".",
"FunctionDef",
"body",
"=",
"pycode_to_body",
"(",
"co",
",",
"DecompilationContext",
"(",
"in_function_block",
"=",
"True",
")",
")",
"extra_kwargs",
"=",
"{",
"'decorator_list'",
":",
"[",
"]",
",",
"'returns'",
":",
"annotations",
".",
"get",
"(",
"'return'",
")",
"}",
"return",
"node",
"(",
"name",
"=",
"f",
".",
"__name__",
",",
"args",
"=",
"make_function_arguments",
"(",
"args",
"=",
"args",
",",
"kwonly",
"=",
"kwonly",
",",
"varargs",
"=",
"varargs",
",",
"varkwargs",
"=",
"varkwargs",
",",
"defaults",
"=",
"defaults",
",",
"kw_defaults",
"=",
"kw_defaults",
",",
"annotations",
"=",
"annotations",
",",
")",
",",
"body",
"=",
"body",
",",
"*",
"*",
"extra_kwargs",
")"
] | 25.021739 | 17.978261 |
def get_filebase(path, pattern):
"""Get the end of *path* of same length as *pattern*."""
# A pattern can include directories
tail_len = len(pattern.split(os.path.sep))
return os.path.join(*str(path).split(os.path.sep)[-tail_len:]) | [
"def",
"get_filebase",
"(",
"path",
",",
"pattern",
")",
":",
"# A pattern can include directories",
"tail_len",
"=",
"len",
"(",
"pattern",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"*",
"str",
"(",
"path",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"[",
"-",
"tail_len",
":",
"]",
")"
] | 48.6 | 8.2 |
def setHidden(self, state):
"""
Marks this item as hidden based on the state. This will affect all
its children as well.
:param state | <bool>
"""
super(XListGroupItem, self).setHidden(state)
for child in self.children():
child.setHidden(state or not self.isExpanded()) | [
"def",
"setHidden",
"(",
"self",
",",
"state",
")",
":",
"super",
"(",
"XListGroupItem",
",",
"self",
")",
".",
"setHidden",
"(",
"state",
")",
"for",
"child",
"in",
"self",
".",
"children",
"(",
")",
":",
"child",
".",
"setHidden",
"(",
"state",
"or",
"not",
"self",
".",
"isExpanded",
"(",
")",
")"
] | 32.818182 | 14.454545 |
def _get_firmware_update_xml_for_file_and_component(
self, filename, component):
"""Creates the dynamic xml for flashing the device firmware via iLO.
This method creates the dynamic xml for flashing the firmware, based
on the component type so passed.
:param filename: location of the raw firmware file.
:param component_type: Type of component to be applied to.
:returns: the etree.Element for the root of the RIBCL XML
for flashing the device (component) firmware.
"""
if component == 'ilo':
cmd_name = 'UPDATE_RIB_FIRMWARE'
else:
# Note(deray): Not explicitly checking for all other supported
# devices (components), as those checks have already happened
# in the invoking methods and may seem redundant here.
cmd_name = 'UPDATE_FIRMWARE'
fwlen = os.path.getsize(filename)
root = self._create_dynamic_xml(cmd_name,
'RIB_INFO',
'write',
subelements={
'IMAGE_LOCATION': filename,
'IMAGE_LENGTH': str(fwlen)
})
return root | [
"def",
"_get_firmware_update_xml_for_file_and_component",
"(",
"self",
",",
"filename",
",",
"component",
")",
":",
"if",
"component",
"==",
"'ilo'",
":",
"cmd_name",
"=",
"'UPDATE_RIB_FIRMWARE'",
"else",
":",
"# Note(deray): Not explicitly checking for all other supported",
"# devices (components), as those checks have already happened",
"# in the invoking methods and may seem redundant here.",
"cmd_name",
"=",
"'UPDATE_FIRMWARE'",
"fwlen",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
"root",
"=",
"self",
".",
"_create_dynamic_xml",
"(",
"cmd_name",
",",
"'RIB_INFO'",
",",
"'write'",
",",
"subelements",
"=",
"{",
"'IMAGE_LOCATION'",
":",
"filename",
",",
"'IMAGE_LENGTH'",
":",
"str",
"(",
"fwlen",
")",
"}",
")",
"return",
"root"
] | 47.535714 | 17.214286 |
def example_lchab_to_lchuv():
"""
This function shows very complex chain of conversions in action.
LCHab to LCHuv involves four different calculations, making this the
conversion requiring the most steps.
"""
print("=== Complex Example: LCHab->LCHuv ===")
# Instantiate an LCHab color object with the given values.
lchab = LCHabColor(0.903, 16.447, 352.252)
# Show a string representation.
print(lchab)
# Convert to LCHuv.
lchuv = convert_color(lchab, LCHuvColor)
print(lchuv)
print("=== End Example ===\n") | [
"def",
"example_lchab_to_lchuv",
"(",
")",
":",
"print",
"(",
"\"=== Complex Example: LCHab->LCHuv ===\"",
")",
"# Instantiate an LCHab color object with the given values.",
"lchab",
"=",
"LCHabColor",
"(",
"0.903",
",",
"16.447",
",",
"352.252",
")",
"# Show a string representation.",
"print",
"(",
"lchab",
")",
"# Convert to LCHuv.",
"lchuv",
"=",
"convert_color",
"(",
"lchab",
",",
"LCHuvColor",
")",
"print",
"(",
"lchuv",
")",
"print",
"(",
"\"=== End Example ===\\n\"",
")"
] | 32.294118 | 15.823529 |
def _jacobian_both(nodes, degree, dimension):
r"""Compute :math:`s` and :math:`t` partial of :math:`B`.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Array of nodes in a surface.
degree (int): The degree of the surface.
dimension (int): The dimension the surface lives in.
Returns:
numpy.ndarray: Nodes of the Jacobian surfaces in
B |eacute| zier form.
"""
_, num_nodes = nodes.shape
result = np.empty((2 * dimension, num_nodes - degree - 1), order="F")
result[:dimension, :] = jacobian_s(nodes, degree, dimension)
result[dimension:, :] = jacobian_t(nodes, degree, dimension)
return result | [
"def",
"_jacobian_both",
"(",
"nodes",
",",
"degree",
",",
"dimension",
")",
":",
"_",
",",
"num_nodes",
"=",
"nodes",
".",
"shape",
"result",
"=",
"np",
".",
"empty",
"(",
"(",
"2",
"*",
"dimension",
",",
"num_nodes",
"-",
"degree",
"-",
"1",
")",
",",
"order",
"=",
"\"F\"",
")",
"result",
"[",
":",
"dimension",
",",
":",
"]",
"=",
"jacobian_s",
"(",
"nodes",
",",
"degree",
",",
"dimension",
")",
"result",
"[",
"dimension",
":",
",",
":",
"]",
"=",
"jacobian_t",
"(",
"nodes",
",",
"degree",
",",
"dimension",
")",
"return",
"result"
] | 34.5 | 21.136364 |
def persist_trie_data_dict(self, trie_data_dict: Dict[Hash32, bytes]) -> None:
"""
Store raw trie data to db from a dict
"""
with self.db.atomic_batch() as db:
for key, value in trie_data_dict.items():
db[key] = value | [
"def",
"persist_trie_data_dict",
"(",
"self",
",",
"trie_data_dict",
":",
"Dict",
"[",
"Hash32",
",",
"bytes",
"]",
")",
"->",
"None",
":",
"with",
"self",
".",
"db",
".",
"atomic_batch",
"(",
")",
"as",
"db",
":",
"for",
"key",
",",
"value",
"in",
"trie_data_dict",
".",
"items",
"(",
")",
":",
"db",
"[",
"key",
"]",
"=",
"value"
] | 38.714286 | 9.571429 |
def keys_breadth_first(self, include_dicts=False):
"""a generator that returns all the keys in a set of nested
DotDict instances. The keys take the form X.Y.Z"""
namespaces = []
for key in self._key_order:
if isinstance(getattr(self, key), DotDict):
namespaces.append(key)
if include_dicts:
yield key
else:
yield key
for a_namespace in namespaces:
for key in self[a_namespace].keys_breadth_first(include_dicts):
yield '%s.%s' % (a_namespace, key) | [
"def",
"keys_breadth_first",
"(",
"self",
",",
"include_dicts",
"=",
"False",
")",
":",
"namespaces",
"=",
"[",
"]",
"for",
"key",
"in",
"self",
".",
"_key_order",
":",
"if",
"isinstance",
"(",
"getattr",
"(",
"self",
",",
"key",
")",
",",
"DotDict",
")",
":",
"namespaces",
".",
"append",
"(",
"key",
")",
"if",
"include_dicts",
":",
"yield",
"key",
"else",
":",
"yield",
"key",
"for",
"a_namespace",
"in",
"namespaces",
":",
"for",
"key",
"in",
"self",
"[",
"a_namespace",
"]",
".",
"keys_breadth_first",
"(",
"include_dicts",
")",
":",
"yield",
"'%s.%s'",
"%",
"(",
"a_namespace",
",",
"key",
")"
] | 42.428571 | 10.857143 |
def cdd(d, k):
""" Conditionally delete key (or list of keys) 'k' from dict 'd' """
if not isinstance(k, list):
k = [k]
for i in k:
if i in d:
d.pop(i) | [
"def",
"cdd",
"(",
"d",
",",
"k",
")",
":",
"if",
"not",
"isinstance",
"(",
"k",
",",
"list",
")",
":",
"k",
"=",
"[",
"k",
"]",
"for",
"i",
"in",
"k",
":",
"if",
"i",
"in",
"d",
":",
"d",
".",
"pop",
"(",
"i",
")"
] | 26.428571 | 18.142857 |
def consume(self, callback, queue):
"""
Register a new consumer.
This consumer will be configured for every protocol this factory
produces so it will be reconfigured on network failures. If a connection
is already active, the consumer will be added to it.
Args:
callback (callable): The callback to invoke when a message arrives.
queue (str): The name of the queue to consume from.
"""
self.consumers[queue] = callback
if self._client_ready.called:
return self.client.consume(callback, queue) | [
"def",
"consume",
"(",
"self",
",",
"callback",
",",
"queue",
")",
":",
"self",
".",
"consumers",
"[",
"queue",
"]",
"=",
"callback",
"if",
"self",
".",
"_client_ready",
".",
"called",
":",
"return",
"self",
".",
"client",
".",
"consume",
"(",
"callback",
",",
"queue",
")"
] | 39.2 | 19.466667 |
def mangle_name(name):
"""Mangles a column name to a standard form, remoing illegal
characters.
:param name:
:return:
"""
import re
try:
return re.sub('_+', '_', re.sub('[^\w_]', '_', name).lower()).rstrip('_')
except TypeError:
raise TypeError(
'Trying to mangle name with invalid type of: ' + str(type(name))) | [
"def",
"mangle_name",
"(",
"name",
")",
":",
"import",
"re",
"try",
":",
"return",
"re",
".",
"sub",
"(",
"'_+'",
",",
"'_'",
",",
"re",
".",
"sub",
"(",
"'[^\\w_]'",
",",
"'_'",
",",
"name",
")",
".",
"lower",
"(",
")",
")",
".",
"rstrip",
"(",
"'_'",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"'Trying to mangle name with invalid type of: '",
"+",
"str",
"(",
"type",
"(",
"name",
")",
")",
")"
] | 28.857143 | 23.357143 |
def set(self, key, value, time=0, compress_level=-1):
"""
Set a value for a key on server.
:param key: Key's name
:type key: str
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
server = self._get_server(key)
return server.set(key, value, time, compress_level) | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
",",
"time",
"=",
"0",
",",
"compress_level",
"=",
"-",
"1",
")",
":",
"server",
"=",
"self",
".",
"_get_server",
"(",
"key",
")",
"return",
"server",
".",
"set",
"(",
"key",
",",
"value",
",",
"time",
",",
"compress_level",
")"
] | 37.526316 | 13.947368 |
def draw_char_screen(self):
"""
Draws the output buffered in the char_buffer.
"""
self.screen = Image.new("RGB", (self.height, self.width))
self.drawer = ImageDraw.Draw(self.screen)
for sy, line in enumerate(self.char_buffer):
for sx, tinfo in enumerate(line):
self.drawer.text((sx * 6, sy * 9), tinfo[0], fill=tinfo[1:])
self.output_device.interrupt() | [
"def",
"draw_char_screen",
"(",
"self",
")",
":",
"self",
".",
"screen",
"=",
"Image",
".",
"new",
"(",
"\"RGB\"",
",",
"(",
"self",
".",
"height",
",",
"self",
".",
"width",
")",
")",
"self",
".",
"drawer",
"=",
"ImageDraw",
".",
"Draw",
"(",
"self",
".",
"screen",
")",
"for",
"sy",
",",
"line",
"in",
"enumerate",
"(",
"self",
".",
"char_buffer",
")",
":",
"for",
"sx",
",",
"tinfo",
"in",
"enumerate",
"(",
"line",
")",
":",
"self",
".",
"drawer",
".",
"text",
"(",
"(",
"sx",
"*",
"6",
",",
"sy",
"*",
"9",
")",
",",
"tinfo",
"[",
"0",
"]",
",",
"fill",
"=",
"tinfo",
"[",
"1",
":",
"]",
")",
"self",
".",
"output_device",
".",
"interrupt",
"(",
")"
] | 33.090909 | 11.272727 |
def group(self):
"""(re-)group all logevents by the given group."""
if hasattr(self, 'group_by'):
group_by = self.group_by
else:
group_by = self.default_group_by
if self.args['group'] is not None:
group_by = self.args['group']
self.groups = Grouping(self.logevents, group_by)
self.groups.move_items(None, 'others')
self.groups.sort_by_size(group_limit=self.args['group_limit'],
discard_others=self.args['no_others']) | [
"def",
"group",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'group_by'",
")",
":",
"group_by",
"=",
"self",
".",
"group_by",
"else",
":",
"group_by",
"=",
"self",
".",
"default_group_by",
"if",
"self",
".",
"args",
"[",
"'group'",
"]",
"is",
"not",
"None",
":",
"group_by",
"=",
"self",
".",
"args",
"[",
"'group'",
"]",
"self",
".",
"groups",
"=",
"Grouping",
"(",
"self",
".",
"logevents",
",",
"group_by",
")",
"self",
".",
"groups",
".",
"move_items",
"(",
"None",
",",
"'others'",
")",
"self",
".",
"groups",
".",
"sort_by_size",
"(",
"group_limit",
"=",
"self",
".",
"args",
"[",
"'group_limit'",
"]",
",",
"discard_others",
"=",
"self",
".",
"args",
"[",
"'no_others'",
"]",
")"
] | 41.384615 | 15.076923 |
def delete_records(self, zone_name, record_type, subdomain):
"""
Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record) | [
"def",
"delete_records",
"(",
"self",
",",
"zone_name",
",",
"record_type",
",",
"subdomain",
")",
":",
"records",
"=",
"self",
".",
"_client",
".",
"get",
"(",
"'/domain/zone/{}/record'",
".",
"format",
"(",
"zone_name",
")",
",",
"fieldType",
"=",
"record_type",
",",
"subDomain",
"=",
"subdomain",
")",
"for",
"record",
"in",
"records",
":",
"self",
".",
"delete_record",
"(",
"zone_name",
",",
"record",
")"
] | 45.545455 | 14.090909 |
def get_list(list_type=None,
search_term=None,
page=None,
page_size=None,
sort_by=None):
'''
Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type : ALL
One of ``ALL``, ``EXPIRING``, ``EXPIRED``
search_term
Keyword to look for on the domain list
page : 1
Number of result page to return
page_size : 20
Number of domains to be listed per page (minimum: ``10``, maximum:
``100``)
sort_by
One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
``CREATEDATE``, or ``CREATEDATE_DESC``
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')
if list_type is not None:
if list_type not in ['ALL', 'EXPIRING', 'EXPIRED']:
log.error('Invalid option for list_type')
raise Exception('Invalid option for list_type')
opts['ListType'] = list_type
if search_term is not None:
if len(search_term) > 70:
log.warning('search_term trimmed to first 70 characters')
search_term = search_term[0:70]
opts['SearchTerm'] = search_term
if page is not None:
opts['Page'] = page
if page_size is not None:
if page_size > 100 or page_size < 10:
log.error('Invalid option for page')
raise Exception('Invalid option for page')
opts['PageSize'] = page_size
if sort_by is not None:
if sort_by not in ['NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC', 'CREATEDATE', 'CREATEDATE_DESC']:
log.error('Invalid option for sort_by')
raise Exception('Invalid option for sort_by')
opts['SortBy'] = sort_by
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return []
domainresult = response_xml.getElementsByTagName("DomainGetListResult")[0]
domains = []
for d in domainresult.getElementsByTagName("Domain"):
domains.append(salt.utils.namecheap.atts_to_dict(d))
return domains | [
"def",
"get_list",
"(",
"list_type",
"=",
"None",
",",
"search_term",
"=",
"None",
",",
"page",
"=",
"None",
",",
"page_size",
"=",
"None",
",",
"sort_by",
"=",
"None",
")",
":",
"opts",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_opts",
"(",
"'namecheap.domains.getList'",
")",
"if",
"list_type",
"is",
"not",
"None",
":",
"if",
"list_type",
"not",
"in",
"[",
"'ALL'",
",",
"'EXPIRING'",
",",
"'EXPIRED'",
"]",
":",
"log",
".",
"error",
"(",
"'Invalid option for list_type'",
")",
"raise",
"Exception",
"(",
"'Invalid option for list_type'",
")",
"opts",
"[",
"'ListType'",
"]",
"=",
"list_type",
"if",
"search_term",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"search_term",
")",
">",
"70",
":",
"log",
".",
"warning",
"(",
"'search_term trimmed to first 70 characters'",
")",
"search_term",
"=",
"search_term",
"[",
"0",
":",
"70",
"]",
"opts",
"[",
"'SearchTerm'",
"]",
"=",
"search_term",
"if",
"page",
"is",
"not",
"None",
":",
"opts",
"[",
"'Page'",
"]",
"=",
"page",
"if",
"page_size",
"is",
"not",
"None",
":",
"if",
"page_size",
">",
"100",
"or",
"page_size",
"<",
"10",
":",
"log",
".",
"error",
"(",
"'Invalid option for page'",
")",
"raise",
"Exception",
"(",
"'Invalid option for page'",
")",
"opts",
"[",
"'PageSize'",
"]",
"=",
"page_size",
"if",
"sort_by",
"is",
"not",
"None",
":",
"if",
"sort_by",
"not",
"in",
"[",
"'NAME'",
",",
"'NAME_DESC'",
",",
"'EXPIREDATE'",
",",
"'EXPIREDATE_DESC'",
",",
"'CREATEDATE'",
",",
"'CREATEDATE_DESC'",
"]",
":",
"log",
".",
"error",
"(",
"'Invalid option for sort_by'",
")",
"raise",
"Exception",
"(",
"'Invalid option for sort_by'",
")",
"opts",
"[",
"'SortBy'",
"]",
"=",
"sort_by",
"response_xml",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_request",
"(",
"opts",
")",
"if",
"response_xml",
"is",
"None",
":",
"return",
"[",
"]",
"domainresult",
"=",
"response_xml",
".",
"getElementsByTagName",
"(",
"\"DomainGetListResult\"",
")",
"[",
"0",
"]",
"domains",
"=",
"[",
"]",
"for",
"d",
"in",
"domainresult",
".",
"getElementsByTagName",
"(",
"\"Domain\"",
")",
":",
"domains",
".",
"append",
"(",
"salt",
".",
"utils",
".",
"namecheap",
".",
"atts_to_dict",
"(",
"d",
")",
")",
"return",
"domains"
] | 29.835616 | 23.178082 |
def drop_pathlist(self, pathlist):
"""Drop path list"""
if pathlist:
files = ["r'%s'" % path for path in pathlist]
if len(files) == 1:
text = files[0]
else:
text = "[" + ", ".join(files) + "]"
if self.new_input_line:
self.on_new_line()
self.insert_text(text)
self.setFocus() | [
"def",
"drop_pathlist",
"(",
"self",
",",
"pathlist",
")",
":",
"if",
"pathlist",
":",
"files",
"=",
"[",
"\"r'%s'\"",
"%",
"path",
"for",
"path",
"in",
"pathlist",
"]",
"if",
"len",
"(",
"files",
")",
"==",
"1",
":",
"text",
"=",
"files",
"[",
"0",
"]",
"else",
":",
"text",
"=",
"\"[\"",
"+",
"\", \"",
".",
"join",
"(",
"files",
")",
"+",
"\"]\"",
"if",
"self",
".",
"new_input_line",
":",
"self",
".",
"on_new_line",
"(",
")",
"self",
".",
"insert_text",
"(",
"text",
")",
"self",
".",
"setFocus",
"(",
")"
] | 34.166667 | 9.916667 |
def resume(self):
"""Resumes the pool and reindex all objects processed
"""
self.num_calls -= 1
if self.num_calls > 0:
return
logger.info("Resume actions for {} objects".format(len(self)))
# Fetch the objects from the pool
processed = list()
for brain in api.search(dict(UID=self.objects.keys()), UID_CATALOG):
uid = api.get_uid(brain)
if uid in processed:
# This object has been processed already, do nothing
continue
# Reindex the object
obj = api.get_object(brain)
idxs = self.get_indexes(uid)
idxs_str = idxs and ', '.join(idxs) or "-- All indexes --"
logger.info("Reindexing {}: {}".format(obj.getId(), idxs_str))
obj.reindexObject(idxs=idxs)
processed.append(uid)
# Cleanup the pool
logger.info("Objects processed: {}".format(len(processed)))
self.objects = collections.OrderedDict() | [
"def",
"resume",
"(",
"self",
")",
":",
"self",
".",
"num_calls",
"-=",
"1",
"if",
"self",
".",
"num_calls",
">",
"0",
":",
"return",
"logger",
".",
"info",
"(",
"\"Resume actions for {} objects\"",
".",
"format",
"(",
"len",
"(",
"self",
")",
")",
")",
"# Fetch the objects from the pool",
"processed",
"=",
"list",
"(",
")",
"for",
"brain",
"in",
"api",
".",
"search",
"(",
"dict",
"(",
"UID",
"=",
"self",
".",
"objects",
".",
"keys",
"(",
")",
")",
",",
"UID_CATALOG",
")",
":",
"uid",
"=",
"api",
".",
"get_uid",
"(",
"brain",
")",
"if",
"uid",
"in",
"processed",
":",
"# This object has been processed already, do nothing",
"continue",
"# Reindex the object",
"obj",
"=",
"api",
".",
"get_object",
"(",
"brain",
")",
"idxs",
"=",
"self",
".",
"get_indexes",
"(",
"uid",
")",
"idxs_str",
"=",
"idxs",
"and",
"', '",
".",
"join",
"(",
"idxs",
")",
"or",
"\"-- All indexes --\"",
"logger",
".",
"info",
"(",
"\"Reindexing {}: {}\"",
".",
"format",
"(",
"obj",
".",
"getId",
"(",
")",
",",
"idxs_str",
")",
")",
"obj",
".",
"reindexObject",
"(",
"idxs",
"=",
"idxs",
")",
"processed",
".",
"append",
"(",
"uid",
")",
"# Cleanup the pool",
"logger",
".",
"info",
"(",
"\"Objects processed: {}\"",
".",
"format",
"(",
"len",
"(",
"processed",
")",
")",
")",
"self",
".",
"objects",
"=",
"collections",
".",
"OrderedDict",
"(",
")"
] | 37.259259 | 16.814815 |
async def total_albums(self, *, market: str = None) -> int:
"""get the total amout of tracks in the album.
Parameters
----------
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
Returns
-------
total : int
The total amount of albums.
"""
data = await self.__client.http.artist_albums(self.id, limit=1, offset=0, market=market)
return data['total'] | [
"async",
"def",
"total_albums",
"(",
"self",
",",
"*",
",",
"market",
":",
"str",
"=",
"None",
")",
"->",
"int",
":",
"data",
"=",
"await",
"self",
".",
"__client",
".",
"http",
".",
"artist_albums",
"(",
"self",
".",
"id",
",",
"limit",
"=",
"1",
",",
"offset",
"=",
"0",
",",
"market",
"=",
"market",
")",
"return",
"data",
"[",
"'total'",
"]"
] | 29.933333 | 20 |
def resettable(f):
"""A decorator to simplify the context management of simple object
attributes. Gets the value of the attribute prior to setting it, and stores
a function to set the value to the old value in the HistoryManager.
"""
def wrapper(self, new_value):
context = get_context(self)
if context:
old_value = getattr(self, f.__name__)
# Don't clutter the context with unchanged variables
if old_value == new_value:
return
context(partial(f, self, old_value))
f(self, new_value)
return wrapper | [
"def",
"resettable",
"(",
"f",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"new_value",
")",
":",
"context",
"=",
"get_context",
"(",
"self",
")",
"if",
"context",
":",
"old_value",
"=",
"getattr",
"(",
"self",
",",
"f",
".",
"__name__",
")",
"# Don't clutter the context with unchanged variables",
"if",
"old_value",
"==",
"new_value",
":",
"return",
"context",
"(",
"partial",
"(",
"f",
",",
"self",
",",
"old_value",
")",
")",
"f",
"(",
"self",
",",
"new_value",
")",
"return",
"wrapper"
] | 33.166667 | 19 |
def match(self, ra, dec, radius, maxmatch=1):
"""*match a corrdinate set against this Matcher object's coordinate set*
**Key Arguments:**
- ``ra`` -- list, numpy array or single ra value
- ``dec`` -- --list, numpy array or single dec value (must match ra array length)
- ``radius`` -- radius of circle in degrees
- ``maxmatch`` -- maximum number of matches to return. Set to `0` to match all points. Default *1* (i.e. closest match)
**Return:**
- None
**Usage:**
Once we have initialised a Matcher coordinateSet object we can match other coordinate sets against it:
.. code-block:: python
twoArcsec = 2.0 / 3600.
raList2 = [200.0, 200.0, 200.0, 175.23, 55.25]
decList2 = [24.3 + 0.75 * twoArcsec, 24.3 + 0.25 * twoArcsec,
24.3 - 0.33 * twoArcsec, -28.25 + 0.58 * twoArcsec, 75.22]
matchIndices1, matchIndices2, seps = coordinateSet.match(
ra=raList2,
dec=decList2,
radius=twoArcsec,
maxmatch=0
)
for m1, m2, s in zip(matchIndices1, matchIndices2, seps):
print raList1[m1], decList1[m1], " -> ", s * 3600., " arcsec -> ", raList2[m2], decList2[m2]
Or to return just the nearest matches:
.. code-block:: python
matchIndices1, matchIndices2, seps = coordinateSet.match(
ra=raList2,
dec=decList2,
radius=twoArcsec,
maxmatch=1
)
Note from the print statement, you can index the arrays ``raList1``, ``decList1`` with the ``matchIndices1`` array values and ``raList2``, ``decList2`` with the ``matchIndices2`` values.
"""
if self.convertToArray == True:
from astrocalc.coords import coordinates_to_array
ra, dec = coordinates_to_array(
log=self.log,
ra=ra,
dec=dec
)
radius = numpy.array(radius, dtype='f8', ndmin=1, copy=False)
if ra.size != dec.size:
raise ValueError("ra size (%d) != "
"dec size (%d)" % (ra.size, dec.size))
if radius.size != 1 and radius.size != ra.size:
raise ValueError("radius size (%d) != 1 and"
" != ra,dec size (%d)" % (radius.size, ra.size))
return super(Matcher, self).match(ra, dec, radius, maxmatch, False) | [
"def",
"match",
"(",
"self",
",",
"ra",
",",
"dec",
",",
"radius",
",",
"maxmatch",
"=",
"1",
")",
":",
"if",
"self",
".",
"convertToArray",
"==",
"True",
":",
"from",
"astrocalc",
".",
"coords",
"import",
"coordinates_to_array",
"ra",
",",
"dec",
"=",
"coordinates_to_array",
"(",
"log",
"=",
"self",
".",
"log",
",",
"ra",
"=",
"ra",
",",
"dec",
"=",
"dec",
")",
"radius",
"=",
"numpy",
".",
"array",
"(",
"radius",
",",
"dtype",
"=",
"'f8'",
",",
"ndmin",
"=",
"1",
",",
"copy",
"=",
"False",
")",
"if",
"ra",
".",
"size",
"!=",
"dec",
".",
"size",
":",
"raise",
"ValueError",
"(",
"\"ra size (%d) != \"",
"\"dec size (%d)\"",
"%",
"(",
"ra",
".",
"size",
",",
"dec",
".",
"size",
")",
")",
"if",
"radius",
".",
"size",
"!=",
"1",
"and",
"radius",
".",
"size",
"!=",
"ra",
".",
"size",
":",
"raise",
"ValueError",
"(",
"\"radius size (%d) != 1 and\"",
"\" != ra,dec size (%d)\"",
"%",
"(",
"radius",
".",
"size",
",",
"ra",
".",
"size",
")",
")",
"return",
"super",
"(",
"Matcher",
",",
"self",
")",
".",
"match",
"(",
"ra",
",",
"dec",
",",
"radius",
",",
"maxmatch",
",",
"False",
")"
] | 39.045455 | 27.939394 |
def get_database(self, database_name=None, username=None, password=None):
    """
    Return a pymongo database handle, authenticating first.

    Credentials passed as arguments take precedence; otherwise the
    username/password embedded in the DB URI given to __init__() are
    used (honouring an ``authSource`` URI option when present).

    :param database_name: (optional) Name of database
    :param username: (optional) Username to login with
    :param password: (optional) Password to login with
    :return: Pymongo database object
    """
    name = database_name if database_name is not None else self._database_name
    if name is None:
        raise ValueError('No database_name supplied, and no default provided to __init__')
    db = self._connection[name]
    if username and password:
        # Explicit credentials win over anything in the URI.
        db.authenticate(username, password)
    else:
        uri_user = self._parsed_uri.get("username", None)
        if uri_user:
            uri_password = self._parsed_uri.get("password", None)
            auth_source = self._options.get('authSource')
            if auth_source is not None:
                db.authenticate(uri_user, uri_password, source=auth_source)
            else:
                db.authenticate(uri_user, uri_password)
    return db
"def",
"get_database",
"(",
"self",
",",
"database_name",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"if",
"database_name",
"is",
"None",
":",
"database_name",
"=",
"self",
".",
"_database_name",
"if",
"database_name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No database_name supplied, and no default provided to __init__'",
")",
"db",
"=",
"self",
".",
"_connection",
"[",
"database_name",
"]",
"if",
"username",
"and",
"password",
":",
"db",
".",
"authenticate",
"(",
"username",
",",
"password",
")",
"elif",
"self",
".",
"_parsed_uri",
".",
"get",
"(",
"\"username\"",
",",
"None",
")",
":",
"if",
"'authSource'",
"in",
"self",
".",
"_options",
"and",
"self",
".",
"_options",
"[",
"'authSource'",
"]",
"is",
"not",
"None",
":",
"db",
".",
"authenticate",
"(",
"self",
".",
"_parsed_uri",
".",
"get",
"(",
"\"username\"",
",",
"None",
")",
",",
"self",
".",
"_parsed_uri",
".",
"get",
"(",
"\"password\"",
",",
"None",
")",
",",
"source",
"=",
"self",
".",
"_options",
"[",
"'authSource'",
"]",
")",
"else",
":",
"db",
".",
"authenticate",
"(",
"self",
".",
"_parsed_uri",
".",
"get",
"(",
"\"username\"",
",",
"None",
")",
",",
"self",
".",
"_parsed_uri",
".",
"get",
"(",
"\"password\"",
",",
"None",
")",
")",
"return",
"db"
] | 43.8125 | 18.6875 |
def write_channel(self, out_data):
    """Generic handler that will write to both SSH and telnet channel.

    Acquires the netmiko session lock before delegating to
    ``_write_channel`` and releases it in a ``finally`` block, so the
    lock is freed even when the write raises.

    :param out_data: data to be written to the channel
    :type out_data: str (can be either unicode/byte string)
    """
    self._lock_netmiko_session()
    try:
        self._write_channel(out_data)
    finally:
        # Always unlock the SSH channel, even on exception.
        self._unlock_netmiko_session()
"def",
"write_channel",
"(",
"self",
",",
"out_data",
")",
":",
"self",
".",
"_lock_netmiko_session",
"(",
")",
"try",
":",
"self",
".",
"_write_channel",
"(",
"out_data",
")",
"finally",
":",
"# Always unlock the SSH channel, even on exception.",
"self",
".",
"_unlock_netmiko_session",
"(",
")"
] | 37.5 | 14.083333 |
def create_tarfile(files, project_name):
    """Yield the path of a temporary .tar.gz archive containing *files*.

    The archive is built eagerly, its path is yielded exactly once, and the
    temporary file is deleted when the generator is resumed or closed.

    :param files: iterable of file paths to add to the archive
    :param project_name: used as part of the temp-file name prefix
    """
    fd, filename = tempfile.mkstemp(prefix="polyaxon_{}".format(project_name), suffix='.tar.gz')
    try:
        with tarfile.open(filename, "w:gz") as tar:
            for f in files:
                tar.add(f)
        yield filename
    finally:
        # Guarantee cleanup even if the consumer raises or never resumes
        # the generator (GeneratorExit is delivered at the yield above);
        # the original left the fd and temp file behind in those cases.
        os.close(fd)
        os.remove(filename)
"def",
"create_tarfile",
"(",
"files",
",",
"project_name",
")",
":",
"fd",
",",
"filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"\"polyaxon_{}\"",
".",
"format",
"(",
"project_name",
")",
",",
"suffix",
"=",
"'.tar.gz'",
")",
"with",
"tarfile",
".",
"open",
"(",
"filename",
",",
"\"w:gz\"",
")",
"as",
"tar",
":",
"for",
"f",
"in",
"files",
":",
"tar",
".",
"add",
"(",
"f",
")",
"yield",
"filename",
"# clear",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"remove",
"(",
"filename",
")"
] | 29.75 | 22.5 |
def for_page(self, page, per_page):
    """
    "Paginate" the collection by slicing it into a smaller collection.

    :param page: The current page (1-based)
    :type page: int

    :param per_page: Number of items by slice
    :type per_page: int

    :rtype: Collection
    """
    offset = per_page * (page - 1)
    end = offset + per_page
    return self[offset:end]
"def",
"for_page",
"(",
"self",
",",
"page",
",",
"per_page",
")",
":",
"start",
"=",
"(",
"page",
"-",
"1",
")",
"*",
"per_page",
"return",
"self",
"[",
"start",
":",
"start",
"+",
"per_page",
"]"
] | 24.866667 | 17.4 |
def create_storage_account(self, service_name, description, label,
                           affinity_group=None, location=None,
                           geo_replication_enabled=None,
                           extended_properties=None,
                           account_type='Standard_GRS'):
    '''
    Creates a new storage account in Windows Azure.

    service_name:
        A name for the storage account that is unique within Windows Azure.
        Storage account names must be between 3 and 24 characters in length
        and use numbers and lower-case letters only.
    description:
        A description for the storage account. The description may be up
        to 1024 characters in length.
    label:
        A name for the storage account. The name may be up to 100
        characters in length. The name can be used to identify the storage
        account for your tracking purposes.
    affinity_group:
        The name of an existing affinity group in the specified
        subscription. You can specify either a location or affinity_group,
        but not both.
    location:
        The location where the storage account is created. You can specify
        either a location or affinity_group, but not both.
    geo_replication_enabled:
        Deprecated. Replaced by the account_type parameter. Only an
        explicit False downgrades account_type; None means unspecified.
    extended_properties:
        Dictionary containing name/value pairs of storage account
        properties. You can have a maximum of 50 extended property
        name/value pairs. The maximum length of the Name element is 64
        characters, only alphanumeric characters and underscores are valid
        in the Name, and the name must start with a letter. The value has
        a maximum length of 255 characters.
    account_type:
        Specifies whether the account supports locally-redundant storage,
        geo-redundant storage, zone-redundant storage, or read access
        geo-redundant storage.
        Possible values are:
            Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS
    '''
    _validate_not_none('service_name', service_name)
    _validate_not_none('description', description)
    _validate_not_none('label', label)
    if affinity_group is None and location is None:
        raise ValueError(
            'location or affinity_group must be specified')
    if affinity_group is not None and location is not None:
        raise ValueError(
            'Only one of location or affinity_group needs to be specified')
    # Identity check: only an explicit False should trigger the legacy
    # downgrade. The previous `== False` also matched 0 and other
    # false-equal values (pycodestyle E712).
    if geo_replication_enabled is False:
        account_type = 'Standard_LRS'
    return self._perform_post(
        self._get_storage_service_path(),
        _XmlSerializer.create_storage_service_input_to_xml(
            service_name,
            description,
            label,
            affinity_group,
            location,
            account_type,
            extended_properties),
        as_async=True)
"def",
"create_storage_account",
"(",
"self",
",",
"service_name",
",",
"description",
",",
"label",
",",
"affinity_group",
"=",
"None",
",",
"location",
"=",
"None",
",",
"geo_replication_enabled",
"=",
"None",
",",
"extended_properties",
"=",
"None",
",",
"account_type",
"=",
"'Standard_GRS'",
")",
":",
"_validate_not_none",
"(",
"'service_name'",
",",
"service_name",
")",
"_validate_not_none",
"(",
"'description'",
",",
"description",
")",
"_validate_not_none",
"(",
"'label'",
",",
"label",
")",
"if",
"affinity_group",
"is",
"None",
"and",
"location",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'location or affinity_group must be specified'",
")",
"if",
"affinity_group",
"is",
"not",
"None",
"and",
"location",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Only one of location or affinity_group needs to be specified'",
")",
"if",
"geo_replication_enabled",
"==",
"False",
":",
"account_type",
"=",
"'Standard_LRS'",
"return",
"self",
".",
"_perform_post",
"(",
"self",
".",
"_get_storage_service_path",
"(",
")",
",",
"_XmlSerializer",
".",
"create_storage_service_input_to_xml",
"(",
"service_name",
",",
"description",
",",
"label",
",",
"affinity_group",
",",
"location",
",",
"account_type",
",",
"extended_properties",
")",
",",
"as_async",
"=",
"True",
")"
] | 48.21875 | 20.46875 |
def exttype(suffix):
    """Type for use with argument(... type=) that will force a specific suffix.

    Especially for output files, so that we can enforce the use of
    appropriate file-type specific suffixes.

    :param suffix: extension (including the leading dot) to force,
        e.g. ``'.txt'``
    :return: callable mapping a path string to the same path with its
        extension replaced by *suffix*; the empty string passes through
        unchanged.
    """
    def inner(s):
        if s == '':
            # Empty means "no file"; leave it alone.
            return s
        # Drop the existing extension (if any) and append the forced one.
        root = os.path.splitext(s)[0]
        return root + suffix
    return inner
"def",
"exttype",
"(",
"suffix",
")",
":",
"def",
"inner",
"(",
"s",
")",
":",
"if",
"s",
"==",
"''",
":",
"return",
"s",
"first",
",",
"last",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"s",
")",
"return",
"first",
"+",
"suffix",
"return",
"inner"
] | 35.2 | 15.8 |
def visit_Call(self, node):
    """Replace function call by its correct iterator if it is possible.

    Only calls previously flagged in ``self.potential_iterator`` are
    considered. When the callee matches a builtin listed in
    EQUIVALENT_ITERATORS, the call is rewritten in place to its lazy
    equivalent (or collapsed to its argument when no wrapper is needed).
    """
    if node in self.potential_iterator:
        matched_path = self.find_matching_builtin(node)
        if matched_path is None:
            # Callee is not a builtin we know a lazy equivalent for.
            return self.generic_visit(node)
        # Special handling for map which can't be turn to imap with None as
        # a parameter as map(None, [1, 2]) == [1, 2] while
        # list(imap(None, [1, 2])) == [(1,), (2,)]
        if (matched_path[1] == "map" and
                MODULES["__builtin__"]["None"] in
                self.aliases[node.args[0]]):
            return self.generic_visit(node)
        # if a dtype conversion is implied (array/asarray called with a
        # dtype argument), the call is not a pure iteration wrapper.
        if matched_path[1] in ('array', 'asarray') and len(node.args) != 1:
            return self.generic_visit(node)
        path = EQUIVALENT_ITERATORS[matched_path]
        if path:
            # Swap the callee for its lazy equivalent attribute path.
            node.func = path_to_attr(path)
            self.use_itertools |= path[0] == 'itertools'
        else:
            # No wrapper needed at all: collapse the call to its argument.
            node = node.args[0]
        self.update = True
    return self.generic_visit(node)
"def",
"visit_Call",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
"in",
"self",
".",
"potential_iterator",
":",
"matched_path",
"=",
"self",
".",
"find_matching_builtin",
"(",
"node",
")",
"if",
"matched_path",
"is",
"None",
":",
"return",
"self",
".",
"generic_visit",
"(",
"node",
")",
"# Special handling for map which can't be turn to imap with None as",
"# a parameter as map(None, [1, 2]) == [1, 2] while",
"# list(imap(None, [1, 2])) == [(1,), (2,)]",
"if",
"(",
"matched_path",
"[",
"1",
"]",
"==",
"\"map\"",
"and",
"MODULES",
"[",
"\"__builtin__\"",
"]",
"[",
"\"None\"",
"]",
"in",
"self",
".",
"aliases",
"[",
"node",
".",
"args",
"[",
"0",
"]",
"]",
")",
":",
"return",
"self",
".",
"generic_visit",
"(",
"node",
")",
"# if a dtype conversion is implied",
"if",
"matched_path",
"[",
"1",
"]",
"in",
"(",
"'array'",
",",
"'asarray'",
")",
"and",
"len",
"(",
"node",
".",
"args",
")",
"!=",
"1",
":",
"return",
"self",
".",
"generic_visit",
"(",
"node",
")",
"path",
"=",
"EQUIVALENT_ITERATORS",
"[",
"matched_path",
"]",
"if",
"path",
":",
"node",
".",
"func",
"=",
"path_to_attr",
"(",
"path",
")",
"self",
".",
"use_itertools",
"|=",
"path",
"[",
"0",
"]",
"==",
"'itertools'",
"else",
":",
"node",
"=",
"node",
".",
"args",
"[",
"0",
"]",
"self",
".",
"update",
"=",
"True",
"return",
"self",
".",
"generic_visit",
"(",
"node",
")"
] | 41.035714 | 16.535714 |
def emit_event(project_slug, action_slug, payload, sender_name, sender_secret,
               event_uuid=None):
    """Emit an event: verify the sender, resolve the action, execute it.

    :param project_slug: the slug of the project
    :param action_slug: the slug of the action
    :param payload: the payload emitted with the action
    :param sender_name: name that identifies the sender
    :param sender_secret: secret string authenticating the sender
    :param event_uuid: optional pre-assigned UUID; generated when falsy
    :return: dict with task id and event uuid

    raise MissingSender if sender does not exist
    raise WrongSenderSecret if sender_secret is wrong
    raise NotAllowed if sender is not allowed to emit action to project
    """
    project_graph = graph.get_project_graph(project_slug)
    project_graph.verify_sender(sender_name, sender_secret)
    action = project_graph.get_action(action_slug)
    project = project_graph.project
    # Execute the event, generating a UUID when none (or a falsy one)
    # was supplied by the caller.
    event_uuid = event_uuid if event_uuid else uuid4()
    event = {
        'uuid': event_uuid,
        'project': project['slug'],
        'action': action['slug'],
    }
    res = exec_event(event, action['webhooks'], payload)
    logger.info('EMIT %s "%s" "%s" %s',
                event_uuid, project_slug, action_slug, json.dumps(payload))
    return {
        'task': {
            'id': res.id,
        },
        'event': {
            'uuid': event_uuid,
        },
    }
"def",
"emit_event",
"(",
"project_slug",
",",
"action_slug",
",",
"payload",
",",
"sender_name",
",",
"sender_secret",
",",
"event_uuid",
"=",
"None",
")",
":",
"project_graph",
"=",
"graph",
".",
"get_project_graph",
"(",
"project_slug",
")",
"project_graph",
".",
"verify_sender",
"(",
"sender_name",
",",
"sender_secret",
")",
"action",
"=",
"project_graph",
".",
"get_action",
"(",
"action_slug",
")",
"project",
"=",
"project_graph",
".",
"project",
"# execute event",
"event_uuid",
"=",
"event_uuid",
"or",
"uuid4",
"(",
")",
"event",
"=",
"{",
"'uuid'",
":",
"event_uuid",
",",
"'project'",
":",
"project",
"[",
"'slug'",
"]",
",",
"'action'",
":",
"action",
"[",
"'slug'",
"]",
"}",
"res",
"=",
"exec_event",
"(",
"event",
",",
"action",
"[",
"'webhooks'",
"]",
",",
"payload",
")",
"logger",
".",
"info",
"(",
"'EMIT %s \"%s\" \"%s\" %s'",
",",
"event_uuid",
",",
"project_slug",
",",
"action_slug",
",",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"return",
"dict",
"(",
"task",
"=",
"dict",
"(",
"id",
"=",
"res",
".",
"id",
",",
")",
",",
"event",
"=",
"dict",
"(",
"uuid",
"=",
"event_uuid",
",",
")",
",",
")"
] | 32.526316 | 20.815789 |
def _saveDB(self):
    """Overloaded - we don't have nextval() in mysql.

    Persists this object via INSERT (when new) or UPDATE. Referenced
    Forgetter objects are flattened to their single-column IDs, saving
    them first when they are themselves new. After an INSERT with an
    auto-increment key, the generated ID is read back via
    ``cursor.insert_id()``.

    :raises ValueError: if a referenced object, or this object's own
        freshly inserted row, uses a multiple-column primary key.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    operation = 'INSERT' if self._new else 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    for field in fields:
        value = getattr(self, field)
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except ValueError:
                # Unpacking failed: the ID is not a single column.
                # (The original raised a bare string here, which is a
                # TypeError on Python 2.6+/3; raise a real exception.)
                raise ValueError(
                    "Can't reference multiple-primary-key: %s" % value)
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    if not self._validID():
        if len(self._getID()) != 1:
            raise ValueError(
                "Can't retrieve auto-inserted ID for multiple-primary-key")
        # Here's the mysql magic to get the new ID
        self._setID(cursor.insert_id())
    cursor.close()
    self._new = False
"def",
"_saveDB",
"(",
"self",
")",
":",
"# We're a \"fresh\" copy now",
"self",
".",
"_updated",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"_new",
":",
"operation",
"=",
"'INSERT'",
"else",
":",
"operation",
"=",
"'UPDATE'",
"(",
"sql",
",",
"fields",
")",
"=",
"self",
".",
"_prepareSQL",
"(",
"operation",
")",
"values",
"=",
"[",
"]",
"for",
"field",
"in",
"fields",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"field",
")",
"if",
"isinstance",
"(",
"value",
",",
"Forgetter",
")",
":",
"# It's another object, we store only the ID",
"if",
"value",
".",
"_new",
":",
"# It's a new object too, it must be saved!",
"value",
".",
"save",
"(",
")",
"try",
":",
"(",
"value",
",",
")",
"=",
"value",
".",
"_getID",
"(",
")",
"except",
":",
"raise",
"\"Can't reference multiple-primary-key: %s\"",
"%",
"value",
"values",
".",
"append",
"(",
"value",
")",
"cursor",
"=",
"self",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"sql",
",",
"values",
")",
"# cursor.commit()",
"if",
"not",
"self",
".",
"_validID",
"(",
")",
":",
"if",
"not",
"len",
"(",
"self",
".",
"_getID",
"(",
")",
")",
"==",
"1",
":",
"raise",
"\"Can't retrieve auto-inserted ID for multiple-primary-key\"",
"# Here's the mysql magic to get the new ID",
"self",
".",
"_setID",
"(",
"cursor",
".",
"insert_id",
"(",
")",
")",
"cursor",
".",
"close",
"(",
")",
"self",
".",
"_new",
"=",
"False"
] | 36.151515 | 13.939394 |
def getBirthdate(self, string=True):
    """
    Return the recording's birthdate.

    Parameters
    ----------
    string : bool, optional
        When True (default) return the birthdate as a string; otherwise
        parse it into a ``datetime`` using the ``"%d %b %Y"`` format.

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getBirthdate()=='30 jun 1969'
    True
    >>> f._close()
    >>> del f
    """
    birthdate = self._convert_string(self.birthdate.rstrip())
    if string:
        return birthdate
    return datetime.strptime(birthdate, "%d %b %Y")
"def",
"getBirthdate",
"(",
"self",
",",
"string",
"=",
"True",
")",
":",
"if",
"string",
":",
"return",
"self",
".",
"_convert_string",
"(",
"self",
".",
"birthdate",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"return",
"datetime",
".",
"strptime",
"(",
"self",
".",
"_convert_string",
"(",
"self",
".",
"birthdate",
".",
"rstrip",
"(",
")",
")",
",",
"\"%d %b %Y\"",
")"
] | 23.521739 | 22.130435 |
def networkOneMode(self, mode, nodeCount=True, edgeWeight=True, stemmer=None, edgeAttribute=None, nodeAttribute=None):
    """Creates a network of the objects found by one tag _mode_.

    This is the same as
    [networkMultiLevel()](#metaknowledge.CollectionWithIDs.networkMultiLevel)
    restricted to a single tag: each entry's values for _mode_ (e.g.
    `'authorsFull'`) become nodes, and co-occurrence within an entry
    becomes an edge — so for the author tag this yields a co-authorship
    network.

    **Note** Do not use this for co-citation networks; use
    [RecordCollection.networkCoCitation()](./classes/RecordCollection.html#metaknowledge.RecordCollection.networkCoCitation)
    instead — it is more accurate and has more options.

    # Parameters

    _mode_ : `str`

    > A two character WOS tag or one of the full names for a tag

    _nodeCount_ : `optional [bool]`

    > Default `True`, if `True` each node will have a "count" attribute
    giving the number of times the object occurred.

    _edgeWeight_ : `optional [bool]`

    > Default `True`, if `True` each edge will have a "weight" attribute
    giving the number of co-occurrences.

    _stemmer_ : `optional [func]`

    > Default `None`. If callable, it is applied to every node ID (all
    IDs are strings) before insertion, e.g. `lambda x: x[0]` keeps only
    the first character of each ID.

    # Returns

    `networkx Graph`

    > A networkx Graph with the objects of the tag _mode_ as nodes and
    their co-occurrences as edges
    """
    kwargs = {
        'nodeCount': nodeCount,
        'edgeWeight': edgeWeight,
        'stemmer': stemmer,
        'edgeAttribute': edgeAttribute,
        'nodeAttribute': nodeAttribute,
        '_networkTypeString': 'one mode network',
    }
    return self.networkMultiLevel(mode, **kwargs)
"def",
"networkOneMode",
"(",
"self",
",",
"mode",
",",
"nodeCount",
"=",
"True",
",",
"edgeWeight",
"=",
"True",
",",
"stemmer",
"=",
"None",
",",
"edgeAttribute",
"=",
"None",
",",
"nodeAttribute",
"=",
"None",
")",
":",
"return",
"self",
".",
"networkMultiLevel",
"(",
"mode",
",",
"nodeCount",
"=",
"nodeCount",
",",
"edgeWeight",
"=",
"edgeWeight",
",",
"stemmer",
"=",
"stemmer",
",",
"edgeAttribute",
"=",
"edgeAttribute",
",",
"nodeAttribute",
"=",
"nodeAttribute",
",",
"_networkTypeString",
"=",
"'one mode network'",
")"
] | 66.222222 | 63.166667 |
def left_to_right(self):
    """This is for text that flows Left to Right"""
    # Set the increment bit in the entry mode and push it to the device.
    mode = self._entry_mode | Command.MODE_INCREMENT
    self._entry_mode = mode
    self.command(mode)
"def",
"left_to_right",
"(",
"self",
")",
":",
"self",
".",
"_entry_mode",
"|=",
"Command",
".",
"MODE_INCREMENT",
"self",
".",
"command",
"(",
"self",
".",
"_entry_mode",
")"
] | 41.75 | 7 |
def copyidfintoidf(toidf, fromidf):
    """Copy every IDF object from *fromidf* into *toidf*."""
    for idf_object in getidfobjectlist(fromidf):
        toidf.copyidfobject(idf_object)
"def",
"copyidfintoidf",
"(",
"toidf",
",",
"fromidf",
")",
":",
"idfobjlst",
"=",
"getidfobjectlist",
"(",
"fromidf",
")",
"for",
"idfobj",
"in",
"idfobjlst",
":",
"toidf",
".",
"copyidfobject",
"(",
"idfobj",
")"
] | 36.6 | 4.6 |
def rbdd(*keywords):
    """
    Run story matching keywords and rewrite story if code changed.
    """
    settings = _personal_settings().data
    engine_settings = settings["engine"]
    engine_settings["rewrite"] = True
    python_version = settings["params"]["python version"]
    stories = _storybook(engine_settings).with_params(
        **{"python version": python_version}
    )
    stories.only_uninherited().shortcut(*keywords).play()
"def",
"rbdd",
"(",
"*",
"keywords",
")",
":",
"settings",
"=",
"_personal_settings",
"(",
")",
".",
"data",
"settings",
"[",
"\"engine\"",
"]",
"[",
"\"rewrite\"",
"]",
"=",
"True",
"_storybook",
"(",
"settings",
"[",
"\"engine\"",
"]",
")",
".",
"with_params",
"(",
"*",
"*",
"{",
"\"python version\"",
":",
"settings",
"[",
"\"params\"",
"]",
"[",
"\"python version\"",
"]",
"}",
")",
".",
"only_uninherited",
"(",
")",
".",
"shortcut",
"(",
"*",
"keywords",
")",
".",
"play",
"(",
")"
] | 38.222222 | 10 |
def add_request_session(self):
    """Add a request session for current thread."""
    ident = thread.get_ident()
    self.request_sessions[ident] = new_request_session(self.config, self.cookies)
"def",
"add_request_session",
"(",
"self",
")",
":",
"session",
"=",
"new_request_session",
"(",
"self",
".",
"config",
",",
"self",
".",
"cookies",
")",
"self",
".",
"request_sessions",
"[",
"thread",
".",
"get_ident",
"(",
")",
"]",
"=",
"session"
] | 52 | 13.25 |
def minimize_one_step(gradient_unregularized_loss,
hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle,
x_start,
tolerance,
l1_regularizer,
l2_regularizer=None,
maximum_full_sweeps=1,
learning_rate=None,
name=None):
"""One step of (the outer loop of) the minimization algorithm.
This function returns a new value of `x`, equal to `x_start + x_update`. The
increment `x_update in R^n` is computed by a coordinate descent method, that
is, by a loop in which each iteration updates exactly one coordinate of
`x_update`. (Some updates may leave the value of the coordinate unchanged.)
The particular update method used is to apply an L1-based proximity operator,
"soft threshold", whose fixed point `x_update_fix` is the desired minimum
```none
x_update_fix = argmin{
Loss(x_start + x_update')
+ l1_regularizer * ||x_start + x_update'||_1
+ l2_regularizer * ||x_start + x_update'||_2**2
: x_update' }
```
where in each iteration `x_update'` is constrained to have at most one nonzero
coordinate.
This update method preserves sparsity, i.e., tends to find sparse solutions if
`x_start` is sparse. Additionally, the choice of step size is based on
curvature (Hessian), which significantly speeds up convergence.
This algorithm assumes that `Loss` is convex, at least in a region surrounding
the optimum. (If `l2_regularizer > 0`, then only weak convexity is needed.)
Args:
gradient_unregularized_loss: (Batch of) `Tensor` with the same shape and
dtype as `x_start` representing the gradient, evaluated at `x_start`, of
the unregularized loss function (denoted `Loss` above). (In all current
use cases, `Loss` is the negative log likelihood.)
hessian_unregularized_loss_outer: (Batch of) `Tensor` or `SparseTensor`
having the same dtype as `x_start`, and shape `[N, n]` where `x_start` has
shape `[n]`, satisfying the property
`Transpose(hessian_unregularized_loss_outer)
@ diag(hessian_unregularized_loss_middle)
@ hessian_unregularized_loss_inner
= (approximation of) Hessian matrix of Loss, evaluated at x_start`.
hessian_unregularized_loss_middle: (Batch of) vector-shaped `Tensor` having
the same dtype as `x_start`, and shape `[N]` where
`hessian_unregularized_loss_outer` has shape `[N, n]`, satisfying the
property
`Transpose(hessian_unregularized_loss_outer)
@ diag(hessian_unregularized_loss_middle)
@ hessian_unregularized_loss_inner
= (approximation of) Hessian matrix of Loss, evaluated at x_start`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the current
value of the argument to the Loss function.
tolerance: scalar, `float` `Tensor` representing the convergence threshold.
The optimization step will terminate early, returning its current value of
`x_start + x_update`, once the following condition is met:
`||x_update_end - x_update_start||_2 / (1 + ||x_start||_2)
< sqrt(tolerance)`,
where `x_update_end` is the value of `x_update` at the end of a sweep and
`x_update_start` is the value of `x_update` at the beginning of that
sweep.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above). If L1 regularization is not
required, then `tfp.glm.fit_one_step` is preferable.
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_full_sweeps: Python integer specifying maximum number of sweeps to
run. A "sweep" consists of an iteration of coordinate descent on each
coordinate. After this many sweeps, the algorithm will terminate even if
convergence has not been reached.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize_one_step"`.
Returns:
x: (Batch of) `Tensor` having the same shape and dtype as `x_start`,
representing the updated value of `x`, that is, `x_start + x_update`.
is_converged: scalar, `bool` `Tensor` indicating whether convergence
occurred across all batches within the specified number of sweeps.
iter: scalar, `int` `Tensor` representing the actual number of coordinate
updates made (before achieving convergence). Since each sweep consists of
`tf.size(x_start)` iterations, the maximum number of updates is
`maximum_full_sweeps * tf.size(x_start)`.
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
"""
graph_deps = [
gradient_unregularized_loss,
hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle,
x_start,
l1_regularizer,
l2_regularizer,
maximum_full_sweeps,
tolerance,
learning_rate,
]
with tf.compat.v1.name_scope(name, 'minimize_one_step', graph_deps):
x_shape = _get_shape(x_start)
batch_shape = x_shape[:-1]
dims = x_shape[-1]
def _hessian_diag_elt_with_l2(coord): # pylint: disable=missing-docstring
# Returns the (coord, coord) entry of
#
# Hessian(UnregularizedLoss(x) + l2_regularizer * ||x||_2**2)
#
# evaluated at x = x_start.
inner_square = tf.reduce_sum(
input_tensor=_sparse_or_dense_matmul_onehot(
hessian_unregularized_loss_outer, coord)**2,
axis=-1)
unregularized_component = (
hessian_unregularized_loss_middle[..., coord] * inner_square)
l2_component = _mul_or_none(2., l2_regularizer)
return _add_ignoring_nones(unregularized_component, l2_component)
grad_loss_with_l2 = _add_ignoring_nones(
gradient_unregularized_loss, _mul_or_none(2., l2_regularizer, x_start))
# We define `x_update_diff_norm_sq_convergence_threshold` such that the
# convergence condition
# ||x_update_end - x_update_start||_2 / (1 + ||x_start||_2)
# < sqrt(tolerance)
# is equivalent to
# ||x_update_end - x_update_start||_2**2
# < x_update_diff_norm_sq_convergence_threshold.
x_update_diff_norm_sq_convergence_threshold = (
tolerance * (1. + tf.norm(tensor=x_start, ord=2, axis=-1))**2)
# Reshape update vectors so that the coordinate sweeps happen along the
# first dimension. This is so that we can use tensor_scatter_update to make
# sparse updates along the first axis without copying the Tensor.
# TODO(b/118789120): Switch to something like tf.tensor_scatter_nd_add if
# or when it exists.
update_shape = tf.concat([[dims], batch_shape], axis=-1)
def _loop_cond(iter_, x_update_diff_norm_sq, x_update,
hess_matmul_x_update):
del x_update
del hess_matmul_x_update
sweep_complete = (iter_ > 0) & tf.equal(iter_ % dims, 0)
small_delta = (
x_update_diff_norm_sq < x_update_diff_norm_sq_convergence_threshold)
converged = sweep_complete & small_delta
allowed_more_iterations = iter_ < maximum_full_sweeps * dims
return allowed_more_iterations & tf.reduce_any(input_tensor=~converged)
def _loop_body( # pylint: disable=missing-docstring
iter_, x_update_diff_norm_sq, x_update, hess_matmul_x_update):
# Inner loop of the minimizer.
#
# This loop updates a single coordinate of x_update. Ideally, an
# iteration of this loop would set
#
# x_update[j] += argmin{ LocalLoss(x_update + z*e_j) : z in R }
#
# where
#
# LocalLoss(x_update')
# = LocalLossSmoothComponent(x_update')
# + l1_regularizer * (||x_start + x_update'||_1 -
# ||x_start + x_update||_1)
# := (UnregularizedLoss(x_start + x_update') -
# UnregularizedLoss(x_start + x_update)
# + l2_regularizer * (||x_start + x_update'||_2**2 -
# ||x_start + x_update||_2**2)
# + l1_regularizer * (||x_start + x_update'||_1 -
# ||x_start + x_update||_1)
#
# In this algorithm approximate the above argmin using (univariate)
# proximal gradient descent:
#
# (*) x_update[j] = prox_{t * l1_regularizer * L1}(
# x_update[j] -
# t * d/dz|z=0 UnivariateLocalLossSmoothComponent(z))
#
# where
#
# UnivariateLocalLossSmoothComponent(z)
# := LocalLossSmoothComponent(x_update + z*e_j)
#
# and we approximate
#
# d/dz UnivariateLocalLossSmoothComponent(z)
# = grad LocalLossSmoothComponent(x_update))[j]
# ~= (grad LossSmoothComponent(x_start)
# + x_update matmul HessianOfLossSmoothComponent(x_start))[j].
#
# To choose the parameter t, we squint and pretend that the inner term of
# (*) is a Newton update as if we were using Newton's method to minimize
# UnivariateLocalLossSmoothComponent. That is, we choose t such that
#
# -t * d/dz ULLSC = -learning_rate * (d/dz ULLSC) / (d^2/dz^2 ULLSC)
#
# at z=0. Hence
#
# t = learning_rate / (d^2/dz^2|z=0 ULLSC)
# = learning_rate / HessianOfLossSmoothComponent(
# x_start + x_update)[j,j]
# ~= learning_rate / HessianOfLossSmoothComponent(
# x_start)[j,j]
#
# The above approximation is equivalent to assuming that
# HessianOfUnregularizedLoss is constant, i.e., ignoring third-order
# effects.
#
# Note that because LossSmoothComponent is (assumed to be) convex, t is
# positive.
# In above notation, coord = j.
coord = iter_ % dims
# x_update_diff_norm_sq := ||x_update_end - x_update_start||_2**2,
# computed incrementally, where x_update_end and x_update_start are as
# defined in the convergence criteria. Accordingly, we reset
# x_update_diff_norm_sq to zero at the beginning of each sweep.
x_update_diff_norm_sq = tf.where(
tf.equal(coord, 0), tf.zeros_like(x_update_diff_norm_sq),
x_update_diff_norm_sq)
# Recall that x_update and hess_matmul_x_update has the rightmost
# dimension transposed to the leftmost dimension.
w_old = x_start[..., coord] + x_update[coord, ...]
# This is the coordinatewise Newton update if no L1 regularization.
# In above notation, newton_step = -t * (approximation of d/dz|z=0 ULLSC).
second_deriv = _hessian_diag_elt_with_l2(coord)
newton_step = -_mul_ignoring_nones( # pylint: disable=invalid-unary-operand-type
learning_rate, grad_loss_with_l2[..., coord] +
hess_matmul_x_update[coord, ...]) / second_deriv
# Applying the soft-threshold operator accounts for L1 regularization.
# In above notation, delta =
# prox_{t*l1_regularizer*L1}(w_old + newton_step) - w_old.
delta = (
soft_threshold(
w_old + newton_step,
_mul_ignoring_nones(learning_rate, l1_regularizer) / second_deriv)
- w_old)
def _do_update(x_update_diff_norm_sq, x_update, hess_matmul_x_update): # pylint: disable=missing-docstring
hessian_column_with_l2 = sparse_or_dense_matvecmul(
hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle * _sparse_or_dense_matmul_onehot(
hessian_unregularized_loss_outer, coord),
adjoint_a=True)
if l2_regularizer is not None:
hessian_column_with_l2 += _one_hot_like(
hessian_column_with_l2, coord, on_value=2. * l2_regularizer)
# Move the batch dimensions of `hessian_column_with_l2` to rightmost in
# order to conform to `hess_matmul_x_update`.
n = tf.rank(hessian_column_with_l2)
perm = tf.roll(tf.range(n), shift=1, axis=0)
hessian_column_with_l2 = tf.transpose(
a=hessian_column_with_l2, perm=perm)
# Update the entire batch at `coord` even if `delta` may be 0 at some
# batch coordinates. In those cases, adding `delta` is a no-op.
x_update = tf.tensor_scatter_nd_add(x_update, [[coord]], [delta])
with tf.control_dependencies([x_update]):
x_update_diff_norm_sq_ = x_update_diff_norm_sq + delta**2
hess_matmul_x_update_ = (
hess_matmul_x_update + delta * hessian_column_with_l2)
# Hint that loop vars retain the same shape.
x_update_diff_norm_sq_.set_shape(
x_update_diff_norm_sq_.shape.merge_with(
x_update_diff_norm_sq.shape))
hess_matmul_x_update_.set_shape(
hess_matmul_x_update_.shape.merge_with(
hess_matmul_x_update.shape))
return [x_update_diff_norm_sq_, x_update, hess_matmul_x_update_]
inputs_to_update = [x_update_diff_norm_sq, x_update, hess_matmul_x_update]
return [iter_ + 1] + prefer_static.cond(
# Note on why checking delta (a difference of floats) for equality to
# zero is ok:
#
# First of all, x - x == 0 in floating point -- see
# https://stackoverflow.com/a/2686671
#
# Delta will conceptually equal zero when one of the following holds:
# (i) |w_old + newton_step| <= threshold and w_old == 0
# (ii) |w_old + newton_step| > threshold and
# w_old + newton_step - sign(w_old + newton_step) * threshold
# == w_old
#
# In case (i) comparing delta to zero is fine.
#
# In case (ii), newton_step conceptually equals
# sign(w_old + newton_step) * threshold.
# Also remember
# threshold = -newton_step / (approximation of d/dz|z=0 ULLSC).
# So (i) happens when
# (approximation of d/dz|z=0 ULLSC) == -sign(w_old + newton_step).
# If we did not require LossSmoothComponent to be strictly convex,
# then this could actually happen a non-negligible amount of the time,
# e.g. if the loss function is piecewise linear and one of the pieces
# has slope 1. But since LossSmoothComponent is strictly convex, (i)
# should not systematically happen.
tf.reduce_all(input_tensor=tf.equal(delta, 0.)),
lambda: inputs_to_update,
lambda: _do_update(*inputs_to_update))
base_dtype = x_start.dtype.base_dtype
iter_, x_update_diff_norm_sq, x_update, _ = tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[
tf.zeros([], dtype=np.int32, name='iter'),
tf.zeros(
batch_shape, dtype=base_dtype, name='x_update_diff_norm_sq'),
tf.zeros(update_shape, dtype=base_dtype, name='x_update'),
tf.zeros(
update_shape, dtype=base_dtype, name='hess_matmul_x_update'),
])
# Convert back x_update to the shape of x_start by transposing the leftmost
# dimension to the rightmost.
n = tf.rank(x_update)
perm = tf.roll(tf.range(n), shift=-1, axis=0)
x_update = tf.transpose(a=x_update, perm=perm)
converged = tf.reduce_all(input_tensor=x_update_diff_norm_sq <
x_update_diff_norm_sq_convergence_threshold)
return x_start + x_update, converged, iter_ / dims | [
"def",
"minimize_one_step",
"(",
"gradient_unregularized_loss",
",",
"hessian_unregularized_loss_outer",
",",
"hessian_unregularized_loss_middle",
",",
"x_start",
",",
"tolerance",
",",
"l1_regularizer",
",",
"l2_regularizer",
"=",
"None",
",",
"maximum_full_sweeps",
"=",
"1",
",",
"learning_rate",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"graph_deps",
"=",
"[",
"gradient_unregularized_loss",
",",
"hessian_unregularized_loss_outer",
",",
"hessian_unregularized_loss_middle",
",",
"x_start",
",",
"l1_regularizer",
",",
"l2_regularizer",
",",
"maximum_full_sweeps",
",",
"tolerance",
",",
"learning_rate",
",",
"]",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'minimize_one_step'",
",",
"graph_deps",
")",
":",
"x_shape",
"=",
"_get_shape",
"(",
"x_start",
")",
"batch_shape",
"=",
"x_shape",
"[",
":",
"-",
"1",
"]",
"dims",
"=",
"x_shape",
"[",
"-",
"1",
"]",
"def",
"_hessian_diag_elt_with_l2",
"(",
"coord",
")",
":",
"# pylint: disable=missing-docstring",
"# Returns the (coord, coord) entry of",
"#",
"# Hessian(UnregularizedLoss(x) + l2_regularizer * ||x||_2**2)",
"#",
"# evaluated at x = x_start.",
"inner_square",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"_sparse_or_dense_matmul_onehot",
"(",
"hessian_unregularized_loss_outer",
",",
"coord",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"unregularized_component",
"=",
"(",
"hessian_unregularized_loss_middle",
"[",
"...",
",",
"coord",
"]",
"*",
"inner_square",
")",
"l2_component",
"=",
"_mul_or_none",
"(",
"2.",
",",
"l2_regularizer",
")",
"return",
"_add_ignoring_nones",
"(",
"unregularized_component",
",",
"l2_component",
")",
"grad_loss_with_l2",
"=",
"_add_ignoring_nones",
"(",
"gradient_unregularized_loss",
",",
"_mul_or_none",
"(",
"2.",
",",
"l2_regularizer",
",",
"x_start",
")",
")",
"# We define `x_update_diff_norm_sq_convergence_threshold` such that the",
"# convergence condition",
"# ||x_update_end - x_update_start||_2 / (1 + ||x_start||_2)",
"# < sqrt(tolerance)",
"# is equivalent to",
"# ||x_update_end - x_update_start||_2**2",
"# < x_update_diff_norm_sq_convergence_threshold.",
"x_update_diff_norm_sq_convergence_threshold",
"=",
"(",
"tolerance",
"*",
"(",
"1.",
"+",
"tf",
".",
"norm",
"(",
"tensor",
"=",
"x_start",
",",
"ord",
"=",
"2",
",",
"axis",
"=",
"-",
"1",
")",
")",
"**",
"2",
")",
"# Reshape update vectors so that the coordinate sweeps happen along the",
"# first dimension. This is so that we can use tensor_scatter_update to make",
"# sparse updates along the first axis without copying the Tensor.",
"# TODO(b/118789120): Switch to something like tf.tensor_scatter_nd_add if",
"# or when it exists.",
"update_shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"[",
"dims",
"]",
",",
"batch_shape",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"def",
"_loop_cond",
"(",
"iter_",
",",
"x_update_diff_norm_sq",
",",
"x_update",
",",
"hess_matmul_x_update",
")",
":",
"del",
"x_update",
"del",
"hess_matmul_x_update",
"sweep_complete",
"=",
"(",
"iter_",
">",
"0",
")",
"&",
"tf",
".",
"equal",
"(",
"iter_",
"%",
"dims",
",",
"0",
")",
"small_delta",
"=",
"(",
"x_update_diff_norm_sq",
"<",
"x_update_diff_norm_sq_convergence_threshold",
")",
"converged",
"=",
"sweep_complete",
"&",
"small_delta",
"allowed_more_iterations",
"=",
"iter_",
"<",
"maximum_full_sweeps",
"*",
"dims",
"return",
"allowed_more_iterations",
"&",
"tf",
".",
"reduce_any",
"(",
"input_tensor",
"=",
"~",
"converged",
")",
"def",
"_loop_body",
"(",
"# pylint: disable=missing-docstring",
"iter_",
",",
"x_update_diff_norm_sq",
",",
"x_update",
",",
"hess_matmul_x_update",
")",
":",
"# Inner loop of the minimizer.",
"#",
"# This loop updates a single coordinate of x_update. Ideally, an",
"# iteration of this loop would set",
"#",
"# x_update[j] += argmin{ LocalLoss(x_update + z*e_j) : z in R }",
"#",
"# where",
"#",
"# LocalLoss(x_update')",
"# = LocalLossSmoothComponent(x_update')",
"# + l1_regularizer * (||x_start + x_update'||_1 -",
"# ||x_start + x_update||_1)",
"# := (UnregularizedLoss(x_start + x_update') -",
"# UnregularizedLoss(x_start + x_update)",
"# + l2_regularizer * (||x_start + x_update'||_2**2 -",
"# ||x_start + x_update||_2**2)",
"# + l1_regularizer * (||x_start + x_update'||_1 -",
"# ||x_start + x_update||_1)",
"#",
"# In this algorithm approximate the above argmin using (univariate)",
"# proximal gradient descent:",
"#",
"# (*) x_update[j] = prox_{t * l1_regularizer * L1}(",
"# x_update[j] -",
"# t * d/dz|z=0 UnivariateLocalLossSmoothComponent(z))",
"#",
"# where",
"#",
"# UnivariateLocalLossSmoothComponent(z)",
"# := LocalLossSmoothComponent(x_update + z*e_j)",
"#",
"# and we approximate",
"#",
"# d/dz UnivariateLocalLossSmoothComponent(z)",
"# = grad LocalLossSmoothComponent(x_update))[j]",
"# ~= (grad LossSmoothComponent(x_start)",
"# + x_update matmul HessianOfLossSmoothComponent(x_start))[j].",
"#",
"# To choose the parameter t, we squint and pretend that the inner term of",
"# (*) is a Newton update as if we were using Newton's method to minimize",
"# UnivariateLocalLossSmoothComponent. That is, we choose t such that",
"#",
"# -t * d/dz ULLSC = -learning_rate * (d/dz ULLSC) / (d^2/dz^2 ULLSC)",
"#",
"# at z=0. Hence",
"#",
"# t = learning_rate / (d^2/dz^2|z=0 ULLSC)",
"# = learning_rate / HessianOfLossSmoothComponent(",
"# x_start + x_update)[j,j]",
"# ~= learning_rate / HessianOfLossSmoothComponent(",
"# x_start)[j,j]",
"#",
"# The above approximation is equivalent to assuming that",
"# HessianOfUnregularizedLoss is constant, i.e., ignoring third-order",
"# effects.",
"#",
"# Note that because LossSmoothComponent is (assumed to be) convex, t is",
"# positive.",
"# In above notation, coord = j.",
"coord",
"=",
"iter_",
"%",
"dims",
"# x_update_diff_norm_sq := ||x_update_end - x_update_start||_2**2,",
"# computed incrementally, where x_update_end and x_update_start are as",
"# defined in the convergence criteria. Accordingly, we reset",
"# x_update_diff_norm_sq to zero at the beginning of each sweep.",
"x_update_diff_norm_sq",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"equal",
"(",
"coord",
",",
"0",
")",
",",
"tf",
".",
"zeros_like",
"(",
"x_update_diff_norm_sq",
")",
",",
"x_update_diff_norm_sq",
")",
"# Recall that x_update and hess_matmul_x_update has the rightmost",
"# dimension transposed to the leftmost dimension.",
"w_old",
"=",
"x_start",
"[",
"...",
",",
"coord",
"]",
"+",
"x_update",
"[",
"coord",
",",
"...",
"]",
"# This is the coordinatewise Newton update if no L1 regularization.",
"# In above notation, newton_step = -t * (approximation of d/dz|z=0 ULLSC).",
"second_deriv",
"=",
"_hessian_diag_elt_with_l2",
"(",
"coord",
")",
"newton_step",
"=",
"-",
"_mul_ignoring_nones",
"(",
"# pylint: disable=invalid-unary-operand-type",
"learning_rate",
",",
"grad_loss_with_l2",
"[",
"...",
",",
"coord",
"]",
"+",
"hess_matmul_x_update",
"[",
"coord",
",",
"...",
"]",
")",
"/",
"second_deriv",
"# Applying the soft-threshold operator accounts for L1 regularization.",
"# In above notation, delta =",
"# prox_{t*l1_regularizer*L1}(w_old + newton_step) - w_old.",
"delta",
"=",
"(",
"soft_threshold",
"(",
"w_old",
"+",
"newton_step",
",",
"_mul_ignoring_nones",
"(",
"learning_rate",
",",
"l1_regularizer",
")",
"/",
"second_deriv",
")",
"-",
"w_old",
")",
"def",
"_do_update",
"(",
"x_update_diff_norm_sq",
",",
"x_update",
",",
"hess_matmul_x_update",
")",
":",
"# pylint: disable=missing-docstring",
"hessian_column_with_l2",
"=",
"sparse_or_dense_matvecmul",
"(",
"hessian_unregularized_loss_outer",
",",
"hessian_unregularized_loss_middle",
"*",
"_sparse_or_dense_matmul_onehot",
"(",
"hessian_unregularized_loss_outer",
",",
"coord",
")",
",",
"adjoint_a",
"=",
"True",
")",
"if",
"l2_regularizer",
"is",
"not",
"None",
":",
"hessian_column_with_l2",
"+=",
"_one_hot_like",
"(",
"hessian_column_with_l2",
",",
"coord",
",",
"on_value",
"=",
"2.",
"*",
"l2_regularizer",
")",
"# Move the batch dimensions of `hessian_column_with_l2` to rightmost in",
"# order to conform to `hess_matmul_x_update`.",
"n",
"=",
"tf",
".",
"rank",
"(",
"hessian_column_with_l2",
")",
"perm",
"=",
"tf",
".",
"roll",
"(",
"tf",
".",
"range",
"(",
"n",
")",
",",
"shift",
"=",
"1",
",",
"axis",
"=",
"0",
")",
"hessian_column_with_l2",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"hessian_column_with_l2",
",",
"perm",
"=",
"perm",
")",
"# Update the entire batch at `coord` even if `delta` may be 0 at some",
"# batch coordinates. In those cases, adding `delta` is a no-op.",
"x_update",
"=",
"tf",
".",
"tensor_scatter_nd_add",
"(",
"x_update",
",",
"[",
"[",
"coord",
"]",
"]",
",",
"[",
"delta",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"x_update",
"]",
")",
":",
"x_update_diff_norm_sq_",
"=",
"x_update_diff_norm_sq",
"+",
"delta",
"**",
"2",
"hess_matmul_x_update_",
"=",
"(",
"hess_matmul_x_update",
"+",
"delta",
"*",
"hessian_column_with_l2",
")",
"# Hint that loop vars retain the same shape.",
"x_update_diff_norm_sq_",
".",
"set_shape",
"(",
"x_update_diff_norm_sq_",
".",
"shape",
".",
"merge_with",
"(",
"x_update_diff_norm_sq",
".",
"shape",
")",
")",
"hess_matmul_x_update_",
".",
"set_shape",
"(",
"hess_matmul_x_update_",
".",
"shape",
".",
"merge_with",
"(",
"hess_matmul_x_update",
".",
"shape",
")",
")",
"return",
"[",
"x_update_diff_norm_sq_",
",",
"x_update",
",",
"hess_matmul_x_update_",
"]",
"inputs_to_update",
"=",
"[",
"x_update_diff_norm_sq",
",",
"x_update",
",",
"hess_matmul_x_update",
"]",
"return",
"[",
"iter_",
"+",
"1",
"]",
"+",
"prefer_static",
".",
"cond",
"(",
"# Note on why checking delta (a difference of floats) for equality to",
"# zero is ok:",
"#",
"# First of all, x - x == 0 in floating point -- see",
"# https://stackoverflow.com/a/2686671",
"#",
"# Delta will conceptually equal zero when one of the following holds:",
"# (i) |w_old + newton_step| <= threshold and w_old == 0",
"# (ii) |w_old + newton_step| > threshold and",
"# w_old + newton_step - sign(w_old + newton_step) * threshold",
"# == w_old",
"#",
"# In case (i) comparing delta to zero is fine.",
"#",
"# In case (ii), newton_step conceptually equals",
"# sign(w_old + newton_step) * threshold.",
"# Also remember",
"# threshold = -newton_step / (approximation of d/dz|z=0 ULLSC).",
"# So (i) happens when",
"# (approximation of d/dz|z=0 ULLSC) == -sign(w_old + newton_step).",
"# If we did not require LossSmoothComponent to be strictly convex,",
"# then this could actually happen a non-negligible amount of the time,",
"# e.g. if the loss function is piecewise linear and one of the pieces",
"# has slope 1. But since LossSmoothComponent is strictly convex, (i)",
"# should not systematically happen.",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"tf",
".",
"equal",
"(",
"delta",
",",
"0.",
")",
")",
",",
"lambda",
":",
"inputs_to_update",
",",
"lambda",
":",
"_do_update",
"(",
"*",
"inputs_to_update",
")",
")",
"base_dtype",
"=",
"x_start",
".",
"dtype",
".",
"base_dtype",
"iter_",
",",
"x_update_diff_norm_sq",
",",
"x_update",
",",
"_",
"=",
"tf",
".",
"while_loop",
"(",
"cond",
"=",
"_loop_cond",
",",
"body",
"=",
"_loop_body",
",",
"loop_vars",
"=",
"[",
"tf",
".",
"zeros",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
",",
"name",
"=",
"'iter'",
")",
",",
"tf",
".",
"zeros",
"(",
"batch_shape",
",",
"dtype",
"=",
"base_dtype",
",",
"name",
"=",
"'x_update_diff_norm_sq'",
")",
",",
"tf",
".",
"zeros",
"(",
"update_shape",
",",
"dtype",
"=",
"base_dtype",
",",
"name",
"=",
"'x_update'",
")",
",",
"tf",
".",
"zeros",
"(",
"update_shape",
",",
"dtype",
"=",
"base_dtype",
",",
"name",
"=",
"'hess_matmul_x_update'",
")",
",",
"]",
")",
"# Convert back x_update to the shape of x_start by transposing the leftmost",
"# dimension to the rightmost.",
"n",
"=",
"tf",
".",
"rank",
"(",
"x_update",
")",
"perm",
"=",
"tf",
".",
"roll",
"(",
"tf",
".",
"range",
"(",
"n",
")",
",",
"shift",
"=",
"-",
"1",
",",
"axis",
"=",
"0",
")",
"x_update",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"x_update",
",",
"perm",
"=",
"perm",
")",
"converged",
"=",
"tf",
".",
"reduce_all",
"(",
"input_tensor",
"=",
"x_update_diff_norm_sq",
"<",
"x_update_diff_norm_sq_convergence_threshold",
")",
"return",
"x_start",
"+",
"x_update",
",",
"converged",
",",
"iter_",
"/",
"dims"
] | 46.025714 | 24.08 |
def _build_request(self, path, params=None):
"""Build API request"""
_url = '{scheme}://{host}{port}/{version}{path}'.format(
scheme=self._scheme,
host=self._host,
port=':{}'.format(self._port) if self._port != 80 else '',
version=self._api_version,
path=path)
if params is not None:
_params = params.copy()
else:
_params = dict()
# Add required API parameters
_params['api_nonce'] = str(random.randint(0, 999999999)).zfill(9)
_params['api_timestamp'] = int(time.time())
_params['api_key'] = self.__key
_params['api_format'] = 'json'
_params['api_kit'] = 'py-{}{}'.format(
__version__, '-{}'.format(self._agent) if self._agent else '')
# Construct Signature Base String
sbs = '&'.join(['{}={}'.format(
quote((unicode(key).encode('utf-8')), safe='~'),
quote((unicode(value).encode('utf-8')), safe='~')
) for key, value in sorted(_params.items())])
# Add signature to the _params dict
_params['api_signature'] = hashlib.sha1(
'{}{}'.format(sbs, self.__secret).encode('utf-8')).hexdigest()
return _url, _params | [
"def",
"_build_request",
"(",
"self",
",",
"path",
",",
"params",
"=",
"None",
")",
":",
"_url",
"=",
"'{scheme}://{host}{port}/{version}{path}'",
".",
"format",
"(",
"scheme",
"=",
"self",
".",
"_scheme",
",",
"host",
"=",
"self",
".",
"_host",
",",
"port",
"=",
"':{}'",
".",
"format",
"(",
"self",
".",
"_port",
")",
"if",
"self",
".",
"_port",
"!=",
"80",
"else",
"''",
",",
"version",
"=",
"self",
".",
"_api_version",
",",
"path",
"=",
"path",
")",
"if",
"params",
"is",
"not",
"None",
":",
"_params",
"=",
"params",
".",
"copy",
"(",
")",
"else",
":",
"_params",
"=",
"dict",
"(",
")",
"# Add required API parameters",
"_params",
"[",
"'api_nonce'",
"]",
"=",
"str",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
")",
".",
"zfill",
"(",
"9",
")",
"_params",
"[",
"'api_timestamp'",
"]",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"_params",
"[",
"'api_key'",
"]",
"=",
"self",
".",
"__key",
"_params",
"[",
"'api_format'",
"]",
"=",
"'json'",
"_params",
"[",
"'api_kit'",
"]",
"=",
"'py-{}{}'",
".",
"format",
"(",
"__version__",
",",
"'-{}'",
".",
"format",
"(",
"self",
".",
"_agent",
")",
"if",
"self",
".",
"_agent",
"else",
"''",
")",
"# Construct Signature Base String",
"sbs",
"=",
"'&'",
".",
"join",
"(",
"[",
"'{}={}'",
".",
"format",
"(",
"quote",
"(",
"(",
"unicode",
"(",
"key",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
",",
"safe",
"=",
"'~'",
")",
",",
"quote",
"(",
"(",
"unicode",
"(",
"value",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
",",
"safe",
"=",
"'~'",
")",
")",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"_params",
".",
"items",
"(",
")",
")",
"]",
")",
"# Add signature to the _params dict",
"_params",
"[",
"'api_signature'",
"]",
"=",
"hashlib",
".",
"sha1",
"(",
"'{}{}'",
".",
"format",
"(",
"sbs",
",",
"self",
".",
"__secret",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"_url",
",",
"_params"
] | 36.470588 | 17.5 |
def plot_full(candsfile, cands, mode='im'):
""" Plot 'full' features, such as cutout image and spectrum.
"""
loc, prop, d = read_candidates(candsfile, returnstate=True)
npixx, npixy = prop[0][4].shape
nints, nchan, npol = prop[0][5].shape
bin = 10
plt.figure(1)
for i in cands:
if mode == 'spec':
rr = np.array([np.abs(prop[i][5][:,i0:i0+bin,0].mean(axis=1)) for i0 in range(0,nchan,bin)])
ll = np.array([np.abs(prop[i][5][:,i0:i0+bin,1].mean(axis=1)) for i0 in range(0,nchan,bin)])
sh = ll.shape
data = np.concatenate( (rr, np.zeros(shape=(sh[0], sh[1]/2)), ll), axis=1)
elif mode == 'im':
data = prop[i][4]
plt.subplot(np.sqrt(len(cands)), np.sqrt(len(cands)), cands.index(i))
plt.imshow(data, interpolation='nearest')
plt.show() | [
"def",
"plot_full",
"(",
"candsfile",
",",
"cands",
",",
"mode",
"=",
"'im'",
")",
":",
"loc",
",",
"prop",
",",
"d",
"=",
"read_candidates",
"(",
"candsfile",
",",
"returnstate",
"=",
"True",
")",
"npixx",
",",
"npixy",
"=",
"prop",
"[",
"0",
"]",
"[",
"4",
"]",
".",
"shape",
"nints",
",",
"nchan",
",",
"npol",
"=",
"prop",
"[",
"0",
"]",
"[",
"5",
"]",
".",
"shape",
"bin",
"=",
"10",
"plt",
".",
"figure",
"(",
"1",
")",
"for",
"i",
"in",
"cands",
":",
"if",
"mode",
"==",
"'spec'",
":",
"rr",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"abs",
"(",
"prop",
"[",
"i",
"]",
"[",
"5",
"]",
"[",
":",
",",
"i0",
":",
"i0",
"+",
"bin",
",",
"0",
"]",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
")",
"for",
"i0",
"in",
"range",
"(",
"0",
",",
"nchan",
",",
"bin",
")",
"]",
")",
"ll",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"abs",
"(",
"prop",
"[",
"i",
"]",
"[",
"5",
"]",
"[",
":",
",",
"i0",
":",
"i0",
"+",
"bin",
",",
"1",
"]",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
")",
"for",
"i0",
"in",
"range",
"(",
"0",
",",
"nchan",
",",
"bin",
")",
"]",
")",
"sh",
"=",
"ll",
".",
"shape",
"data",
"=",
"np",
".",
"concatenate",
"(",
"(",
"rr",
",",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"sh",
"[",
"0",
"]",
",",
"sh",
"[",
"1",
"]",
"/",
"2",
")",
")",
",",
"ll",
")",
",",
"axis",
"=",
"1",
")",
"elif",
"mode",
"==",
"'im'",
":",
"data",
"=",
"prop",
"[",
"i",
"]",
"[",
"4",
"]",
"plt",
".",
"subplot",
"(",
"np",
".",
"sqrt",
"(",
"len",
"(",
"cands",
")",
")",
",",
"np",
".",
"sqrt",
"(",
"len",
"(",
"cands",
")",
")",
",",
"cands",
".",
"index",
"(",
"i",
")",
")",
"plt",
".",
"imshow",
"(",
"data",
",",
"interpolation",
"=",
"'nearest'",
")",
"plt",
".",
"show",
"(",
")"
] | 40.047619 | 23.047619 |
def _fromiter(it, dtype, count, progress, log):
"""Utility function to load an array from an iterator."""
if progress > 0:
it = _iter_withprogress(it, progress, log)
if count is not None:
a = np.fromiter(it, dtype=dtype, count=count)
else:
a = np.fromiter(it, dtype=dtype)
return a | [
"def",
"_fromiter",
"(",
"it",
",",
"dtype",
",",
"count",
",",
"progress",
",",
"log",
")",
":",
"if",
"progress",
">",
"0",
":",
"it",
"=",
"_iter_withprogress",
"(",
"it",
",",
"progress",
",",
"log",
")",
"if",
"count",
"is",
"not",
"None",
":",
"a",
"=",
"np",
".",
"fromiter",
"(",
"it",
",",
"dtype",
"=",
"dtype",
",",
"count",
"=",
"count",
")",
"else",
":",
"a",
"=",
"np",
".",
"fromiter",
"(",
"it",
",",
"dtype",
"=",
"dtype",
")",
"return",
"a"
] | 35.222222 | 13.777778 |
def get_feats(self, doc):
'''
Parameters
----------
doc, Spacy Doc
Returns
-------
Counter noun chunk -> count
'''
# ngram_counter = phrasemachine.get_phrases(str(doc), tagger='spacy')['counts']
ngram_counter = Counter()
for sent in doc.sents:
unigrams = self._get_unigram_feats(sent)
ngram_counter += Counter(unigrams) + _phrase_counts(sent)
return ngram_counter | [
"def",
"get_feats",
"(",
"self",
",",
"doc",
")",
":",
"# ngram_counter = phrasemachine.get_phrases(str(doc), tagger='spacy')['counts']",
"ngram_counter",
"=",
"Counter",
"(",
")",
"for",
"sent",
"in",
"doc",
".",
"sents",
":",
"unigrams",
"=",
"self",
".",
"_get_unigram_feats",
"(",
"sent",
")",
"ngram_counter",
"+=",
"Counter",
"(",
"unigrams",
")",
"+",
"_phrase_counts",
"(",
"sent",
")",
"return",
"ngram_counter"
] | 23.6875 | 24.3125 |
def contrast(self, value):
"""
Sets the LED intensity to the desired level, in the range 0-255.
:param level: Desired contrast level in the range of 0-255.
:type level: int
"""
assert(0x00 <= value <= 0xFF)
self._brightness = value >> 4
if self._last_image is not None:
self.display(self._last_image) | [
"def",
"contrast",
"(",
"self",
",",
"value",
")",
":",
"assert",
"(",
"0x00",
"<=",
"value",
"<=",
"0xFF",
")",
"self",
".",
"_brightness",
"=",
"value",
">>",
"4",
"if",
"self",
".",
"_last_image",
"is",
"not",
"None",
":",
"self",
".",
"display",
"(",
"self",
".",
"_last_image",
")"
] | 33.363636 | 12.454545 |
def Suzuki_LFL(Hc=None):
r'''Calculates lower flammability limit, using the Suzuki [1]_ correlation.
Uses heat of combustion only.
The lower flammability limit of a gas is air is:
.. math::
\text{LFL} = \frac{-3.42}{\Delta H_c^{\circ}} + 0.569
\Delta H_c^{\circ} + 0.0538\Delta H_c^{\circ 2} + 1.80
Parameters
----------
Hc : float
Heat of combustion of gas [J/mol]
Returns
-------
LFL : float
Lower flammability limit, mole fraction [-]
Notes
-----
Fit performed with 112 compounds, r^2 was 0.977.
LFL in percent volume in air. Hc is at standard conditions, in MJ/mol.
11 compounds left out as they were outliers.
Equation does not apply for molecules with halogen atoms, only hydrocarbons
with oxygen or nitrogen or sulfur.
No sample calculation provided with the article. However, the equation is
straightforward.
Limits of equations's validity are -6135596 J where it predicts a
LFL of 0, and -48322129 J where it predicts a LFL of 1.
Examples
--------
Pentane, 1.5 % LFL in literature
>>> Suzuki_LFL(-3536600)
0.014276107095811815
References
----------
.. [1] Suzuki, Takahiro. "Note: Empirical Relationship between Lower
Flammability Limits and Standard Enthalpies of Combustion of Organic
Compounds." Fire and Materials 18, no. 5 (September 1, 1994): 333-36.
doi:10.1002/fam.810180509.
'''
Hc = Hc/1E6
LFL = -3.42/Hc + 0.569*Hc + 0.0538*Hc*Hc + 1.80
return LFL/100. | [
"def",
"Suzuki_LFL",
"(",
"Hc",
"=",
"None",
")",
":",
"Hc",
"=",
"Hc",
"/",
"1E6",
"LFL",
"=",
"-",
"3.42",
"/",
"Hc",
"+",
"0.569",
"*",
"Hc",
"+",
"0.0538",
"*",
"Hc",
"*",
"Hc",
"+",
"1.80",
"return",
"LFL",
"/",
"100."
] | 30.34 | 25.58 |
def predict(self, a, b, **kwargs):
"""Perform the independence test.
:param a: input data
:param b: input data
:type a: array-like, numerical data
:type b: array-like, numerical data
:return: dependency statistic (1=Highly dependent, 0=Not dependent)
:rtype: float
"""
binning_alg = kwargs.get('bins', 'fd')
return metrics.adjusted_mutual_info_score(bin_variable(a, bins=binning_alg),
bin_variable(b, bins=binning_alg)) | [
"def",
"predict",
"(",
"self",
",",
"a",
",",
"b",
",",
"*",
"*",
"kwargs",
")",
":",
"binning_alg",
"=",
"kwargs",
".",
"get",
"(",
"'bins'",
",",
"'fd'",
")",
"return",
"metrics",
".",
"adjusted_mutual_info_score",
"(",
"bin_variable",
"(",
"a",
",",
"bins",
"=",
"binning_alg",
")",
",",
"bin_variable",
"(",
"b",
",",
"bins",
"=",
"binning_alg",
")",
")"
] | 41.384615 | 17.230769 |
def transform_dataframe(self, dataframe):
"""
Unstack the dataframe so header consists of a composite 'value' header
plus any other header fields.
"""
coord_fields = self.get_coord_fields()
header_fields = self.get_header_fields()
# Remove any pairs that don't have data for both x & y
for i in range(len(coord_fields)):
dataframe = dataframe.unstack()
dataframe = dataframe.dropna(axis=1, how='all')
dataframe = dataframe.dropna(axis=0, how='any')
# Unstack series header
for i in range(len(header_fields)):
dataframe = dataframe.unstack()
# Compute new column headers
columns = []
for i in range(len(header_fields) + 1):
columns.append([])
for col in dataframe.columns:
value_name = col[0]
coord_names = list(col[1:len(coord_fields) + 1])
header_names = list(col[len(coord_fields) + 1:])
coord_name = ''
for name in coord_names:
if name != self.index_none_value:
coord_name += name + '-'
coord_name += value_name
columns[0].append(coord_name)
for i, header_name in enumerate(header_names):
columns[1 + i].append(header_name)
dataframe.columns = columns
dataframe.columns.names = [''] + header_fields
return dataframe | [
"def",
"transform_dataframe",
"(",
"self",
",",
"dataframe",
")",
":",
"coord_fields",
"=",
"self",
".",
"get_coord_fields",
"(",
")",
"header_fields",
"=",
"self",
".",
"get_header_fields",
"(",
")",
"# Remove any pairs that don't have data for both x & y",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"coord_fields",
")",
")",
":",
"dataframe",
"=",
"dataframe",
".",
"unstack",
"(",
")",
"dataframe",
"=",
"dataframe",
".",
"dropna",
"(",
"axis",
"=",
"1",
",",
"how",
"=",
"'all'",
")",
"dataframe",
"=",
"dataframe",
".",
"dropna",
"(",
"axis",
"=",
"0",
",",
"how",
"=",
"'any'",
")",
"# Unstack series header",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"header_fields",
")",
")",
":",
"dataframe",
"=",
"dataframe",
".",
"unstack",
"(",
")",
"# Compute new column headers",
"columns",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"header_fields",
")",
"+",
"1",
")",
":",
"columns",
".",
"append",
"(",
"[",
"]",
")",
"for",
"col",
"in",
"dataframe",
".",
"columns",
":",
"value_name",
"=",
"col",
"[",
"0",
"]",
"coord_names",
"=",
"list",
"(",
"col",
"[",
"1",
":",
"len",
"(",
"coord_fields",
")",
"+",
"1",
"]",
")",
"header_names",
"=",
"list",
"(",
"col",
"[",
"len",
"(",
"coord_fields",
")",
"+",
"1",
":",
"]",
")",
"coord_name",
"=",
"''",
"for",
"name",
"in",
"coord_names",
":",
"if",
"name",
"!=",
"self",
".",
"index_none_value",
":",
"coord_name",
"+=",
"name",
"+",
"'-'",
"coord_name",
"+=",
"value_name",
"columns",
"[",
"0",
"]",
".",
"append",
"(",
"coord_name",
")",
"for",
"i",
",",
"header_name",
"in",
"enumerate",
"(",
"header_names",
")",
":",
"columns",
"[",
"1",
"+",
"i",
"]",
".",
"append",
"(",
"header_name",
")",
"dataframe",
".",
"columns",
"=",
"columns",
"dataframe",
".",
"columns",
".",
"names",
"=",
"[",
"''",
"]",
"+",
"header_fields",
"return",
"dataframe"
] | 35.525 | 13.975 |
def geo_description(self):
"""Return a description of the geographic extents, using the largest scale
space and grain coverages"""
sc = self._p.space_coverage
gc = self._p.grain_coverage
if sc and gc:
if parse_to_gvid(gc[0]).level == 'state' and parse_to_gvid(sc[0]).level == 'state':
return parse_to_gvid(sc[0]).geo_name
else:
return ("{} in {}".format(
parse_to_gvid(gc[0]).level_plural.title(),
parse_to_gvid(sc[0]).geo_name))
elif sc:
return parse_to_gvid(sc[0]).geo_name.title()
elif sc:
return parse_to_gvid(gc[0]).level_plural.title()
else:
return '' | [
"def",
"geo_description",
"(",
"self",
")",
":",
"sc",
"=",
"self",
".",
"_p",
".",
"space_coverage",
"gc",
"=",
"self",
".",
"_p",
".",
"grain_coverage",
"if",
"sc",
"and",
"gc",
":",
"if",
"parse_to_gvid",
"(",
"gc",
"[",
"0",
"]",
")",
".",
"level",
"==",
"'state'",
"and",
"parse_to_gvid",
"(",
"sc",
"[",
"0",
"]",
")",
".",
"level",
"==",
"'state'",
":",
"return",
"parse_to_gvid",
"(",
"sc",
"[",
"0",
"]",
")",
".",
"geo_name",
"else",
":",
"return",
"(",
"\"{} in {}\"",
".",
"format",
"(",
"parse_to_gvid",
"(",
"gc",
"[",
"0",
"]",
")",
".",
"level_plural",
".",
"title",
"(",
")",
",",
"parse_to_gvid",
"(",
"sc",
"[",
"0",
"]",
")",
".",
"geo_name",
")",
")",
"elif",
"sc",
":",
"return",
"parse_to_gvid",
"(",
"sc",
"[",
"0",
"]",
")",
".",
"geo_name",
".",
"title",
"(",
")",
"elif",
"sc",
":",
"return",
"parse_to_gvid",
"(",
"gc",
"[",
"0",
"]",
")",
".",
"level_plural",
".",
"title",
"(",
")",
"else",
":",
"return",
"''"
] | 36.8 | 18.9 |
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X | [
"def",
"sample",
"(",
"self",
",",
"n_samples",
"=",
"1",
",",
"random_state",
"=",
"None",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"'means_'",
")",
"if",
"random_state",
"is",
"None",
":",
"random_state",
"=",
"self",
".",
"random_state",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"weight_cdf",
"=",
"np",
".",
"cumsum",
"(",
"self",
".",
"weights_",
")",
"X",
"=",
"np",
".",
"empty",
"(",
"(",
"n_samples",
",",
"self",
".",
"means_",
".",
"shape",
"[",
"1",
"]",
")",
")",
"rand",
"=",
"random_state",
".",
"rand",
"(",
"n_samples",
")",
"# decide which component to use for each sample",
"comps",
"=",
"weight_cdf",
".",
"searchsorted",
"(",
"rand",
")",
"# for each component, generate all needed samples",
"for",
"comp",
"in",
"range",
"(",
"self",
".",
"n_components",
")",
":",
"# occurrences of current component in X",
"comp_in_X",
"=",
"(",
"comp",
"==",
"comps",
")",
"# number of those occurrences",
"num_comp_in_X",
"=",
"comp_in_X",
".",
"sum",
"(",
")",
"if",
"num_comp_in_X",
">",
"0",
":",
"if",
"self",
".",
"covariance_type",
"==",
"'tied'",
":",
"cv",
"=",
"self",
".",
"covars_",
"elif",
"self",
".",
"covariance_type",
"==",
"'spherical'",
":",
"cv",
"=",
"self",
".",
"covars_",
"[",
"comp",
"]",
"[",
"0",
"]",
"else",
":",
"cv",
"=",
"self",
".",
"covars_",
"[",
"comp",
"]",
"X",
"[",
"comp_in_X",
"]",
"=",
"sample_gaussian",
"(",
"self",
".",
"means_",
"[",
"comp",
"]",
",",
"cv",
",",
"self",
".",
"covariance_type",
",",
"num_comp_in_X",
",",
"random_state",
"=",
"random_state",
")",
".",
"T",
"return",
"X"
] | 36.878049 | 13.780488 |
def at_time(self, time, asof=False, axis=None):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
return self._take(indexer, axis=axis) | [
"def",
"at_time",
"(",
"self",
",",
"time",
",",
"asof",
"=",
"False",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"_stat_axis_number",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"index",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"try",
":",
"indexer",
"=",
"index",
".",
"indexer_at_time",
"(",
"time",
",",
"asof",
"=",
"asof",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'Index must be DatetimeIndex'",
")",
"return",
"self",
".",
"_take",
"(",
"indexer",
",",
"axis",
"=",
"axis",
")"
] | 29.454545 | 20.327273 |
def __parse_stream(self, stream, parse_line):
"""Generic method to parse gitdm streams"""
if not stream:
raise InvalidFormatError(cause='stream cannot be empty or None')
nline = 0
lines = stream.split('\n')
for line in lines:
nline += 1
# Ignore blank lines and comments
m = re.match(self.LINES_TO_IGNORE_REGEX, line, re.UNICODE)
if m:
continue
m = re.match(self.VALID_LINE_REGEX, line, re.UNICODE)
if not m:
cause = "line %s: invalid format" % str(nline)
raise InvalidFormatError(cause=cause)
try:
result = parse_line(m.group(1), m.group(2))
yield result
except InvalidFormatError as e:
cause = "line %s: %s" % (str(nline), e)
raise InvalidFormatError(cause=cause) | [
"def",
"__parse_stream",
"(",
"self",
",",
"stream",
",",
"parse_line",
")",
":",
"if",
"not",
"stream",
":",
"raise",
"InvalidFormatError",
"(",
"cause",
"=",
"'stream cannot be empty or None'",
")",
"nline",
"=",
"0",
"lines",
"=",
"stream",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"nline",
"+=",
"1",
"# Ignore blank lines and comments",
"m",
"=",
"re",
".",
"match",
"(",
"self",
".",
"LINES_TO_IGNORE_REGEX",
",",
"line",
",",
"re",
".",
"UNICODE",
")",
"if",
"m",
":",
"continue",
"m",
"=",
"re",
".",
"match",
"(",
"self",
".",
"VALID_LINE_REGEX",
",",
"line",
",",
"re",
".",
"UNICODE",
")",
"if",
"not",
"m",
":",
"cause",
"=",
"\"line %s: invalid format\"",
"%",
"str",
"(",
"nline",
")",
"raise",
"InvalidFormatError",
"(",
"cause",
"=",
"cause",
")",
"try",
":",
"result",
"=",
"parse_line",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"m",
".",
"group",
"(",
"2",
")",
")",
"yield",
"result",
"except",
"InvalidFormatError",
"as",
"e",
":",
"cause",
"=",
"\"line %s: %s\"",
"%",
"(",
"str",
"(",
"nline",
")",
",",
"e",
")",
"raise",
"InvalidFormatError",
"(",
"cause",
"=",
"cause",
")"
] | 32.285714 | 21.392857 |
def get_current_status(self):
"""Returns the current state of the local spotify client"""
url = get_url("/remote/status.json")
params = {"oauth": self._oauth_token, "csrf": self._csrf_token}
r = self._request(url=url, params=params)
return r.json() | [
"def",
"get_current_status",
"(",
"self",
")",
":",
"url",
"=",
"get_url",
"(",
"\"/remote/status.json\"",
")",
"params",
"=",
"{",
"\"oauth\"",
":",
"self",
".",
"_oauth_token",
",",
"\"csrf\"",
":",
"self",
".",
"_csrf_token",
"}",
"r",
"=",
"self",
".",
"_request",
"(",
"url",
"=",
"url",
",",
"params",
"=",
"params",
")",
"return",
"r",
".",
"json",
"(",
")"
] | 47.166667 | 12 |
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential
INPUT:
R
phi
t
OUTPUT:
Pot(R(,\phi,t))
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return self._Pot(R,0.,t=t,use_physical=False) | [
"def",
"_evaluate",
"(",
"self",
",",
"R",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"return",
"self",
".",
"_Pot",
"(",
"R",
",",
"0.",
",",
"t",
"=",
"t",
",",
"use_physical",
"=",
"False",
")"
] | 21.4375 | 17.0625 |
def _wave(self):
"""Return a wave.Wave_read instance from the ``wave`` module."""
try:
return wave.open(StringIO(self.contents))
except wave.Error, err:
err.message += "\nInvalid wave file: %s" % self
err.args = (err.message,)
raise | [
"def",
"_wave",
"(",
"self",
")",
":",
"try",
":",
"return",
"wave",
".",
"open",
"(",
"StringIO",
"(",
"self",
".",
"contents",
")",
")",
"except",
"wave",
".",
"Error",
",",
"err",
":",
"err",
".",
"message",
"+=",
"\"\\nInvalid wave file: %s\"",
"%",
"self",
"err",
".",
"args",
"=",
"(",
"err",
".",
"message",
",",
")",
"raise"
] | 37.125 | 14.875 |
def SInt64(value, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
'''Signed 64-bit field'''
return BitField(value, 64, signed=True, min_value=min_value, max_value=max_value, encoder=encoder, fuzzable=fuzzable, name=name, full_range=full_range) | [
"def",
"SInt64",
"(",
"value",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"encoder",
"=",
"ENC_INT_DEFAULT",
",",
"fuzzable",
"=",
"True",
",",
"name",
"=",
"None",
",",
"full_range",
"=",
"False",
")",
":",
"return",
"BitField",
"(",
"value",
",",
"64",
",",
"signed",
"=",
"True",
",",
"min_value",
"=",
"min_value",
",",
"max_value",
"=",
"max_value",
",",
"encoder",
"=",
"encoder",
",",
"fuzzable",
"=",
"fuzzable",
",",
"name",
"=",
"name",
",",
"full_range",
"=",
"full_range",
")"
] | 101 | 68.333333 |
def get_uid(path, follow_symlinks=True):
'''
Return the id of the user that owns a given file
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The uid of the owner
CLI Example:
.. code-block:: bash
salt '*' file.get_uid c:\\temp\\test.txt
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
# Under Windows, if the path is a symlink, the user that owns the symlink is
# returned, not the user that owns the file/directory the symlink is
# pointing to. This behavior is *different* to *nix, therefore the symlink
# is first resolved manually if necessary. Remember symlinks are only
# supported on Windows Vista or later.
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
owner_sid = salt.utils.win_dacl.get_owner(path)
return salt.utils.win_dacl.get_sid_string(owner_sid) | [
"def",
"get_uid",
"(",
"path",
",",
"follow_symlinks",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Path not found: {0}'",
".",
"format",
"(",
"path",
")",
")",
"# Under Windows, if the path is a symlink, the user that owns the symlink is",
"# returned, not the user that owns the file/directory the symlink is",
"# pointing to. This behavior is *different* to *nix, therefore the symlink",
"# is first resolved manually if necessary. Remember symlinks are only",
"# supported on Windows Vista or later.",
"if",
"follow_symlinks",
"and",
"sys",
".",
"getwindowsversion",
"(",
")",
".",
"major",
">=",
"6",
":",
"path",
"=",
"_resolve_symlink",
"(",
"path",
")",
"owner_sid",
"=",
"salt",
".",
"utils",
".",
"win_dacl",
".",
"get_owner",
"(",
"path",
")",
"return",
"salt",
".",
"utils",
".",
"win_dacl",
".",
"get_sid_string",
"(",
"owner_sid",
")"
] | 35.026316 | 26.184211 |
def present(
name,
subscriptions=None,
region=None,
key=None,
keyid=None,
profile=None):
'''
Ensure the SNS topic exists.
name
Name of the SNS topic.
subscriptions
List of SNS subscriptions.
Each subscription is a dictionary with a protocol and endpoint key:
.. code-block:: python
[
{'protocol': 'https', 'endpoint': 'https://www.example.com/sns-endpoint'},
{'protocol': 'sqs', 'endpoint': 'arn:aws:sqs:us-west-2:123456789012:MyQueue'}
]
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
is_present = __salt__['boto_sns.exists'](
name, region=region, key=key, keyid=keyid, profile=profile
)
if is_present:
ret['result'] = True
ret['comment'] = 'AWS SNS topic {0} present.'.format(name)
else:
if __opts__['test']:
msg = 'AWS SNS topic {0} is set to be created.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
created = __salt__['boto_sns.create'](
name, region=region, key=key, keyid=keyid, profile=profile
)
if created:
msg = 'AWS SNS topic {0} created.'.format(name)
ret['comment'] = msg
ret['changes']['old'] = None
ret['changes']['new'] = {'topic': name, 'subscriptions': []}
ret['result'] = True
else:
ret['comment'] = 'Failed to create {0} AWS SNS topic'.format(name)
ret['result'] = False
return ret
if not subscriptions:
return ret
# Get current subscriptions
_subscriptions = __salt__['boto_sns.get_all_subscriptions_by_topic'](
name, region=region, key=key, keyid=keyid, profile=profile
)
# Convert subscriptions into a data strucure we can compare against
_subscriptions = [
{'protocol': s['Protocol'], 'endpoint': s['Endpoint']}
for s in _subscriptions
]
for subscription in subscriptions:
# If the subscription contains inline digest auth, AWS will *** the
# password. So we need to do the same with ours if the regex matches
# Example: https://user:****@my.endpoiint.com/foo/bar
_endpoint = subscription['endpoint']
matches = re.search(
r'https://(?P<user>\w+):(?P<pass>\w+)@',
_endpoint)
# We are using https and have auth creds - the password will be starred out,
# so star out our password so we can still match it
if matches is not None:
subscription['endpoint'] = _endpoint.replace(
matches.groupdict()['pass'],
'****')
if subscription not in _subscriptions:
# Ensure the endpoint is set back to it's original value,
# incase we starred out a password
subscription['endpoint'] = _endpoint
if __opts__['test']:
msg = ' AWS SNS subscription {0}:{1} to be set on topic {2}.'\
.format(
subscription['protocol'],
subscription['endpoint'],
name)
ret['comment'] += msg
ret['result'] = None
continue
created = __salt__['boto_sns.subscribe'](
name, subscription['protocol'], subscription['endpoint'],
region=region, key=key, keyid=keyid, profile=profile)
if created:
msg = ' AWS SNS subscription {0}:{1} set on topic {2}.'\
.format(subscription['protocol'],
subscription['endpoint'],
name)
ret['comment'] += msg
ret['changes'].setdefault('old', None)
ret['changes']\
.setdefault('new', {})\
.setdefault('subscriptions', [])\
.append(subscription)
ret['result'] = True
else:
ret['result'] = False
return ret
else:
msg = ' AWS SNS subscription {0}:{1} already set on topic {2}.'\
.format(
subscription['protocol'],
subscription['endpoint'],
name)
ret['comment'] += msg
return ret | [
"def",
"present",
"(",
"name",
",",
"subscriptions",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"is_present",
"=",
"__salt__",
"[",
"'boto_sns.exists'",
"]",
"(",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"is_present",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'AWS SNS topic {0} present.'",
".",
"format",
"(",
"name",
")",
"else",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"msg",
"=",
"'AWS SNS topic {0} is set to be created.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"created",
"=",
"__salt__",
"[",
"'boto_sns.create'",
"]",
"(",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"created",
":",
"msg",
"=",
"'AWS SNS topic {0} created.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"None",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"{",
"'topic'",
":",
"name",
",",
"'subscriptions'",
":",
"[",
"]",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create {0} AWS SNS topic'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"if",
"not",
"subscriptions",
":",
"return",
"ret",
"# Get current subscriptions",
"_subscriptions",
"=",
"__salt__",
"[",
"'boto_sns.get_all_subscriptions_by_topic'",
"]",
"(",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"# Convert subscriptions into a data strucure we can compare against",
"_subscriptions",
"=",
"[",
"{",
"'protocol'",
":",
"s",
"[",
"'Protocol'",
"]",
",",
"'endpoint'",
":",
"s",
"[",
"'Endpoint'",
"]",
"}",
"for",
"s",
"in",
"_subscriptions",
"]",
"for",
"subscription",
"in",
"subscriptions",
":",
"# If the subscription contains inline digest auth, AWS will *** the",
"# password. So we need to do the same with ours if the regex matches",
"# Example: https://user:****@my.endpoiint.com/foo/bar",
"_endpoint",
"=",
"subscription",
"[",
"'endpoint'",
"]",
"matches",
"=",
"re",
".",
"search",
"(",
"r'https://(?P<user>\\w+):(?P<pass>\\w+)@'",
",",
"_endpoint",
")",
"# We are using https and have auth creds - the password will be starred out,",
"# so star out our password so we can still match it",
"if",
"matches",
"is",
"not",
"None",
":",
"subscription",
"[",
"'endpoint'",
"]",
"=",
"_endpoint",
".",
"replace",
"(",
"matches",
".",
"groupdict",
"(",
")",
"[",
"'pass'",
"]",
",",
"'****'",
")",
"if",
"subscription",
"not",
"in",
"_subscriptions",
":",
"# Ensure the endpoint is set back to it's original value,",
"# incase we starred out a password",
"subscription",
"[",
"'endpoint'",
"]",
"=",
"_endpoint",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"msg",
"=",
"' AWS SNS subscription {0}:{1} to be set on topic {2}.'",
".",
"format",
"(",
"subscription",
"[",
"'protocol'",
"]",
",",
"subscription",
"[",
"'endpoint'",
"]",
",",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"+=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"continue",
"created",
"=",
"__salt__",
"[",
"'boto_sns.subscribe'",
"]",
"(",
"name",
",",
"subscription",
"[",
"'protocol'",
"]",
",",
"subscription",
"[",
"'endpoint'",
"]",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"created",
":",
"msg",
"=",
"' AWS SNS subscription {0}:{1} set on topic {2}.'",
".",
"format",
"(",
"subscription",
"[",
"'protocol'",
"]",
",",
"subscription",
"[",
"'endpoint'",
"]",
",",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"+=",
"msg",
"ret",
"[",
"'changes'",
"]",
".",
"setdefault",
"(",
"'old'",
",",
"None",
")",
"ret",
"[",
"'changes'",
"]",
".",
"setdefault",
"(",
"'new'",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"'subscriptions'",
",",
"[",
"]",
")",
".",
"append",
"(",
"subscription",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"else",
":",
"msg",
"=",
"' AWS SNS subscription {0}:{1} already set on topic {2}.'",
".",
"format",
"(",
"subscription",
"[",
"'protocol'",
"]",
",",
"subscription",
"[",
"'endpoint'",
"]",
",",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"+=",
"msg",
"return",
"ret"
] | 33.311594 | 21.76087 |
def create(self, id, seq, resource): # pylint: disable=invalid-name,redefined-builtin
"""Create a highlight.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:param resource: :class:`highlights.Highlight <highlights.Highlight>` object
:return: :class:`highlights.Highlight <highlights.Highlight>` object
:rtype: highlights.Highlight
"""
return self.create_or_edit(id, seq, resource) | [
"def",
"create",
"(",
"self",
",",
"id",
",",
"seq",
",",
"resource",
")",
":",
"# pylint: disable=invalid-name,redefined-builtin",
"return",
"self",
".",
"create_or_edit",
"(",
"id",
",",
"seq",
",",
"resource",
")"
] | 46.7 | 19.6 |
def _grid_distance(self, index):
"""
Calculate the distance grid for a single index position.
This is pre-calculated for fast neighborhood calculations
later on (see _calc_influence).
"""
# Take every dimension but the first in reverse
# then reverse that list again.
dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1]
coord = []
for idx, dim in enumerate(dimensions):
if idx != 0:
value = (index % dimensions[idx-1]) // dim
else:
value = index // dim
coord.append(value)
coord.append(index % self.map_dimensions[-1])
for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)):
x = np.abs(np.arange(width) - row) ** 2
dims = self.map_dimensions[::-1]
if idx:
dims = dims[:-idx]
x = np.broadcast_to(x, dims).T
if idx == 0:
distance = np.copy(x)
else:
distance += x.T
return distance | [
"def",
"_grid_distance",
"(",
"self",
",",
"index",
")",
":",
"# Take every dimension but the first in reverse",
"# then reverse that list again.",
"dimensions",
"=",
"np",
".",
"cumprod",
"(",
"self",
".",
"map_dimensions",
"[",
"1",
":",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"coord",
"=",
"[",
"]",
"for",
"idx",
",",
"dim",
"in",
"enumerate",
"(",
"dimensions",
")",
":",
"if",
"idx",
"!=",
"0",
":",
"value",
"=",
"(",
"index",
"%",
"dimensions",
"[",
"idx",
"-",
"1",
"]",
")",
"//",
"dim",
"else",
":",
"value",
"=",
"index",
"//",
"dim",
"coord",
".",
"append",
"(",
"value",
")",
"coord",
".",
"append",
"(",
"index",
"%",
"self",
".",
"map_dimensions",
"[",
"-",
"1",
"]",
")",
"for",
"idx",
",",
"(",
"width",
",",
"row",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"map_dimensions",
",",
"coord",
")",
")",
":",
"x",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"arange",
"(",
"width",
")",
"-",
"row",
")",
"**",
"2",
"dims",
"=",
"self",
".",
"map_dimensions",
"[",
":",
":",
"-",
"1",
"]",
"if",
"idx",
":",
"dims",
"=",
"dims",
"[",
":",
"-",
"idx",
"]",
"x",
"=",
"np",
".",
"broadcast_to",
"(",
"x",
",",
"dims",
")",
".",
"T",
"if",
"idx",
"==",
"0",
":",
"distance",
"=",
"np",
".",
"copy",
"(",
"x",
")",
"else",
":",
"distance",
"+=",
"x",
".",
"T",
"return",
"distance"
] | 32.30303 | 17.030303 |
async def start_timeout(self):
"""Start timeout."""
self.timeout_handle = self.pyvlx.connection.loop.call_later(
self.timeout_in_seconds, self.timeout) | [
"async",
"def",
"start_timeout",
"(",
"self",
")",
":",
"self",
".",
"timeout_handle",
"=",
"self",
".",
"pyvlx",
".",
"connection",
".",
"loop",
".",
"call_later",
"(",
"self",
".",
"timeout_in_seconds",
",",
"self",
".",
"timeout",
")"
] | 44 | 12 |
def prepend(exception, message, end=': '):
"""Prepends the first argument (i.e., the exception message) of the a BaseException with the provided message.
Useful for reraising exceptions with additional information.
:param BaseException exception: the exception to prepend
:param str message: the message to prepend
:param str end: the separator to add to the end of the provided message
:returns: the exception
"""
exception.args = exception.args or ('',)
exception.args = (message + end + exception.args[0], ) + exception.args[1:]
return exception | [
"def",
"prepend",
"(",
"exception",
",",
"message",
",",
"end",
"=",
"': '",
")",
":",
"exception",
".",
"args",
"=",
"exception",
".",
"args",
"or",
"(",
"''",
",",
")",
"exception",
".",
"args",
"=",
"(",
"message",
"+",
"end",
"+",
"exception",
".",
"args",
"[",
"0",
"]",
",",
")",
"+",
"exception",
".",
"args",
"[",
"1",
":",
"]",
"return",
"exception"
] | 48.166667 | 16.916667 |
def get_octahedra(self, atoms, periodicity=3):
'''
Extract octahedra as lists of sequence numbers of corner atoms
'''
octahedra = []
for n, i in enumerate(atoms):
found = []
if i.symbol in Perovskite_Structure.B:
for m, j in enumerate(self.virtual_atoms):
if j.symbol in Perovskite_Structure.C and self.virtual_atoms.get_distance(n, m) <= self.OCTAHEDRON_BOND_LENGTH_LIMIT:
found.append(m)
if (periodicity == 3 and len(found) == 6) or (periodicity == 2 and len(found) in [5, 6]):
octahedra.append([n, found])
if not len(octahedra): raise ModuleError("Cannot extract valid octahedra: not enough corner atoms found!")
return octahedra | [
"def",
"get_octahedra",
"(",
"self",
",",
"atoms",
",",
"periodicity",
"=",
"3",
")",
":",
"octahedra",
"=",
"[",
"]",
"for",
"n",
",",
"i",
"in",
"enumerate",
"(",
"atoms",
")",
":",
"found",
"=",
"[",
"]",
"if",
"i",
".",
"symbol",
"in",
"Perovskite_Structure",
".",
"B",
":",
"for",
"m",
",",
"j",
"in",
"enumerate",
"(",
"self",
".",
"virtual_atoms",
")",
":",
"if",
"j",
".",
"symbol",
"in",
"Perovskite_Structure",
".",
"C",
"and",
"self",
".",
"virtual_atoms",
".",
"get_distance",
"(",
"n",
",",
"m",
")",
"<=",
"self",
".",
"OCTAHEDRON_BOND_LENGTH_LIMIT",
":",
"found",
".",
"append",
"(",
"m",
")",
"if",
"(",
"periodicity",
"==",
"3",
"and",
"len",
"(",
"found",
")",
"==",
"6",
")",
"or",
"(",
"periodicity",
"==",
"2",
"and",
"len",
"(",
"found",
")",
"in",
"[",
"5",
",",
"6",
"]",
")",
":",
"octahedra",
".",
"append",
"(",
"[",
"n",
",",
"found",
"]",
")",
"if",
"not",
"len",
"(",
"octahedra",
")",
":",
"raise",
"ModuleError",
"(",
"\"Cannot extract valid octahedra: not enough corner atoms found!\"",
")",
"return",
"octahedra"
] | 46.235294 | 29.058824 |
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset) | [
"def",
"backend_from_fobj",
"(",
"f",
")",
":",
"if",
"magic",
"is",
"None",
":",
"warn",
"(",
"\"magic lib is not installed; assuming mime type %r\"",
"%",
"(",
"DEFAULT_MIME",
")",
")",
"return",
"backend_from_mime",
"(",
"DEFAULT_MIME",
")",
"else",
":",
"offset",
"=",
"f",
".",
"tell",
"(",
")",
"try",
":",
"f",
".",
"seek",
"(",
"0",
")",
"chunk",
"=",
"f",
".",
"read",
"(",
"MAGIC_BUFFER_SIZE",
")",
"mime",
"=",
"magic",
".",
"from_buffer",
"(",
"chunk",
",",
"mime",
"=",
"True",
")",
"return",
"backend_from_mime",
"(",
"mime",
")",
"finally",
":",
"f",
".",
"seek",
"(",
"offset",
")"
] | 33.133333 | 15.6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.