text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def add_instruction(self, specification):
"""Add an instruction specification
:param specification: a specification with a key
:data:`knittingpattern.Instruction.TYPE`
.. seealso:: :meth:`as_instruction`
"""
instruction = self.as_instruction(specification)
self._type_to_instruction[instruction.type] = instruction | [
"def",
"add_instruction",
"(",
"self",
",",
"specification",
")",
":",
"instruction",
"=",
"self",
".",
"as_instruction",
"(",
"specification",
")",
"self",
".",
"_type_to_instruction",
"[",
"instruction",
".",
"type",
"]",
"=",
"instruction"
] | 36.5 | 15.1 |
def regroup_if_changed(group, op_list, name=None):
"""Creates a new group for op_list if it has changed.
Args:
group: The current group. It is returned if op_list is unchanged.
op_list: The list of operations to check.
name: The name to use if a new group is created.
Returns:
Either group or a new group (or if op_list is empty then no_op).
"""
has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
if (group is None or len(group.control_inputs) != len(op_list) or
(has_deltas and op_list.has_changed())):
if has_deltas:
op_list.mark()
if op_list:
return tf.group(*op_list, name=name)
else:
return tf.no_op(name=name)
else:
return group | [
"def",
"regroup_if_changed",
"(",
"group",
",",
"op_list",
",",
"name",
"=",
"None",
")",
":",
"has_deltas",
"=",
"isinstance",
"(",
"op_list",
",",
"sequence_with_deltas",
".",
"SequenceWithDeltas",
")",
"if",
"(",
"group",
"is",
"None",
"or",
"len",
"(",
... | 33.714286 | 20 |
def today(self) -> datetime:
""" Returns today (date only) as datetime """
self.value = datetime.combine(datetime.today().date(), time.min)
return self.value | [
"def",
"today",
"(",
"self",
")",
"->",
"datetime",
":",
"self",
".",
"value",
"=",
"datetime",
".",
"combine",
"(",
"datetime",
".",
"today",
"(",
")",
".",
"date",
"(",
")",
",",
"time",
".",
"min",
")",
"return",
"self",
".",
"value"
] | 44.5 | 14.75 |
def _append(self, target, value):
"""Replace PHP's []= idiom
"""
return self.__p(target) + '[] = ' + self.__p(value) + ';' | [
"def",
"_append",
"(",
"self",
",",
"target",
",",
"value",
")",
":",
"return",
"self",
".",
"__p",
"(",
"target",
")",
"+",
"'[] = '",
"+",
"self",
".",
"__p",
"(",
"value",
")",
"+",
"';'"
] | 28.6 | 14.4 |
def do_index_command(self, index, **options):
"""Delete search index."""
if options["interactive"]:
logger.warning("This will permanently delete the index '%s'.", index)
if not self._confirm_action():
logger.warning(
"Aborting deletion of index '%s' at user's request.", index
)
return
return delete_index(index) | [
"def",
"do_index_command",
"(",
"self",
",",
"index",
",",
"*",
"*",
"options",
")",
":",
"if",
"options",
"[",
"\"interactive\"",
"]",
":",
"logger",
".",
"warning",
"(",
"\"This will permanently delete the index '%s'.\"",
",",
"index",
")",
"if",
"not",
"sel... | 41.9 | 14.9 |
def create_update():
""" Create the grammar for the 'update' statement """
update = upkey("update").setResultsName("action")
returns, none, all_, updated, old, new = map(
upkey, ["returns", "none", "all", "updated", "old", "new"]
)
return_ = returns + Group(
none | (all_ + old) | (all_ + new) | (updated + old) | (updated + new)
).setResultsName("returns")
return (
update
+ table
+ update_expr
+ Optional(keys_in)
+ Optional(where)
+ Optional(using)
+ Optional(return_)
+ Optional(throttle)
) | [
"def",
"create_update",
"(",
")",
":",
"update",
"=",
"upkey",
"(",
"\"update\"",
")",
".",
"setResultsName",
"(",
"\"action\"",
")",
"returns",
",",
"none",
",",
"all_",
",",
"updated",
",",
"old",
",",
"new",
"=",
"map",
"(",
"upkey",
",",
"[",
"\"... | 30.947368 | 19 |
def does_database_exist(self, database_name):
"""
Checks if a database exists in CosmosDB.
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
existing_database = list(self.get_conn().QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": database_name}
]
}))
if len(existing_database) == 0:
return False
return True | [
"def",
"does_database_exist",
"(",
"self",
",",
"database_name",
")",
":",
"if",
"database_name",
"is",
"None",
":",
"raise",
"AirflowBadRequest",
"(",
"\"Database name cannot be None.\"",
")",
"existing_database",
"=",
"list",
"(",
"self",
".",
"get_conn",
"(",
"... | 30.764706 | 17 |
def to_record(self):
"""Create a CertStore record from this TLSFileBundle"""
tf_list = [getattr(self, k, None) for k in
[_.value for _ in TLSFileType]]
# If a cert isn't defined in this bundle, remove it
tf_list = filter(lambda x: x, tf_list)
files = {tf.file_type.value: tf.file_path for tf in tf_list}
self.record['files'] = files
return self.record | [
"def",
"to_record",
"(",
"self",
")",
":",
"tf_list",
"=",
"[",
"getattr",
"(",
"self",
",",
"k",
",",
"None",
")",
"for",
"k",
"in",
"[",
"_",
".",
"value",
"for",
"_",
"in",
"TLSFileType",
"]",
"]",
"# If a cert isn't defined in this bundle, remove it",
... | 41.8 | 15.1 |
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1 * dates.freq, period)
return np.nonzero(current - previous)[0] | [
"def",
"period_break",
"(",
"dates",
",",
"period",
")",
":",
"current",
"=",
"getattr",
"(",
"dates",
",",
"period",
")",
"previous",
"=",
"getattr",
"(",
"dates",
"-",
"1",
"*",
"dates",
".",
"freq",
",",
"period",
")",
"return",
"np",
".",
"nonzer... | 27.214286 | 12.785714 |
def from_pb(cls, cell_pb):
"""Create a new cell from a Cell protobuf.
:type cell_pb: :class:`._generated.data_pb2.Cell`
:param cell_pb: The protobuf to convert.
:rtype: :class:`Cell`
:returns: The cell corresponding to the protobuf.
"""
if cell_pb.labels:
return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels)
else:
return cls(cell_pb.value, cell_pb.timestamp_micros) | [
"def",
"from_pb",
"(",
"cls",
",",
"cell_pb",
")",
":",
"if",
"cell_pb",
".",
"labels",
":",
"return",
"cls",
"(",
"cell_pb",
".",
"value",
",",
"cell_pb",
".",
"timestamp_micros",
",",
"labels",
"=",
"cell_pb",
".",
"labels",
")",
"else",
":",
"return... | 35.846154 | 19.769231 |
def _sanitize(cls, message):
"""
Sanitize the given message,
dealing with multiple arguments
and/or string formatting.
:param message: the log message to be sanitized
:type message: string or list of strings
:rtype: string
"""
if isinstance(message, list):
if len(message) == 0:
sanitized = u"Empty log message"
elif len(message) == 1:
sanitized = message[0]
else:
sanitized = message[0] % tuple(message[1:])
else:
sanitized = message
if not gf.is_unicode(sanitized):
raise TypeError("The given log message is not a Unicode string")
return sanitized | [
"def",
"_sanitize",
"(",
"cls",
",",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"list",
")",
":",
"if",
"len",
"(",
"message",
")",
"==",
"0",
":",
"sanitized",
"=",
"u\"Empty log message\"",
"elif",
"len",
"(",
"message",
")",
"==",... | 33.363636 | 11.909091 |
async def _seed2did(self) -> str:
"""
Derive DID, as per indy-sdk, from seed.
:return: DID
"""
rv = None
dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle)) # list
if dids_with_meta:
for did_with_meta in dids_with_meta: # dict
if 'metadata' in did_with_meta:
try:
meta = json.loads(did_with_meta['metadata'])
if isinstance(meta, dict) and meta.get('seed', None) == self._seed:
rv = did_with_meta.get('did')
except json.decoder.JSONDecodeError:
continue # it's not one of ours, carry on
if not rv: # seed not in metadata, generate did again on temp wallet
temp_wallet = await Wallet(
self._seed,
'{}.seed2did'.format(self.name),
None,
{'auto-remove': True}).create()
rv = temp_wallet.did
await temp_wallet.remove()
return rv | [
"async",
"def",
"_seed2did",
"(",
"self",
")",
"->",
"str",
":",
"rv",
"=",
"None",
"dids_with_meta",
"=",
"json",
".",
"loads",
"(",
"await",
"did",
".",
"list_my_dids_with_meta",
"(",
"self",
".",
"handle",
")",
")",
"# list",
"if",
"dids_with_meta",
"... | 34.387097 | 21.16129 |
def process_hashes(self, body, allow_create=False):
"""Process any hashes mentioned and push them to related topics.
:arg body: Body of the comment to check for hashes and push out.
:arg allow_create=False: Whether to allow creating new topics
from hash tag mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Look for hashtags matching self.hashtag_re and when found,
add comment from body to those topics.
"""
hash_re = re.compile(self.hashtag_re)
hashes = hash_re.findall(body)
done = {self.topic.lower(): True}
for mention in hashes:
mention = mention.strip('#')
if mention.lower() in done:
continue # Do not duplicate hash mentions
new_thread = self.__class__(
owner=self.owner, realm=self.realm, topic=mention,
user=self.user, token=self.token)
my_comment = '# Hashtag copy from %s:\n%s' % (self.topic, body)
new_thread.add_comment(
my_comment, allow_create=allow_create,
allow_hashes=False) # allow_hashes=False to prevent inf loop
done[mention.lower()] = True | [
"def",
"process_hashes",
"(",
"self",
",",
"body",
",",
"allow_create",
"=",
"False",
")",
":",
"hash_re",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"hashtag_re",
")",
"hashes",
"=",
"hash_re",
".",
"findall",
"(",
"body",
")",
"done",
"=",
"{",
"... | 43.482759 | 18.413793 |
def external_account_cmd_by_name(self, command_name):
"""
Executes a command on the external account specified
by name.
@param command_name: The name of the command.
@return: Reference to the submitted command.
@since: API v16
"""
return self._cmd(command_name, data=self.name, api_version=16) | [
"def",
"external_account_cmd_by_name",
"(",
"self",
",",
"command_name",
")",
":",
"return",
"self",
".",
"_cmd",
"(",
"command_name",
",",
"data",
"=",
"self",
".",
"name",
",",
"api_version",
"=",
"16",
")"
] | 31.7 | 16.1 |
async def close(self, wait_for_completion=True):
"""Close window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position.
"""
await self.set_position(
position=Position(position_percent=100),
wait_for_completion=wait_for_completion) | [
"async",
"def",
"close",
"(",
"self",
",",
"wait_for_completion",
"=",
"True",
")",
":",
"await",
"self",
".",
"set_position",
"(",
"position",
"=",
"Position",
"(",
"position_percent",
"=",
"100",
")",
",",
"wait_for_completion",
"=",
"wait_for_completion",
"... | 32.545455 | 16.454545 |
def send(self, str, end='\n'):
"""Sends a line to std_in."""
return self._process.stdin.write(str+end) | [
"def",
"send",
"(",
"self",
",",
"str",
",",
"end",
"=",
"'\\n'",
")",
":",
"return",
"self",
".",
"_process",
".",
"stdin",
".",
"write",
"(",
"str",
"+",
"end",
")"
] | 38.666667 | 6.333333 |
def aggregate(l):
"""Aggregate a `list` of prefixes.
Keyword arguments:
l -- a python list of prefixes
Example use:
>>> aggregate(["10.0.0.0/8", "10.0.0.0/24"])
['10.0.0.0/8']
"""
tree = radix.Radix()
for item in l:
try:
tree.add(item)
except (ValueError) as err:
raise Exception("ERROR: invalid IP prefix: {}".format(item))
return aggregate_tree(tree).prefixes() | [
"def",
"aggregate",
"(",
"l",
")",
":",
"tree",
"=",
"radix",
".",
"Radix",
"(",
")",
"for",
"item",
"in",
"l",
":",
"try",
":",
"tree",
".",
"add",
"(",
"item",
")",
"except",
"(",
"ValueError",
")",
"as",
"err",
":",
"raise",
"Exception",
"(",
... | 23.833333 | 18.888889 |
def to_value(self, variables_mapping=None):
""" parse lazy data with evaluated variables mapping.
Notice: variables_mapping should not contain any variable or function.
"""
variables_mapping = variables_mapping or {}
args = []
for arg in self._args:
if isinstance(arg, LazyFunction):
if self.cached and arg.cache_key and arg.cache_key in cached_functions_mapping:
value = cached_functions_mapping[arg.cache_key]
else:
value = arg.to_value(variables_mapping)
cached_functions_mapping[arg.cache_key] = value
args.append(value)
else:
# variable
var_value = get_mapping_variable(arg, variables_mapping)
args.append(var_value)
if self._string == "{}":
return args[0]
else:
return self._string.format(*args) | [
"def",
"to_value",
"(",
"self",
",",
"variables_mapping",
"=",
"None",
")",
":",
"variables_mapping",
"=",
"variables_mapping",
"or",
"{",
"}",
"args",
"=",
"[",
"]",
"for",
"arg",
"in",
"self",
".",
"_args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"... | 39.666667 | 18.833333 |
def remove_terms_by_indices(self, idx_to_delete_list):
'''
Parameters
----------
idx_to_delete_list, list
Returns
-------
TermDocMatrix
'''
new_X, new_term_idx_store = self._get_X_after_delete_terms(idx_to_delete_list)
return self._make_new_term_doc_matrix(new_X=new_X,
new_mX=self._mX,
new_y=None,
new_category_idx_store=None,
new_term_idx_store=new_term_idx_store,
new_metadata_idx_store=self._metadata_idx_store,
new_y_mask=np.ones(new_X.shape[0]).astype(np.bool)) | [
"def",
"remove_terms_by_indices",
"(",
"self",
",",
"idx_to_delete_list",
")",
":",
"new_X",
",",
"new_term_idx_store",
"=",
"self",
".",
"_get_X_after_delete_terms",
"(",
"idx_to_delete_list",
")",
"return",
"self",
".",
"_make_new_term_doc_matrix",
"(",
"new_X",
"="... | 42.473684 | 29.736842 |
def delay(self,
gain_in=0.8,
gain_out=0.5,
delays=list((1000, 1800)),
decays=list((0.3, 0.25)),
parallel=False):
"""delay takes 4 parameters: input gain (max 1), output gain
and then two lists, delays and decays.
Each list is a pair of comma seperated values within
parenthesis.
"""
self.command.append('echo' + ('s' if parallel else ''))
self.command.append(gain_in)
self.command.append(gain_out)
self.command.extend(list(sum(zip(delays, decays), ())))
return self | [
"def",
"delay",
"(",
"self",
",",
"gain_in",
"=",
"0.8",
",",
"gain_out",
"=",
"0.5",
",",
"delays",
"=",
"list",
"(",
"(",
"1000",
",",
"1800",
")",
")",
",",
"decays",
"=",
"list",
"(",
"(",
"0.3",
",",
"0.25",
")",
")",
",",
"parallel",
"=",... | 35.352941 | 13.117647 |
def from_dict(data, ctx):
"""
Instantiate a new LimitOrder from a dict (generally from loading a JSON
response). The data used to instantiate the LimitOrder is a shallow
copy of the dict passed in, with any complex child types instantiated
appropriately.
"""
data = data.copy()
if data.get('clientExtensions') is not None:
data['clientExtensions'] = \
ctx.transaction.ClientExtensions.from_dict(
data['clientExtensions'], ctx
)
if data.get('units') is not None:
data['units'] = ctx.convert_decimal_number(
data.get('units')
)
if data.get('price') is not None:
data['price'] = ctx.convert_decimal_number(
data.get('price')
)
if data.get('takeProfitOnFill') is not None:
data['takeProfitOnFill'] = \
ctx.transaction.TakeProfitDetails.from_dict(
data['takeProfitOnFill'], ctx
)
if data.get('stopLossOnFill') is not None:
data['stopLossOnFill'] = \
ctx.transaction.StopLossDetails.from_dict(
data['stopLossOnFill'], ctx
)
if data.get('trailingStopLossOnFill') is not None:
data['trailingStopLossOnFill'] = \
ctx.transaction.TrailingStopLossDetails.from_dict(
data['trailingStopLossOnFill'], ctx
)
if data.get('tradeClientExtensions') is not None:
data['tradeClientExtensions'] = \
ctx.transaction.ClientExtensions.from_dict(
data['tradeClientExtensions'], ctx
)
return LimitOrder(**data) | [
"def",
"from_dict",
"(",
"data",
",",
"ctx",
")",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"data",
".",
"get",
"(",
"'clientExtensions'",
")",
"is",
"not",
"None",
":",
"data",
"[",
"'clientExtensions'",
"]",
"=",
"ctx",
".",
"transacti... | 33.884615 | 19.576923 |
def build(self):
"""
Build package from source and create log
file in path /var/log/slpkg/sbo/build_logs/.
Also check md5sum calculates.
"""
try:
self._delete_dir()
try:
tar = tarfile.open(self.script)
except Exception as err:
print err
raise SystemExit()
tar.extractall()
tar.close()
self._makeflags()
self._delete_sbo_tar_gz()
self._create_md5_dict()
if not self.auto:
os.chdir(self._SOURCES)
for src in self.sources:
if not os.path.isfile(src):
continue
# fix build sources with spaces
src = src.replace("%20", " ")
check_md5(self.sbo_md5[src], src)
# copy source and fix passing char '+' from file name
shutil.copy2(src.replace("%2B", "+"), self.path + self.prgnam)
os.chdir(self.path + self.prgnam)
# change permissions
subprocess.call("chmod +x {0}.SlackBuild".format(self.prgnam),
shell=True)
pass_var = self._pass_variable()
if self.meta.sbo_build_log in ["on", "ON"]:
if os.path.isfile(self.build_logs + self.log_file):
os.remove(self.build_logs + self.log_file)
# start log write
log_head(self.build_logs, self.log_file, self.start_log_time)
subprocess.Popen("{0} ./{1}.SlackBuild 2>&1 | tee -a "
"{2}{3}".format(" ".join(pass_var),
self.prgnam, self.build_logs,
self.log_file), shell=True,
stdout=sys.stdout).communicate()
sum_time = build_time(self.start_time)
# write end in log file
log_end(self.build_logs, self.log_file, sum_time)
print("Total build time for the package {0} : {1}\n".format(
self.prgnam, sum_time))
else:
subprocess.call("{0} ./{1}.SlackBuild".format(
" ".join(pass_var), self.prgnam), shell=True)
os.chdir(self.path)
except KeyboardInterrupt: # (OSError, IOError):
self.msg.pkg_not_found("\n", self.prgnam, "Wrong file", "\n") | [
"def",
"build",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_delete_dir",
"(",
")",
"try",
":",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"self",
".",
"script",
")",
"except",
"Exception",
"as",
"err",
":",
"print",
"err",
"raise",
"SystemExit",
... | 45.5 | 15.425926 |
def setTau(self, vehID, tau):
"""setTau(string, double) -> None
Sets the driver's tau-parameter (reaction time or anticipation time depending on the car-following model) in s for this vehicle.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_TAU, vehID, tau) | [
"def",
"setTau",
"(",
"self",
",",
"vehID",
",",
"tau",
")",
":",
"self",
".",
"_connection",
".",
"_sendDoubleCmd",
"(",
"tc",
".",
"CMD_SET_VEHICLE_VARIABLE",
",",
"tc",
".",
"VAR_TAU",
",",
"vehID",
",",
"tau",
")"
] | 45.857143 | 24.428571 |
def connect_by_uri(uri):
"""General URI syntax:
postgresql://user:passwd@host:port/db
NOTE: the authority and the path parts of the URI have precedence
over the query part, if an argument is given in both.
conv,quote_conv,cursorclass
are not (yet?) allowed as complex Python objects are needed, hard to
transmit within an URI...
"""
puri = urisup.uri_help_split(uri)
#params = __dict_from_query(puri[QUERY])
params = {}
if puri[AUTHORITY]:
user, passwd, host, port = puri[AUTHORITY]
if user:
params['user'] = user
if passwd:
params['password'] = passwd
if host:
params['host'] = host
if port:
params['port'] = port
if puri[PATH]:
params['database'] = puri[PATH]
if params['database'] and params['database'][0] == '/':
params['database'] = params['database'][1:]
#__apply_types(params, __typemap)
return psycopg2.connect(**params) | [
"def",
"connect_by_uri",
"(",
"uri",
")",
":",
"puri",
"=",
"urisup",
".",
"uri_help_split",
"(",
"uri",
")",
"#params = __dict_from_query(puri[QUERY])",
"params",
"=",
"{",
"}",
"if",
"puri",
"[",
"AUTHORITY",
"]",
":",
"user",
",",
"passwd",
",",
"host",
... | 28.882353 | 17.441176 |
def finish_build(verbose=True):
'''finish_build will finish the build by way of sending the log to the same bucket.
the params are loaded from the previous function that built the image, expected in
$HOME/params.pkl
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
if go == None:
sys.exit(0)
# Load metadata
passing_params = "/tmp/params.pkl"
params = pickle.load(open(passing_params,'rb'))
# Start the storage service, retrieve the bucket
storage_service = get_google_service()
bucket = get_bucket(storage_service,params['bucket_name'])
# If version isn't in params, build failed
version = 'error-%s' % str(uuid.uuid4())
if 'version' in params:
version = params['version']
trailing_path = "%s/%s" %(params['commit'], version)
image_path = get_image_path(params['repo_url'], trailing_path)
# Upload the log file
params['log_file'] = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=params['logfile'])
# Close up shop
send_build_close(params=params,
response_url=params['logging_url']) | [
"def",
"finish_build",
"(",
"verbose",
"=",
"True",
")",
":",
"# If we are building the image, this will not be set",
"go",
"=",
"get_build_metadata",
"(",
"key",
"=",
"'dobuild'",
")",
"if",
"go",
"==",
"None",
":",
"sys",
".",
"exit",
"(",
"0",
")",
"# Load ... | 40.222222 | 20.833333 |
def write_results(filename,config,srcfile,samples):
""" Package everything nicely """
results = createResults(config,srcfile,samples=samples)
results.write(filename) | [
"def",
"write_results",
"(",
"filename",
",",
"config",
",",
"srcfile",
",",
"samples",
")",
":",
"results",
"=",
"createResults",
"(",
"config",
",",
"srcfile",
",",
"samples",
"=",
"samples",
")",
"results",
".",
"write",
"(",
"filename",
")"
] | 43.75 | 10.75 |
def get_short_name(self):
"""
Returns the short type name of this X.509 extension.
The result is a byte string such as :py:const:`b"basicConstraints"`.
:return: The short type name.
:rtype: :py:data:`bytes`
.. versionadded:: 0.12
"""
obj = _lib.X509_EXTENSION_get_object(self._extension)
nid = _lib.OBJ_obj2nid(obj)
return _ffi.string(_lib.OBJ_nid2sn(nid)) | [
"def",
"get_short_name",
"(",
"self",
")",
":",
"obj",
"=",
"_lib",
".",
"X509_EXTENSION_get_object",
"(",
"self",
".",
"_extension",
")",
"nid",
"=",
"_lib",
".",
"OBJ_obj2nid",
"(",
"obj",
")",
"return",
"_ffi",
".",
"string",
"(",
"_lib",
".",
"OBJ_ni... | 30.428571 | 17.571429 |
def write_close(self, code=None):
'''Write a close ``frame`` with ``code``.
'''
return self.write(self.parser.close(code), opcode=0x8, encode=False) | [
"def",
"write_close",
"(",
"self",
",",
"code",
"=",
"None",
")",
":",
"return",
"self",
".",
"write",
"(",
"self",
".",
"parser",
".",
"close",
"(",
"code",
")",
",",
"opcode",
"=",
"0x8",
",",
"encode",
"=",
"False",
")"
] | 42.25 | 20.25 |
def kelly_kapowski(s, g, w, its=50, r=0.025, m=1.5, **kwargs):
"""
Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
s : ANTsimage
segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
r=0.5, m=1)
"""
if isinstance(s, iio.ANTsImage):
s = s.clone('unsigned int')
d = s.dimension
outimg = g.clone()
kellargs = {'d': d,
's': s,
'g': g,
'w': w,
'c': its,
'r': r,
'm': m,
'o': outimg}
for k, v in kwargs.items():
kellargs[k] = v
processed_kellargs = utils._int_antsProcessArguments(kellargs)
libfn = utils.get_lib_fn('KellyKapowski')
libfn(processed_kellargs)
return outimg | [
"def",
"kelly_kapowski",
"(",
"s",
",",
"g",
",",
"w",
",",
"its",
"=",
"50",
",",
"r",
"=",
"0.025",
",",
"m",
"=",
"1.5",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"iio",
".",
"ANTsImage",
")",
":",
"s",
"=",
"... | 26.268657 | 21.910448 |
def MTF50(self, MTFx,MTFy):
'''
return object resolution as [line pairs/mm]
where MTF=50%
see http://www.imatest.com/docs/sharpness/
'''
if self.mtf_x is None:
self.MTF()
f = UnivariateSpline(self.mtf_x, self.mtf_y-0.5)
return f.roots()[0] | [
"def",
"MTF50",
"(",
"self",
",",
"MTFx",
",",
"MTFy",
")",
":",
"if",
"self",
".",
"mtf_x",
"is",
"None",
":",
"self",
".",
"MTF",
"(",
")",
"f",
"=",
"UnivariateSpline",
"(",
"self",
".",
"mtf_x",
",",
"self",
".",
"mtf_y",
"-",
"0.5",
")",
"... | 32.9 | 16.5 |
def UpdateBudget(self, client_customer_id, budget_id, micro_amount,
delivery_method):
"""Update a Budget with the given budgetId.
Args:
client_customer_id: str Client Customer Id used to update Budget.
budget_id: str Id of the budget to be updated.
micro_amount: str New value for the microAmount field.
delivery_method: str New value for the deliveryMethod field.
"""
self.client.SetClientCustomerId(client_customer_id)
operations = [{
'operator': 'SET',
'operand': {
'budgetId': budget_id,
'amount': {
'microAmount': micro_amount
},
'deliveryMethod': delivery_method
}
}]
self.client.GetService('BudgetService').mutate(operations) | [
"def",
"UpdateBudget",
"(",
"self",
",",
"client_customer_id",
",",
"budget_id",
",",
"micro_amount",
",",
"delivery_method",
")",
":",
"self",
".",
"client",
".",
"SetClientCustomerId",
"(",
"client_customer_id",
")",
"operations",
"=",
"[",
"{",
"'operator'",
... | 35.045455 | 18.409091 |
def compileGSUB(featureFile, glyphOrder):
""" Compile and return a GSUB table from `featureFile` (feaLib
FeatureFile), using the given `glyphOrder` (list of glyph names).
"""
font = ttLib.TTFont()
font.setGlyphOrder(glyphOrder)
addOpenTypeFeatures(font, featureFile, tables={"GSUB"})
return font.get("GSUB") | [
"def",
"compileGSUB",
"(",
"featureFile",
",",
"glyphOrder",
")",
":",
"font",
"=",
"ttLib",
".",
"TTFont",
"(",
")",
"font",
".",
"setGlyphOrder",
"(",
"glyphOrder",
")",
"addOpenTypeFeatures",
"(",
"font",
",",
"featureFile",
",",
"tables",
"=",
"{",
"\"... | 41 | 10.375 |
def atlas_zonefile_push_enqueue( zonefile_hash, name, txid, zonefile_data, zonefile_queue=None, con=None, path=None ):
"""
Enqueue the given zonefile into our "push" queue,
from which it will be replicated to storage and sent
out to other peers who don't have it.
Return True if we enqueued it
Return False if not
"""
res = False
bits = atlasdb_get_zonefile_bits( zonefile_hash, path=path, con=con )
if len(bits) == 0:
# invalid hash
return
with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
if len(zfq) < MAX_QUEUED_ZONEFILES:
zfdata = {
'zonefile_hash': zonefile_hash,
'zonefile': zonefile_data,
'name': name,
'txid': txid
}
zfq.append( zfdata )
res = True
return res | [
"def",
"atlas_zonefile_push_enqueue",
"(",
"zonefile_hash",
",",
"name",
",",
"txid",
",",
"zonefile_data",
",",
"zonefile_queue",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"res",
"=",
"False",
"bits",
"=",
"atlasdb_get_zonefi... | 27.933333 | 21.266667 |
def make_ring_dicts(**kwargs):
"""Build and return the information about the Galprop rings
"""
library_yamlfile = kwargs.get('library', 'models/library.yaml')
gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))
if library_yamlfile is None or library_yamlfile == 'None':
return gmm
diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(library_yamlfile)
for diffuse_value in diffuse_comps.values():
if diffuse_value is None:
continue
if diffuse_value['model_type'] != 'galprop_rings':
continue
versions = diffuse_value['versions']
for version in versions:
gmm.make_ring_dict(version)
return gmm | [
"def",
"make_ring_dicts",
"(",
"*",
"*",
"kwargs",
")",
":",
"library_yamlfile",
"=",
"kwargs",
".",
"get",
"(",
"'library'",
",",
"'models/library.yaml'",
")",
"gmm",
"=",
"kwargs",
".",
"get",
"(",
"'GalpropMapManager'",
",",
"GalpropMapManager",
"(",
"*",
... | 41.764706 | 15.764706 |
def compile(self, name, migrate='', rollback='', num=None):
"""Create a migration."""
if num is None:
num = len(self.todo)
name = '{:03}_'.format(num + 1) + name
filename = name + '.py'
path = os.path.join(self.migrate_dir, filename)
with open(path, 'w') as f:
f.write(MIGRATE_TEMPLATE.format(migrate=migrate, rollback=rollback, name=filename))
return name | [
"def",
"compile",
"(",
"self",
",",
"name",
",",
"migrate",
"=",
"''",
",",
"rollback",
"=",
"''",
",",
"num",
"=",
"None",
")",
":",
"if",
"num",
"is",
"None",
":",
"num",
"=",
"len",
"(",
"self",
".",
"todo",
")",
"name",
"=",
"'{:03}_'",
"."... | 35.583333 | 19.666667 |
def long_to_bytes(lnum, padmultiple=1):
"""Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long."""
# source: http://stackoverflow.com/a/14527004/1231454
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s | [
"def",
"long_to_bytes",
"(",
"lnum",
",",
"padmultiple",
"=",
"1",
")",
":",
"# source: http://stackoverflow.com/a/14527004/1231454",
"if",
"lnum",
"==",
"0",
":",
"return",
"b'\\0'",
"*",
"padmultiple",
"elif",
"lnum",
"<",
"0",
":",
"raise",
"ValueError",
"(",... | 37.608696 | 18.478261 |
def search(cls, session, queries, out_type):
"""Search for a record given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
out_type (helpscout.BaseModel): The type of record to output. This
should be provided by child classes, by calling super.
Returns:
RequestPaginator(output_type=helpscout.BaseModel): Results
iterator of the ``out_type`` that is defined.
"""
cls._check_implements('search')
domain = cls.get_search_domain(queries)
return cls(
'/search/%s.json' % cls.__endpoint__,
data={'query': str(domain)},
session=session,
out_type=out_type,
) | [
"def",
"search",
"(",
"cls",
",",
"session",
",",
"queries",
",",
"out_type",
")",
":",
"cls",
".",
"_check_implements",
"(",
"'search'",
")",
"domain",
"=",
"cls",
".",
"get_search_domain",
"(",
"queries",
")",
"return",
"cls",
"(",
"'/search/%s.json'",
"... | 43.923077 | 20.653846 |
def Y(self, value):
""" sets the Y coordinate """
if isinstance(value, (int, float,
long, types.NoneType)):
self._y = value | [
"def",
"Y",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"float",
",",
"long",
",",
"types",
".",
"NoneType",
")",
")",
":",
"self",
".",
"_y",
"=",
"value"
] | 35.4 | 9.6 |
def add_resourcegroupitems(scenario_id, items, scenario=None, **kwargs):
"""
Get all the items in a group, in a scenario.
"""
user_id = int(kwargs.get('user_id'))
if scenario is None:
scenario = _get_scenario(scenario_id, user_id)
_check_network_ownership(scenario.network_id, user_id)
newitems = []
for group_item in items:
group_item_i = _add_resourcegroupitem(group_item, scenario.id)
newitems.append(group_item_i)
db.DBSession.flush()
return newitems | [
"def",
"add_resourcegroupitems",
"(",
"scenario_id",
",",
"items",
",",
"scenario",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"user_id",
"=",
"int",
"(",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
")",
"if",
"scenario",
"is",
"None",
":",
"scen... | 25.45 | 21.85 |
def _adjusted_rand_index(reference_indices, estimated_indices):
    """Compute the Rand index, adjusted for chance.
    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices
    Returns
    -------
    ari : float
        Adjusted Rand index
    .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0 or
            (ref_classes.shape[0] == est_classes.shape[0] ==
             len(reference_indices))):
        return 1.0
    contingency = _contingency_matrix(reference_indices, estimated_indices)
    # Compute the ARI using the contingency data.
    # Pair counts C(n, 2) summed over rows (reference clusters) ...
    sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
                     contingency.sum(axis=1))
    # ... and over columns (estimated clusters).
    sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
                     contingency.sum(axis=0))
    # Pairs that co-occur in the same cluster under BOTH labelings.
    sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in
                    contingency.flatten()))
    # Expected index under the random-permutation model, and the max index;
    # ARI = (Index - Expected) / (Max - Expected).
    prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,
                                                                   2))
    mean_comb = (sum_comb_k + sum_comb_c)/2.
    return (sum_comb - prod_comb)/(mean_comb - prod_comb)
"def",
"_adjusted_rand_index",
"(",
"reference_indices",
",",
"estimated_indices",
")",
":",
"n_samples",
"=",
"len",
"(",
"reference_indices",
")",
"ref_classes",
"=",
"np",
".",
"unique",
"(",
"reference_indices",
")",
"est_classes",
"=",
"np",
".",
"unique",
... | 38.045455 | 20.454545 |
def participants(self):
    """agents + computers (i.e. all non-observers)

    Returns every player that is a computer or a non-observer, each at
    most once.  Players lacking the ``isComputer``/``isObserver``
    attributes (i.e. not a PlayerPreGame-like object) are skipped.
    """
    ret = []
    for p in self.players:
        try:
            # A computer also satisfies "not observer"; the previous code
            # appended such players twice. Append each player at most once.
            if p.isComputer or not p.isObserver:
                ret.append(p)
        except AttributeError:
            pass  # not a PlayerPreGame; ignore
    return ret
"def",
"participants",
"(",
"self",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"players",
":",
"try",
":",
"if",
"p",
".",
"isComputer",
":",
"ret",
".",
"append",
"(",
"p",
")",
"if",
"not",
"p",
".",
"isObserver",
":",
"re... | 39.777778 | 19.666667 |
def writerow(self, row):
    """Serialize *row* as JSON and append it to the output as one line."""
    text = json.dumps(row)
    # Some legacy json implementations return bytes; normalize to text.
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    self._out.write(text)
    self._out.write(u'\n')
"def",
"writerow",
"(",
"self",
",",
"row",
")",
":",
"json_text",
"=",
"json",
".",
"dumps",
"(",
"row",
")",
"if",
"isinstance",
"(",
"json_text",
",",
"bytes",
")",
":",
"json_text",
"=",
"json_text",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".... | 35 | 6.571429 |
def find_module(self, fullname, path=None):
    """
    Report whether this finder can load *fullname*, i.e. whether it is a
    ``pygal.maps.*`` module whose map name exists in ``maps``.  Returns
    ``self`` when loadable, ``None`` otherwise (the import-protocol
    convention).
    """
    if not fullname.startswith('pygal.maps.'):
        return None
    map_name = fullname.split('.')[2]
    return self if hasattr(maps, map_name) else None
"def",
"find_module",
"(",
"self",
",",
"fullname",
",",
"path",
"=",
"None",
")",
":",
"if",
"fullname",
".",
"startswith",
"(",
"'pygal.maps.'",
")",
"and",
"hasattr",
"(",
"maps",
",",
"fullname",
".",
"split",
"(",
"'.'",
")",
"[",
"2",
"]",
")",... | 34.2 | 12.6 |
def _se_all(self):
    """Standard errors (SE) for all parameters, including the intercept."""
    x = np.atleast_2d(self.x)          # design matrix
    err = np.atleast_1d(self.ms_err)   # mean squared error(s) of the fit
    # diag(inv(X'X)) gives the per-parameter variance factors; scaled by the
    # MS error and square-rooted these become standard errors.
    # NOTE(review): err[:, None] broadcasts against the diagonal — assumes
    # ms_err is scalar or aligned with the leading axis; confirm upstream.
    se = np.sqrt(np.diagonal(np.linalg.inv(x.T @ x)) * err[:, None])
    return np.squeeze(se)
"def",
"_se_all",
"(",
"self",
")",
":",
"x",
"=",
"np",
".",
"atleast_2d",
"(",
"self",
".",
"x",
")",
"err",
"=",
"np",
".",
"atleast_1d",
"(",
"self",
".",
"ms_err",
")",
"se",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"diagonal",
"(",
"np",
... | 46 | 12 |
def satellite(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
    """
    Create a simulated satellite. Returns a catalog object.

    Parameters:
        stellar_mass     : total stellar mass to simulate
        distance_modulus : distance modulus of the satellite
        mc_source_id     : id stamped onto every simulated star (default 1)
        seed             : optional numpy random seed for reproducibility
        **kwargs         : may override 'isochrone' and 'kernel'; remaining
                           keys matching kernel parameters are set on the kernel

    Returns:
        catalog : Catalog of simulated stars that survive the magnitude cuts
    """
    if seed is not None: np.random.seed(seed)
    isochrone = kwargs.pop('isochrone',self.isochrone)
    kernel = kwargs.pop('kernel',self.kernel)
    for k,v in kwargs.items():
        if k in kernel.params.keys(): setattr(kernel,k,v)
    # Draw magnitudes from the isochrone and sky positions from the kernel.
    mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
    lon, lat = kernel.simulate(len(mag_1))
    logger.info("Simulating %i satellite stars..."%len(mag_1))
    pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
    # There is probably a better way to do this step without creating the full HEALPix map
    # (-1 marks pixels outside the ROI -> effectively undetectable).
    mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
    mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
    mag_lim_1 = mask[pix]
    mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
    mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
    mag_lim_2 = mask[pix]
    # Photometric errors as a function of depth below the limiting magnitude.
    mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
    mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
    # Randomize magnitudes by their errors
    mag_obs_1 = mag_1+np.random.normal(size=len(mag_1))*mag_err_1
    mag_obs_2 = mag_2+np.random.normal(size=len(mag_2))*mag_err_2
    #mag_obs_1 = mag_1
    #mag_obs_2 = mag_2
    # Keep only stars brighter than the local limiting magnitude in BOTH bands.
    #select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
    select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)
    # Make sure objects lie within the original cmd (should also be done later...)
    #select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0)
    #return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
    logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
    mc_source_id = mc_source_id * np.ones(len(mag_1))
    hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
                                            mag_obs_2[select],mag_err_2[select],
                                            lon[select],lat[select],mc_source_id[select])
    catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
    return catalog
"def",
"satellite",
"(",
"self",
",",
"stellar_mass",
",",
"distance_modulus",
",",
"mc_source_id",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
... | 48.6 | 29.96 |
def request_sensor_list(self, req, msg):
    """Sensor list

    Inform ``req`` once per known sensor (or only for the requested one)
    and return ``('ok', <number of informs>)``, or ``('fail', ...)`` for
    an unknown sensor name.
    """
    if msg.arguments:
        # BUG FIX: the name was previously wrapped in a 1-tuple before the
        # membership test and dict lookup, so every named query failed.
        name = msg.arguments[0]
        if name not in self.fake_sensor_infos:
            return ("fail", "Unknown sensor name.")
        keys = (name,)
    else:
        keys = self.fake_sensor_infos.keys()
    num_informs = 0
    for sensor_name in keys:
        infos = self.fake_sensor_infos[sensor_name]
        num_informs += 1
        req.inform(sensor_name, *infos)
    return ('ok', num_informs)
"def",
"request_sensor_list",
"(",
"self",
",",
"req",
",",
"msg",
")",
":",
"if",
"msg",
".",
"arguments",
":",
"name",
"=",
"(",
"msg",
".",
"arguments",
"[",
"0",
"]",
",",
")",
"keys",
"=",
"(",
"name",
",",
")",
"if",
"name",
"not",
"in",
... | 31.529412 | 13.588235 |
def get_nowait(self, name, default=_MISSING, autoremove=False):
    """Get the value of a key if it is already set.

    This method allows you to check if a key has already been set
    without blocking.  If the key has not been set you will get the
    default value you pass in, or KeyError() if no default is passed.

    The key is automatically removed when get_nowait() returns only if
    ``autoremove=True``; by default (``autoremove=False``) the key is
    left in place.

    This method is not a coroutine and does not block.

    Args:
        name (str): The name of the key to wait on.
        default (object): The default value to return if the key
            has not yet been set. Defaults to raising KeyError().
        autoremove (bool): Whether to automatically remove the
            key when get_nowait() returns.

    Returns:
        object: Whatever was set in the key by :meth:`set`.
    """
    self._ensure_declared(name)
    try:
        future = self._data[name]
        if future.done():
            return future.result()
        if default is _MISSING:
            raise KeyError("Key {} has not been assigned a value and no default given".format(name))
        return default
    finally:
        # NOTE(review): with autoremove=True the pending future is cancelled
        # and dropped even when only the *default* was returned (key never
        # set) — confirm this is the intended semantics for unset keys.
        if autoremove:
            self._data[name].cancel()
            del self._data[name]
"def",
"get_nowait",
"(",
"self",
",",
"name",
",",
"default",
"=",
"_MISSING",
",",
"autoremove",
"=",
"False",
")",
":",
"self",
".",
"_ensure_declared",
"(",
"name",
")",
"try",
":",
"future",
"=",
"self",
".",
"_data",
"[",
"name",
"]",
"if",
"fu... | 35.105263 | 23.052632 |
def start_environment(
        self, user_name, environment_id, custom_headers=None, raw=False, polling=True, **operation_config):
    """Starts an environment by starting all resources inside the environment.
    This operation can take a while to complete.

    :param user_name: The name of the user.
    :type user_name: str
    :param environment_id: The resourceId of the environment
    :type environment_id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the long-running operation; raw=True so the poller keeps
    # the initial HTTP response.
    raw_result = self._start_environment_initial(
        user_name=user_name,
        environment_id=environment_id,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # The operation returns no body; only surface the raw response
        # when the caller asked for it (otherwise implicitly return None).
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Choose the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
"def",
"start_environment",
"(",
"self",
",",
"user_name",
",",
"environment_id",
",",
"custom_headers",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"polling",
"=",
"True",
",",
"*",
"*",
"operation_config",
")",
":",
"raw_result",
"=",
"self",
".",
"_star... | 49.1 | 21.175 |
def _findJobStoreForUrl(self, url, export=False):
"""
Returns the AbstractJobStore subclass that supports the given URL.
:param urlparse.ParseResult url: The given URL
:param bool export: The URL for
:rtype: toil.jobStore.AbstractJobStore
"""
for jobStoreCls in self._jobStoreClasses:
if jobStoreCls._supportsUrl(url, export):
return jobStoreCls
raise RuntimeError("No job store implementation supports %sporting for URL '%s'" %
('ex' if export else 'im', url.geturl())) | [
"def",
"_findJobStoreForUrl",
"(",
"self",
",",
"url",
",",
"export",
"=",
"False",
")",
":",
"for",
"jobStoreCls",
"in",
"self",
".",
"_jobStoreClasses",
":",
"if",
"jobStoreCls",
".",
"_supportsUrl",
"(",
"url",
",",
"export",
")",
":",
"return",
"jobSto... | 44.461538 | 16.153846 |
def make_coord_dict(coord):
    """Build a {z, x, y} dict from a tile coordinate, for logging."""
    axes = (('z', coord.zoom), ('x', coord.column), ('y', coord.row))
    return {axis: int_if_exact(value) for axis, value in axes}
"def",
"make_coord_dict",
"(",
"coord",
")",
":",
"return",
"dict",
"(",
"z",
"=",
"int_if_exact",
"(",
"coord",
".",
"zoom",
")",
",",
"x",
"=",
"int_if_exact",
"(",
"coord",
".",
"column",
")",
",",
"y",
"=",
"int_if_exact",
"(",
"coord",
".",
"row... | 32 | 12.285714 |
def url_to_filename(url):
    """
    Safely translate a url to a relative filename.

    Args:
        url (str): A target url string

    Returns:
        str
    """
    # drop at most one leading and one trailing slash
    url = url[1:] if url.startswith('/') else url
    url = url[:-1] if url.endswith('/') else url
    # strip parent-directory symbols to prevent unwanted filesystem access
    url = remove_pardir_symbols(url)
    # replace dots with underscores in the final filename component
    return replace_dots_to_underscores_at_last(url)
"def",
"url_to_filename",
"(",
"url",
")",
":",
"# remove leading/trailing slash",
"if",
"url",
".",
"startswith",
"(",
"'/'",
")",
":",
"url",
"=",
"url",
"[",
"1",
":",
"]",
"if",
"url",
".",
"endswith",
"(",
"'/'",
")",
":",
"url",
"=",
"url",
"["... | 24.95 | 16.75 |
def after_output(command_status):
    """
    Shell sequence to be run after the command output.

    The ``command_status`` should be in the range 0-255.
    """
    if command_status not in range(256):
        raise ValueError("command_status must be an integer in the range 0-255")
    sequence = AFTER_OUTPUT.format(command_status=command_status)
    sys.stdout.write(sequence)
    # Flush immediately: command-timing features may key off AFTER_OUTPUT.
    sys.stdout.flush()
"def",
"after_output",
"(",
"command_status",
")",
":",
"if",
"command_status",
"not",
"in",
"range",
"(",
"256",
")",
":",
"raise",
"ValueError",
"(",
"\"command_status must be an integer in the range 0-255\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"AFTER... | 39.666667 | 17.166667 |
def get_idp_choices():
    """
    Get a list of identity provider choices for an enterprise customer.

    Return:
        A list of (provider_id, name) choices prefixed with a blank option,
        or None when the identity-provider registry is unavailable.
    """
    Registry = None  # pylint: disable=redefined-outer-name
    try:
        from third_party_auth.provider import Registry  # pylint: disable=redefined-outer-name
    except ImportError as exception:
        LOGGER.warning("Could not import Registry from third_party_auth.provider")
        LOGGER.warning(exception)
    if not Registry:
        return None
    blank_choice = [("", "-" * 7)]
    return blank_choice + [(idp.provider_id, idp.name) for idp in Registry.enabled()]
"def",
"get_idp_choices",
"(",
")",
":",
"try",
":",
"from",
"third_party_auth",
".",
"provider",
"import",
"Registry",
"# pylint: disable=redefined-outer-name",
"except",
"ImportError",
"as",
"exception",
":",
"LOGGER",
".",
"warning",
"(",
"\"Could not import Registry... | 37.833333 | 27.277778 |
def cleanup(self):
    """
    Stops any running entities in the prefix and uninitializes it, usually
    you want to do this if you are going to remove the prefix afterwards

    Returns:
        None
    """
    # Stop all running entities first so nothing holds the prefix open.
    with LogTask('Stop prefix'):
        self.stop()
    # Removing the lago marker file flags the prefix as uninitialized.
    with LogTask("Tag prefix as uninitialized"):
        os.unlink(self.paths.prefix_lagofile())
"def",
"cleanup",
"(",
"self",
")",
":",
"with",
"LogTask",
"(",
"'Stop prefix'",
")",
":",
"self",
".",
"stop",
"(",
")",
"with",
"LogTask",
"(",
"\"Tag prefix as uninitialized\"",
")",
":",
"os",
".",
"unlink",
"(",
"self",
".",
"paths",
".",
"prefix_l... | 32.333333 | 19 |
def disk_cache(basename, directory, method=False):
    """
    Function decorator for caching pickleable return values on disk. Uses a
    digest computed from the function arguments for invalidation. If 'method',
    skip the first argument, usually being self or cls. The cache filepath is
    'directory/basename-hash.pickle'.
    """
    import hashlib
    directory = os.path.expanduser(directory)
    ensure_directory(directory)
    def wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            positional = tuple(args)
            # Don't use self or cls for the invalidation hash.
            # BUG FIX: the previous code did `key = key[1:]`, which dropped
            # ALL positional arguments rather than just self/cls.
            if method and positional:
                positional = positional[1:]
            # Sort kwargs so the key is independent of call-site ordering.
            key = (positional, tuple(sorted(kwargs.items())))
            # BUG FIX: built-in hash() is randomized per process for strings,
            # so cache files never matched across runs; use a stable digest.
            digest = hashlib.md5(repr(key).encode('utf-8')).hexdigest()
            filename = '{}-{}.pickle'.format(basename, digest)
            filepath = os.path.join(directory, filename)
            if os.path.isfile(filepath):
                with open(filepath, 'rb') as handle:
                    return pickle.load(handle)
            result = func(*args, **kwargs)
            with open(filepath, 'wb') as handle:
                pickle.dump(result, handle)
            return result
        return wrapped
    return wrapper
"def",
"disk_cache",
"(",
"basename",
",",
"directory",
",",
"method",
"=",
"False",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"directory",
")",
"ensure_directory",
"(",
"directory",
")",
"def",
"wrapper",
"(",
"func",
")",
"... | 38.827586 | 14.827586 |
def rot3(theta):
    """
    Args:
        theta (float): Angle in radians
    Return:
        Rotation matrix of angle theta around the Z-axis
    """
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([
        [c, s, 0],
        [-s, c, 0],
        [0, 0, 1],
    ])
"def",
"rot3",
"(",
"theta",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"np",
".",
"cos",
"(",
"theta",
")",
",",
"np",
".",
"sin",
"(",
"theta",
")",
",",
"0",
"]",
",",
"[",
"-",
"np",
".",
"sin",
"(",
"theta",
")",
",",
"np... | 22.833333 | 15.166667 |
def save(self, out, kind=None, **kw):
    """\
    Saves the sequence of QR Code to `out`.
    If `out` is a filename, this method modifies the filename and adds
    ``<Number of QR Codes>-<Current QR Code>`` to it.
    ``structured-append.svg`` becomes (if the sequence contains two QR Codes):
    ``structured-append-02-01.svg`` and ``structured-append-02-02.svg``
    Please note that using a file or file-like object may result into an
    invalid serialization format since all QR Codes are written to the same
    output.
    See :py:meth:`QRCode.save()` for a detailed enumeration of options.
    """
    m = len(self)
    def prepare_fn_noop(o, n):
        """\
        Function to enumerate file names, does nothing by default
        """
        return o
    def prepare_filename(o, n):
        """\
        Function to enumerate file names (total count, current index).
        """
        return o.format(m, n)
    prepare_fn = prepare_fn_noop
    # Only rewrite the name when saving multiple codes to a filename string.
    if m > 1 and isinstance(out, str_type):
        dot_idx = out.rfind('.')
        if dot_idx > -1:
            # Inject a '-<total>-<index>' placeholder before the extension.
            out = out[:dot_idx] + '-{0:02d}-{1:02d}' + out[dot_idx:]
            prepare_fn = prepare_filename
    for n, qrcode in enumerate(self, start=1):
        qrcode.save(prepare_fn(out, n), kind=kind, **kw)
"def",
"save",
"(",
"self",
",",
"out",
",",
"kind",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"m",
"=",
"len",
"(",
"self",
")",
"def",
"prepare_fn_noop",
"(",
"o",
",",
"n",
")",
":",
"\"\"\"\\\n Function to enumerate file names, does nothin... | 36.027027 | 18.864865 |
def Parse(self, stat, file_object, knowledge_base):
    """Parse the History file.

    Yields a BrowserHistoryItem for every entry in a Firefox 3 places
    history file.  ``stat`` and ``knowledge_base`` are accepted for
    interface compatibility but unused.
    """
    _, _ = stat, knowledge_base
    # TODO(user): Convert this to use the far more intelligent plaso parser.
    ff = Firefox3History(file_object)
    for timestamp, unused_entry_type, url, title in ff.Parse():
        yield rdf_webhistory.BrowserHistoryItem(
            url=url,
            domain=urlparse.urlparse(url).netloc,
            access_time=timestamp,
            program_name="Firefox",
            source_path=file_object.Path(),
            title=title)
"def",
"Parse",
"(",
"self",
",",
"stat",
",",
"file_object",
",",
"knowledge_base",
")",
":",
"_",
",",
"_",
"=",
"stat",
",",
"knowledge_base",
"# TODO(user): Convert this to use the far more intelligent plaso parser.",
"ff",
"=",
"Firefox3History",
"(",
"file_objec... | 40.769231 | 11.615385 |
def is_nan(self):
    """Asserts that val is a real number and NaN (not a number)."""
    self._validate_number()
    self._validate_real()
    if math.isnan(self.val):
        return self
    self._err('Expected <%s> to be <NaN>, but was not.' % self.val)
    return self
"def",
"is_nan",
"(",
"self",
")",
":",
"self",
".",
"_validate_number",
"(",
")",
"self",
".",
"_validate_real",
"(",
")",
"if",
"not",
"math",
".",
"isnan",
"(",
"self",
".",
"val",
")",
":",
"self",
".",
"_err",
"(",
"'Expected <%s> to be <NaN>, but w... | 39.428571 | 14.714286 |
def _hijack_gtk(self):
    """Hijack a few key functions in GTK for IPython integration.

    Modifies pyGTK's main and main_quit with a dummy so user code does not
    block IPython. This allows us to use %run to run arbitrary pygtk
    scripts from a long-lived IPython session, and when they attempt to
    start or stop

    Returns
    -------
    The original functions that have been hijacked:
    - gtk.main
    - gtk.main_quit
    """
    def dummy(*args, **kw):
        # No-op stand-in accepting any signature gtk.main/main_quit use.
        pass
    # save and trap main and main_quit from gtk — NOTE: this mutates the
    # gtk module itself, affecting all code in the process.
    orig_main, gtk.main = gtk.main, dummy
    orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
    return orig_main, orig_main_quit
"def",
"_hijack_gtk",
"(",
"self",
")",
":",
"def",
"dummy",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"pass",
"# save and trap main and main_quit from gtk",
"orig_main",
",",
"gtk",
".",
"main",
"=",
"gtk",
".",
"main",
",",
"dummy",
"orig_main_qui... | 35.9 | 19.8 |
def encode_scaled(data, size, version=0, level=QR_ECLEVEL_L, hint=QR_MODE_8,
                  case_sensitive=True):
    """Creates a QR-code from string data, resized to the specified dimensions.
    Args:
      data: string: The data to encode in a QR-code. If a unicode string is
          supplied, it will be encoded in UTF-8.
      size: int: Output size. If this is not an exact multiple of the QR-code's
          dimensions, padding will be added. If this is smaller than the
          QR-code's dimensions, it is ignored.
      version: int: The minimum version to use. If set to 0, the library picks
          the smallest version that the data fits in.
      level: int: Error correction level. Defaults to 'L'.
      hint: int: The type of data to encode. Either QR_MODE_8 or QR_MODE_KANJI.
      case_sensitive: bool: Should string data be encoded case-preserving?
    Returns:
      A (version, size, image) tuple, where image is a size*size PIL image of
      the QR-code.
    """
    version, src_size, im = encode(data, version, level, hint, case_sensitive)
    if size < src_size:
        size = src_size
    # BUG FIX: use floor division — under Python 3 true division `/` yields
    # floats, which PIL's resize()/paste() reject for pixel coordinates.
    # (For Python 2 ints, `//` is identical to the old behavior.)
    qr_size = (size // src_size) * src_size
    im = im.resize((qr_size, qr_size), Image.NEAREST)
    pad = (size - qr_size) // 2
    ret = Image.new("L", (size, size), 255)
    ret.paste(im, (pad, pad))
    return (version, size, ret)
"def",
"encode_scaled",
"(",
"data",
",",
"size",
",",
"version",
"=",
"0",
",",
"level",
"=",
"QR_ECLEVEL_L",
",",
"hint",
"=",
"QR_MODE_8",
",",
"case_sensitive",
"=",
"True",
")",
":",
"version",
",",
"src_size",
",",
"im",
"=",
"encode",
"(",
"data... | 45.965517 | 21.206897 |
def all_consumed_offsets(self):
    """Returns consumed offsets as {TopicPartition: OffsetAndMetadata}"""
    return {
        partition: OffsetAndMetadata(state.position, '')
        for partition, state in six.iteritems(self.assignment)
        if state.has_valid_position
    }
"def",
"all_consumed_offsets",
"(",
"self",
")",
":",
"all_consumed",
"=",
"{",
"}",
"for",
"partition",
",",
"state",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"assignment",
")",
":",
"if",
"state",
".",
"has_valid_position",
":",
"all_consumed",
"... | 48.857143 | 14.142857 |
def _init(self, parser):
    """Initialize/Build the ``argparse.ArgumentParser`` and subparsers.

    This internal version of ``init`` is used to ensure that all
    subcommands have a properly initialized parser.

    Args
    ----
    parser : argparse.ArgumentParser
        The parser for this command.
    """
    assert isinstance(parser, argparse.ArgumentParser)
    self._init_parser(parser)    # configure this command's own parser
    self._attach_arguments()     # add this command's declared arguments
    self._attach_subcommands()   # recursively initialize subcommand parsers
    self.initialized = True
"def",
"_init",
"(",
"self",
",",
"parser",
")",
":",
"assert",
"isinstance",
"(",
"parser",
",",
"argparse",
".",
"ArgumentParser",
")",
"self",
".",
"_init_parser",
"(",
"parser",
")",
"self",
".",
"_attach_arguments",
"(",
")",
"self",
".",
"_attach_sub... | 29.166667 | 17.944444 |
def posterior_predictive_to_xarray(self):
    """Convert posterior_predictive samples to xarray."""
    draws = get_draws(self.posterior, variables=self.posterior_predictive)
    return dict_to_dataset(
        draws, library=self.pystan, coords=self.coords, dims=self.dims)
"def",
"posterior_predictive_to_xarray",
"(",
"self",
")",
":",
"posterior",
"=",
"self",
".",
"posterior",
"posterior_predictive",
"=",
"self",
".",
"posterior_predictive",
"data",
"=",
"get_draws",
"(",
"posterior",
",",
"variables",
"=",
"posterior_predictive",
"... | 59.5 | 17.5 |
def get(self, txn_id):
    """Returns the TransactionReceipt

    Args:
        txn_id (str): the id of the transaction for which the receipt
            should be retrieved.

    Returns:
        TransactionReceipt: The receipt for the given transaction id.

    Raises:
        KeyError: if the transaction id is unknown.
    """
    try:
        serialized = self._receipt_db[txn_id]
    except KeyError:
        raise KeyError('Unknown transaction id {}'.format(txn_id))
    receipt = TransactionReceipt()
    receipt.ParseFromString(serialized)
    return receipt
"def",
"get",
"(",
"self",
",",
"txn_id",
")",
":",
"if",
"txn_id",
"not",
"in",
"self",
".",
"_receipt_db",
":",
"raise",
"KeyError",
"(",
"'Unknown transaction id {}'",
".",
"format",
"(",
"txn_id",
")",
")",
"txn_receipt_bytes",
"=",
"self",
".",
"_rece... | 32.05 | 20.65 |
def get_log_records_access(f):
    """Access to getLogRecords() controlled by settings.PUBLIC_LOG_RECORDS."""
    @functools.wraps(f)
    def wrapper(request, *args, **kwargs):
        is_public = django.conf.settings.PUBLIC_LOG_RECORDS
        if not is_public:
            # Private log records: require a trusted requester.
            trusted(request)
        return f(request, *args, **kwargs)
    return wrapper
"def",
"get_log_records_access",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"django",
".",
"conf",
".",
"settings",
".",
"PU... | 31.6 | 16 |
def record(self):
    # type: () -> bytes
    '''
    A method to generate the string representing this Volume Descriptor.

    Parameters:
     None.
    Returns:
     A string representing this Volume Descriptor.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
    # The volume modification date is regenerated at record time.
    vol_mod_date = dates.VolumeDescriptorDate()
    vol_mod_date.new(time.time())
    # Multi-byte numeric fields appear twice (native plus byte-swapped) as
    # required by the ISO-9660/ECMA-119 both-endian encoding.
    return struct.pack(self.FMT,
                       self._vd_type,
                       b'CD001',
                       self.version,
                       self.flags,
                       self.system_identifier,
                       self.volume_identifier,
                       0,
                       self.space_size,
                       utils.swab_32bit(self.space_size),
                       self.escape_sequences,
                       self.set_size,
                       utils.swab_16bit(self.set_size),
                       self.seqnum,
                       utils.swab_16bit(self.seqnum),
                       self.log_block_size,
                       utils.swab_16bit(self.log_block_size),
                       self.path_tbl_size,
                       utils.swab_32bit(self.path_tbl_size),
                       self.path_table_location_le,
                       self.optional_path_table_location_le,
                       utils.swab_32bit(self.path_table_location_be),
                       self.optional_path_table_location_be,
                       self.root_dir_record.record(),
                       self.volume_set_identifier,
                       self.publisher_identifier.record(),
                       self.preparer_identifier.record(),
                       self.application_identifier.record(),
                       self.copyright_file_identifier,
                       self.abstract_file_identifier,
                       self.bibliographic_file_identifier,
                       self.volume_creation_date.record(),
                       vol_mod_date.record(),
                       self.volume_expiration_date.record(),
                       self.volume_effective_date.record(),
                       self.file_structure_version, 0, self.application_use,
                       b'\x00' * 653)
"def",
"record",
"(",
"self",
")",
":",
"# type: () -> bytes",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'This Volume Descriptor is not yet initialized'",
")",
"vol_mod_date",
"=",
"dates",
".",
"Vol... | 46.245283 | 18.207547 |
def open_url_in_browser(url, browsername=None, fallback=False):
    r"""
    Opens a url in the specified or default browser

    Args:
        url (str): web url
        browsername (str): preferred browser name (e.g. 'chrome'); when
            None the system default browser is used.
        fallback (bool): fall back to the default browser when the
            preferred one is unavailable.

    Returns:
        bool: whether the browser launch was reported successful.

    CommandLine:
        python -m utool.util_grabdata --test-open_url_in_browser

    Example:
        >>> # DISABLE_DOCTEST
        >>> # SCRIPT
        >>> from utool.util_grabdata import *  # NOQA
        >>> url = 'http://www.jrsoftware.org/isdl.php'
        >>> open_url_in_browser(url, 'chrome')
    """
    import webbrowser
    print('[utool] Opening url=%r in browser' % (url,))
    if browsername is None:
        # BUG FIX: webbrowser.open() returns a bool, so the previous code's
        # subsequent `browser.open(url)` raised AttributeError on this path.
        return webbrowser.open(url)
    browser = get_prefered_browser(pref_list=[browsername], fallback=fallback)
    return browser.open(url)
"def",
"open_url_in_browser",
"(",
"url",
",",
"browsername",
"=",
"None",
",",
"fallback",
"=",
"False",
")",
":",
"import",
"webbrowser",
"print",
"(",
"'[utool] Opening url=%r in browser'",
"%",
"(",
"url",
",",
")",
")",
"if",
"browsername",
"is",
"None",
... | 29.916667 | 19.708333 |
def strip_and_uncomment(asm_lines):
    """Strip whitespaces and comments from asm lines."""
    # Drop everything after '#' on each line, then trim whitespace.
    return [line.split('#')[0].strip() for line in asm_lines]
"def",
"strip_and_uncomment",
"(",
"asm_lines",
")",
":",
"asm_stripped",
"=",
"[",
"]",
"for",
"line",
"in",
"asm_lines",
":",
"# Strip comments and whitespaces",
"asm_stripped",
".",
"append",
"(",
"line",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
".",... | 36.571429 | 10 |
def login(self, **kwargs):
    """Log in to shanbay.com; return True when the API reports success."""
    credentials = {
        'username': self.username,
        'password': self.password,
    }
    headers = kwargs.setdefault('headers', {})
    headers.setdefault(
        'Referer',
        'https://www.shanbay.com/web/account/login'
    )
    login_url = 'https://www.shanbay.com/api/v1/account/login/web/'
    response = self.request(login_url, 'put', json=credentials, **kwargs)
    return response.json()['status_code'] == 0
"def",
"login",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"payload",
"=",
"{",
"'username'",
":",
"self",
".",
"username",
",",
"'password'",
":",
"self",
".",
"password",
",",
"}",
"headers",
"=",
"kwargs",
".",
"setdefault",
"(",
"'headers'",
... | 34.266667 | 14.533333 |
def _has_valid_type_annotation(self, tokens, i):
    """Extended check of PEP-484 type hint presence.

    Scans backwards from token *i* inside a parenthesized parameter list
    and reports whether the current token belongs to a type annotation
    (i.e. an annotation colon is found before the parameter boundary).
    """
    if not self._inside_brackets("("):
        return False
    # token_info
    # type string start end line
    #  0      1     2    3    4
    bracket_level = 0
    for token in tokens[i - 1 :: -1]:
        if token[1] == ":":
            # Found the annotation colon before any boundary -> type hint.
            return True
        if token[1] == "(":
            # Reached the start of the signature without seeing a colon.
            return False
        if token[1] == "]":
            bracket_level += 1
        elif token[1] == "[":
            bracket_level -= 1
        elif token[1] == ",":
            # A comma at bracket level 0 separates parameters.
            if not bracket_level:
                return False
        elif token[1] in (".", "..."):
            continue
        elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
            # Anything else (operators, numbers, ...) cannot be part of an
            # annotation expression here.
            return False
    return False
"def",
"_has_valid_type_annotation",
"(",
"self",
",",
"tokens",
",",
"i",
")",
":",
"if",
"not",
"self",
".",
"_inside_brackets",
"(",
"\"(\"",
")",
":",
"return",
"False",
"# token_info",
"# type string start end line",
"# 0 1 2 3 4",
"bracket_level"... | 34.92 | 9.96 |
def _api_key_patch_replace(conn, apiKey, path, value):
'''
the replace patch operation on an ApiKey resource
'''
response = conn.update_api_key(apiKey=apiKey,
patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
return response | [
"def",
"_api_key_patch_replace",
"(",
"conn",
",",
"apiKey",
",",
"path",
",",
"value",
")",
":",
"response",
"=",
"conn",
".",
"update_api_key",
"(",
"apiKey",
"=",
"apiKey",
",",
"patchOperations",
"=",
"[",
"{",
"'op'",
":",
"'replace'",
",",
"'path'",
... | 41.428571 | 26.285714 |
def run(self, files, working_area):
    """
    Run checks concurrently.

    Returns a list of CheckResults ordered by declaration order of the checks in the imported module
    """
    # Ensure that dictionary is ordered by check declaration order (via self.check_names)
    # NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.
    results = {name: None for name in self.check_names}
    checks_root = working_area.parent
    with futures.ProcessPoolExecutor() as executor:
        # Start all checks that have no dependencies
        not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root))
                       for name, _ in self.child_map[None])
        not_passed = []
        while not_done:
            # Block until at least one pending check finishes.
            done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
            for future in done:
                # Get result from completed check
                result, state = future.result()
                results[result.name] = result
                if result.passed:
                    # Dispatch dependent checks
                    for child_name, _ in self.child_map[result.name]:
                        not_done.add(executor.submit(
                            run_check(child_name, self.checks_spec, checks_root, state)))
                else:
                    not_passed.append(result.name)
        # Mark the whole subtree of every failed check as skipped.
        for name in not_passed:
            self._skip_children(name, results)
    return results.values()
"def",
"run",
"(",
"self",
",",
"files",
",",
"working_area",
")",
":",
"# Ensure that dictionary is ordered by check declaration order (via self.check_names)",
"# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.",
"results",
"=",
"{",... | 45.514286 | 23.171429 |
def ping(self, *args, **kwargs):
    """
    Ping Server

    Respond without doing anything.
    This endpoint is used to check that the service is up.

    This method is ``stable``
    """
    func_info = self.funcinfo["ping"]
    return self._makeApiCall(func_info, *args, **kwargs)
"def",
"ping",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"ping\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 25.363636 | 19.181818 |
def _check_import(module_names):
"""Import the specified modules and provide status."""
diagnostics = {}
for module_name in module_names:
try:
__import__(module_name)
res = 'ok'
except ImportError as err:
res = str(err)
diagnostics[module_name] = res
return diagnostics | [
"def",
"_check_import",
"(",
"module_names",
")",
":",
"diagnostics",
"=",
"{",
"}",
"for",
"module_name",
"in",
"module_names",
":",
"try",
":",
"__import__",
"(",
"module_name",
")",
"res",
"=",
"'ok'",
"except",
"ImportError",
"as",
"err",
":",
"res",
"... | 30.454545 | 11.181818 |
def _build_netengine_arguments(self):
"""
returns a python dictionary representing arguments
that will be passed to a netengine backend
for internal use only
"""
arguments = {
"host": self.host
}
if self.config is not None:
for key, value in self.config.iteritems():
arguments[key] = value
if self.port:
arguments["port"] = self.port
return arguments | [
"def",
"_build_netengine_arguments",
"(",
"self",
")",
":",
"arguments",
"=",
"{",
"\"host\"",
":",
"self",
".",
"host",
"}",
"if",
"self",
".",
"config",
"is",
"not",
"None",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"config",
".",
"iteritem... | 26 | 15.555556 |
def create_token(user):
"""
Create token.
"""
payload = jwt_payload_handler(user)
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.utcnow().utctimetuple()
)
# Return values
token = jwt_encode_handler(payload)
return token | [
"def",
"create_token",
"(",
"user",
")",
":",
"payload",
"=",
"jwt_payload_handler",
"(",
"user",
")",
"if",
"api_settings",
".",
"JWT_ALLOW_REFRESH",
":",
"payload",
"[",
"'orig_iat'",
"]",
"=",
"timegm",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"ut... | 22.692308 | 12.846154 |
def tagged(self, *tag_slugs):
"""
Return the items which are tagged with a specific tag.
When multiple tags are provided, they operate as "OR" query.
"""
if getattr(self.model, 'tags', None) is None:
raise AttributeError("The {0} does not include TagsEntryMixin".format(self.model.__name__))
if len(tag_slugs) == 1:
return self.filter(tags__slug=tag_slugs[0])
else:
return self.filter(tags__slug__in=tag_slugs).distinct() | [
"def",
"tagged",
"(",
"self",
",",
"*",
"tag_slugs",
")",
":",
"if",
"getattr",
"(",
"self",
".",
"model",
",",
"'tags'",
",",
"None",
")",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"The {0} does not include TagsEntryMixin\"",
".",
"format",
"(",
... | 41.916667 | 21.25 |
def do_windowed(self, line):
"""
Un-fullscreen the current window
"""
self.bot.canvas.sink.trigger_fullscreen_action(False)
print(self.response_prompt, file=self.stdout) | [
"def",
"do_windowed",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"bot",
".",
"canvas",
".",
"sink",
".",
"trigger_fullscreen_action",
"(",
"False",
")",
"print",
"(",
"self",
".",
"response_prompt",
",",
"file",
"=",
"self",
".",
"stdout",
")"
] | 34 | 7.666667 |
def _get_xml(xml_str):
'''
Intrepret the data coming from opennebula and raise if it's not XML.
'''
try:
xml_data = etree.XML(xml_str)
# XMLSyntaxError seems to be only available from lxml, but that is the xml
# library loaded by this module
except etree.XMLSyntaxError as err:
# opennebula returned invalid XML, which could be an error message, so
# log it
raise SaltCloudSystemExit('opennebula returned: {0}'.format(xml_str))
return xml_data | [
"def",
"_get_xml",
"(",
"xml_str",
")",
":",
"try",
":",
"xml_data",
"=",
"etree",
".",
"XML",
"(",
"xml_str",
")",
"# XMLSyntaxError seems to be only available from lxml, but that is the xml",
"# library loaded by this module",
"except",
"etree",
".",
"XMLSyntaxError",
"... | 38.076923 | 24.230769 |
def terminate_session(self, token):
"""Terminates the session token, effectively logging out the user
from all crowd-enabled services.
Args:
token: The session token.
Returns:
True: If session terminated
None: If session termination failed
"""
url = self.rest_url + "/session/%s" % token
response = self._delete(url)
# For consistency between methods use None rather than False
# If token validation failed for any reason return None
if not response.ok:
return None
# Otherwise return True
return True | [
"def",
"terminate_session",
"(",
"self",
",",
"token",
")",
":",
"url",
"=",
"self",
".",
"rest_url",
"+",
"\"/session/%s\"",
"%",
"token",
"response",
"=",
"self",
".",
"_delete",
"(",
"url",
")",
"# For consistency between methods use None rather than False",
"#... | 27.347826 | 18.826087 |
def dqueries2queriessam(cfg,dqueries):
"""
Aligns queries to genome and gets SAM file
step#1
:param cfg: configuration dict
:param dqueries: dataframe of queries
"""
datatmpd=cfg['datatmpd']
dqueries=set_index(dqueries,'query id')
queryls=dqueries.loc[:,'query sequence'].apply(len).unique()
for queryl in queryls:
logging.debug(f"now aligning queries of length {queryl}")
queriesfap = f'{datatmpd}/01_queries_queryl{queryl:02}.fa'
logging.info(basename(queriesfap))
if not exists(queriesfap) or cfg['force']:
with open(queriesfap,'w') as f:
for gi in dqueries.index:
f.write('>{}\n{}\n'.format(gi.replace(' ','_'),dqueries.loc[gi,'query sequence']))
## BWA alignment command is adapted from cripror
## https://github.com/rraadd88/crisporWebsite/blob/master/crispor.py
# BWA allow up to X mismatches
# maximum number of occurences in the genome to get flagged as repeats.
# This is used in bwa samse, when converting the sam file
# and for warnings in the table output.
MAXOCC = 60000
# the BWA queue size is 2M by default. We derive the queue size from MAXOCC
MFAC = 2000000/MAXOCC
genomep=cfg['genomep']
genomed = dirname(genomep) # make var local, see below
genomegffp=cfg['genomegffp']
# increase MAXOCC if there is only a single query, but only in CGI mode
bwaM = MFAC*MAXOCC # -m is queue size in bwa
queriessap = f'{datatmpd}/01_queries_queryl{queryl:02}.sa'
logging.info(basename(queriessap))
if not exists(queriessap) or cfg['force']:
cmd=f"{cfg['bwa']} aln -t 1 -o 0 -m {bwaM} -n {cfg['mismatches_max']} -k {cfg['mismatches_max']} -N -l {queryl} {genomep} {queriesfap} > {queriessap} 2> {queriessap}.log"
runbashcmd(cmd)
queriessamp = f'{datatmpd}/01_queries_queryl{queryl:02}.sam'
logging.info(basename(queriessamp))
if not exists(queriessamp) or cfg['force']:
cmd=f"{cfg['bwa']} samse -n {MAXOCC} {genomep} {queriessap} {queriesfap} > {queriessamp} 2> {queriessamp}.log"
runbashcmd(cmd)
return cfg | [
"def",
"dqueries2queriessam",
"(",
"cfg",
",",
"dqueries",
")",
":",
"datatmpd",
"=",
"cfg",
"[",
"'datatmpd'",
"]",
"dqueries",
"=",
"set_index",
"(",
"dqueries",
",",
"'query id'",
")",
"queryls",
"=",
"dqueries",
".",
"loc",
"[",
":",
",",
"'query seque... | 46.104167 | 22.4375 |
def _register_service_type(cls, subclass):
"""Registers subclass handlers of various service-type-specific service
implementations. Look for classes decorated with
@Folder._register_service_type for hints on how this works."""
if hasattr(subclass, '__service_type__'):
cls._service_type_mapping[subclass.__service_type__] = subclass
if subclass.__service_type__:
setattr(subclass,
subclass.__service_type__,
property(lambda x: x))
return subclass | [
"def",
"_register_service_type",
"(",
"cls",
",",
"subclass",
")",
":",
"if",
"hasattr",
"(",
"subclass",
",",
"'__service_type__'",
")",
":",
"cls",
".",
"_service_type_mapping",
"[",
"subclass",
".",
"__service_type__",
"]",
"=",
"subclass",
"if",
"subclass",
... | 51.818182 | 9.636364 |
def findLabel(self, query, create=False):
"""Find a label with the given name.
Args:
name (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name.
create (bool): Whether to create the label if it doesn't exist (only if name is a str).
Returns:
Union[gkeepapi.node.Label, None]: The label.
"""
if isinstance(query, six.string_types):
query = query.lower()
for label in self._labels.values():
if (isinstance(query, six.string_types) and query == label.name.lower()) or \
(isinstance(query, Pattern) and query.search(label.name)):
return label
return self.createLabel(query) if create and isinstance(query, six.string_types) else None | [
"def",
"findLabel",
"(",
"self",
",",
"query",
",",
"create",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"query",
",",
"six",
".",
"string_types",
")",
":",
"query",
"=",
"query",
".",
"lower",
"(",
")",
"for",
"label",
"in",
"self",
".",
"_l... | 41.842105 | 27.368421 |
def com_google_fonts_check_unique_glyphnames(ttFont):
"""Font contains unique glyph names?"""
if ttFont.sfntVersion == b'\x00\x01\x00\x00' and ttFont.get(
"post") and ttFont["post"].formatType == 3.0:
yield SKIP, ("TrueType fonts with a format 3.0 post table contain no"
" glyph names.")
else:
import re
glyphs = []
duplicated_glyphIDs = []
for _, g in enumerate(ttFont.getGlyphOrder()):
glyphID = re.sub(r'#\w+', '', g)
if glyphID in glyphs:
duplicated_glyphIDs.append(glyphID)
else:
glyphs.append(glyphID)
if len(duplicated_glyphIDs) == 0:
yield PASS, "Font contains unique glyph names."
else:
yield FAIL, ("The following glyph names"
" occur twice: {}").format(duplicated_glyphIDs) | [
"def",
"com_google_fonts_check_unique_glyphnames",
"(",
"ttFont",
")",
":",
"if",
"ttFont",
".",
"sfntVersion",
"==",
"b'\\x00\\x01\\x00\\x00'",
"and",
"ttFont",
".",
"get",
"(",
"\"post\"",
")",
"and",
"ttFont",
"[",
"\"post\"",
"]",
".",
"formatType",
"==",
"3... | 35.727273 | 16.772727 |
def relax_AX(self):
"""Implement relaxation if option ``RelaxParam`` != 1.0."""
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
self.AX = self.AXnr
else:
alpha = self.rlx
self.AX = alpha*self.AXnr + (1-alpha)*self.block_cat(
self.var_y0() + self.S, self.var_y1()) | [
"def",
"relax_AX",
"(",
"self",
")",
":",
"self",
".",
"AXnr",
"=",
"self",
".",
"cnst_A",
"(",
"self",
".",
"X",
",",
"self",
".",
"Xf",
")",
"if",
"self",
".",
"rlx",
"==",
"1.0",
":",
"self",
".",
"AX",
"=",
"self",
".",
"AXnr",
"else",
":... | 35.2 | 16.9 |
def do_pp(self, arg):
"""pp expression
Pretty-print the value of the expression.
"""
obj = self._getval(arg)
try:
repr(obj)
except Exception:
self.message(bdb.safe_repr(obj))
else:
self.message(pprint.pformat(obj)) | [
"def",
"do_pp",
"(",
"self",
",",
"arg",
")",
":",
"obj",
"=",
"self",
".",
"_getval",
"(",
"arg",
")",
"try",
":",
"repr",
"(",
"obj",
")",
"except",
"Exception",
":",
"self",
".",
"message",
"(",
"bdb",
".",
"safe_repr",
"(",
"obj",
")",
")",
... | 26.909091 | 12.272727 |
def centerOfMass(self):
"""Get the center of mass of actor.
.. hint:: |fatlimb| |fatlimb.py|_
"""
cmf = vtk.vtkCenterOfMass()
cmf.SetInputData(self.polydata(True))
cmf.Update()
c = cmf.GetCenter()
return np.array(c) | [
"def",
"centerOfMass",
"(",
"self",
")",
":",
"cmf",
"=",
"vtk",
".",
"vtkCenterOfMass",
"(",
")",
"cmf",
".",
"SetInputData",
"(",
"self",
".",
"polydata",
"(",
"True",
")",
")",
"cmf",
".",
"Update",
"(",
")",
"c",
"=",
"cmf",
".",
"GetCenter",
"... | 27.1 | 11.5 |
def update(self, forecasts, observations):
"""
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete Cumulative Distribution Functions of
observations:
"""
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0] | [
"def",
"update",
"(",
"self",
",",
"forecasts",
",",
"observations",
")",
":",
"if",
"len",
"(",
"observations",
".",
"shape",
")",
"==",
"1",
":",
"obs_cdfs",
"=",
"np",
".",
"zeros",
"(",
"(",
"observations",
".",
"size",
",",
"self",
".",
"thresho... | 42.315789 | 17.368421 |
def tab_complete(input_list):
"""
<Purpose>
Gets the list of all valid tab-complete strings from all enabled modules.
<Arguments>
input_list: The list of words the user entered.
<Side Effects>
None
<Exceptions>
None
<Returns>
A list of valid tab-complete strings
"""
commands = []
for module in get_enabled_modules():
if 'tab_completer' in module_data[module]:
commands += module_data[module]['tab_completer'](input_list)
return commands | [
"def",
"tab_complete",
"(",
"input_list",
")",
":",
"commands",
"=",
"[",
"]",
"for",
"module",
"in",
"get_enabled_modules",
"(",
")",
":",
"if",
"'tab_completer'",
"in",
"module_data",
"[",
"module",
"]",
":",
"commands",
"+=",
"module_data",
"[",
"module",... | 26.111111 | 18.888889 |
def program(self):
"""
program: (statement)*
"""
root = Program()
while self.token.nature != Nature.EOF:
root.children.append(self.statement())
return root | [
"def",
"program",
"(",
"self",
")",
":",
"root",
"=",
"Program",
"(",
")",
"while",
"self",
".",
"token",
".",
"nature",
"!=",
"Nature",
".",
"EOF",
":",
"root",
".",
"children",
".",
"append",
"(",
"self",
".",
"statement",
"(",
")",
")",
"return"... | 20.8 | 16.6 |
def reduce(self) -> None:
"""
Remove redundant segments.
Since this class is implemented based on list,
this method may require O(n) time.
"""
idx = 0
while idx < len(self):
if idx > 0 and \
self[idx - 1].type == 'text' and self[idx].type == 'text':
self[idx - 1].data['text'] += self[idx].data['text']
del self[idx]
else:
idx += 1 | [
"def",
"reduce",
"(",
"self",
")",
"->",
"None",
":",
"idx",
"=",
"0",
"while",
"idx",
"<",
"len",
"(",
"self",
")",
":",
"if",
"idx",
">",
"0",
"and",
"self",
"[",
"idx",
"-",
"1",
"]",
".",
"type",
"==",
"'text'",
"and",
"self",
"[",
"idx",... | 31.066667 | 16 |
def _compile_pvariable_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
'''Compile a pvariable expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
'''
etype = expr.etype
args = expr.args
name = expr._pvar_to_name(args)
if name not in scope:
raise ValueError('Variable {} not in scope.'.format(name))
fluent = scope[name]
scope = args[1] if args[1] is not None else []
if isinstance(fluent, TensorFluent):
fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch)
elif isinstance(fluent, tf.Tensor):
fluent = TensorFluent(fluent, scope, batch=self.batch_mode)
else:
raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent))
return fluent | [
"def",
"_compile_pvariable_expression",
"(",
"self",
",",
"expr",
":",
"Expression",
",",
"scope",
":",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"noise",
":",
"Optional",
"[",
"L... | 47.633333 | 25.033333 |
def _mpsse_enable(self):
"""Enable MPSSE mode on the FTDI device."""
# Reset MPSSE by sending mask = 0 and mode = 0
self._check(ftdi.set_bitmode, 0, 0)
# Enable MPSSE by sending mask = 0 and mode = 2
self._check(ftdi.set_bitmode, 0, 2) | [
"def",
"_mpsse_enable",
"(",
"self",
")",
":",
"# Reset MPSSE by sending mask = 0 and mode = 0",
"self",
".",
"_check",
"(",
"ftdi",
".",
"set_bitmode",
",",
"0",
",",
"0",
")",
"# Enable MPSSE by sending mask = 0 and mode = 2",
"self",
".",
"_check",
"(",
"ftdi",
"... | 45 | 8.5 |
def _align_iteration_with_cl_boundary(self, iteration, subtract=True):
"""Align iteration with cacheline boundary."""
# FIXME handle multiple datatypes
element_size = self.kernel.datatypes_size[self.kernel.datatype]
cacheline_size = self.machine['cacheline size']
elements_per_cacheline = int(cacheline_size // element_size)
# Gathering some loop information:
inner_loop = list(self.kernel.get_loop_stack(subs_consts=True))[-1]
inner_increment = inner_loop['increment']
# do this by aligning either writes (preferred) or reads
# Assumption: writes (and reads) increase linearly
o = self.kernel.compile_global_offsets(iteration=iteration)[0]
if len(o[1]):
# we have a write to work with:
first_offset = min(o[1])
else:
# we use reads
first_offset = min(o[0])
diff = first_offset - \
(int(first_offset) >> self.csim.first_level.cl_bits << self.csim.first_level.cl_bits)
if diff == 0:
return iteration
elif subtract:
return iteration - (diff // element_size) // inner_increment
else:
return iteration + (elements_per_cacheline - diff // element_size) // inner_increment | [
"def",
"_align_iteration_with_cl_boundary",
"(",
"self",
",",
"iteration",
",",
"subtract",
"=",
"True",
")",
":",
"# FIXME handle multiple datatypes",
"element_size",
"=",
"self",
".",
"kernel",
".",
"datatypes_size",
"[",
"self",
".",
"kernel",
".",
"datatype",
... | 44 | 22.344828 |
def to_jgif(graph):
"""Build a JGIF dictionary from a BEL graph.
:param pybel.BELGraph graph: A BEL graph
:return: A JGIF dictionary
:rtype: dict
.. warning::
Untested! This format is not general purpose and is therefore time is not heavily invested. If you want to
use Cytoscape.js, we suggest using :func:`pybel.to_cx` instead.
Example:
>>> import pybel, os, json
>>> graph_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/knowledge/selventa-small-corpus/selventa-small-corpus-20150611.bel'
>>> graph = pybel.from_url(graph_url)
>>> graph_jgif_json = pybel.to_jgif(graph)
>>> with open(os.path.expanduser('~/Desktop/small_corpus.json'), 'w') as f:
... json.dump(graph_jgif_json, f)
"""
node_bel = {}
u_v_r_bel = {}
nodes_entry = []
edges_entry = []
for i, node in enumerate(sorted(graph, key=methodcaller('as_bel'))):
node_bel[node] = bel = node.as_bel()
nodes_entry.append({
'id': bel,
'label': bel,
'nodeId': i,
'bel_function_type': node[FUNCTION],
'metadata': {}
})
for u, v in graph.edges():
relation_evidences = defaultdict(list)
for data in graph[u][v].values():
if (u, v, data[RELATION]) not in u_v_r_bel:
u_v_r_bel[u, v, data[RELATION]] = graph.edge_to_bel(u, v, edge_data=data)
bel = u_v_r_bel[u, v, data[RELATION]]
evidence_dict = {
'bel_statement': bel,
}
if ANNOTATIONS in data:
evidence_dict['experiment_context'] = data[ANNOTATIONS]
if EVIDENCE in data:
evidence_dict['summary_text'] = data[EVIDENCE]
if CITATION in data:
evidence_dict['citation'] = data[CITATION]
relation_evidences[data[RELATION]].append(evidence_dict)
for relation, evidences in relation_evidences.items():
edges_entry.append({
'source': node_bel[u],
'target': node_bel[v],
'relation': relation,
'label': u_v_r_bel[u, v, relation],
'metadata': {
'evidences': evidences
}
})
return {
'graph': {
'metadata': graph.document,
'nodes': nodes_entry,
'edges': edges_entry
}
} | [
"def",
"to_jgif",
"(",
"graph",
")",
":",
"node_bel",
"=",
"{",
"}",
"u_v_r_bel",
"=",
"{",
"}",
"nodes_entry",
"=",
"[",
"]",
"edges_entry",
"=",
"[",
"]",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"sorted",
"(",
"graph",
",",
"key",
"=",
... | 29.37037 | 22.91358 |
def class_balancing_oversample(X_train=None, y_train=None, printable=True):
"""Input the features and labels, return the features and labels after oversampling.
Parameters
----------
X_train : numpy.array
The inputs.
y_train : numpy.array
The targets.
Examples
--------
One X
>>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)
Two X
>>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)
>>> X1 = X[:, 0:5]
>>> X2 = X[:, 5:]
"""
# ======== Classes balancing
if printable:
tl.logging.info("Classes balancing for training examples...")
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage: %s' % c.most_common())
tl.logging.info('the least stage is Label %s have %s instances' % c.most_common()[-1])
tl.logging.info('the most stage is Label %s have %s instances' % c.most_common(1)[0])
most_num = c.most_common(1)[0][1]
if printable:
tl.logging.info('most num is %d, all classes tend to be this num' % most_num)
locations = {}
number = {}
for lab, num in c.most_common(): # find the index from y_train
number[lab] = num
locations[lab] = np.where(np.array(y_train) == lab)[0]
if printable:
tl.logging.info('convert list(np.array) to dict format')
X = {} # convert list to dict
for lab, num in number.items():
X[lab] = X_train[locations[lab]]
# oversampling
if printable:
tl.logging.info('start oversampling')
for key in X:
temp = X[key]
while True:
if len(X[key]) >= most_num:
break
X[key] = np.vstack((X[key], temp))
if printable:
tl.logging.info('first features of label 0 > %d' % len(X[0][0]))
tl.logging.info('the occurrence num of each stage after oversampling')
for key in X:
tl.logging.info("%s %d" % (key, len(X[key])))
if printable:
tl.logging.info('make each stage have same num of instances')
for key in X:
X[key] = X[key][0:most_num, :]
tl.logging.info("%s %d" % (key, len(X[key])))
# convert dict to list
if printable:
tl.logging.info('convert from dict to list format')
y_train = []
X_train = np.empty(shape=(0, len(X[0][0])))
for key in X:
X_train = np.vstack((X_train, X[key]))
y_train.extend([key for i in range(len(X[key]))])
# tl.logging.info(len(X_train), len(y_train))
c = Counter(y_train)
if printable:
tl.logging.info('the occurrence number of each stage after oversampling: %s' % c.most_common())
# ================ End of Classes balancing
return X_train, y_train | [
"def",
"class_balancing_oversample",
"(",
"X_train",
"=",
"None",
",",
"y_train",
"=",
"None",
",",
"printable",
"=",
"True",
")",
":",
"# ======== Classes balancing",
"if",
"printable",
":",
"tl",
".",
"logging",
".",
"info",
"(",
"\"Classes balancing for trainin... | 32.223529 | 24.705882 |
def can_edit(self, user=None, request=None):
"""
Define if a user can edit or not the instance, according to his account
or the request.
"""
can = False
if request and not self.owner:
if (getattr(settings, "UMAP_ALLOW_ANONYMOUS", False)
and self.is_anonymous_owner(request)):
can = True
if self.edit_status == self.ANONYMOUS:
can = True
elif not user.is_authenticated:
pass
elif user == self.owner:
can = True
elif self.edit_status == self.EDITORS and user in self.editors.all():
can = True
return can | [
"def",
"can_edit",
"(",
"self",
",",
"user",
"=",
"None",
",",
"request",
"=",
"None",
")",
":",
"can",
"=",
"False",
"if",
"request",
"and",
"not",
"self",
".",
"owner",
":",
"if",
"(",
"getattr",
"(",
"settings",
",",
"\"UMAP_ALLOW_ANONYMOUS\"",
",",... | 35.105263 | 15.315789 |
def pool_by_environmentvip(self, environment_vip_id):
"""
Method to return list object pool by environment vip id
Param environment_vip_id: environment vip id
Return list object pool
"""
uri = 'api/v3/pool/environment-vip/%s/' % environment_vip_id
return super(ApiPool, self).get(uri) | [
"def",
"pool_by_environmentvip",
"(",
"self",
",",
"environment_vip_id",
")",
":",
"uri",
"=",
"'api/v3/pool/environment-vip/%s/'",
"%",
"environment_vip_id",
"return",
"super",
"(",
"ApiPool",
",",
"self",
")",
".",
"get",
"(",
"uri",
")"
] | 33.3 | 16.9 |
def _factory(importname, base_class_type, path=None, *args, **kargs):
''' Load a module of a given base class type
Parameter
--------
importname: string
Name of the module, etc. converter
base_class_type: class type
E.g converter
path: Absoulte path of the module
Neede for extensions. If not given module is in online_monitor
package
*args, **kargs:
Arguments to pass to the object init
Return
------
Object of given base class type
'''
def is_base_class(item):
return isclass(item) and item.__module__ == importname
if path:
# Needed to find the module in forked processes; if you know a better
# way tell me!
sys.path.append(path)
# Absolute full path of python module
absolute_path = os.path.join(path, importname) + '.py'
module = imp.load_source(importname, absolute_path)
else:
module = import_module(importname)
# Get the defined base class in the loaded module to be name indendend
clsmembers = getmembers(module, is_base_class)
if not len(clsmembers):
raise ValueError('Found no matching class in %s.' % importname)
else:
cls = clsmembers[0][1]
return cls(*args, **kargs) | [
"def",
"_factory",
"(",
"importname",
",",
"base_class_type",
",",
"path",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"def",
"is_base_class",
"(",
"item",
")",
":",
"return",
"isclass",
"(",
"item",
")",
"and",
"item",
".",
"__m... | 32.475 | 20.925 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.