repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
hbldh/dlxsudoku | dlxsudoku/sudoku.py | https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L147-L150 | def row_iter(self):
"""Get an iterator over all rows in the Sudoku"""
for k in utils.range_(self.side):
yield self.row(k) | [
"def",
"row_iter",
"(",
"self",
")",
":",
"for",
"k",
"in",
"utils",
".",
"range_",
"(",
"self",
".",
"side",
")",
":",
"yield",
"self",
".",
"row",
"(",
"k",
")"
] | Get an iterator over all rows in the Sudoku | [
"Get",
"an",
"iterator",
"over",
"all",
"rows",
"in",
"the",
"Sudoku"
] | python | train |
inspirehep/refextract | refextract/references/text.py | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L145-L152 | def match_pagination(ref_line):
"""Remove footer pagination from references lines"""
pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
re_footer = re.compile(pattern, re.UNICODE)
match = re_footer.match(ref_line)
if match:
return int(match.group(1))
return None | [
"def",
"match_pagination",
"(",
"ref_line",
")",
":",
"pattern",
"=",
"ur'\\(?\\[?(\\d{1,4})\\]?\\)?\\.?\\s*$'",
"re_footer",
"=",
"re",
".",
"compile",
"(",
"pattern",
",",
"re",
".",
"UNICODE",
")",
"match",
"=",
"re_footer",
".",
"match",
"(",
"ref_line",
"... | Remove footer pagination from references lines | [
"Remove",
"footer",
"pagination",
"from",
"references",
"lines"
] | python | train |
blockstack/blockstack-core | blockstack/lib/operations/namespacepreorder.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/namespacepreorder.py#L137-L159 | def get_namespace_preorder_burn_info( outputs ):
"""
Given the set of outputs, find the fee sent
to our burn address.
Return the fee and burn address on success as {'op_fee': ..., 'burn_address': ...}
Return None if not found
"""
if len(outputs) < 3:
# not a well-formed preorder
return None
op_fee = outputs[2]['value']
burn_address = None
try:
burn_address = virtualchain.script_hex_to_address(outputs[2]['script'])
assert burn_address
except:
log.warning("Invalid burn script: {}".format(outputs[2]['script']))
return None
return {'op_fee': op_fee, 'burn_address': burn_address} | [
"def",
"get_namespace_preorder_burn_info",
"(",
"outputs",
")",
":",
"if",
"len",
"(",
"outputs",
")",
"<",
"3",
":",
"# not a well-formed preorder ",
"return",
"None",
"op_fee",
"=",
"outputs",
"[",
"2",
"]",
"[",
"'value'",
"]",
"burn_address",
"=",
"None",
... | Given the set of outputs, find the fee sent
to our burn address.
Return the fee and burn address on success as {'op_fee': ..., 'burn_address': ...}
Return None if not found | [
"Given",
"the",
"set",
"of",
"outputs",
"find",
"the",
"fee",
"sent",
"to",
"our",
"burn",
"address",
".",
"Return",
"the",
"fee",
"and",
"burn",
"address",
"on",
"success",
"as",
"{",
"op_fee",
":",
"...",
"burn_address",
":",
"...",
"}",
"Return",
"N... | python | train |
digi604/django-smart-selects | smart_selects/views.py | https://github.com/digi604/django-smart-selects/blob/05dcc4a3de2874499ff3b9a3dfac5c623206e3e5/smart_selects/views.py#L94-L130 | def filterchain_all(request, app, model, field, foreign_key_app_name,
foreign_key_model_name, foreign_key_field_name, value):
"""Returns filtered results followed by excluded results below."""
model_class = get_model(app, model)
keywords = get_keywords(field, value)
# SECURITY: Make sure all smart selects requests are opt-in
foreign_model_class = get_model(foreign_key_app_name, foreign_key_model_name)
if not any([(isinstance(f, ChainedManyToManyField) or
isinstance(f, ChainedForeignKey))
for f in foreign_model_class._meta.get_fields()]):
raise PermissionDenied("Smart select disallowed")
# filter queryset using limit_choices_to
limit_choices_to = get_limit_choices_to(foreign_key_app_name, foreign_key_model_name, foreign_key_field_name)
queryset = get_queryset(model_class, limit_choices_to=limit_choices_to)
filtered = list(do_filter(queryset, keywords))
# Sort results if model doesn't include a default ordering.
if not getattr(model_class._meta, 'ordering', False):
sort_results(list(filtered))
excluded = list(do_filter(queryset, keywords, exclude=True))
# Sort results if model doesn't include a default ordering.
if not getattr(model_class._meta, 'ordering', False):
sort_results(list(excluded))
# Empty choice to separate filtered and excluded results.
empty_choice = {'value': "", 'display': "---------"}
serialized_results = (
serialize_results(filtered) +
[empty_choice] +
serialize_results(excluded)
)
return JsonResponse(serialized_results, safe=False) | [
"def",
"filterchain_all",
"(",
"request",
",",
"app",
",",
"model",
",",
"field",
",",
"foreign_key_app_name",
",",
"foreign_key_model_name",
",",
"foreign_key_field_name",
",",
"value",
")",
":",
"model_class",
"=",
"get_model",
"(",
"app",
",",
"model",
")",
... | Returns filtered results followed by excluded results below. | [
"Returns",
"filtered",
"results",
"followed",
"by",
"excluded",
"results",
"below",
"."
] | python | valid |
dfm/george | george/solvers/basic.py | https://github.com/dfm/george/blob/44819680036387625ee89f81c55104f3c1600759/george/solvers/basic.py#L51-L70 | def compute(self, x, yerr):
"""
Compute and factorize the covariance matrix.
Args:
x (ndarray[nsamples, ndim]): The independent coordinates of the
data points.
yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
the data points at coordinates ``x``. These values will be
added in quadrature to the diagonal of the covariance matrix.
"""
# Compute the kernel matrix.
K = self.kernel.get_value(x)
K[np.diag_indices_from(K)] += yerr ** 2
# Factor the matrix and compute the log-determinant.
self._factor = (cholesky(K, overwrite_a=True, lower=False), False)
self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0])))
self.computed = True | [
"def",
"compute",
"(",
"self",
",",
"x",
",",
"yerr",
")",
":",
"# Compute the kernel matrix.",
"K",
"=",
"self",
".",
"kernel",
".",
"get_value",
"(",
"x",
")",
"K",
"[",
"np",
".",
"diag_indices_from",
"(",
"K",
")",
"]",
"+=",
"yerr",
"**",
"2",
... | Compute and factorize the covariance matrix.
Args:
x (ndarray[nsamples, ndim]): The independent coordinates of the
data points.
yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
the data points at coordinates ``x``. These values will be
added in quadrature to the diagonal of the covariance matrix. | [
"Compute",
"and",
"factorize",
"the",
"covariance",
"matrix",
"."
] | python | train |
mozilla/build-mar | src/mardor/reader.py | https://github.com/mozilla/build-mar/blob/d8c3b3469e55654d31f430cb343fd89392196c4e/src/mardor/reader.py#L79-L95 | def signature_type(self):
"""Return the signature type used in this MAR.
Returns:
One of None, 'unknown', 'sha1', or 'sha384'
"""
if not self.mardata.signatures:
return None
for sig in self.mardata.signatures.sigs:
if sig.algorithm_id == 1:
return 'sha1'
elif sig.algorithm_id == 2:
return 'sha384'
else:
return 'unknown' | [
"def",
"signature_type",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"mardata",
".",
"signatures",
":",
"return",
"None",
"for",
"sig",
"in",
"self",
".",
"mardata",
".",
"signatures",
".",
"sigs",
":",
"if",
"sig",
".",
"algorithm_id",
"==",
"1",... | Return the signature type used in this MAR.
Returns:
One of None, 'unknown', 'sha1', or 'sha384' | [
"Return",
"the",
"signature",
"type",
"used",
"in",
"this",
"MAR",
"."
] | python | train |
Azure/azure-sdk-for-python | azure-servicebus/azure/servicebus/control_client/_common_serialization.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/_common_serialization.py#L254-L280 | def get_entry_properties_from_element(element, include_id, id_prefix_to_skip=None, use_title_as_id=False):
''' get properties from element tree element '''
properties = {}
etag = element.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'etag'), None)
if etag is not None:
properties['etag'] = etag
updated = element.findtext('./atom:updated', '', _etree_entity_feed_namespaces)
if updated:
properties['updated'] = updated
author_name = element.findtext('./atom:author/atom:name', '', _etree_entity_feed_namespaces)
if author_name:
properties['author'] = author_name
if include_id:
if use_title_as_id:
title = element.findtext('./atom:title', '', _etree_entity_feed_namespaces)
if title:
properties['name'] = title
else:
element_id = element.findtext('./atom:id', '', _etree_entity_feed_namespaces)
if element_id:
properties['name'] = _get_readable_id(element_id, id_prefix_to_skip)
return properties | [
"def",
"get_entry_properties_from_element",
"(",
"element",
",",
"include_id",
",",
"id_prefix_to_skip",
"=",
"None",
",",
"use_title_as_id",
"=",
"False",
")",
":",
"properties",
"=",
"{",
"}",
"etag",
"=",
"element",
".",
"attrib",
".",
"get",
"(",
"_make_et... | get properties from element tree element | [
"get",
"properties",
"from",
"element",
"tree",
"element"
] | python | test |
python-diamond/Diamond | src/collectors/memory/memory.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/memory/memory.py#L59-L72 | def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemoryCollector, self).get_default_config()
config.update({
'path': 'memory',
'method': 'Threaded',
'force_psutil': 'False'
# Collect all the nodes or just a few standard ones?
# Uncomment to enable
# 'detailed': 'True'
})
return config | [
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"MemoryCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'memory'",
",",
"'method'",
":",
"'Threaded'",
","... | Returns the default collector settings | [
"Returns",
"the",
"default",
"collector",
"settings"
] | python | train |
python-diamond/Diamond | src/diamond/handler/cloudwatch.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/cloudwatch.py#L125-L142 | def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(cloudwatchHandler, self).get_default_config_help()
config.update({
'region': 'AWS region',
'metric': 'Diamond metric name',
'namespace': 'CloudWatch metric namespace',
'name': 'CloudWatch metric name',
'unit': 'CloudWatch metric unit',
'collector': 'Diamond collector name',
'collect_by_instance': 'Collect metrics for instances separately',
'collect_without_dimension': 'Collect metrics without dimension'
})
return config | [
"def",
"get_default_config_help",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"cloudwatchHandler",
",",
"self",
")",
".",
"get_default_config_help",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'region'",
":",
"'AWS region'",
",",
"'metric'",
":",
"... | Returns the help text for the configuration options for this handler | [
"Returns",
"the",
"help",
"text",
"for",
"the",
"configuration",
"options",
"for",
"this",
"handler"
] | python | train |
PrefPy/prefpy | prefpy/mov.py | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mov.py#L533-L556 | def MoV_SNTV(profile, K):
"""
Returns an integer that represents the winning candidate given an election profile.
Tie-breaking rule: numerically increasing order
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "csv":
print("ERROR: unsupported profile type")
exit()
m = profile.numCands
candScoresMap = MechanismPlurality().getCandScoresMap(profile)
if K >= m:
return float("inf")
# print(candScoresMap)
sorted_items = sorted(candScoresMap.items(), key=lambda x: x[1], reverse=True)
sorted_dict = {key: value for key, value in sorted_items}
sorted_cand = list(sorted_dict.keys())
MoV = math.floor((sorted_dict[sorted_cand[K - 1]] - sorted_dict[sorted_cand[K]]) / 2) + 1
return MoV | [
"def",
"MoV_SNTV",
"(",
"profile",
",",
"K",
")",
":",
"# Currently, we expect the profile to contain complete ordering over candidates.",
"elecType",
"=",
"profile",
".",
"getElecType",
"(",
")",
"if",
"elecType",
"!=",
"\"soc\"",
"and",
"elecType",
"!=",
"\"toc\"",
... | Returns an integer that represents the winning candidate given an election profile.
Tie-breaking rule: numerically increasing order
:ivar Profile profile: A Profile object that represents an election profile. | [
"Returns",
"an",
"integer",
"that",
"represents",
"the",
"winning",
"candidate",
"given",
"an",
"election",
"profile",
".",
"Tie",
"-",
"breaking",
"rule",
":",
"numerically",
"increasing",
"order"
] | python | train |
mcash/merchant-api-python-sdk | mcash/mapi_client/mapi_client.py | https://github.com/mcash/merchant-api-python-sdk/blob/ebe8734126790354b71077aca519ff263235944e/mcash/mapi_client/mapi_client.py#L101-L109 | def _depaginate_all(self, url):
"""GETs the url provided and traverses the 'next' url that's
returned while storing the data in a list. Returns a single list of all
items.
"""
items = []
for x in self._depagination_generator(url):
items += x
return items | [
"def",
"_depaginate_all",
"(",
"self",
",",
"url",
")",
":",
"items",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"_depagination_generator",
"(",
"url",
")",
":",
"items",
"+=",
"x",
"return",
"items"
] | GETs the url provided and traverses the 'next' url that's
returned while storing the data in a list. Returns a single list of all
items. | [
"GETs",
"the",
"url",
"provided",
"and",
"traverses",
"the",
"next",
"url",
"that",
"s",
"returned",
"while",
"storing",
"the",
"data",
"in",
"a",
"list",
".",
"Returns",
"a",
"single",
"list",
"of",
"all",
"items",
"."
] | python | train |
koordinates/python-client | koordinates/publishing.py | https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/publishing.py#L29-L35 | def create(self, publish):
"""
Creates a new publish group.
"""
target_url = self.client.get_url('PUBLISH', 'POST', 'create')
r = self.client.request('POST', target_url, json=publish._serialize())
return self.create_from_result(r.json()) | [
"def",
"create",
"(",
"self",
",",
"publish",
")",
":",
"target_url",
"=",
"self",
".",
"client",
".",
"get_url",
"(",
"'PUBLISH'",
",",
"'POST'",
",",
"'create'",
")",
"r",
"=",
"self",
".",
"client",
".",
"request",
"(",
"'POST'",
",",
"target_url",
... | Creates a new publish group. | [
"Creates",
"a",
"new",
"publish",
"group",
"."
] | python | train |
LionelAuroux/pyrser | pyrser/type_system/symbol.py | https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/symbol.py#L51-L61 | def get_scope_names(self) -> list:
"""
Return the list of all contained scope from global to local
"""
# allow global scope to have an None string instance
lscope = []
for scope in reversed(self.get_scope_list()):
if scope.name is not None:
# handle fun/block scope decoration
lscope.append(scope.name)
return lscope | [
"def",
"get_scope_names",
"(",
"self",
")",
"->",
"list",
":",
"# allow global scope to have an None string instance",
"lscope",
"=",
"[",
"]",
"for",
"scope",
"in",
"reversed",
"(",
"self",
".",
"get_scope_list",
"(",
")",
")",
":",
"if",
"scope",
".",
"name"... | Return the list of all contained scope from global to local | [
"Return",
"the",
"list",
"of",
"all",
"contained",
"scope",
"from",
"global",
"to",
"local"
] | python | test |
andersinno/python-database-sanitizer | database_sanitizer/dump/mysql.py | https://github.com/andersinno/python-database-sanitizer/blob/742bc1f43526b60f322a48f18c900f94fd446ed4/database_sanitizer/dump/mysql.py#L42-L66 | def sanitize(url, config):
"""
Obtains dump of MySQL database by executing `mysqldump` command and
sanitizes it output.
:param url: URL to the database which is going to be sanitized, parsed by
Python's URL parser.
:type url: urllib.urlparse.ParseResult
:param config: Optional sanitizer configuration to be used for sanitation
of the values stored in the database.
:type config: database_sanitizer.config.Configuration|None
"""
if url.scheme != "mysql":
raise ValueError("Unsupported database type: '%s'" % (url.scheme,))
args, env = get_mysqldump_args_and_env_from_url(url=url)
process = subprocess.Popen(
args=["mysqldump"] + args,
env=env,
stdout=subprocess.PIPE,
)
return sanitize_from_stream(stream=process.stdout, config=config) | [
"def",
"sanitize",
"(",
"url",
",",
"config",
")",
":",
"if",
"url",
".",
"scheme",
"!=",
"\"mysql\"",
":",
"raise",
"ValueError",
"(",
"\"Unsupported database type: '%s'\"",
"%",
"(",
"url",
".",
"scheme",
",",
")",
")",
"args",
",",
"env",
"=",
"get_my... | Obtains dump of MySQL database by executing `mysqldump` command and
sanitizes it output.
:param url: URL to the database which is going to be sanitized, parsed by
Python's URL parser.
:type url: urllib.urlparse.ParseResult
:param config: Optional sanitizer configuration to be used for sanitation
of the values stored in the database.
:type config: database_sanitizer.config.Configuration|None | [
"Obtains",
"dump",
"of",
"MySQL",
"database",
"by",
"executing",
"mysqldump",
"command",
"and",
"sanitizes",
"it",
"output",
"."
] | python | train |
dereneaton/ipyrad | ipyrad/analysis/structure.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/structure.py#L147-L151 | def result_files(self):
""" returns a list of files that have finished structure """
reps = OPJ(self.workdir, self.name+"-K-*-rep-*_f")
repfiles = glob.glob(reps)
return repfiles | [
"def",
"result_files",
"(",
"self",
")",
":",
"reps",
"=",
"OPJ",
"(",
"self",
".",
"workdir",
",",
"self",
".",
"name",
"+",
"\"-K-*-rep-*_f\"",
")",
"repfiles",
"=",
"glob",
".",
"glob",
"(",
"reps",
")",
"return",
"repfiles"
] | returns a list of files that have finished structure | [
"returns",
"a",
"list",
"of",
"files",
"that",
"have",
"finished",
"structure"
] | python | valid |
HPENetworking/PYHPEIMC | build/lib/pyhpimc/auth.py | https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/build/lib/pyhpimc/auth.py#L55-L73 | def get_auth(self):
"""
This method requests an authentication object from the HPE IMC NMS and returns an HTTPDigest Auth Object
:return:
"""
url = self.h_url + self.server + ":" + self.port
auth = requests.auth.HTTPDigestAuth(self.username,self.password)
auth_url = "/imcrs"
f_url = url + auth_url
try:
r = requests.get(f_url, auth=auth, headers=headers, verify=False)
return r.status_code
# checks for reqeusts exceptions
except requests.exceptions.RequestException as e:
return ("Error:\n" + str(e) + '\n\nThe IMC server address is invalid. Please try again')
set_imc_creds()
if r.status_code != 200: # checks for valid IMC credentials
return ("Error:\n" + str(e) +"Error: \n You're credentials are invalid. Please try again\n\n")
set_imc_creds() | [
"def",
"get_auth",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"h_url",
"+",
"self",
".",
"server",
"+",
"\":\"",
"+",
"self",
".",
"port",
"auth",
"=",
"requests",
".",
"auth",
".",
"HTTPDigestAuth",
"(",
"self",
".",
"username",
",",
"self",
... | This method requests an authentication object from the HPE IMC NMS and returns an HTTPDigest Auth Object
:return: | [
"This",
"method",
"requests",
"an",
"authentication",
"object",
"from",
"the",
"HPE",
"IMC",
"NMS",
"and",
"returns",
"an",
"HTTPDigest",
"Auth",
"Object",
":",
"return",
":"
] | python | train |
objectrocket/python-client | objectrocket/instances/redis.py | https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/instances/redis.py#L36-L53 | def get_connection(self, internal=False):
"""Get a live connection to this instance.
:param bool internal: Whether or not to use a DC internal network connection.
:rtype: :py:class:`redis.client.StrictRedis`
"""
# Determine the connection string to use.
connect_string = self.connect_string
if internal:
connect_string = self.internal_connect_string
# Stripe Redis protocol prefix coming from the API.
connect_string = connect_string.strip('redis://')
host, port = connect_string.split(':')
# Build and return the redis client.
return redis.StrictRedis(host=host, port=port, password=self._password) | [
"def",
"get_connection",
"(",
"self",
",",
"internal",
"=",
"False",
")",
":",
"# Determine the connection string to use.",
"connect_string",
"=",
"self",
".",
"connect_string",
"if",
"internal",
":",
"connect_string",
"=",
"self",
".",
"internal_connect_string",
"# S... | Get a live connection to this instance.
:param bool internal: Whether or not to use a DC internal network connection.
:rtype: :py:class:`redis.client.StrictRedis` | [
"Get",
"a",
"live",
"connection",
"to",
"this",
"instance",
"."
] | python | train |
chaosmail/python-fs | fs/fs.py | https://github.com/chaosmail/python-fs/blob/2567922ced9387e327e65f3244caff3b7af35684/fs/fs.py#L307-L309 | def add_suffix(path, suffix=""):
"""Adds a suffix to a filename *path*"""
return join(dirname(path), basename(path, ext=False) + suffix + extname(path)) | [
"def",
"add_suffix",
"(",
"path",
",",
"suffix",
"=",
"\"\"",
")",
":",
"return",
"join",
"(",
"dirname",
"(",
"path",
")",
",",
"basename",
"(",
"path",
",",
"ext",
"=",
"False",
")",
"+",
"suffix",
"+",
"extname",
"(",
"path",
")",
")"
] | Adds a suffix to a filename *path* | [
"Adds",
"a",
"suffix",
"to",
"a",
"filename",
"*",
"path",
"*"
] | python | train |
globocom/GloboNetworkAPI-client-python | networkapiclient/Ip.py | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ip.py#L142-L170 | def get_available_ip4(self, id_network):
"""
Get a available IP in the network ipv4
:param id_network: Network identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ip': {'ip': < available_ip >}}
:raise IpNotAvailableError: Network dont have available IP for insert a new IP
:raise NetworkIPv4NotFoundError: Network is not found
:raise UserNotAuthorizedError: User dont have permission to get a available IP
:raise InvalidParameterError: Network identifier is null or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
if not is_valid_int_param(id_network):
raise InvalidParameterError(
u'Network identifier is invalid or was not informed.')
url = 'ip/availableip4/' + str(id_network) + "/"
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) | [
"def",
"get_available_ip4",
"(",
"self",
",",
"id_network",
")",
":",
"if",
"not",
"is_valid_int_param",
"(",
"id_network",
")",
":",
"raise",
"InvalidParameterError",
"(",
"u'Network identifier is invalid or was not informed.'",
")",
"url",
"=",
"'ip/availableip4/'",
"... | Get a available IP in the network ipv4
:param id_network: Network identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ip': {'ip': < available_ip >}}
:raise IpNotAvailableError: Network dont have available IP for insert a new IP
:raise NetworkIPv4NotFoundError: Network is not found
:raise UserNotAuthorizedError: User dont have permission to get a available IP
:raise InvalidParameterError: Network identifier is null or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database. | [
"Get",
"a",
"available",
"IP",
"in",
"the",
"network",
"ipv4"
] | python | train |
delph-in/pydelphin | delphin/mrs/query.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/query.py#L257-L290 | def find_subgraphs_by_preds(xmrs, preds, connected=None):
"""
Yield subgraphs matching a list of predicates.
Predicates may match multiple EPs/nodes in the *xmrs*, meaning that
more than one subgraph is possible. Also, predicates in *preds*
match in number, so if a predicate appears twice in *preds*, there
will be two matching EPs/nodes in each subgraph.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
use
preds: iterable of predicates to include in subgraphs
connected (bool, optional): if `True`, all yielded subgraphs
must be connected, as determined by
:meth:`Xmrs.is_connected() <delphin.mrs.xmrs.Xmrs.is_connected>`.
Yields:
A :class:`~delphin.mrs.xmrs.Xmrs` object for each subgraphs found.
"""
preds = list(preds)
count = len(preds)
# find all lists of nodeids such that the lists have no repeated nids;
# keep them as a list (fixme: why not just get sets?)
nidsets = set(
tuple(sorted(ns))
for ns in filter(
lambda ns: len(set(ns)) == count,
product(*[select_nodeids(xmrs, pred=p) for p in preds])
)
)
for nidset in nidsets:
sg = xmrs.subgraph(nidset)
if connected is None or sg.is_connected() == connected:
yield sg | [
"def",
"find_subgraphs_by_preds",
"(",
"xmrs",
",",
"preds",
",",
"connected",
"=",
"None",
")",
":",
"preds",
"=",
"list",
"(",
"preds",
")",
"count",
"=",
"len",
"(",
"preds",
")",
"# find all lists of nodeids such that the lists have no repeated nids;",
"# keep t... | Yield subgraphs matching a list of predicates.
Predicates may match multiple EPs/nodes in the *xmrs*, meaning that
more than one subgraph is possible. Also, predicates in *preds*
match in number, so if a predicate appears twice in *preds*, there
will be two matching EPs/nodes in each subgraph.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
use
preds: iterable of predicates to include in subgraphs
connected (bool, optional): if `True`, all yielded subgraphs
must be connected, as determined by
:meth:`Xmrs.is_connected() <delphin.mrs.xmrs.Xmrs.is_connected>`.
Yields:
A :class:`~delphin.mrs.xmrs.Xmrs` object for each subgraphs found. | [
"Yield",
"subgraphs",
"matching",
"a",
"list",
"of",
"predicates",
"."
] | python | train |
collectiveacuity/labPack | labpack/platforms/docker.py | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/docker.py#L537-L562 | def command(self, sys_command):
'''
a method to run a system command in a separate shell
:param sys_command: string with docker command
:return: string output from docker
'''
title = '%s.command' % self.__class__.__name__
# validate inputs
input_fields = {
'sys_command': sys_command
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
from subprocess import check_output, STDOUT, CalledProcessError
try:
output = check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8')
except CalledProcessError as err:
raise Exception(err.output.decode('ascii', 'ignore'))
return output | [
"def",
"command",
"(",
"self",
",",
"sys_command",
")",
":",
"title",
"=",
"'%s.command'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs\r",
"input_fields",
"=",
"{",
"'sys_command'",
":",
"sys_command",
"}",
"for",
"key",
",",
"value",
... | a method to run a system command in a separate shell
:param sys_command: string with docker command
:return: string output from docker | [
"a",
"method",
"to",
"run",
"a",
"system",
"command",
"in",
"a",
"separate",
"shell",
":",
"param",
"sys_command",
":",
"string",
"with",
"docker",
"command",
":",
"return",
":",
"string",
"output",
"from",
"docker"
] | python | train |
horazont/aioxmpp | aioxmpp/xml.py | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/xml.py#L459-L476 | def endElementNS(self, name, qname):
"""
End a previously started element. `name` must be a ``(namespace_uri,
localname)`` tuple and `qname` is ignored.
"""
if self._ns_prefixes_floating_out:
raise RuntimeError("namespace prefix has not been closed")
if self._pending_start_element == name:
self._pending_start_element = False
self._write(b"/>")
else:
self._write(b"</")
self._write(self._qname(name).encode("utf-8"))
self._write(b">")
self._curr_ns_map, self._ns_prefixes_floating_out, self._ns_counter = \
self._ns_map_stack.pop() | [
"def",
"endElementNS",
"(",
"self",
",",
"name",
",",
"qname",
")",
":",
"if",
"self",
".",
"_ns_prefixes_floating_out",
":",
"raise",
"RuntimeError",
"(",
"\"namespace prefix has not been closed\"",
")",
"if",
"self",
".",
"_pending_start_element",
"==",
"name",
... | End a previously started element. `name` must be a ``(namespace_uri,
localname)`` tuple and `qname` is ignored. | [
"End",
"a",
"previously",
"started",
"element",
".",
"name",
"must",
"be",
"a",
"(",
"namespace_uri",
"localname",
")",
"tuple",
"and",
"qname",
"is",
"ignored",
"."
] | python | train |
wbond/oscrypto | oscrypto/_osx/util.py | https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/util.py#L42-L60 | def _extract_error():
"""
Extracts the last OS error message into a python unicode string
:return:
A unicode string error message
"""
error_num = errno()
try:
error_string = os.strerror(error_num)
except (ValueError):
return str_cls(error_num)
if isinstance(error_string, str_cls):
return error_string
return _try_decode(error_string) | [
"def",
"_extract_error",
"(",
")",
":",
"error_num",
"=",
"errno",
"(",
")",
"try",
":",
"error_string",
"=",
"os",
".",
"strerror",
"(",
"error_num",
")",
"except",
"(",
"ValueError",
")",
":",
"return",
"str_cls",
"(",
"error_num",
")",
"if",
"isinstan... | Extracts the last OS error message into a python unicode string
:return:
A unicode string error message | [
"Extracts",
"the",
"last",
"OS",
"error",
"message",
"into",
"a",
"python",
"unicode",
"string"
] | python | valid |
caseyjlaw/rtpipe | rtpipe/calpipe.py | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/calpipe.py#L132-L323 | def run(self, refant=[], antsel=[], uvrange='', fluxname='', fluxname_full='', band='', spw0='', spw1='', flaglist=[]):
""" Run calibration pipeline. Assumes L-band.
refant is list of antenna name strings (e.g., ['ea10']). default is to calculate based on distance from array center.
antsel is list of ants to use (or reject) (e.g., ['!ea08'])
uvrange is string giving uvrange (e.g., '<5klambda')
fluxname, fluxname_full, and band are used to find flux calibrator info (e.g., '3C48', '0137+331=3C48', 'L').
spw0 is spw selection for gain cal before bp cal (e.g., '0~1:60~75')
spw1 is spw selection for gain cal after bp cal (e.g., '0~1:6~122')
flaglist is the list of flag commands (e.g., ["mode='unflag'", "mode='shadow'", "mode='manual' antenna='ea11'"])
"""
os.chdir(self.workdir)
if not len(refant):
refant = self.find_refants()
antposname = self.fileroot + '.antpos' # antpos
delayname = self.fileroot + '.delay' # delay cal
g0name = self.fileroot + '.g0' # initial gain correction before bp
b1name = self.fileroot + '.b1' # bandpass file
g1name = self.fileroot + '.g1' # gain cal per scan
g2name = self.fileroot + '.g2' # flux scale applied
# overload auto detected flux cal info, if desired
if fluxname:
self.fluxname = fluxname
if band:
self.band = band
if fluxname_full:
self.fluxname_full = fluxname
# if flux calibrator available, use its model
if self.fluxname and self.band:
if self.band == 'P':
calband = 'L'
else:
calband = self.band
fluxmodel = '/home/casa/packages/RHEL5/release/casapy-41.0.24668-001-64b/data/nrao/VLA/CalModels/' + self.fluxname + '_' + calband + '.im'
else:
fluxmodel = ''
# set up MS file
msfile = self.genms()
# flag data
if flaglist:
self.flagdata(msfile, flaglist=flaglist)
elif os.path.exists(os.path.join(self.workdir, 'flags.txt')):
self.flagdata(msfile, flagfile=os.path.join(self.workdir, 'flags.txt'))
else:
print 'No flagging.'
# Calibrate!
if fluxmodel:
if not os.path.exists(g0name):
print 'Applying flux model for BP calibrator...'
cfg = tl.SetjyConfig()
cfg.vis = msfile
cfg.scan = self.bpstr
cfg.modimage = fluxmodel
cfg.standard = 'Perley-Butler 2010' # for some reason 2013 standard can't find 3C48
tl.setjy(cfg)
print 'Starting initial gain cal...'
cfg = tl.GaincalConfig()
cfg.vis = msfile
cfg.caltable = g0name
cfg.gaintable = []
cfg.scan = self.bpstr
cfg.gaintype = 'G'
cfg.solint = 'inf'
cfg.spw = spw0
cfg.refant = refant
cfg.minsnr = 5.
cfg.calmode = 'p'
cfg.antenna = antsel
cfg.uvrange = uvrange
tl.gaincal(cfg)
else:
print '%s exists' % g0name
if not os.path.exists(b1name):
print 'Starting bp cal...'
cfg = tl.GaincalConfig()
cfg.vis = msfile
cfg.caltable = b1name
cfg.gaintable = [g0name]
cfg.scan = self.bpstr
cfg.spw = spw1
cfg.gaintype = 'BPOLY'
cfg.degamp = 5
cfg.degphase = 2
cfg.maskedge = 6
cfg.solint = 'inf'
cfg.combine = ['scan']
cfg.solnorm = True
cfg.refant = refant
cfg.antenna = antsel
cfg.uvrange = uvrange
tl.gaincal(cfg)
else:
print '%s exists' % b1name
if not os.path.exists(g1name) or not os.path.exists(g2name):
print 'Starting gain cal...'
cfg = tl.GaincalConfig()
cfg.vis = msfile
cfg.caltable = g1name
cfg.gaintable = [b1name]
cfg.scan = self.allstr
cfg.gaintype = 'G'
cfg.solint = 'inf'
cfg.spw = spw1
cfg.refant = refant
cfg.minsnr = 5.
cfg.calmode='ap'
cfg.antenna = antsel
cfg.uvrange = uvrange
tl.gaincal(cfg)
print 'Transferring flux scale...'
cfg = tl.FluxscaleConfig()
cfg.vis = msfile
cfg.caltable = g1name
cfg.fluxtable = g2name
cfg.reference = self.fluxname_full
tl.fluxscale(cfg)
else:
print 'either %s or %s exist' % (g1name, g2name)
else: # without fluxscale
if not os.path.exists(g0name):
print 'Starting initial gain cal...'
cfg = tl.GaincalConfig()
cfg.vis = msfile
cfg.caltable = g0name
cfg.gaintable = []
cfg.scan = self.bpstr
cfg.gaintype = 'G'
cfg.solint = 'inf'
cfg.spw = spw0
cfg.refant = refant
cfg.minsnr = 5.
cfg.calmode = 'p'
cfg.antenna = antsel
cfg.uvrange = uvrange
tl.gaincal(cfg)
else:
print '%s exists' % g0name
if not os.path.exists(b1name):
print 'Starting bp cal...'
cfg = tl.GaincalConfig()
cfg.vis = msfile
cfg.caltable = b1name
cfg.gaintable = [g0name]
cfg.scan = self.bpstr
cfg.spw = spw1
cfg.gaintype = 'BPOLY'
cfg.degamp = 5
cfg.degphase = 2
cfg.maskedge = 6
cfg.solint = 'inf'
cfg.combine = ['scan']
cfg.solnorm = True
cfg.refant = refant
cfg.antenna = antsel
cfg.uvrange = uvrange
tl.gaincal(cfg)
else:
print '%s exists' % b1name
if not os.path.exists(g1name):
print 'Starting gain cal...'
cfg = tl.GaincalConfig()
cfg.vis = msfile
cfg.caltable = g1name
cfg.gaintable = [b1name]
cfg.scan = self.allstr
cfg.gaintype = 'G'
cfg.solint = 'inf'
cfg.spw = spw1
cfg.refant = refant
cfg.minsnr = 5.
cfg.calmode='ap'
cfg.antenna = antsel
cfg.uvrange = uvrange
tl.gaincal(cfg)
else:
print '%s exists' % g1name
return 0 | [
"def",
"run",
"(",
"self",
",",
"refant",
"=",
"[",
"]",
",",
"antsel",
"=",
"[",
"]",
",",
"uvrange",
"=",
"''",
",",
"fluxname",
"=",
"''",
",",
"fluxname_full",
"=",
"''",
",",
"band",
"=",
"''",
",",
"spw0",
"=",
"''",
",",
"spw1",
"=",
"... | Run calibration pipeline. Assumes L-band.
refant is list of antenna name strings (e.g., ['ea10']). default is to calculate based on distance from array center.
antsel is list of ants to use (or reject) (e.g., ['!ea08'])
uvrange is string giving uvrange (e.g., '<5klambda')
fluxname, fluxname_full, and band are used to find flux calibrator info (e.g., '3C48', '0137+331=3C48', 'L').
spw0 is spw selection for gain cal before bp cal (e.g., '0~1:60~75')
spw1 is spw selection for gain cal after bp cal (e.g., '0~1:6~122')
flaglist is the list of flag commands (e.g., ["mode='unflag'", "mode='shadow'", "mode='manual' antenna='ea11'"]) | [
"Run",
"calibration",
"pipeline",
".",
"Assumes",
"L",
"-",
"band",
".",
"refant",
"is",
"list",
"of",
"antenna",
"name",
"strings",
"(",
"e",
".",
"g",
".",
"[",
"ea10",
"]",
")",
".",
"default",
"is",
"to",
"calculate",
"based",
"on",
"distance",
"... | python | train |
lowandrew/OLCTools | coreGenome/coretyper.py | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L100-L139 | def annotatethreads(self):
"""
Use prokka to annotate each strain
"""
# Move the files to subfolders and create objects
self.runmetadata = createobject.ObjectCreation(self)
# Fix headers
self.headers()
printtime('Performing prokka analyses', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.annotate, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
# Create the prokka attribute in the metadata object
setattr(sample, 'prokka', GenObject())
sample.prokka.outputdir = os.path.join(sample.general.outputdirectory, 'prokka')
if not os.path.isdir(sample.prokka.outputdir):
os.makedirs(sample.prokka.outputdir)
# TODO Incorporate MASH/rMLST/user inputted genus, species results in the system call
# Create the system call
# prokka 2014-SEQ-0275.fasta --force --genus Escherichia --species coli --usegenus --addgenes
# --prefix 2014-SEQ-0275 --locustag EC0275 --outputdir /path/to/sequences/2014-SEQ-0275/prokka
sample.prokka.command = 'prokka {} ' \
'--force ' \
'--genus {} ' \
'--species {} ' \
'--usegenus ' \
'--addgenes ' \
'--prefix {} ' \
'--locustag {} ' \
'--outdir {}' \
.format(sample.general.fixedheaders,
self.genus, self.species, sample.name, sample.name, sample.prokka.outputdir)
self.queue.put(sample)
self.queue.join() | [
"def",
"annotatethreads",
"(",
"self",
")",
":",
"# Move the files to subfolders and create objects",
"self",
".",
"runmetadata",
"=",
"createobject",
".",
"ObjectCreation",
"(",
"self",
")",
"# Fix headers",
"self",
".",
"headers",
"(",
")",
"printtime",
"(",
"'Per... | Use prokka to annotate each strain | [
"Use",
"prokka",
"to",
"annotate",
"each",
"strain"
] | python | train |
project-ncl/pnc-cli | pnc_cli/productversions.py | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/productversions.py#L117-L123 | def update_product_version(id, **kwargs):
"""
Update the ProductVersion with ID id with new values.
"""
content = update_product_version_raw(id, **kwargs)
if content:
return utils.format_json(content) | [
"def",
"update_product_version",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"content",
"=",
"update_product_version_raw",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"if",
"content",
":",
"return",
"utils",
".",
"format_json",
"(",
"content",
")"
] | Update the ProductVersion with ID id with new values. | [
"Update",
"the",
"ProductVersion",
"with",
"ID",
"id",
"with",
"new",
"values",
"."
] | python | train |
raghakot/keras-vis | docs/md_autogen.py | https://github.com/raghakot/keras-vis/blob/668b0e11dab93f3487f23c17e07f40554a8939e9/docs/md_autogen.py#L327-L392 | def module2md(self, module):
"""Takes an imported module object and create a Markdown string containing functions and classes.
"""
modname = module.__name__
path = self.get_src_path(module, append_base=False)
path = "[{}]({})".format(path, os.path.join(self.github_link, path))
found = set()
classes = []
line_nos = []
for name, obj in getmembers(module, inspect.isclass):
# handle classes
found.add(name)
if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname:
classes.append(self.class2md(obj))
line_nos.append(self.get_line_no(obj) or 0)
classes = order_by_line_nos(classes, line_nos)
# Since functions can have multiple aliases.
func2names = defaultdict(list)
for name, obj in getmembers(module, inspect.isfunction):
func2names[obj].append(name)
functions = []
line_nos = []
for obj in func2names:
names = func2names[obj]
found.update(names)
# Include if within module or included modules within __init__.py and exclude from global variables
is_module_within_init = '__init__.py' in path and obj.__module__.startswith(modname)
if is_module_within_init:
found.add(obj.__module__.replace(modname + '.', ''))
if hasattr(obj, "__module__") and (obj.__module__ == modname or is_module_within_init):
names = list(filter(lambda name: not name.startswith("_"), names))
if len(names) > 0:
functions.append(self.func2md(obj, names=names))
line_nos.append(self.get_line_no(obj) or 0)
functions = order_by_line_nos(functions, line_nos)
variables = []
line_nos = []
for name, obj in module.__dict__.items():
if not name.startswith("_") and name not in found:
if hasattr(obj, "__module__") and obj.__module__ != modname:
continue
if hasattr(obj, "__name__") and not obj.__name__.startswith(modname):
continue
comments = inspect.getcomments(obj)
comments = ": %s" % comments if comments else ""
variables.append("- **%s**%s" % (name, comments))
line_nos.append(self.get_line_no(obj) or 0)
variables = order_by_line_nos(variables, line_nos)
if variables:
new_list = ["**Global Variables**", "---------------"]
new_list.extend(variables)
variables = new_list
string = MODULE_TEMPLATE.format(path=path,
global_vars="\n".join(variables) if variables else "",
functions="\n".join(functions) if functions else "",
classes="".join(classes) if classes else "")
return string | [
"def",
"module2md",
"(",
"self",
",",
"module",
")",
":",
"modname",
"=",
"module",
".",
"__name__",
"path",
"=",
"self",
".",
"get_src_path",
"(",
"module",
",",
"append_base",
"=",
"False",
")",
"path",
"=",
"\"[{}]({})\"",
".",
"format",
"(",
"path",
... | Takes an imported module object and create a Markdown string containing functions and classes. | [
"Takes",
"an",
"imported",
"module",
"object",
"and",
"create",
"a",
"Markdown",
"string",
"containing",
"functions",
"and",
"classes",
"."
] | python | train |
blockstack/blockstack-core | blockstack/lib/atlas.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L674-L710 | def atlasdb_get_zonefiles_by_block( from_block, to_block, offset, count, name=None, con=None, path=None ):
"""
Look up all zonefile hashes in a block range. Optionally filter by name.
Returns [{'name': ..., 'zonefile_hash': ..., 'block_height': ..., 'txid': ..., 'inv_index': ...}]
"""
ret = None
if count > 100:
return {'error' : 'Count must be less than 100'}
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT name,zonefile_hash,txid,block_height,inv_index FROM zonefiles WHERE block_height >= ? AND block_height <= ?'
args = (from_block, to_block)
if name:
sql += ' AND name = ?'
args += (name,)
sql += 'ORDER BY inv_index LIMIT ? OFFSET ?;'
args += (count, offset)
cur = dbcon.cursor()
res = atlasdb_query_execute( cur, sql, args )
ret = []
for zfinfo in res:
ret.append({
'name' : zfinfo['name'],
'zonefile_hash' : zfinfo['zonefile_hash'],
'block_height' : zfinfo['block_height'],
'txid' : zfinfo['txid'],
'inv_index': zfinfo['inv_index'],
})
return ret | [
"def",
"atlasdb_get_zonefiles_by_block",
"(",
"from_block",
",",
"to_block",
",",
"offset",
",",
"count",
",",
"name",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"ret",
"=",
"None",
"if",
"count",
">",
"100",
":",
"return... | Look up all zonefile hashes in a block range. Optionally filter by name.
Returns [{'name': ..., 'zonefile_hash': ..., 'block_height': ..., 'txid': ..., 'inv_index': ...}] | [
"Look",
"up",
"all",
"zonefile",
"hashes",
"in",
"a",
"block",
"range",
".",
"Optionally",
"filter",
"by",
"name",
".",
"Returns",
"[",
"{",
"name",
":",
"...",
"zonefile_hash",
":",
"...",
"block_height",
":",
"...",
"txid",
":",
"...",
"inv_index",
":"... | python | train |
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/sqldatabasemanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/sqldatabasemanagementservice.py#L218-L246 | def update_firewall_rule(self, server_name, name, start_ip_address,
end_ip_address):
'''
Update a firewall rule for an Azure SQL Database server.
server_name:
Name of the server to set the firewall rule on.
name:
The name of the firewall rule to update.
start_ip_address:
The lowest IP address in the range of the server-level firewall
setting. IP addresses equal to or greater than this can attempt to
connect to the server. The lowest possible IP address is 0.0.0.0.
end_ip_address:
The highest IP address in the range of the server-level firewall
setting. IP addresses equal to or less than this can attempt to
connect to the server. The highest possible IP address is
255.255.255.255.
'''
_validate_not_none('server_name', server_name)
_validate_not_none('name', name)
_validate_not_none('start_ip_address', start_ip_address)
_validate_not_none('end_ip_address', end_ip_address)
return self._perform_put(
self._get_firewall_rules_path(server_name, name),
_SqlManagementXmlSerializer.update_firewall_rule_to_xml(
name, start_ip_address, end_ip_address
)
) | [
"def",
"update_firewall_rule",
"(",
"self",
",",
"server_name",
",",
"name",
",",
"start_ip_address",
",",
"end_ip_address",
")",
":",
"_validate_not_none",
"(",
"'server_name'",
",",
"server_name",
")",
"_validate_not_none",
"(",
"'name'",
",",
"name",
")",
"_val... | Update a firewall rule for an Azure SQL Database server.
server_name:
Name of the server to set the firewall rule on.
name:
The name of the firewall rule to update.
start_ip_address:
The lowest IP address in the range of the server-level firewall
setting. IP addresses equal to or greater than this can attempt to
connect to the server. The lowest possible IP address is 0.0.0.0.
end_ip_address:
The highest IP address in the range of the server-level firewall
setting. IP addresses equal to or less than this can attempt to
connect to the server. The highest possible IP address is
255.255.255.255. | [
"Update",
"a",
"firewall",
"rule",
"for",
"an",
"Azure",
"SQL",
"Database",
"server",
"."
] | python | test |
ray-project/ray | python/ray/utils.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/utils.py#L32-L51 | def format_error_message(exception_message, task_exception=False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message (str): A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message.
"""
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
pass
return "\n".join(lines) | [
"def",
"format_error_message",
"(",
"exception_message",
",",
"task_exception",
"=",
"False",
")",
":",
"lines",
"=",
"exception_message",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"task_exception",
":",
"# For errors that occur inside of tasks, remove lines 1 and 2 which ar... | Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message (str): A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message. | [
"Improve",
"the",
"formatting",
"of",
"an",
"exception",
"thrown",
"by",
"a",
"remote",
"function",
"."
] | python | train |
astropy/photutils | photutils/isophote/geometry.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L509-L537 | def reset_sma(self, step):
"""
Change the direction of semimajor axis growth, from outwards to
inwards.
Parameters
----------
step : float
The current step value.
Returns
-------
sma, new_step : float
The new semimajor axis length and the new step value to
initiate the shrinking of the semimajor axis length. This is
the step value that should be used when calling the
:meth:`~photutils.isophote.EllipseGeometry.update_sma`
method.
"""
if self.linear_growth:
sma = self.sma - step
step = -step
else:
aux = 1. / (1. + step)
sma = self.sma * aux
step = aux - 1.
return sma, step | [
"def",
"reset_sma",
"(",
"self",
",",
"step",
")",
":",
"if",
"self",
".",
"linear_growth",
":",
"sma",
"=",
"self",
".",
"sma",
"-",
"step",
"step",
"=",
"-",
"step",
"else",
":",
"aux",
"=",
"1.",
"/",
"(",
"1.",
"+",
"step",
")",
"sma",
"=",... | Change the direction of semimajor axis growth, from outwards to
inwards.
Parameters
----------
step : float
The current step value.
Returns
-------
sma, new_step : float
The new semimajor axis length and the new step value to
initiate the shrinking of the semimajor axis length. This is
the step value that should be used when calling the
:meth:`~photutils.isophote.EllipseGeometry.update_sma`
method. | [
"Change",
"the",
"direction",
"of",
"semimajor",
"axis",
"growth",
"from",
"outwards",
"to",
"inwards",
"."
] | python | train |
dropbox/stone | stone/frontend/ir_generator.py | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L805-L839 | def _populate_field_defaults(self):
"""
Populate the defaults of each field. This is done in a separate pass
because defaults that specify a union tag require the union to have
been defined.
"""
for namespace in self.api.namespaces.values():
for data_type in namespace.data_types:
# Only struct fields can have default
if not isinstance(data_type, Struct):
continue
for field in data_type.fields:
if not field._ast_node.has_default:
continue
if isinstance(field._ast_node.default, AstTagRef):
default_value = TagRef(
field.data_type, field._ast_node.default.tag)
else:
default_value = field._ast_node.default
if not (field._ast_node.type_ref.nullable and default_value is None):
# Verify that the type of the default value is correct for this field
try:
if field.data_type.name in ('Float32', 'Float64'):
# You can assign int to the default value of float type
# However float type should always have default value in float
default_value = float(default_value)
field.data_type.check(default_value)
except ValueError as e:
raise InvalidSpec(
'Field %s has an invalid default: %s' %
(quote(field._ast_node.name), e),
field._ast_node.lineno, field._ast_node.path)
field.set_default(default_value) | [
"def",
"_populate_field_defaults",
"(",
"self",
")",
":",
"for",
"namespace",
"in",
"self",
".",
"api",
".",
"namespaces",
".",
"values",
"(",
")",
":",
"for",
"data_type",
"in",
"namespace",
".",
"data_types",
":",
"# Only struct fields can have default",
"if",... | Populate the defaults of each field. This is done in a separate pass
because defaults that specify a union tag require the union to have
been defined. | [
"Populate",
"the",
"defaults",
"of",
"each",
"field",
".",
"This",
"is",
"done",
"in",
"a",
"separate",
"pass",
"because",
"defaults",
"that",
"specify",
"a",
"union",
"tag",
"require",
"the",
"union",
"to",
"have",
"been",
"defined",
"."
] | python | train |
aleju/imgaug | imgaug/external/poly_point_isect_py2py3.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/external/poly_point_isect_py2py3.py#L796-L805 | def clear(self):
"""T.clear() -> None. Remove all items from T."""
def _clear(node):
if node is not None:
_clear(node.left)
_clear(node.right)
node.free()
_clear(self._root)
self._count = 0
self._root = None | [
"def",
"clear",
"(",
"self",
")",
":",
"def",
"_clear",
"(",
"node",
")",
":",
"if",
"node",
"is",
"not",
"None",
":",
"_clear",
"(",
"node",
".",
"left",
")",
"_clear",
"(",
"node",
".",
"right",
")",
"node",
".",
"free",
"(",
")",
"_clear",
"... | T.clear() -> None. Remove all items from T. | [
"T",
".",
"clear",
"()",
"-",
">",
"None",
".",
"Remove",
"all",
"items",
"from",
"T",
"."
] | python | valid |
hyperledger/indy-sdk | wrappers/python/indy/anoncreds.py | https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/anoncreds.py#L635-L672 | async def prover_get_credential(wallet_handle: int,
cred_id: str) -> str:
"""
Gets human readable credential by the given id.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_id: Identifier by which requested credential is stored in the wallet
:return: credential json
{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_get_credential: >>> wallet_handle: %r, cred_id: %r",
wallet_handle,
cred_id)
if not hasattr(prover_get_credential, "cb"):
logger.debug("prover_get_credential: Creating callback")
prover_get_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_id = c_char_p(cred_id.encode('utf-8'))
credentials_json = await do_call('indy_prover_get_credential',
c_wallet_handle,
c_cred_id,
prover_get_credential.cb)
res = credentials_json.decode()
logger.debug("prover_get_credential: <<< res: %r", res)
return res | [
"async",
"def",
"prover_get_credential",
"(",
"wallet_handle",
":",
"int",
",",
"cred_id",
":",
"str",
")",
"->",
"str",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"prover_get_credential: >>> wallet_hand... | Gets human readable credential by the given id.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_id: Identifier by which requested credential is stored in the wallet
:return: credential json
{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
} | [
"Gets",
"human",
"readable",
"credential",
"by",
"the",
"given",
"id",
"."
] | python | train |
pypa/pipenv | pipenv/core.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/core.py#L1003-L1105 | def do_lock(
ctx=None,
system=False,
clear=False,
pre=False,
keep_outdated=False,
write=True,
pypi_mirror=None,
):
"""Executes the freeze functionality."""
cached_lockfile = {}
if not pre:
pre = project.settings.get("allow_prereleases")
if keep_outdated:
if not project.lockfile_exists:
raise exceptions.PipenvOptionsError(
"--keep-outdated", ctx=ctx,
message="Pipfile.lock must exist to use --keep-outdated!"
)
cached_lockfile = project.lockfile_content
# Create the lockfile.
lockfile = project._lockfile
# Cleanup lockfile.
for section in ("default", "develop"):
for k, v in lockfile[section].copy().items():
if not hasattr(v, "keys"):
del lockfile[section][k]
# Ensure that develop inherits from default.
dev_packages = project.dev_packages.copy()
dev_packages = overwrite_dev(project.packages, dev_packages)
# Resolve dev-package dependencies, with pip-tools.
for is_dev in [True, False]:
pipfile_section = "dev-packages" if is_dev else "packages"
lockfile_section = "develop" if is_dev else "default"
if project.pipfile_exists:
packages = project.parsed_pipfile.get(pipfile_section, {})
else:
packages = getattr(project, pipfile_section.replace("-", "_"))
if write:
# Alert the user of progress.
click.echo(
u"{0} {1} {2}".format(
crayons.normal(u"Locking"),
crayons.red(u"[{0}]".format(pipfile_section.replace("_", "-"))),
crayons.normal(fix_utf8("dependencies…")),
),
err=True,
)
# Mutates the lockfile
venv_resolve_deps(
packages,
which=which,
project=project,
dev=is_dev,
clear=clear,
pre=pre,
allow_global=system,
pypi_mirror=pypi_mirror,
pipfile=packages,
lockfile=lockfile,
keep_outdated=keep_outdated
)
# Support for --keep-outdated…
if keep_outdated:
from pipenv.vendor.packaging.utils import canonicalize_name
for section_name, section in (
("default", project.packages),
("develop", project.dev_packages),
):
for package_specified in section.keys():
if not is_pinned(section[package_specified]):
canonical_name = canonicalize_name(package_specified)
if canonical_name in cached_lockfile[section_name]:
lockfile[section_name][canonical_name] = cached_lockfile[
section_name
][canonical_name].copy()
for key in ["default", "develop"]:
packages = set(cached_lockfile[key].keys())
new_lockfile = set(lockfile[key].keys())
missing = packages - new_lockfile
for missing_pkg in missing:
lockfile[key][missing_pkg] = cached_lockfile[key][missing_pkg].copy()
# Overwrite any develop packages with default packages.
lockfile["develop"].update(overwrite_dev(lockfile.get("default", {}), lockfile["develop"]))
if write:
project.write_lockfile(lockfile)
click.echo(
"{0}".format(
crayons.normal(
"Updated Pipfile.lock ({0})!".format(
lockfile["_meta"].get("hash", {}).get("sha256")[-6:]
),
bold=True,
)
),
err=True,
)
else:
return lockfile | [
"def",
"do_lock",
"(",
"ctx",
"=",
"None",
",",
"system",
"=",
"False",
",",
"clear",
"=",
"False",
",",
"pre",
"=",
"False",
",",
"keep_outdated",
"=",
"False",
",",
"write",
"=",
"True",
",",
"pypi_mirror",
"=",
"None",
",",
")",
":",
"cached_lockf... | Executes the freeze functionality. | [
"Executes",
"the",
"freeze",
"functionality",
"."
] | python | train |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L4807-L4818 | def same_values(l1,l2):
'''
from elist.elist import *
l1 = [1,2,3,5]
l2 = [0,2,3,4]
same_values(l1,l2)
'''
rslt = []
for i in range(0,l1.__len__()):
if(l1[i]==l2[i]):
rslt.append(l1[i])
return(rslt) | [
"def",
"same_values",
"(",
"l1",
",",
"l2",
")",
":",
"rslt",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"l1",
".",
"__len__",
"(",
")",
")",
":",
"if",
"(",
"l1",
"[",
"i",
"]",
"==",
"l2",
"[",
"i",
"]",
")",
":",
"rslt",
... | from elist.elist import *
l1 = [1,2,3,5]
l2 = [0,2,3,4]
same_values(l1,l2) | [
"from",
"elist",
".",
"elist",
"import",
"*",
"l1",
"=",
"[",
"1",
"2",
"3",
"5",
"]",
"l2",
"=",
"[",
"0",
"2",
"3",
"4",
"]",
"same_values",
"(",
"l1",
"l2",
")"
] | python | valid |
funilrys/PyFunceble | PyFunceble/core.py | https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/core.py#L199-L215 | def _entry_management_url(self):
"""
Manage the loading of the url system.
"""
if (
self.url_file # pylint: disable=no-member
and not self._entry_management_url_download(
self.url_file # pylint: disable=no-member
)
): # pylint: disable=no-member
# The current url_file is not a URL.
# We initiate the filename as the file we have to test.
PyFunceble.INTERN[
"file_to_test"
] = self.url_file | [
"def",
"_entry_management_url",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"url_file",
"# pylint: disable=no-member",
"and",
"not",
"self",
".",
"_entry_management_url_download",
"(",
"self",
".",
"url_file",
"# pylint: disable=no-member",
")",
")",
":",
"# pyli... | Manage the loading of the url system. | [
"Manage",
"the",
"loading",
"of",
"the",
"url",
"system",
"."
] | python | test |
Alignak-monitoring/alignak | alignak/action.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/action.py#L245-L262 | def get_local_environnement(self):
"""
Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
would effect all other checks.
:return: local environment variables
:rtype: dict
"""
# Do not use copy.copy() here, as the resulting copy still
# changes the real environment (it is still a os._Environment
# instance).
local_env = os.environ.copy()
for local_var in self.env:
local_env[local_var] = self.env[local_var]
return local_env | [
"def",
"get_local_environnement",
"(",
"self",
")",
":",
"# Do not use copy.copy() here, as the resulting copy still",
"# changes the real environment (it is still a os._Environment",
"# instance).",
"local_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"for",
"local_v... | Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
would effect all other checks.
:return: local environment variables
:rtype: dict | [
"Mix",
"the",
"environment",
"and",
"the",
"environment",
"variables",
"into",
"a",
"new",
"local",
"environment",
"dictionary"
] | python | train |
kamicut/tilepie | tilepie/proj.py | https://github.com/kamicut/tilepie/blob/103ae2be1c3c4e6f7ec4a3bdd265ffcddee92b96/tilepie/proj.py#L73-L82 | def tile_bbox(self, tile_indices):
"""
Returns the WGS84 bbox of the specified tile
"""
(z, x, y) = tile_indices
topleft = (x * self.tilesize, (y + 1) * self.tilesize)
bottomright = ((x + 1) * self.tilesize, y * self.tilesize)
nw = self.unproject_pixels(topleft, z)
se = self.unproject_pixels(bottomright, z)
return nw + se | [
"def",
"tile_bbox",
"(",
"self",
",",
"tile_indices",
")",
":",
"(",
"z",
",",
"x",
",",
"y",
")",
"=",
"tile_indices",
"topleft",
"=",
"(",
"x",
"*",
"self",
".",
"tilesize",
",",
"(",
"y",
"+",
"1",
")",
"*",
"self",
".",
"tilesize",
")",
"bo... | Returns the WGS84 bbox of the specified tile | [
"Returns",
"the",
"WGS84",
"bbox",
"of",
"the",
"specified",
"tile"
] | python | train |
jasonrbriggs/stomp.py | stomp/transport.py | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L720-L807 | def attempt_connection(self):
"""
Try connecting to the (host, port) tuples specified at construction time.
"""
self.connection_error = False
sleep_exp = 1
connect_count = 0
while self.running and self.socket is None and (
connect_count < self.__reconnect_attempts_max or
self.__reconnect_attempts_max == -1 ):
for host_and_port in self.__host_and_ports:
try:
log.info("Attempting connection to host %s, port %s", host_and_port[0], host_and_port[1])
self.socket = get_socket(host_and_port[0], host_and_port[1], self.__timeout)
self.__enable_keepalive()
need_ssl = self.__need_ssl(host_and_port)
if need_ssl: # wrap socket
ssl_params = self.get_ssl(host_and_port)
if ssl_params['ca_certs']:
cert_validation = ssl.CERT_REQUIRED
else:
cert_validation = ssl.CERT_NONE
try:
tls_context = ssl.create_default_context(cafile=ssl_params['ca_certs'])
except AttributeError:
tls_context = None
if tls_context:
# Wrap the socket for TLS
certfile = ssl_params['cert_file']
keyfile = ssl_params['key_file']
password = ssl_params.get('password')
if certfile and not keyfile:
keyfile = certfile
if certfile:
tls_context.load_cert_chain(certfile, keyfile, password)
if cert_validation is None or cert_validation == ssl.CERT_NONE:
tls_context.check_hostname = False
tls_context.verify_mode = cert_validation
self.socket = tls_context.wrap_socket(self.socket, server_hostname=host_and_port[0])
else:
# Old-style wrap_socket where we don't have a modern SSLContext (so no SNI)
self.socket = ssl.wrap_socket(
self.socket,
keyfile=ssl_params['key_file'],
certfile=ssl_params['cert_file'],
cert_reqs=cert_validation,
ca_certs=ssl_params['ca_certs'],
ssl_version=ssl_params['ssl_version'])
self.socket.settimeout(self.__timeout)
if self.blocking is not None:
self.socket.setblocking(self.blocking)
#
# Validate server cert
#
if need_ssl and ssl_params['cert_validator']:
cert = self.socket.getpeercert()
(ok, errmsg) = ssl_params['cert_validator'](cert, host_and_port[0])
if not ok:
raise SSLError("Server certificate validation failed: %s", errmsg)
self.current_host_and_port = host_and_port
log.info("Established connection to host %s, port %s", host_and_port[0], host_and_port[1])
break
except socket.error:
self.socket = None
connect_count += 1
log.warning("Could not connect to host %s, port %s", host_and_port[0], host_and_port[1], exc_info=1)
if self.socket is None:
sleep_duration = (min(self.__reconnect_sleep_max,
((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))
* math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))
* (1.0 + random.random() * self.__reconnect_sleep_jitter))
sleep_end = monotonic() + sleep_duration
log.debug("Sleeping for %.1f seconds before attempting reconnect", sleep_duration)
while self.running and monotonic() < sleep_end:
time.sleep(0.2)
if sleep_duration < self.__reconnect_sleep_max:
sleep_exp += 1
if not self.socket:
raise exception.ConnectFailedException() | [
"def",
"attempt_connection",
"(",
"self",
")",
":",
"self",
".",
"connection_error",
"=",
"False",
"sleep_exp",
"=",
"1",
"connect_count",
"=",
"0",
"while",
"self",
".",
"running",
"and",
"self",
".",
"socket",
"is",
"None",
"and",
"(",
"connect_count",
"... | Try connecting to the (host, port) tuples specified at construction time. | [
"Try",
"connecting",
"to",
"the",
"(",
"host",
"port",
")",
"tuples",
"specified",
"at",
"construction",
"time",
"."
] | python | train |
pylast/pylast | src/pylast/__init__.py | https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1919-L1927 | def get_top_artists(self, limit=None, cacheable=True):
"""Returns a sequence of the most played artists."""
params = self._get_params()
if limit:
params["limit"] = limit
doc = self._request("geo.getTopArtists", cacheable, params)
return _extract_top_artists(doc, self) | [
"def",
"get_top_artists",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"cacheable",
"=",
"True",
")",
":",
"params",
"=",
"self",
".",
"_get_params",
"(",
")",
"if",
"limit",
":",
"params",
"[",
"\"limit\"",
"]",
"=",
"limit",
"doc",
"=",
"self",
"."... | Returns a sequence of the most played artists. | [
"Returns",
"a",
"sequence",
"of",
"the",
"most",
"played",
"artists",
"."
] | python | train |
zarr-developers/zarr | zarr/core.py | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2027-L2062 | def append(self, data, axis=0):
"""Append `data` to `axis`.
Parameters
----------
data : array_like
Data to be appended.
axis : int
Axis along which to append.
Returns
-------
new_shape : tuple
Notes
-----
The size of all dimensions other than `axis` must match between this
array and `data`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)
>>> z = zarr.array(a, chunks=(1000, 100))
>>> z.shape
(10000, 1000)
>>> z.append(a)
(20000, 1000)
>>> z.append(np.vstack([a, a]), axis=1)
(20000, 2000)
>>> z.shape
(20000, 2000)
"""
return self._write_op(self._append_nosync, data, axis=axis) | [
"def",
"append",
"(",
"self",
",",
"data",
",",
"axis",
"=",
"0",
")",
":",
"return",
"self",
".",
"_write_op",
"(",
"self",
".",
"_append_nosync",
",",
"data",
",",
"axis",
"=",
"axis",
")"
] | Append `data` to `axis`.
Parameters
----------
data : array_like
Data to be appended.
axis : int
Axis along which to append.
Returns
-------
new_shape : tuple
Notes
-----
The size of all dimensions other than `axis` must match between this
array and `data`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)
>>> z = zarr.array(a, chunks=(1000, 100))
>>> z.shape
(10000, 1000)
>>> z.append(a)
(20000, 1000)
>>> z.append(np.vstack([a, a]), axis=1)
(20000, 2000)
>>> z.shape
(20000, 2000) | [
"Append",
"data",
"to",
"axis",
"."
] | python | train |
bxlab/bx-python | lib/bx/motif/pwm.py | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/motif/pwm.py#L95-L107 | def to_logodds_scoring_matrix( self, background=None, correction=DEFAULT_CORRECTION ):
"""
Create a standard logodds scoring matrix.
"""
alphabet_size = len( self.alphabet )
if background is None:
background = ones( alphabet_size, float32 ) / alphabet_size
# Row totals as a one column array
totals = numpy.sum( self.values, 1 )[:,newaxis]
values = log2( maximum( self.values, correction ) ) \
- log2( totals ) \
- log2( maximum( background, correction ) )
return ScoringMatrix.create_from_other( self, values.astype( float32 ) ) | [
"def",
"to_logodds_scoring_matrix",
"(",
"self",
",",
"background",
"=",
"None",
",",
"correction",
"=",
"DEFAULT_CORRECTION",
")",
":",
"alphabet_size",
"=",
"len",
"(",
"self",
".",
"alphabet",
")",
"if",
"background",
"is",
"None",
":",
"background",
"=",
... | Create a standard logodds scoring matrix. | [
"Create",
"a",
"standard",
"logodds",
"scoring",
"matrix",
"."
] | python | train |
fumitoh/modelx | modelx/core/space.py | https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/space.py#L1241-L1265 | def set_attr(self, name, value):
"""Implementation of attribute setting
``space.name = value`` by user script
Called from ``Space.__setattr__``
"""
if not is_valid_name(name):
raise ValueError("Invalid name '%s'" % name)
if name in self.namespace:
if name in self.refs:
if name in self.self_refs:
self.new_ref(name, value)
else:
raise KeyError("Ref '%s' cannot be changed" % name)
elif name in self.cells:
if self.cells[name].is_scalar():
self.cells[name].set_value((), value)
else:
raise AttributeError("Cells '%s' is not a scalar." % name)
else:
raise ValueError
else:
self.new_ref(name, value) | [
"def",
"set_attr",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"not",
"is_valid_name",
"(",
"name",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid name '%s'\"",
"%",
"name",
")",
"if",
"name",
"in",
"self",
".",
"namespace",
":",
"if",
"n... | Implementation of attribute setting
``space.name = value`` by user script
Called from ``Space.__setattr__`` | [
"Implementation",
"of",
"attribute",
"setting"
] | python | valid |
explosion/spaCy | spacy/util.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L484-L504 | def stepping(start, stop, steps):
"""Yield an infinite series of values that step from a start value to a
final value over some number of steps. Each step is (stop-start)/steps.
After the final value is reached, the generator continues yielding that
value.
EXAMPLE:
>>> sizes = stepping(1., 200., 100)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * (200.-1.) / 100
>>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
"""
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
curr = float(start)
while True:
yield clip(curr)
curr += (stop - start) / steps | [
"def",
"stepping",
"(",
"start",
",",
"stop",
",",
"steps",
")",
":",
"def",
"clip",
"(",
"value",
")",
":",
"return",
"max",
"(",
"value",
",",
"stop",
")",
"if",
"(",
"start",
">",
"stop",
")",
"else",
"min",
"(",
"value",
",",
"stop",
")",
"... | Yield an infinite series of values that step from a start value to a
final value over some number of steps. Each step is (stop-start)/steps.
After the final value is reached, the generator continues yielding that
value.
EXAMPLE:
>>> sizes = stepping(1., 200., 100)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * (200.-1.) / 100
>>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100 | [
"Yield",
"an",
"infinite",
"series",
"of",
"values",
"that",
"step",
"from",
"a",
"start",
"value",
"to",
"a",
"final",
"value",
"over",
"some",
"number",
"of",
"steps",
".",
"Each",
"step",
"is",
"(",
"stop",
"-",
"start",
")",
"/",
"steps",
"."
] | python | train |
Clinical-Genomics/scout | scout/server/utils.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/utils.py#L33-L59 | def institute_and_case(store, institute_id, case_name=None):
"""Fetch insitiute and case objects."""
institute_obj = store.institute(institute_id)
if institute_obj is None and institute_id != 'favicon.ico':
flash("Can't find institute: {}".format(institute_id), 'warning')
return abort(404)
if case_name:
if case_name:
case_obj = store.case(institute_id=institute_id, display_name=case_name)
if case_obj is None:
return abort(404)
# validate that user has access to the institute
if not current_user.is_admin:
if institute_id not in current_user.institutes:
if not case_name or not any(inst_id in case_obj['collaborators'] for inst_id in
current_user.institutes):
# you don't have access!!
flash("You don't have acccess to: {}".format(institute_id),'danger')
return abort(403)
# you have access!
if case_name:
return institute_obj, case_obj
else:
return institute_obj | [
"def",
"institute_and_case",
"(",
"store",
",",
"institute_id",
",",
"case_name",
"=",
"None",
")",
":",
"institute_obj",
"=",
"store",
".",
"institute",
"(",
"institute_id",
")",
"if",
"institute_obj",
"is",
"None",
"and",
"institute_id",
"!=",
"'favicon.ico'",... | Fetch insitiute and case objects. | [
"Fetch",
"insitiute",
"and",
"case",
"objects",
"."
] | python | test |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/work_item_tracking/work_item_tracking_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking/work_item_tracking_client.py#L399-L417 | def get_fields(self, project=None, expand=None):
"""GetFields.
Returns information for all fields.
:param str project: Project ID or project name
:param str expand: Use ExtensionFields to include extension fields, otherwise exclude them. Unless the feature flag for this parameter is enabled, extension fields are always included.
:rtype: [WorkItemField]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemField]', self._unwrap_collection(response)) | [
"def",
"get_fields",
"(",
"self",
",",
"project",
"=",
"None",
",",
"expand",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
... | GetFields.
Returns information for all fields.
:param str project: Project ID or project name
:param str expand: Use ExtensionFields to include extension fields, otherwise exclude them. Unless the feature flag for this parameter is enabled, extension fields are always included.
:rtype: [WorkItemField] | [
"GetFields",
".",
"Returns",
"information",
"for",
"all",
"fields",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"param",
"str",
"expand",
":",
"Use",
"ExtensionFields",
"to",
"include",
"extension",
"fields",
"oth... | python | train |
raamana/hiwenet | hiwenet/more_metrics.py | https://github.com/raamana/hiwenet/blob/b12699b3722fd0a6a835e7d7ca4baf58fb181809/hiwenet/more_metrics.py#L52-L78 | def diff_medians_abs(array_one, array_two):
"""
Computes the absolute (symmetric) difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty.
"""
abs_diff_medians = np.abs(diff_medians(array_one, array_two))
return abs_diff_medians | [
"def",
"diff_medians_abs",
"(",
"array_one",
",",
"array_two",
")",
":",
"abs_diff_medians",
"=",
"np",
".",
"abs",
"(",
"diff_medians",
"(",
"array_one",
",",
"array_two",
")",
")",
"return",
"abs_diff_medians"
] | Computes the absolute (symmetric) difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty. | [
"Computes",
"the",
"absolute",
"(",
"symmetric",
")",
"difference",
"in",
"medians",
"between",
"two",
"arrays",
"of",
"values",
"."
] | python | train |
wtsi-hgi/python-common | hgicommon/threading/counting_lock.py | https://github.com/wtsi-hgi/python-common/blob/0376a6b574ff46e82e509e90b6cb3693a3dbb577/hgicommon/threading/counting_lock.py#L55-L61 | def release(self):
""" Wraps Lock.release """
self._lock.release()
with self._stat_lock:
self._locked = False
self._last_released = datetime.now() | [
"def",
"release",
"(",
"self",
")",
":",
"self",
".",
"_lock",
".",
"release",
"(",
")",
"with",
"self",
".",
"_stat_lock",
":",
"self",
".",
"_locked",
"=",
"False",
"self",
".",
"_last_released",
"=",
"datetime",
".",
"now",
"(",
")"
] | Wraps Lock.release | [
"Wraps",
"Lock",
".",
"release"
] | python | valid |
zhmcclient/python-zhmcclient | zhmcclient/_manager.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_manager.py#L457-L524 | def _matches_prop(self, obj, prop_name, prop_match):
"""
Return a boolean indicating whether a resource object matches with
a single property against a property match value.
This is used for client-side filtering.
Depending on the specified property, this method retrieves the resource
properties from the HMC.
Parameters:
obj (BaseResource):
Resource object.
prop_match:
Property match value that is used to match the actual value of
the specified property against, as follows:
- If the match value is a list or tuple, this method is invoked
recursively to find whether one or more match values inthe list
match.
- Else if the property is of string type, its value is matched by
interpreting the match value as a regular expression.
- Else the property value is matched by exact value comparison
with the match value.
Returns:
bool: Boolean indicating whether the resource object matches w.r.t.
the specified property and the match value.
"""
if isinstance(prop_match, (list, tuple)):
# List items are logically ORed, so one matching item suffices.
for pm in prop_match:
if self._matches_prop(obj, prop_name, pm):
return True
else:
# Some lists of resources do not have all properties, for example
# Hipersocket adapters do not have a "card-location" property.
# If a filter property does not exist on a resource, the resource
# does not match.
try:
prop_value = obj.get_property(prop_name)
except KeyError:
return False
if isinstance(prop_value, six.string_types):
# HMC resource property is Enum String or (non-enum) String,
# and is both matched by regexp matching. Ideally, regexp
# matching should only be done for non-enum strings, but
# distinguishing them is not possible given that the client
# has no knowledge about the properties.
# The regexp matching implemented in the HMC requires begin and
# end of the string value to match, even if the '^' for begin
# and '$' for end are not specified in the pattern. The code
# here is consistent with that: We add end matching to the
# pattern, and begin matching is done by re.match()
# automatically.
re_match = prop_match + '$'
m = re.match(re_match, prop_value)
if m:
return True
else:
if prop_value == prop_match:
return True
return False | [
"def",
"_matches_prop",
"(",
"self",
",",
"obj",
",",
"prop_name",
",",
"prop_match",
")",
":",
"if",
"isinstance",
"(",
"prop_match",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# List items are logically ORed, so one matching item suffices.",
"for",
"pm",
... | Return a boolean indicating whether a resource object matches with
a single property against a property match value.
This is used for client-side filtering.
Depending on the specified property, this method retrieves the resource
properties from the HMC.
Parameters:
obj (BaseResource):
Resource object.
prop_match:
Property match value that is used to match the actual value of
the specified property against, as follows:
- If the match value is a list or tuple, this method is invoked
recursively to find whether one or more match values inthe list
match.
- Else if the property is of string type, its value is matched by
interpreting the match value as a regular expression.
- Else the property value is matched by exact value comparison
with the match value.
Returns:
bool: Boolean indicating whether the resource object matches w.r.t.
the specified property and the match value. | [
"Return",
"a",
"boolean",
"indicating",
"whether",
"a",
"resource",
"object",
"matches",
"with",
"a",
"single",
"property",
"against",
"a",
"property",
"match",
"value",
".",
"This",
"is",
"used",
"for",
"client",
"-",
"side",
"filtering",
"."
] | python | train |
matthew-brett/delocate | delocate/wheeltools.py | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/wheeltools.py#L35-L81 | def rewrite_record(bdist_dir):
""" Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file
"""
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist_info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | [
"def",
"rewrite_record",
"(",
"bdist_dir",
")",
":",
"info_dirs",
"=",
"glob",
".",
"glob",
"(",
"pjoin",
"(",
"bdist_dir",
",",
"'*.dist-info'",
")",
")",
"if",
"len",
"(",
"info_dirs",
")",
"!=",
"1",
":",
"raise",
"WheelToolsError",
"(",
"\"Should be ex... | Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file | [
"Rewrite",
"RECORD",
"file",
"with",
"hashes",
"for",
"all",
"files",
"in",
"wheel_sdir"
] | python | train |
DataBiosphere/toil | src/toil/utils/toilStats.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L226-L235 | def refineData(root, options):
""" walk down from the root and gather up the important bits.
"""
worker = root.worker
job = root.jobs
jobTypesTree = root.job_types
jobTypes = []
for childName in jobTypesTree:
jobTypes.append(jobTypesTree[childName])
return root, worker, job, jobTypes | [
"def",
"refineData",
"(",
"root",
",",
"options",
")",
":",
"worker",
"=",
"root",
".",
"worker",
"job",
"=",
"root",
".",
"jobs",
"jobTypesTree",
"=",
"root",
".",
"job_types",
"jobTypes",
"=",
"[",
"]",
"for",
"childName",
"in",
"jobTypesTree",
":",
... | walk down from the root and gather up the important bits. | [
"walk",
"down",
"from",
"the",
"root",
"and",
"gather",
"up",
"the",
"important",
"bits",
"."
] | python | train |
DataBiosphere/dsub | dsub/providers/google_v2.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_v2.py#L577-L583 | def _build_user_environment(self, envs, inputs, outputs, mounts):
"""Returns a dictionary of for the user container environment."""
envs = {env.name: env.value for env in envs}
envs.update(providers_util.get_file_environment_variables(inputs))
envs.update(providers_util.get_file_environment_variables(outputs))
envs.update(providers_util.get_file_environment_variables(mounts))
return envs | [
"def",
"_build_user_environment",
"(",
"self",
",",
"envs",
",",
"inputs",
",",
"outputs",
",",
"mounts",
")",
":",
"envs",
"=",
"{",
"env",
".",
"name",
":",
"env",
".",
"value",
"for",
"env",
"in",
"envs",
"}",
"envs",
".",
"update",
"(",
"provider... | Returns a dictionary of for the user container environment. | [
"Returns",
"a",
"dictionary",
"of",
"for",
"the",
"user",
"container",
"environment",
"."
] | python | valid |
realestate-com-au/dashmat | dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py | https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py#L533-L587 | def get(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.get('nonexistant/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("GET request to %s (body: %s)", path, repr(query))
response = self.http.get(path, self._auth_headers, **query)
return response | [
"def",
"get",
"(",
"self",
",",
"path_segment",
",",
"owner",
"=",
"None",
",",
"app",
"=",
"None",
",",
"sharing",
"=",
"None",
",",
"*",
"*",
"query",
")",
":",
"path",
"=",
"self",
".",
"authority",
"+",
"self",
".",
"_abspath",
"(",
"path_segme... | Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.get('nonexistant/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError | [
"Performs",
"a",
"GET",
"operation",
"from",
"the",
"REST",
"path",
"segment",
"with",
"the",
"given",
"namespace",
"and",
"query",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L56-L76 | def mac_address_table_static_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
static = ET.SubElement(mac_address_table, "static")
mac_address_key = ET.SubElement(static, "mac-address")
mac_address_key.text = kwargs.pop('mac_address')
forward_key = ET.SubElement(static, "forward")
forward_key.text = kwargs.pop('forward')
interface_name_key = ET.SubElement(static, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
vlan_key = ET.SubElement(static, "vlan")
vlan_key.text = kwargs.pop('vlan')
vlanid_key = ET.SubElement(static, "vlanid")
vlanid_key.text = kwargs.pop('vlanid')
interface_type = ET.SubElement(static, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"mac_address_table_static_interface_type",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"mac_address_table",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"mac-address-table\"",
",",
... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
sawcordwell/pymdptoolbox | src/experimental/mdpsql.py | https://github.com/sawcordwell/pymdptoolbox/blob/7c96789cc80e280437005c12065cf70266c11636/src/experimental/mdpsql.py#L50-L88 | def exampleRand(S, A):
"""WARNING: This will delete a database with the same name as 'db'."""
db = "MDP-%sx%s.db" % (S, A)
if os.path.exists(db):
os.remove(db)
conn = sqlite3.connect(db)
with conn:
c = conn.cursor()
cmd = '''
CREATE TABLE info (name TEXT, value INTEGER);
INSERT INTO info VALUES('states', %s);
INSERT INTO info VALUES('actions', %s);''' % (S, A)
c.executescript(cmd)
for a in range(1, A+1):
cmd = '''
CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL);
CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL);
''' % (a, a)
c.executescript(cmd)
cmd = "INSERT INTO reward%s(val) VALUES(?)" % a
c.executemany(cmd, zip(random(S).tolist()))
for s in xrange(1, S+1):
# to be usefully represented as a sparse matrix, the number of
# nonzero entries should be less than 1/3 of dimesion of the
# matrix, so S/3
n = randint(1, S//3)
# timeit [90894] * 20330
# ==> 10000 loops, best of 3: 141 us per loop
# timeit (90894*np.ones(20330, dtype=int)).tolist()
# ==> 1000 loops, best of 3: 548 us per loop
col = (permutation(arange(1,S+1))[0:n]).tolist()
val = random(n)
val = (val / val.sum()).tolist()
cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a
c.executemany(cmd, zip([s] * n, col, val))
cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a)
c.execute(cmd)
# return the name of teh database
return db | [
"def",
"exampleRand",
"(",
"S",
",",
"A",
")",
":",
"db",
"=",
"\"MDP-%sx%s.db\"",
"%",
"(",
"S",
",",
"A",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"db",
")",
":",
"os",
".",
"remove",
"(",
"db",
")",
"conn",
"=",
"sqlite3",
".",
"c... | WARNING: This will delete a database with the same name as 'db'. | [
"WARNING",
":",
"This",
"will",
"delete",
"a",
"database",
"with",
"the",
"same",
"name",
"as",
"db",
"."
] | python | train |
adamhajari/spyre | spyre/server.py | https://github.com/adamhajari/spyre/blob/5dd9f6de072e99af636ab7e7393d249761c56e69/spyre/server.py#L469-L480 | def launch_in_notebook(self, port=9095, width=900, height=600):
"""launch the app within an iframe in ipython notebook"""
from IPython.lib import backgroundjobs as bg
from IPython.display import HTML
jobs = bg.BackgroundJobManager()
jobs.new(self.launch, kw=dict(port=port))
frame = HTML(
'<iframe src=http://localhost:{} width={} height={}></iframe>'
.format(port, width, height)
)
return frame | [
"def",
"launch_in_notebook",
"(",
"self",
",",
"port",
"=",
"9095",
",",
"width",
"=",
"900",
",",
"height",
"=",
"600",
")",
":",
"from",
"IPython",
".",
"lib",
"import",
"backgroundjobs",
"as",
"bg",
"from",
"IPython",
".",
"display",
"import",
"HTML",... | launch the app within an iframe in ipython notebook | [
"launch",
"the",
"app",
"within",
"an",
"iframe",
"in",
"ipython",
"notebook"
] | python | train |
materialsproject/pymatgen | pymatgen/core/structure.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L1377-L1400 | def get_miller_index_from_site_indexes(self, site_ids, round_dp=4,
verbose=True):
"""
Get the Miller index of a plane from a set of sites indexes.
A minimum of 3 sites are required. If more than 3 sites are given
the best plane that minimises the distance to all points will be
calculated.
Args:
site_ids (list of int): A list of site indexes to consider. A
minimum of three site indexes are required. If more than three
sites are provided, the best plane that minimises the distance
to all sites will be calculated.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
"""
return self.lattice.get_miller_index_from_coords(
self.frac_coords[site_ids], coords_are_cartesian=False,
round_dp=round_dp, verbose=verbose) | [
"def",
"get_miller_index_from_site_indexes",
"(",
"self",
",",
"site_ids",
",",
"round_dp",
"=",
"4",
",",
"verbose",
"=",
"True",
")",
":",
"return",
"self",
".",
"lattice",
".",
"get_miller_index_from_coords",
"(",
"self",
".",
"frac_coords",
"[",
"site_ids",
... | Get the Miller index of a plane from a set of sites indexes.
A minimum of 3 sites are required. If more than 3 sites are given
the best plane that minimises the distance to all points will be
calculated.
Args:
site_ids (list of int): A list of site indexes to consider. A
minimum of three site indexes are required. If more than three
sites are provided, the best plane that minimises the distance
to all sites will be calculated.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index. | [
"Get",
"the",
"Miller",
"index",
"of",
"a",
"plane",
"from",
"a",
"set",
"of",
"sites",
"indexes",
"."
] | python | train |
DataKitchen/DKCloudCommand | DKCloudCommand/modules/DKCloudAPIMock.py | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKCloudAPIMock.py#L37-L48 | def delete_orderrun(self, orderrun_id):
"""
:param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if orderrun_id == 'good':
rc.set(rc.DK_SUCCESS, None, None)
else:
rc.set(rc.DK_FAIL, 'ServingDeleteV2: unable to delete OrderRun')
return rc | [
"def",
"delete_orderrun",
"(",
"self",
",",
"orderrun_id",
")",
":",
"rc",
"=",
"DKReturnCode",
"(",
")",
"if",
"orderrun_id",
"==",
"'good'",
":",
"rc",
".",
"set",
"(",
"rc",
".",
"DK_SUCCESS",
",",
"None",
",",
"None",
")",
"else",
":",
"rc",
".",... | :param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode | [
":",
"param",
"self",
":",
"self",
":",
"param",
"orderrun_id",
":",
"string",
";",
"good",
"return",
"a",
"good",
"value",
";",
"bad",
"return",
"a",
"bad",
"value",
":",
"rtype",
":",
"DKReturnCode"
] | python | train |
adaptive-learning/proso-apps | proso_models/models.py | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L577-L603 | def get_item_type_id_from_identifier(self, identifier, item_types=None):
"""
Get an ID of item type for the given identifier. Identifier is a string of
the following form:
<model_prefix>/<model_identifier>
where <model_prefix> is any suffix of database table of the given model
which uniquely specifies the table, and <model_identifier> is
identifier of the object.
Args:
identifier (str): item identifier
item_types (dict): ID -> item type JSON
Returns:
int: ID of the corresponding item type
"""
if item_types is None:
item_types = ItemType.objects.get_all_types()
identifier_type, _ = identifier.split('/')
item_types = [it for it in item_types.values() if it['table'].endswith(identifier_type)]
if len(item_types) > 1:
raise Exception('There is more than one item type for name "{}".'.format(identifier_type))
if len(item_types) == 0:
raise Exception('There is no item type for name "{}".'.format(identifier_type))
return item_types[0]['id'] | [
"def",
"get_item_type_id_from_identifier",
"(",
"self",
",",
"identifier",
",",
"item_types",
"=",
"None",
")",
":",
"if",
"item_types",
"is",
"None",
":",
"item_types",
"=",
"ItemType",
".",
"objects",
".",
"get_all_types",
"(",
")",
"identifier_type",
",",
"... | Get an ID of item type for the given identifier. Identifier is a string of
the following form:
<model_prefix>/<model_identifier>
where <model_prefix> is any suffix of database table of the given model
which uniquely specifies the table, and <model_identifier> is
identifier of the object.
Args:
identifier (str): item identifier
item_types (dict): ID -> item type JSON
Returns:
int: ID of the corresponding item type | [
"Get",
"an",
"ID",
"of",
"item",
"type",
"for",
"the",
"given",
"identifier",
".",
"Identifier",
"is",
"a",
"string",
"of",
"the",
"following",
"form",
":"
] | python | train |
dade-ai/snipy | snipy/io/fileutil.py | https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L92-L103 | def loadfile(filepath, mmap_mode=None):
"""
:param filepath:
:param mmap_mode: {None, ‘r+’, ‘r’, ‘w+’, ‘c’} see. joblib.load
:return:
"""
import joblib
try:
return joblib.load(filepath, mmap_mode=mmap_mode)
except IOError:
return None | [
"def",
"loadfile",
"(",
"filepath",
",",
"mmap_mode",
"=",
"None",
")",
":",
"import",
"joblib",
"try",
":",
"return",
"joblib",
".",
"load",
"(",
"filepath",
",",
"mmap_mode",
"=",
"mmap_mode",
")",
"except",
"IOError",
":",
"return",
"None"
] | :param filepath:
:param mmap_mode: {None, ‘r+’, ‘r’, ‘w+’, ‘c’} see. joblib.load
:return: | [
":",
"param",
"filepath",
":",
":",
"param",
"mmap_mode",
":",
"{",
"None",
"‘r",
"+",
"’",
"‘r’",
"‘w",
"+",
"’",
"‘c’",
"}",
"see",
".",
"joblib",
".",
"load",
":",
"return",
":"
] | python | valid |
celiao/rtsimple | rtsimple/lists.py | https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L58-L73 | def movies_box_office(self, **kwargs):
"""Gets the top box office earning movies from the API.
Sorted by most recent weekend gross ticket sales.
Args:
limit (optional): limits the number of movies returned, default=10
country (optional): localized data for selected country, default="us"
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('movies_box_office')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | [
"def",
"movies_box_office",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"self",
".",
"_get_path",
"(",
"'movies_box_office'",
")",
"response",
"=",
"self",
".",
"_GET",
"(",
"path",
",",
"kwargs",
")",
"self",
".",
"_set_attrs_to_values",
... | Gets the top box office earning movies from the API.
Sorted by most recent weekend gross ticket sales.
Args:
limit (optional): limits the number of movies returned, default=10
country (optional): localized data for selected country, default="us"
Returns:
A dict respresentation of the JSON returned from the API. | [
"Gets",
"the",
"top",
"box",
"office",
"earning",
"movies",
"from",
"the",
"API",
".",
"Sorted",
"by",
"most",
"recent",
"weekend",
"gross",
"ticket",
"sales",
"."
] | python | train |
estnltk/estnltk | estnltk/syntax/syntax_preprocessing.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L95-L153 | def convert_vm_json_to_mrf( vabamorf_json ):
''' Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s>
'''
if not isinstance( vabamorf_json, dict ):
raise Exception(' Expected dict as an input argument! ')
json_sentences = []
# 1) flatten paragraphs
if 'paragraphs' in vabamorf_json:
for pr in vabamorf_json['paragraphs']:
if 'sentences' in pr:
for sent in pr['sentences']:
json_sentences.append( sent )
# 2) flatten sentences
elif 'sentences' in vabamorf_json:
for sent in vabamorf_json['sentences']:
json_sentences.append( sent )
# 3) Iterate over sentences and perform conversion
results = []
for sentJson in json_sentences:
results.append('<s>')
for wordJson in sentJson['words']:
if wordJson['text'] == '<s>' or wordJson['text'] == '</s>':
continue
wordStr = wordJson['text']
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson['analysis']:
root = analysisJson['root']
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson['partofspeech']
clitic = analysisJson['clitic']
form = analysisJson['form']
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if 'analysis' not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results | [
"def",
"convert_vm_json_to_mrf",
"(",
"vabamorf_json",
")",
":",
"if",
"not",
"isinstance",
"(",
"vabamorf_json",
",",
"dict",
")",
":",
"raise",
"Exception",
"(",
"' Expected dict as an input argument! '",
")",
"json_sentences",
"=",
"[",
"]",
"# 1) flatten paragraph... | Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s> | [
"Converts",
"from",
"vabamorf",
"s",
"JSON",
"output",
"given",
"as",
"dict",
"into",
"pre",
"-",
"syntactic",
"mrf",
"format",
"given",
"as",
"a",
"list",
"of",
"lines",
"as",
"in",
"the",
"output",
"of",
"etmrf",
".",
"The",
"aimed",
"format",
"looks",... | python | train |
johnnoone/aioconsul | aioconsul/client/kv_endpoint.py | https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/kv_endpoint.py#L70-L120 | async def get(self, key, *, dc=None, watch=None, consistency=None):
"""Returns the specified key
Parameters:
key (str): Key to fetch
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
ObjectMeta: where value is the queried kv value
Object will look like::
{
"CreateIndex": 100,
"ModifyIndex": 200,
"LockIndex": 200,
"Key": "zip",
"Flags": 0,
"Value": b"my data",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
}
**CreateIndex** is the internal index value that represents when
the entry was created.
**ModifyIndex** is the last index that modified this key.
This index corresponds to the X-Consul-Index header value that is
returned in responses, and it can be used to establish blocking
queries. You can even perform blocking queries against entire
subtrees of the KV store.
**LockIndex** is the number of times this key has successfully been
acquired in a lock. If the lock is held, the Session key provides
the session that owns the lock.
**Key** is simply the full path of the entry.
**Flags** is an opaque unsigned integer that can be attached to each
entry. Clients can choose to use this however makes sense for their
application.
**Value** is a :class:`~aioconsul.typing.Payload` object,
it depends on **Flags**.
"""
response = await self._read(key,
dc=dc,
watch=watch,
consistency=consistency)
result = response.body[0]
result["Value"] = decode_value(result["Value"], result["Flags"])
return consul(result, meta=extract_meta(response.headers)) | [
"async",
"def",
"get",
"(",
"self",
",",
"key",
",",
"*",
",",
"dc",
"=",
"None",
",",
"watch",
"=",
"None",
",",
"consistency",
"=",
"None",
")",
":",
"response",
"=",
"await",
"self",
".",
"_read",
"(",
"key",
",",
"dc",
"=",
"dc",
",",
"watc... | Returns the specified key
Parameters:
key (str): Key to fetch
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
ObjectMeta: where value is the queried kv value
Object will look like::
{
"CreateIndex": 100,
"ModifyIndex": 200,
"LockIndex": 200,
"Key": "zip",
"Flags": 0,
"Value": b"my data",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
}
**CreateIndex** is the internal index value that represents when
the entry was created.
**ModifyIndex** is the last index that modified this key.
This index corresponds to the X-Consul-Index header value that is
returned in responses, and it can be used to establish blocking
queries. You can even perform blocking queries against entire
subtrees of the KV store.
**LockIndex** is the number of times this key has successfully been
acquired in a lock. If the lock is held, the Session key provides
the session that owns the lock.
**Key** is simply the full path of the entry.
**Flags** is an opaque unsigned integer that can be attached to each
entry. Clients can choose to use this however makes sense for their
application.
**Value** is a :class:`~aioconsul.typing.Payload` object,
it depends on **Flags**. | [
"Returns",
"the",
"specified",
"key"
] | python | train |
pypa/pipenv | pipenv/environment.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L51-L71 | def safe_import(self, name):
"""Helper utility for reimporting previously imported modules while inside the env"""
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(iter(
dist for dist in self.base_working_set if dist.project_name == name
), None)
if dist:
dist.activate()
module = importlib.import_module(name)
if name in sys.modules:
try:
six.moves.reload_module(module)
six.moves.reload_module(sys.modules[name])
except TypeError:
del sys.modules[name]
sys.modules[name] = self._modules[name]
return module | [
"def",
"safe_import",
"(",
"self",
",",
"name",
")",
":",
"module",
"=",
"None",
"if",
"name",
"not",
"in",
"self",
".",
"_modules",
":",
"self",
".",
"_modules",
"[",
"name",
"]",
"=",
"importlib",
".",
"import_module",
"(",
"name",
")",
"module",
"... | Helper utility for reimporting previously imported modules while inside the env | [
"Helper",
"utility",
"for",
"reimporting",
"previously",
"imported",
"modules",
"while",
"inside",
"the",
"env"
] | python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/json/serialize.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L626-L633 | def json_encode(obj: Instance, **kwargs) -> str:
"""
Encodes an object to JSON using our custom encoder.
The ``**kwargs`` can be used to pass things like ``'indent'``, for
formatting.
"""
return json.dumps(obj, cls=JsonClassEncoder, **kwargs) | [
"def",
"json_encode",
"(",
"obj",
":",
"Instance",
",",
"*",
"*",
"kwargs",
")",
"->",
"str",
":",
"return",
"json",
".",
"dumps",
"(",
"obj",
",",
"cls",
"=",
"JsonClassEncoder",
",",
"*",
"*",
"kwargs",
")"
] | Encodes an object to JSON using our custom encoder.
The ``**kwargs`` can be used to pass things like ``'indent'``, for
formatting. | [
"Encodes",
"an",
"object",
"to",
"JSON",
"using",
"our",
"custom",
"encoder",
"."
] | python | train |
dpkp/kafka-python | kafka/admin/client.py | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/admin/client.py#L208-L217 | def close(self):
"""Close the KafkaAdminClient connection to the Kafka broker."""
if not hasattr(self, '_closed') or self._closed:
log.info("KafkaAdminClient already closed.")
return
self._metrics.close()
self._client.close()
self._closed = True
log.debug("KafkaAdminClient is now closed.") | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_closed'",
")",
"or",
"self",
".",
"_closed",
":",
"log",
".",
"info",
"(",
"\"KafkaAdminClient already closed.\"",
")",
"return",
"self",
".",
"_metrics",
".",
"close",
"... | Close the KafkaAdminClient connection to the Kafka broker. | [
"Close",
"the",
"KafkaAdminClient",
"connection",
"to",
"the",
"Kafka",
"broker",
"."
] | python | train |
tjcsl/ion | intranet/apps/polls/models.py | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/polls/models.py#L17-L20 | def this_year(self):
""" Get AnnouncementRequests from this school year only. """
start_date, end_date = get_date_range_this_year()
return self.filter(start_time__gte=start_date, start_time__lte=end_date) | [
"def",
"this_year",
"(",
"self",
")",
":",
"start_date",
",",
"end_date",
"=",
"get_date_range_this_year",
"(",
")",
"return",
"self",
".",
"filter",
"(",
"start_time__gte",
"=",
"start_date",
",",
"start_time__lte",
"=",
"end_date",
")"
] | Get AnnouncementRequests from this school year only. | [
"Get",
"AnnouncementRequests",
"from",
"this",
"school",
"year",
"only",
"."
] | python | train |
BlueBrain/hpcbench | hpcbench/benchmark/standard.py | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/standard.py#L296-L303 | def froms(self):
"""Group metrics according to the `from` property.
"""
eax = {}
for name, config in six.iteritems(self._metrics):
from_ = self._get_property(config, 'from', default=self.stdout)
eax.setdefault(from_, {})[name] = config
return eax | [
"def",
"froms",
"(",
"self",
")",
":",
"eax",
"=",
"{",
"}",
"for",
"name",
",",
"config",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_metrics",
")",
":",
"from_",
"=",
"self",
".",
"_get_property",
"(",
"config",
",",
"'from'",
",",
"defaul... | Group metrics according to the `from` property. | [
"Group",
"metrics",
"according",
"to",
"the",
"from",
"property",
"."
] | python | train |
annoviko/pyclustering | pyclustering/core/som_wrapper.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/core/som_wrapper.py#L148-L158 | def som_get_winner_number(som_pointer):
"""!
@brief Returns of number of winner at the last step of learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_winner_number.restype = c_size_t
return ccore.som_get_winner_number(som_pointer) | [
"def",
"som_get_winner_number",
"(",
"som_pointer",
")",
":",
"ccore",
"=",
"ccore_library",
".",
"get",
"(",
")",
"ccore",
".",
"som_get_winner_number",
".",
"restype",
"=",
"c_size_t",
"return",
"ccore",
".",
"som_get_winner_number",
"(",
"som_pointer",
")"
] | !
@brief Returns of number of winner at the last step of learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map. | [
"!"
] | python | valid |
gem/oq-engine | openquake/hazardlib/geo/point.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/point.py#L122-L137 | def azimuth(self, point):
"""
Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float
"""
return geodetic.azimuth(self.longitude, self.latitude,
point.longitude, point.latitude) | [
"def",
"azimuth",
"(",
"self",
",",
"point",
")",
":",
"return",
"geodetic",
".",
"azimuth",
"(",
"self",
".",
"longitude",
",",
"self",
".",
"latitude",
",",
"point",
".",
"longitude",
",",
"point",
".",
"latitude",
")"
] | Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float | [
"Compute",
"the",
"azimuth",
"(",
"in",
"decimal",
"degrees",
")",
"between",
"this",
"point",
"and",
"the",
"given",
"point",
"."
] | python | train |
radjkarl/fancyWidgets | fancywidgets/pyQtBased/FwTabWidget.py | https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/fancywidgets/pyQtBased/FwTabWidget.py#L70-L81 | def _mkAddBtnVisible(self):
"""
Ensure that the Add button is visible also when there are no tabs
"""
if not self._btn_add_height:
# self._btn_add_height = self.cornerWidget().height()
self._btn_add_height = self._cwBtn.height()
if self.count() == 0:
# self.cornerWidget().setMinimumHeight(self._btn_add_height - 8)
self._cwBtn.setMinimumHeight(self._btn_add_height - 8)
self.setMinimumHeight(self._btn_add_height) | [
"def",
"_mkAddBtnVisible",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_btn_add_height",
":",
"# self._btn_add_height = self.cornerWidget().height()",
"self",
".",
"_btn_add_height",
"=",
"self",
".",
"_cwBtn",
".",
"height",
"(",
")",
"if",
"self"... | Ensure that the Add button is visible also when there are no tabs | [
"Ensure",
"that",
"the",
"Add",
"button",
"is",
"visible",
"also",
"when",
"there",
"are",
"no",
"tabs"
] | python | train |
waqasbhatti/astrobase | astrobase/lcproc/lcsfeatures.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/lcsfeatures.py#L332-L483 | def serial_starfeatures(lclist,
outdir,
lc_catalog_pickle,
neighbor_radius_arcsec,
maxobjects=None,
deredden=True,
custom_bandpasses=None,
lcformat='hat-sql',
lcformatdir=None):
'''This drives the `get_starfeatures` function for a collection of LCs.
Parameters
----------
lclist : list of str
The list of light curve file names to process.
outdir : str
The output directory where the results will be placed.
lc_catalog_pickle : str
The path to a catalog containing at a dict with least:
- an object ID array accessible with `dict['objects']['objectid']`
- an LC filename array accessible with `dict['objects']['lcfname']`
- a `scipy.spatial.KDTree` or `cKDTree` object to use for finding
neighbors for each object accessible with `dict['kdtree']`
A catalog pickle of the form needed can be produced using
:py:func:`astrobase.lcproc.catalogs.make_lclist` or
:py:func:`astrobase.lcproc.catalogs.filter_lclist`.
neighbor_radius_arcsec : float
This indicates the radius in arcsec to search for neighbors for this
object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,
and in GAIA.
maxobjects : int
The number of objects to process from `lclist`.
deredden : bool
This controls if the colors and any color classifications will be
dereddened using 2MASS DUST.
custom_bandpasses : dict or None
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. this list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the the first item is the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
list of str
A list of all star features pickles produced.
'''
# make sure to make the output directory if it doesn't exist
if not os.path.exists(outdir):
os.makedirs(outdir)
if maxobjects:
lclist = lclist[:maxobjects]
# read in the kdtree pickle
with open(lc_catalog_pickle, 'rb') as infd:
kdt_dict = pickle.load(infd)
kdt = kdt_dict['kdtree']
objlist = kdt_dict['objects']['objectid']
objlcfl = kdt_dict['objects']['lcfname']
tasks = [(x, outdir, kdt, objlist, objlcfl,
neighbor_radius_arcsec,
deredden, custom_bandpasses,
lcformat, lcformatdir) for x in lclist]
for task in tqdm(tasks):
result = _starfeatures_worker(task)
return result | [
"def",
"serial_starfeatures",
"(",
"lclist",
",",
"outdir",
",",
"lc_catalog_pickle",
",",
"neighbor_radius_arcsec",
",",
"maxobjects",
"=",
"None",
",",
"deredden",
"=",
"True",
",",
"custom_bandpasses",
"=",
"None",
",",
"lcformat",
"=",
"'hat-sql'",
",",
"lcf... | This drives the `get_starfeatures` function for a collection of LCs.
Parameters
----------
lclist : list of str
The list of light curve file names to process.
outdir : str
The output directory where the results will be placed.
lc_catalog_pickle : str
The path to a catalog containing at a dict with least:
- an object ID array accessible with `dict['objects']['objectid']`
- an LC filename array accessible with `dict['objects']['lcfname']`
- a `scipy.spatial.KDTree` or `cKDTree` object to use for finding
neighbors for each object accessible with `dict['kdtree']`
A catalog pickle of the form needed can be produced using
:py:func:`astrobase.lcproc.catalogs.make_lclist` or
:py:func:`astrobase.lcproc.catalogs.filter_lclist`.
neighbor_radius_arcsec : float
This indicates the radius in arcsec to search for neighbors for this
object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,
and in GAIA.
maxobjects : int
The number of objects to process from `lclist`.
deredden : bool
This controls if the colors and any color classifications will be
dereddened using 2MASS DUST.
custom_bandpasses : dict or None
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. this list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the the first item is the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
list of str
A list of all star features pickles produced. | [
"This",
"drives",
"the",
"get_starfeatures",
"function",
"for",
"a",
"collection",
"of",
"LCs",
"."
] | python | valid |
NYUCCL/psiTurk | psiturk/amt_services.py | https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services.py#L436-L453 | def setup_mturk_connection(self):
''' Connect to turk '''
if ((self.aws_access_key_id == 'YourAccessKeyId') or
(self.aws_secret_access_key == 'YourSecretAccessKey')):
print "AWS access key not set in ~/.psiturkconfig; please enter a valid access key."
assert False
if self.is_sandbox:
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
else:
endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
self.mtc = boto3.client('mturk',
region_name='us-east-1',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
endpoint_url=endpoint_url)
return True | [
"def",
"setup_mturk_connection",
"(",
"self",
")",
":",
"if",
"(",
"(",
"self",
".",
"aws_access_key_id",
"==",
"'YourAccessKeyId'",
")",
"or",
"(",
"self",
".",
"aws_secret_access_key",
"==",
"'YourSecretAccessKey'",
")",
")",
":",
"print",
"\"AWS access key not ... | Connect to turk | [
"Connect",
"to",
"turk"
] | python | train |
gusutabopb/aioinflux | aioinflux/iterutils.py | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/iterutils.py#L6-L48 | def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
"""Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
"""
for statement in resp['results']:
if 'series' not in statement:
continue
for series in statement['series']:
if parser is None:
return (x for x in series['values'])
elif 'meta' in inspect.signature(parser).parameters:
meta = {k: series[k] for k in series if k != 'values'}
meta['statement_id'] = statement['statement_id']
return (parser(*x, meta=meta) for x in series['values'])
else:
return (parser(*x) for x in series['values'])
return iter([]) | [
"def",
"iterpoints",
"(",
"resp",
":",
"dict",
",",
"parser",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
")",
"->",
"Iterator",
"[",
"Any",
"]",
":",
"for",
"statement",
"in",
"resp",
"[",
"'results'",
"]",
":",
"if",
"'series'",
"not",
"in"... | Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object | [
"Iterates",
"a",
"response",
"JSON",
"yielding",
"data",
"point",
"by",
"point",
"."
] | python | train |
Stufinite/djangoApiDec | djangoApiDec/djangoApiDec.py | https://github.com/Stufinite/djangoApiDec/blob/8b2d5776b3413b1b850df12a92f30526c05c0a46/djangoApiDec/djangoApiDec.py#L101-L112 | def getJsonFromApi(view, request):
"""Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary
"""
jsonText = view(request)
jsonText = json.loads(jsonText.content.decode('utf-8'))
return jsonText | [
"def",
"getJsonFromApi",
"(",
"view",
",",
"request",
")",
":",
"jsonText",
"=",
"view",
"(",
"request",
")",
"jsonText",
"=",
"json",
".",
"loads",
"(",
"jsonText",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"return",
"jsonText"
] | Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary | [
"Return",
"json",
"from",
"querying",
"Web",
"Api"
] | python | valid |
rosenbrockc/acorn | acorn/logging/database.py | https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L306-L321 | def record(ekey, entry, diff=False):
"""Records the specified entry to the key-value store under the specified
entity key.
Args:
ekey (str): fqdn/uuid of the method/object to store the entry for.
entry (dict): attributes and values gleaned from the execution.
diff (bool): when True, the "c" element of `entry` will be diffed
against previous entries under the same `ekey` if their method
(attribute "m") matches.
"""
taskdb = active_db()
taskdb.record(ekey, entry, diff)
# The task database save method makes sure that we only save as often as
# specified in the configuration file.
taskdb.save() | [
"def",
"record",
"(",
"ekey",
",",
"entry",
",",
"diff",
"=",
"False",
")",
":",
"taskdb",
"=",
"active_db",
"(",
")",
"taskdb",
".",
"record",
"(",
"ekey",
",",
"entry",
",",
"diff",
")",
"# The task database save method makes sure that we only save as often as... | Records the specified entry to the key-value store under the specified
entity key.
Args:
ekey (str): fqdn/uuid of the method/object to store the entry for.
entry (dict): attributes and values gleaned from the execution.
diff (bool): when True, the "c" element of `entry` will be diffed
against previous entries under the same `ekey` if their method
(attribute "m") matches. | [
"Records",
"the",
"specified",
"entry",
"to",
"the",
"key",
"-",
"value",
"store",
"under",
"the",
"specified",
"entity",
"key",
"."
] | python | train |
GNS3/gns3-server | gns3server/ubridge/hypervisor.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/ubridge/hypervisor.py#L155-L183 | def start(self):
"""
Starts the uBridge hypervisor process.
"""
env = os.environ.copy()
if sys.platform.startswith("win"):
# add the Npcap directory to $PATH to force uBridge to use npcap DLL instead of Winpcap (if installed)
system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
if os.path.isdir(system_root):
env["PATH"] = system_root + ';' + env["PATH"]
yield from self._check_ubridge_version(env)
try:
command = self._build_command()
log.info("starting ubridge: {}".format(command))
self._stdout_file = os.path.join(self._working_dir, "ubridge.log")
log.info("logging to {}".format(self._stdout_file))
with open(self._stdout_file, "w", encoding="utf-8") as fd:
self._process = yield from asyncio.create_subprocess_exec(*command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self._working_dir,
env=env)
log.info("ubridge started PID={}".format(self._process.pid))
except (OSError, PermissionError, subprocess.SubprocessError) as e:
ubridge_stdout = self.read_stdout()
log.error("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
raise UbridgeError("Could not start ubridge: {}\n{}".format(e, ubridge_stdout)) | [
"def",
"start",
"(",
"self",
")",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"win\"",
")",
":",
"# add the Npcap directory to $PATH to force uBridge to use npcap DLL instead of Winpcap (if in... | Starts the uBridge hypervisor process. | [
"Starts",
"the",
"uBridge",
"hypervisor",
"process",
"."
] | python | train |
wummel/linkchecker | linkcheck/bookmarks/safari.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/bookmarks/safari.py#L34-L49 | def find_bookmark_file ():
"""Return the bookmark file of the Default profile.
Returns absolute filename if found, or empty string if no bookmark file
could be found.
"""
if sys.platform != 'darwin':
return u""
try:
dirname = get_profile_dir()
if os.path.isdir(dirname):
fname = os.path.join(dirname, u"Bookmarks.plist")
if os.path.isfile(fname):
return fname
except Exception:
pass
return u"" | [
"def",
"find_bookmark_file",
"(",
")",
":",
"if",
"sys",
".",
"platform",
"!=",
"'darwin'",
":",
"return",
"u\"\"",
"try",
":",
"dirname",
"=",
"get_profile_dir",
"(",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
":",
"fname",
"=",
... | Return the bookmark file of the Default profile.
Returns absolute filename if found, or empty string if no bookmark file
could be found. | [
"Return",
"the",
"bookmark",
"file",
"of",
"the",
"Default",
"profile",
".",
"Returns",
"absolute",
"filename",
"if",
"found",
"or",
"empty",
"string",
"if",
"no",
"bookmark",
"file",
"could",
"be",
"found",
"."
] | python | train |
agile-geoscience/striplog | striplog/striplog.py | https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/striplog.py#L1869-L1900 | def merge_overlaps(self):
"""
Merges overlaps by merging overlapping Intervals.
The function takes no arguments and returns ``None``. It operates on
the striplog 'in place'
TODO: This function will not work if any interval overlaps more than
one other intervals at either its base or top.
"""
overlaps = np.array(self.find_overlaps(index=True))
if not overlaps.any():
return
for overlap in overlaps:
before = self[overlap].copy()
after = self[overlap + 1].copy()
# Get rid of the before and after pieces.
del self[overlap]
del self[overlap]
# Make the new piece.
new_segment = before.merge(after)
# Insert it.
self.__insert(overlap, new_segment)
overlaps += 1
return | [
"def",
"merge_overlaps",
"(",
"self",
")",
":",
"overlaps",
"=",
"np",
".",
"array",
"(",
"self",
".",
"find_overlaps",
"(",
"index",
"=",
"True",
")",
")",
"if",
"not",
"overlaps",
".",
"any",
"(",
")",
":",
"return",
"for",
"overlap",
"in",
"overla... | Merges overlaps by merging overlapping Intervals.
The function takes no arguments and returns ``None``. It operates on
the striplog 'in place'
TODO: This function will not work if any interval overlaps more than
one other intervals at either its base or top. | [
"Merges",
"overlaps",
"by",
"merging",
"overlapping",
"Intervals",
"."
] | python | test |
genialis/resolwe | resolwe/elastic/pagination.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/pagination.py#L47-L54 | def get_offset(self, request):
"""Return offset parameter."""
try:
return _positive_int(
get_query_param(request, self.offset_query_param),
)
except (KeyError, ValueError):
return 0 | [
"def",
"get_offset",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"return",
"_positive_int",
"(",
"get_query_param",
"(",
"request",
",",
"self",
".",
"offset_query_param",
")",
",",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
":",
"retur... | Return offset parameter. | [
"Return",
"offset",
"parameter",
"."
] | python | train |
nickmilon/Hellas | Hellas/Sparta.py | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Sparta.py#L308-L317 | def chunks_str(str, n, separator="\n", fill_blanks_last=True):
"""returns lines with max n characters
:Example:
>>> print (chunks_str('123456X', 3))
123
456
X
"""
return separator.join(chunks(str, n)) | [
"def",
"chunks_str",
"(",
"str",
",",
"n",
",",
"separator",
"=",
"\"\\n\"",
",",
"fill_blanks_last",
"=",
"True",
")",
":",
"return",
"separator",
".",
"join",
"(",
"chunks",
"(",
"str",
",",
"n",
")",
")"
] | returns lines with max n characters
:Example:
>>> print (chunks_str('123456X', 3))
123
456
X | [
"returns",
"lines",
"with",
"max",
"n",
"characters"
] | python | train |
wonambi-python/wonambi | wonambi/widgets/traces.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/traces.py#L400-L414 | def create_chan_labels(self):
"""Create the channel labels, but don't plot them yet.
Notes
-----
It's necessary to have the width of the labels, so that we can adjust
the main scene.
"""
self.idx_label = []
for one_grp in self.parent.channels.groups:
for one_label in one_grp['chan_to_plot']:
item = QGraphicsSimpleTextItem(one_label)
item.setBrush(QBrush(QColor(one_grp['color'])))
item.setFlag(QGraphicsItem.ItemIgnoresTransformations)
self.idx_label.append(item) | [
"def",
"create_chan_labels",
"(",
"self",
")",
":",
"self",
".",
"idx_label",
"=",
"[",
"]",
"for",
"one_grp",
"in",
"self",
".",
"parent",
".",
"channels",
".",
"groups",
":",
"for",
"one_label",
"in",
"one_grp",
"[",
"'chan_to_plot'",
"]",
":",
"item",... | Create the channel labels, but don't plot them yet.
Notes
-----
It's necessary to have the width of the labels, so that we can adjust
the main scene. | [
"Create",
"the",
"channel",
"labels",
"but",
"don",
"t",
"plot",
"them",
"yet",
"."
] | python | train |
yunojuno/elasticsearch-django | elasticsearch_django/models.py | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L350-L369 | def index_search_document(self, *, index):
"""
Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s.
"""
cache_key = self.search_document_cache_key
new_doc = self.as_search_document(index=index)
cached_doc = cache.get(cache_key)
if new_doc == cached_doc:
logger.debug("Search document for %r is unchanged, ignoring update.", self)
return []
cache.set(cache_key, new_doc, timeout=get_setting("cache_expiry", 60))
get_client().index(
index=index, doc_type=self.search_doc_type, body=new_doc, id=self.pk
) | [
"def",
"index_search_document",
"(",
"self",
",",
"*",
",",
"index",
")",
":",
"cache_key",
"=",
"self",
".",
"search_document_cache_key",
"new_doc",
"=",
"self",
".",
"as_search_document",
"(",
"index",
"=",
"index",
")",
"cached_doc",
"=",
"cache",
".",
"g... | Create or replace search document in named index.
Checks the local cache to see if the document has changed,
and if not aborts the update, else pushes to ES, and then
resets the local cache. Cache timeout is set as "cache_expiry"
in the settings, and defaults to 60s. | [
"Create",
"or",
"replace",
"search",
"document",
"in",
"named",
"index",
"."
] | python | train |
infothrill/python-dyndnsc | dyndnsc/common/load.py | https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/common/load.py#L9-L24 | def load_class(module_name, class_name):
"""Return class object specified by module name and class name.
Return None if module failed to be imported.
:param module_name: string module name
:param class_name: string class name
"""
try:
plugmod = import_module(module_name)
except Exception as exc:
warn("Importing built-in plugin %s.%s raised an exception: %r" %
(module_name, class_name, repr(exc)), ImportWarning)
return None
else:
return getattr(plugmod, class_name) | [
"def",
"load_class",
"(",
"module_name",
",",
"class_name",
")",
":",
"try",
":",
"plugmod",
"=",
"import_module",
"(",
"module_name",
")",
"except",
"Exception",
"as",
"exc",
":",
"warn",
"(",
"\"Importing built-in plugin %s.%s raised an exception: %r\"",
"%",
"(",... | Return class object specified by module name and class name.
Return None if module failed to be imported.
:param module_name: string module name
:param class_name: string class name | [
"Return",
"class",
"object",
"specified",
"by",
"module",
"name",
"and",
"class",
"name",
"."
] | python | train |
JoelBender/bacpypes | py34/bacpypes/primitivedata.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py34/bacpypes/primitivedata.py#L377-L385 | def Pop(self):
"""Remove the tag from the front of the list and return it."""
if self.tagList:
tag = self.tagList[0]
del self.tagList[0]
else:
tag = None
return tag | [
"def",
"Pop",
"(",
"self",
")",
":",
"if",
"self",
".",
"tagList",
":",
"tag",
"=",
"self",
".",
"tagList",
"[",
"0",
"]",
"del",
"self",
".",
"tagList",
"[",
"0",
"]",
"else",
":",
"tag",
"=",
"None",
"return",
"tag"
] | Remove the tag from the front of the list and return it. | [
"Remove",
"the",
"tag",
"from",
"the",
"front",
"of",
"the",
"list",
"and",
"return",
"it",
"."
] | python | train |
ejhigson/nestcheck | nestcheck/write_polychord_output.py | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/write_polychord_output.py#L118-L154 | def run_dead_birth_array(run, **kwargs):
"""Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
"""
nestcheck.ns_run_utils.check_ns_run(run, **kwargs)
threads = nestcheck.ns_run_utils.get_run_threads(run)
samp_arrays = []
ndim = run['theta'].shape[1]
for th in threads:
samp_arr = np.zeros((th['theta'].shape[0], ndim + 2))
samp_arr[:, :ndim] = th['theta']
samp_arr[:, ndim] = th['logl']
samp_arr[1:, ndim + 1] = th['logl'][:-1]
if th['thread_min_max'][0, 0] == -np.inf:
samp_arr[0, ndim + 1] = -1e30
else:
samp_arr[0, ndim + 1] = th['thread_min_max'][0, 0]
samp_arrays.append(samp_arr)
samples = np.vstack(samp_arrays)
samples = samples[np.argsort(samples[:, ndim]), :]
return samples | [
"def",
"run_dead_birth_array",
"(",
"run",
",",
"*",
"*",
"kwargs",
")",
":",
"nestcheck",
".",
"ns_run_utils",
".",
"check_ns_run",
"(",
"run",
",",
"*",
"*",
"kwargs",
")",
"threads",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_run_threads",
"(",
"r... | Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl | [
"Converts",
"input",
"run",
"into",
"an",
"array",
"of",
"the",
"format",
"of",
"a",
"PolyChord",
"<root",
">",
"_dead",
"-",
"birth",
".",
"txt",
"file",
".",
"Note",
"that",
"this",
"in",
"fact",
"includes",
"live",
"points",
"remaining",
"at",
"termin... | python | train |
vinci1it2000/schedula | schedula/utils/blue.py | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/blue.py#L49-L87 | def register(self, obj=None, memo=None):
"""
Creates a :class:`Blueprint.cls` and calls each deferred operation.
:param obj:
The initialized object with which to call all deferred operations.
:type obj: object
:param memo:
A dictionary to cache registered Blueprints.
:type memo: dict[Blueprint,T]
:return:
The initialized object.
:rtype: Blueprint.cls | Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_func(len, ['lenght'])
>>> blue.register()
<schedula.dispatcher.Dispatcher object at ...>
"""
if memo and self in memo:
obj = memo[self]
if obj is not None:
return obj
if obj is None:
obj = _safe_call(self.cls, *self.args, memo=memo, **self.kwargs)
for method, kwargs in self.deferred:
_safe_call(getattr(obj, method), memo=memo, **kwargs)
if memo is not None:
memo[self] = obj
return obj | [
"def",
"register",
"(",
"self",
",",
"obj",
"=",
"None",
",",
"memo",
"=",
"None",
")",
":",
"if",
"memo",
"and",
"self",
"in",
"memo",
":",
"obj",
"=",
"memo",
"[",
"self",
"]",
"if",
"obj",
"is",
"not",
"None",
":",
"return",
"obj",
"if",
"ob... | Creates a :class:`Blueprint.cls` and calls each deferred operation.
:param obj:
The initialized object with which to call all deferred operations.
:type obj: object
:param memo:
A dictionary to cache registered Blueprints.
:type memo: dict[Blueprint,T]
:return:
The initialized object.
:rtype: Blueprint.cls | Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_func(len, ['lenght'])
>>> blue.register()
<schedula.dispatcher.Dispatcher object at ...> | [
"Creates",
"a",
":",
"class",
":",
"Blueprint",
".",
"cls",
"and",
"calls",
"each",
"deferred",
"operation",
"."
] | python | train |
edx/edx-enterprise | enterprise/api/v1/serializers.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/serializers.py#L438-L459 | def to_representation(self, instance):
"""
Return the updated program data dictionary.
Arguments:
instance (dict): The program data.
Returns:
dict: The updated program data.
"""
updated_program = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_program['enrollment_url'] = enterprise_customer_catalog.get_program_enrollment_url(
updated_program['uuid']
)
for course in updated_program['courses']:
course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(course['key'])
for course_run in course['course_runs']:
course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run['key']
)
return updated_program | [
"def",
"to_representation",
"(",
"self",
",",
"instance",
")",
":",
"updated_program",
"=",
"copy",
".",
"deepcopy",
"(",
"instance",
")",
"enterprise_customer_catalog",
"=",
"self",
".",
"context",
"[",
"'enterprise_customer_catalog'",
"]",
"updated_program",
"[",
... | Return the updated program data dictionary.
Arguments:
instance (dict): The program data.
Returns:
dict: The updated program data. | [
"Return",
"the",
"updated",
"program",
"data",
"dictionary",
"."
] | python | valid |
angr/angr | angr/analyses/binary_optimizer.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/binary_optimizer.py#L616-L648 | def _dead_assignment_elimination(self, function, data_graph): #pylint:disable=unused-argument
"""
Remove assignments to registers that has no consumers, but immediately killed.
BROKEN - DO NOT USE IT
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None
"""
register_pvs = set()
for node in data_graph.nodes():
if isinstance(node.variable, SimRegisterVariable) and \
node.variable.reg is not None and \
node.variable.reg < 40:
register_pvs.add(node)
for reg in register_pvs:
# does it have a consumer?
out_edges = data_graph.out_edges(reg, data=True)
consumers = [ ]
killers = [ ]
for _, _, data in out_edges:
if 'type' in data and data['type'] == 'kill':
killers.append(data)
else:
consumers.append(data)
if not consumers and killers:
# we can remove the assignment!
da = DeadAssignment(reg)
self.dead_assignments.append(da) | [
"def",
"_dead_assignment_elimination",
"(",
"self",
",",
"function",
",",
"data_graph",
")",
":",
"#pylint:disable=unused-argument",
"register_pvs",
"=",
"set",
"(",
")",
"for",
"node",
"in",
"data_graph",
".",
"nodes",
"(",
")",
":",
"if",
"isinstance",
"(",
... | Remove assignments to registers that has no consumers, but immediately killed.
BROKEN - DO NOT USE IT
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None | [
"Remove",
"assignments",
"to",
"registers",
"that",
"has",
"no",
"consumers",
"but",
"immediately",
"killed",
"."
] | python | train |
MrKiven/Todo.py | Todos/todo.py | https://github.com/MrKiven/Todo.py/blob/945090ce29daad740b9adf34ac8e859026fed3d5/Todos/todo.py#L54-L73 | def init(self):
"""init `todo` file
if file exists, then initialization self.todos
and record current max index of todos
: when add a new todo, the `idx` via only `self.current_max_idx + 1`
"""
if os.path.isdir(self.path):
raise InvalidTodoFile
if os.path.exists(self.path):
with open(self.path, 'r') as f:
tls = [tl.strip() for tl in f if tl]
todos = map(_todo_from_file, tls)
self.todos = todos
for todo in todos:
if self.current_max_idx < todo['idx']:
self.current_max_idx = todo['idx']
else:
logger.warning('No todo files found, initialization a empty todo file')
with open(self.path, 'w') as f:
f.flush() | [
"def",
"init",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"path",
")",
":",
"raise",
"InvalidTodoFile",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"path",
")",
":",
"with",
"open",
"(",
"self",
... | init `todo` file
if file exists, then initialization self.todos
and record current max index of todos
: when add a new todo, the `idx` via only `self.current_max_idx + 1` | [
"init",
"todo",
"file",
"if",
"file",
"exists",
"then",
"initialization",
"self",
".",
"todos",
"and",
"record",
"current",
"max",
"index",
"of",
"todos",
":",
"when",
"add",
"a",
"new",
"todo",
"the",
"idx",
"via",
"only",
"self",
".",
"current_max_idx",
... | python | train |
ccubed/PyMoe | Pymoe/Kitsu/user.py | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/user.py#L11-L29 | def search(self, term):
"""
Search for a user by name.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None
"""
r = requests.get(self.apiurl + "/users", params={"filter[name]": term}, headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['meta']['count']:
return SearchWrapper(jsd['data'], jsd['links']['next'] if 'next' in jsd['links'] else None, self.header)
else:
return None | [
"def",
"search",
"(",
"self",
",",
"term",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"apiurl",
"+",
"\"/users\"",
",",
"params",
"=",
"{",
"\"filter[name]\"",
":",
"term",
"}",
",",
"headers",
"=",
"self",
".",
"header",
")",
"i... | Search for a user by name.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None | [
"Search",
"for",
"a",
"user",
"by",
"name",
"."
] | python | train |
serge-sans-paille/pythran | pythran/tables.py | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/tables.py#L161-L170 | def update_effects(self, node):
"""
Combiner when we update the first argument of a function.
It turn type of first parameter in combination of all others
parameters types.
"""
return [self.combine(node.args[0], node_args_k, register=True,
aliasing_type=True)
for node_args_k in node.args[1:]] | [
"def",
"update_effects",
"(",
"self",
",",
"node",
")",
":",
"return",
"[",
"self",
".",
"combine",
"(",
"node",
".",
"args",
"[",
"0",
"]",
",",
"node_args_k",
",",
"register",
"=",
"True",
",",
"aliasing_type",
"=",
"True",
")",
"for",
"node_args_k",... | Combiner when we update the first argument of a function.
It turn type of first parameter in combination of all others
parameters types. | [
"Combiner",
"when",
"we",
"update",
"the",
"first",
"argument",
"of",
"a",
"function",
"."
] | python | train |
phaethon/kamene | kamene/contrib/gsm_um.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L941-L951 | def rrInitialisationRequest():
"""RR Initialisation Request Section 9.1.28.a"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x3c) # 00111100
c = CiphKeySeqNrAndMacModeAndChannelCodingRequest()
e = MobileStationClassmark2()
f = Tlli()
g = ChannelRequestDescription()
h = GprsMeasurementResults()
packet = a / b / c / e / f / g / h
return packet | [
"def",
"rrInitialisationRequest",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x6",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x3c",
")",
"# 00111100",
"c",
"=",
"CiphKeySeqNrAndMacModeAndChannelCodingRequest",
"(",
")",
"e",
"=",
"MobileStat... | RR Initialisation Request Section 9.1.28.a | [
"RR",
"Initialisation",
"Request",
"Section",
"9",
".",
"1",
".",
"28",
".",
"a"
] | python | train |
def _calculate_states(self, solution, t, step, int_step):
    """!
    @brief Calculates new states for neurons using differential calculus. Returns new states for neurons.

    @param[in] solution (solve_type): Type solver of the differential equation (not referenced by this implementation - odeint is always used).
    @param[in] t (double): Current time of simulation.
    @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.
    @param[in] int_step (double): Step differentiation that is used for solving differential equation.

    @return (list) New states for neurons.

    """

    # The integration grid is the same for every oscillator - build it once.
    time_points = numpy.arange(t - step, t, int_step)

    next_states = [0] * self._num_osc
    for oscillator in range(self._num_osc):
        trajectory = odeint(self._neuron_states, self._states[oscillator], time_points, (oscillator, ))
        next_states[oscillator] = trajectory[-1][0]

    # Publish the outputs that were buffered while states were integrated.
    self._outputs = list(self._outputs_buffer)
    return next_states
"def",
"_calculate_states",
"(",
"self",
",",
"solution",
",",
"t",
",",
"step",
",",
"int_step",
")",
":",
"next_states",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"_num_osc",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_num_osc",
",",
... | !
@brief Calculates new states for neurons using differential calculus. Returns new states for neurons.
@param[in] solution (solve_type): Type solver of the differential equation.
@param[in] t (double): Current time of simulation.
@param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.
@param[in] int_step (double): Step differentiation that is used for solving differential equation.
@return (list) New states for neurons. | [
"!"
] | python | valid |
def __cloudflare_request(self, *, account, path, args=None):
    """Helper function to interact with the CloudFlare API.

    Args:
        account (:obj:`CloudFlareAccount`): CloudFlare Account object
        path (`str`): URL endpoint to communicate with
        args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume

    Returns:
        `dict`

    Raises:
        CloudFlareError: If the API responds with a non-200 status code
    """
    args = args or {}
    account_id = account.account_id

    # Lazily create one authenticated session per account and reuse it on
    # subsequent calls.
    if not self.cloudflare_initialized[account_id]:
        session = requests.Session()
        session.headers.update({
            'X-Auth-Email': account.email,
            'X-Auth-Key': account.api_key,
            'Content-Type': 'application/json'
        })
        self.cloudflare_session[account_id] = session
        self.cloudflare_initialized[account_id] = True

    # CloudFlare paginates; request the maximum page size unless overridden.
    args.setdefault('per_page', 100)

    response = self.cloudflare_session[account_id].get(account.endpoint + path, params=args)
    if response.status_code != 200:
        raise CloudFlareError('Request failed: {}'.format(response.text))

    return response.json()
"def",
"__cloudflare_request",
"(",
"self",
",",
"*",
",",
"account",
",",
"path",
",",
"args",
"=",
"None",
")",
":",
"if",
"not",
"args",
":",
"args",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"cloudflare_initialized",
"[",
"account",
".",
"account_id... | Helper function to interact with the CloudFlare API.
Args:
account (:obj:`CloudFlareAccount`): CloudFlare Account object
path (`str`): URL endpoint to communicate with
args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume
Returns:
`dict` | [
"Helper",
"function",
"to",
"interact",
"with",
"the",
"CloudFlare",
"API",
"."
] | python | train |
def fetch(cert, issuer, hash_algo='sha1', nonce=True, user_agent=None, timeout=10):
    """
    Fetches an OCSP response for a certificate

    :param cert:
        An asn1crypto.x509.Certificate object to get an OCSP reponse for

    :param issuer:
        An asn1crypto.x509.Certificate object that is the issuer of cert

    :param hash_algo:
        A unicode string of "sha1" or "sha256"

    :param nonce:
        A boolean - if the nonce extension should be used to prevent replay
        attacks

    :param user_agent:
        The HTTP user agent to use when requesting the OCSP response. If None,
        a default is used in the format "certvalidation 1.0.0".

    :param timeout:
        The number of seconds after which an HTTP request should timeout

    :raises:
        urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
        socket.error - when a socket error occurs
        errors.OCSPValidationError - when the response nonce does not match
        the request nonce, or when the certificate contains no OCSP URLs

    :return:
        An asn1crypto.ocsp.OCSPResponse object
    """

    # -- parameter validation -------------------------------------------------
    if not isinstance(cert, x509.Certificate):
        raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))

    if not isinstance(issuer, x509.Certificate):
        raise TypeError('issuer must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(issuer))

    if hash_algo not in set(['sha1', 'sha256']):
        raise ValueError('hash_algo must be one of "sha1", "sha256", not %s' % repr(hash_algo))

    if not isinstance(nonce, bool):
        raise TypeError('nonce must be a bool, not %s' % type_name(nonce))

    if user_agent is None:
        user_agent = 'certvalidator %s' % __version__
    elif not isinstance(user_agent, str_cls):
        raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))

    # -- build the OCSP request -----------------------------------------------
    # The CertID identifies the certificate to the responder via hashes of the
    # issuer name/key plus the serial number.
    cert_id = ocsp.CertId({
        'hash_algorithm': algos.DigestAlgorithm({'algorithm': hash_algo}),
        'issuer_name_hash': getattr(cert.issuer, hash_algo),
        'issuer_key_hash': getattr(issuer.public_key, hash_algo),
        'serial_number': cert.serial_number,
    })

    request = ocsp.Request({
        'req_cert': cert_id,
    })
    tbs_request = ocsp.TBSRequest({
        'request_list': ocsp.Requests([request]),
    })

    if nonce:
        # A random nonce ties the response to this specific request so a
        # captured response cannot be replayed later.
        nonce_extension = ocsp.TBSRequestExtension({
            'extn_id': 'nonce',
            'critical': False,
            'extn_value': core.OctetString(core.OctetString(os.urandom(16)).dump())
        })
        tbs_request['request_extensions'] = ocsp.TBSRequestExtensions([nonce_extension])

    ocsp_request = ocsp.OCSPRequest({
        'tbs_request': tbs_request,
    })

    # -- query each OCSP URL in turn, returning the first usable response -----
    last_e = None
    for ocsp_url in cert.ocsp_urls:
        try:
            # Renamed from "request" to avoid shadowing the ocsp.Request above
            http_request = Request(ocsp_url)
            http_request.add_header('Accept', 'application/ocsp-response')
            http_request.add_header('Content-Type', 'application/ocsp-request')
            http_request.add_header('User-Agent', user_agent)
            response = urlopen(http_request, ocsp_request.dump(), timeout)

            ocsp_response = ocsp.OCSPResponse.load(response.read())
            request_nonce = ocsp_request.nonce_value
            response_nonce = ocsp_response.nonce_value
            if request_nonce and response_nonce and request_nonce.native != response_nonce.native:
                raise errors.OCSPValidationError(
                    'Unable to verify OCSP response since the request and response nonces do not match'
                )
            return ocsp_response

        except URLError as e:
            # Remember the failure and fall through to the next URL, if any
            last_e = e

    # BUG FIX: if the certificate contains no OCSP URLs the loop never runs
    # and the original "raise last_e" raised None, producing an opaque
    # "TypeError: exceptions must derive from BaseException". Raise a
    # descriptive error instead.
    if last_e is None:
        raise errors.OCSPValidationError(
            'Unable to fetch an OCSP response since the certificate does not contain any OCSP URLs'
        )

    raise last_e
"def",
"fetch",
"(",
"cert",
",",
"issuer",
",",
"hash_algo",
"=",
"'sha1'",
",",
"nonce",
"=",
"True",
",",
"user_agent",
"=",
"None",
",",
"timeout",
"=",
"10",
")",
":",
"if",
"not",
"isinstance",
"(",
"cert",
",",
"x509",
".",
"Certificate",
")",... | Fetches an OCSP response for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get an OCSP reponse for
:param issuer:
An asn1crypto.x509.Certificate object that is the issuer of cert
:param hash_algo:
A unicode string of "sha1" or "sha256"
:param nonce:
A boolean - if the nonce extension should be used to prevent replay
attacks
:param user_agent:
The HTTP user agent to use when requesting the OCSP response. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
An asn1crypto.ocsp.OCSPResponse object | [
"Fetches",
"an",
"OCSP",
"response",
"for",
"a",
"certificate"
] | python | train |
def _get(cls, resource_id, parent_id, grandparent_id):
    """Retrieves the required resource."""
    endpoint = cls._endpoint.format(
        resource_id=resource_id or "",
        parent_id=parent_id or "",
        grandparent_id=grandparent_id or "",
    )
    raw_data = cls._get_client().get_resource(endpoint)
    # Preserve the ancestry identifiers so the model can rebuild its
    # endpoint later on.
    raw_data["parentResourceID"] = parent_id
    raw_data["grandParentResourceID"] = grandparent_id
    return cls.from_raw_data(raw_data)
"def",
"_get",
"(",
"cls",
",",
"resource_id",
",",
"parent_id",
",",
"grandparent_id",
")",
":",
"client",
"=",
"cls",
".",
"_get_client",
"(",
")",
"endpoint",
"=",
"cls",
".",
"_endpoint",
".",
"format",
"(",
"resource_id",
"=",
"resource_id",
"or",
"... | Retrieves the required resource. | [
"Retrieves",
"the",
"required",
"resource",
"."
] | python | train |
def _open(self, f):
    """
    Adds a file descriptor to the current file descriptor list

    :rtype: int
    :param f: the file descriptor to add.
    :return: the index of the file descriptor in the file descr. list
    """
    try:
        # Reuse the first vacated slot, if any.
        fd = self.files.index(None)
        self.files[fd] = f
    except ValueError:
        # No free slot available: grow the table.
        fd = len(self.files)
        self.files.append(f)
    return fd
"def",
"_open",
"(",
"self",
",",
"f",
")",
":",
"if",
"None",
"in",
"self",
".",
"files",
":",
"fd",
"=",
"self",
".",
"files",
".",
"index",
"(",
"None",
")",
"self",
".",
"files",
"[",
"fd",
"]",
"=",
"f",
"else",
":",
"fd",
"=",
"len",
... | Adds a file descriptor to the current file descriptor list
:rtype: int
:param f: the file descriptor to add.
:return: the index of the file descriptor in the file descr. list | [
"Adds",
"a",
"file",
"descriptor",
"to",
"the",
"current",
"file",
"descriptor",
"list"
] | python | valid |
def find_the_closest_atoms(self, topology):
    """
    This function defines the ligand atoms that are closest to the residues that will be plotted
    in the final graph.
    """
    # Distances are measured against the ligand without hydrogen atoms
    # (ligand_noH) because hydrogens are not plotted in the final graph.
    self.universe.load_new(topology)
    self.universe.ligand_noH = self.universe.ligand.select_atoms("not name H*")
    ligand_positions = self.universe.ligand_noH.positions

    for residue in self.dict_of_plotted_res:
        selection = self.universe.select_atoms(
            "resname " + residue[0] + " and resid " + residue[1] + " and segid " + residue[2])
        dist_array = MDAnalysis.analysis.distances.distance_array(ligand_positions, selection.positions)

        # Minimum distance from each heavy ligand atom to this residue.
        per_atom_minima = {}
        for row, atom in enumerate(self.universe.ligand_noH):
            per_atom_minima[atom.name] = dist_array[row].min()

        closest_name, closest_dist = min(per_atom_minima.items(), key=operator.itemgetter(1))
        self.closest_atoms[residue] = [(closest_name, closest_dist)]
"def",
"find_the_closest_atoms",
"(",
"self",
",",
"topology",
")",
":",
"# The measurements are made to ligand molecule without hydrogen atoms (ligand_noH) because the",
"# hydrogen atoms are not plotted in the final graph",
"self",
".",
"universe",
".",
"load_new",
"(",
"topology",... | This function defines the ligand atoms that are closest to the residues that will be plotted
in the final graph. | [
"This",
"function",
"defines",
"the",
"ligand",
"atoms",
"that",
"are",
"closest",
"to",
"the",
"residues",
"that",
"will",
"be",
"plotted",
"in",
"the",
"final",
"graph",
"."
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.