repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
|---|---|---|---|---|---|---|---|---|
adaptive-learning/proso-apps | proso_user/views.py | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views.py#L22-L89 | def profile(request, status=200):
"""
Get the user's profile. If the user has no assigned profile, the HTTP 404
is returned. Make a POST request to modify the user's profile.
GET parameters:
html
turn on the HTML version of the API
username:
username of user (only for users with public profile)
stats:
attach additional user statistics
POST parameters (JSON):
send_emails:
switch turning on sending e-mails to the user
public:
switch making the user's profile publicly available
user:
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name
"""
if request.method == 'GET':
if request.GET.get("username", False):
try:
user_profile = User.objects.get(username=request.GET.get("username"),
userprofile__public=True).userprofile
except ObjectDoesNotExist:
raise Http404("user not found or has no public profile")
else:
user_id = get_user_id(request)
if get_config('proso_user', 'google.openid.migration', default=True) and not is_user_id_overridden(request):
migrated_user = migrate_google_openid_user(request.user)
if migrated_user is not None:
auth.logout(request)
migrated_user.backend = 'social.backends.google.GoogleOAuth2'
auth.login(request, migrated_user)
user_profile = get_object_or_404(UserProfile, user_id=user_id)
return render_json(
request, user_profile, status=status,
template='user_profile.html', help_text=profile.__doc__)
elif request.method == 'POST':
with transaction.atomic():
to_save = json_body(request.body.decode("utf-8"))
user_id = get_user_id(request)
user_profile = get_object_or_404(UserProfile, user_id=user_id)
user = to_save.get('user', None)
if 'send_emails' in to_save:
user_profile.send_emails = bool(to_save['send_emails'])
if 'public' in to_save:
user_profile.public = bool(to_save['public'])
if user:
error = _save_user(request, user, new=False)
if error:
return render_json(request, error, template='user_json.html', status=400)
if 'properties' in to_save:
user_profile.save_properties(to_save['properties'])
user_profile.save()
request.method = "GET"
return profile(request, status=202)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | [
"def",
"profile",
"(",
"request",
",",
"status",
"=",
"200",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"if",
"request",
".",
"GET",
".",
"get",
"(",
"\"username\"",
",",
"False",
")",
":",
"try",
":",
"user_profile",
"=",
"User",
... | Get the user's profile. If the user has no assigned profile, the HTTP 404
is returned. Make a POST request to modify the user's profile.
GET parameters:
html
turn on the HTML version of the API
username:
username of user (only for users with public profile)
stats:
attach additional user statistics
POST parameters (JSON):
send_emails:
switch turning on sending e-mails to the user
public:
switch making the user's profile publicly available
user:
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name | [
"Get",
"the",
"user",
"s",
"profile",
".",
"If",
"the",
"user",
"has",
"no",
"assigned",
"profile",
"the",
"HTTP",
"404",
"is",
"returned",
".",
"Make",
"a",
"POST",
"request",
"to",
"modify",
"the",
"user",
"s",
"profile",
"."
] | python | train |
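A hedged client-side sketch of the POST contract the docstring above describes. The endpoint URL is hypothetical (the view's mounting lives in the app's urlconf, not shown here); the JSON keys come straight from the docstring.

```python
# Sketch only: the URL is an assumption; the JSON keys mirror the docstring above.
import requests

payload = {
    "send_emails": True,      # switch: e-mail notifications
    "public": False,          # switch: public profile
    "user": {
        "password": "s3cret",
        "password_check": "s3cret",  # must match 'password'
        "first_name": "Ada",         # optional
    },
}
# On success the view re-dispatches itself as GET and answers 202 Accepted.
resp = requests.post("https://example.org/user/profile/", json=payload)
print(resp.status_code)
```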
jxtech/wechatpy | wechatpy/pay/api/withhold.py | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/withhold.py#L176-L202 | def apply_cancel_signing(self, contract_id=None, plan_id=None, contract_code=None,
contract_termination_remark=None, version="1.0"):
"""
Apply for contract termination.
https://pay.weixin.qq.com/wiki/doc/api/pap.php?chapter=18_4&index=6
:param contract_id: contract ID
:param plan_id: plan (template) ID
:param contract_code: contract code
:param contract_termination_remark: reason for termination
:param version: version number
:return:
"""
if not (contract_id or (plan_id and contract_code)):
raise ValueError("contract_id and (plan_id, contract_code) must be a choice.")
data = {
"appid": self.appid,
"mch_id": self.mch_id,
"plan_id": plan_id,
"contract_code": contract_code,
"contract_id": contract_id,
"contract_termination_remark": contract_termination_remark,
"version": version,
"nonce_str": None,
}
return self._post("papay/deletecontract", data=data) | [
"def",
"apply_cancel_signing",
"(",
"self",
",",
"contract_id",
"=",
"None",
",",
"plan_id",
"=",
"None",
",",
"contract_code",
"=",
"None",
",",
"contract_termination_remark",
"=",
"None",
",",
"version",
"=",
"\"1.0\"",
")",
":",
"if",
"not",
"(",
"contrac... | 申请解约
https://pay.weixin.qq.com/wiki/doc/api/pap.php?chapter=18_4&index=6
:param contract_id: contract ID
:param plan_id: plan (template) ID
:param contract_code: contract code
:param contract_termination_remark: reason for termination
:param version: version number
:return: | [
"申请解约"
] | python | train |
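A heavily hedged usage sketch: the `WeChatPay` constructor arguments and the `withhold` attribute name are assumptions about wechatpy's client wiring; only `apply_cancel_signing`'s own parameters are taken from the implementation above.

```python
# All client wiring below is assumed; only the method parameters come from
# the code above. Pass either contract_id alone, or plan_id + contract_code.
from wechatpy.pay import WeChatPay

client = WeChatPay(appid="wx0123456789abcdef", api_key="...", mch_id="1234567890")
client.withhold.apply_cancel_signing(
    plan_id="12535",
    contract_code="100001256",
    contract_termination_remark="user requested cancellation",
)
```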
coleifer/walrus | walrus/rate_limit.py | https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/rate_limit.py#L31-L60 | def limit(self, key):
"""
Function to log an event with the given key. If the ``key`` has not
exceeded their allotted events, then the function returns ``False`` to
indicate that no limit is being imposed.
If the ``key`` has exceeded the number of events, then the function
returns ``True`` indicating rate-limiting should occur.
:param str key: A key identifying the source of the event.
:returns: Boolean indicating whether the event should be rate-limited
or not.
"""
if self._debug:
return False
counter = self.database.List(self.name + ':' + key)
n = len(counter)
is_limited = False
if n < self._limit:
counter.prepend(str(time.time()))
else:
oldest = float(counter[-1])
if time.time() - oldest < self._per:
is_limited = True
else:
counter.prepend(str(time.time()))
del counter[:self._limit]
counter.pexpire(int(self._per * 2000))
return is_limited | [
"def",
"limit",
"(",
"self",
",",
"key",
")",
":",
"if",
"self",
".",
"_debug",
":",
"return",
"False",
"counter",
"=",
"self",
".",
"database",
".",
"List",
"(",
"self",
".",
"name",
"+",
"':'",
"+",
"key",
")",
"n",
"=",
"len",
"(",
"counter",
... | Function to log an event with the given key. If the ``key`` has not
exceeded their alotted events, then the function returns ``False`` to
indicate that no limit is being imposed.
If the ``key`` has exceeded the number of events, then the function
returns ``True`` indicating rate-limiting should occur.
:param str key: A key identifying the source of the event.
:returns: Boolean indicating whether the event should be rate-limited
or not. | [
"Function",
"to",
"log",
"an",
"event",
"with",
"the",
"given",
"key",
".",
"If",
"the",
"key",
"has",
"not",
"exceeded",
"their",
"alotted",
"events",
"then",
"the",
"function",
"returns",
"False",
"to",
"indicate",
"that",
"no",
"limit",
"is",
"being",
... | python | train |
mitsei/dlkit | dlkit/services/commenting.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/commenting.py#L884-L895 | def _set_book_view(self, session):
"""Sets the underlying book view to match current view"""
if self._book_view == FEDERATED:
try:
session.use_federated_book_view()
except AttributeError:
pass
else:
try:
session.use_isolated_book_view()
except AttributeError:
pass | [
"def",
"_set_book_view",
"(",
"self",
",",
"session",
")",
":",
"if",
"self",
".",
"_book_view",
"==",
"FEDERATED",
":",
"try",
":",
"session",
".",
"use_federated_book_view",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"try",
":",
"ses... | Sets the underlying book view to match current view | [
"Sets",
"the",
"underlying",
"book",
"view",
"to",
"match",
"current",
"view"
] | python | train |
PythonCharmers/python-future | src/future/backports/urllib/request.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L2395-L2409 | def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
proxies = {}
for name, value in os.environ.items():
name = name.lower()
if value and name[-6:] == '_proxy':
proxies[name[:-6]] = value
return proxies | [
"def",
"getproxies_environment",
"(",
")",
":",
"proxies",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"os",
".",
"environ",
".",
"items",
"(",
")",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"if",
"value",
"and",
"name",
"[",
"-",
"... | Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor. | [
"Return",
"a",
"dictionary",
"of",
"scheme",
"-",
">",
"proxy",
"server",
"URL",
"mappings",
"."
] | python | train |
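The same function ships in the standard library's `urllib.request`, which this backport mirrors; a quick illustration of the `<scheme>_proxy` scanning:

```python
import os
from urllib.request import getproxies_environment  # stdlib twin of the backport above

os.environ['http_proxy'] = 'http://proxy.example.com:3128'
os.environ['ftp_proxy'] = 'http://proxy.example.com:3128'
print(getproxies_environment())
# -> {'http': 'http://proxy.example.com:3128', 'ftp': '...'} (key order may vary)
```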
stephantul/reach | reach/reach.py | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L618-L647 | def similarity(self, i1, i2):
"""
Compute the similarity between two sets of items.
Parameters
----------
i1 : object
The first set of items.
i2 : object
The second set of items.
Returns
-------
sim : array of floats
An array of similarity scores between 1 and 0.
"""
try:
if i1 in self.items:
i1 = [i1]
except TypeError:
pass
try:
if i2 in self.items:
i2 = [i2]
except TypeError:
pass
i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])
i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])
return i1_vec.dot(i2_vec.T) | [
"def",
"similarity",
"(",
"self",
",",
"i1",
",",
"i2",
")",
":",
"try",
":",
"if",
"i1",
"in",
"self",
".",
"items",
":",
"i1",
"=",
"[",
"i1",
"]",
"except",
"TypeError",
":",
"pass",
"try",
":",
"if",
"i2",
"in",
"self",
".",
"items",
":",
... | Compute the similarity between two sets of items.
Parameters
----------
i1 : object
The first set of items.
i2 : object
The second set of items.
Returns
-------
sim : array of floats
An array of similarity scores between 1 and 0. | [
"Compute",
"the",
"similarity",
"between",
"two",
"sets",
"of",
"items",
"."
] | python | train |
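A small sketch of `similarity()`. The `Reach(vectors, items)` constructor shape is an assumption inferred from the attributes the method uses (`items`, `norm_vectors`).

```python
# Constructor shape assumed; three 8-dimensional toy vectors with labels.
import numpy as np
from reach import Reach

vectors = np.random.rand(3, 8)
r = Reach(vectors, ['cat', 'dog', 'car'])
print(r.similarity('cat', 'dog'))             # 1x1 matrix of cosine scores
print(r.similarity(['cat', 'dog'], ['car']))  # 2x1 matrix
```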
fake-name/ChromeController | ChromeController/Generator/Generated.py | https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L3834-L3862 | def DOM_setFileInputFiles(self, files, **kwargs):
"""
Function path: DOM.setFileInputFiles
Domain: DOM
Method name: setFileInputFiles
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'files' (type: array) -> Array of file paths to set.
Optional arguments:
'nodeId' (type: NodeId) -> Identifier of the node.
'backendNodeId' (type: BackendNodeId) -> Identifier of the backend node.
'objectId' (type: Runtime.RemoteObjectId) -> JavaScript object id of the node wrapper.
No return value.
Description: Sets files for the given file input element.
"""
assert isinstance(files, (list, tuple)
), "Argument 'files' must be of type '['list', 'tuple']'. Received type: '%s'" % type(
files)
expected = ['nodeId', 'backendNodeId', 'objectId']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['nodeId', 'backendNodeId', 'objectId']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('DOM.setFileInputFiles', files=
files, **kwargs)
return subdom_funcs | [
"def",
"DOM_setFileInputFiles",
"(",
"self",
",",
"files",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"files",
",",
"(",
"list",
",",
"tuple",
")",
")",
",",
"\"Argument 'files' must be of type '['list', 'tuple']'. Received type: '%s'\"",
"%",
... | Function path: DOM.setFileInputFiles
Domain: DOM
Method name: setFileInputFiles
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'files' (type: array) -> Array of file paths to set.
Optional arguments:
'nodeId' (type: NodeId) -> Identifier of the node.
'backendNodeId' (type: BackendNodeId) -> Identifier of the backend node.
'objectId' (type: Runtime.RemoteObjectId) -> JavaScript object id of the node wrapper.
No return value.
Description: Sets files for the given file input element. | [
"Function",
"path",
":",
"DOM",
".",
"setFileInputFiles",
"Domain",
":",
"DOM",
"Method",
"name",
":",
"setFileInputFiles",
"WARNING",
":",
"This",
"function",
"is",
"marked",
"Experimental",
"!",
"Parameters",
":",
"Required",
"arguments",
":",
"files",
"(",
... | python | train |
dmbee/seglearn | seglearn/transform.py | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L761-L816 | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data with linear direct value interpolation
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : array-like or None
None is returned if target is changed. Otherwise it is returned unchanged.
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
N = len(Xt) # number of series
D = Xt[0].shape[1] - 1 # number of data channels
# 1st channel is time
t = [Xt[i][:, 0] for i in np.arange(N)]
t_lin = [np.arange(Xt[i][0, 0], Xt[i][-1, 0], self.sample_period) for i in np.arange(N)]
if D == 1:
Xt = [self._interp(t_lin[i], t[i], Xt[i][:, 1], kind=self.kind) for i in np.arange(N)]
elif D > 1:
Xt = [np.column_stack([self._interp(t_lin[i], t[i], Xt[i][:, j], kind=self.kind)
for j in range(1, D + 1)]) for i in np.arange(N)]
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
swt = None
if self.categorical_target is True:
yt = [self._interp(t_lin[i], t[i], yt[i], kind='nearest') for i in np.arange(N)]
else:
yt = [self._interp(t_lin[i], t[i], yt[i], kind=self.kind) for i in np.arange(N)]
else:
# y is static - leave y alone
pass
return Xt, yt, swt | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"yt",
"=",
"y",
"swt",
"=",
"sample... | Transforms the time series data with linear direct value interpolation
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : array-like or None
None is returned if target is changed. Otherwise it is returned unchanged. | [
"Transforms",
"the",
"time",
"series",
"data",
"with",
"linear",
"direct",
"value",
"interpolation",
"If",
"y",
"is",
"a",
"time",
"series",
"and",
"passed",
"it",
"will",
"be",
"transformed",
"as",
"well",
"The",
"time",
"dimension",
"is",
"removed",
"from"... | python | train |
orb-framework/orb | orb/core/column_types/string.py | https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/string.py#L110-L122 | def store(self, value, context=None):
"""
Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant>
"""
if isinstance(value, (str, unicode)) and self.testFlag(self.Flags.Encrypted):
value = orb.system.security().encrypt(value)
return super(AbstractStringColumn, self).store(value, context=context) | [
"def",
"store",
"(",
"self",
",",
"value",
",",
"context",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"str",
",",
"unicode",
")",
")",
"and",
"self",
".",
"testFlag",
"(",
"self",
".",
"Flags",
".",
"Encrypted",
")",
":",
... | Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant> | [
"Converts",
"the",
"value",
"to",
"one",
"that",
"is",
"safe",
"to",
"store",
"on",
"a",
"record",
"within",
"the",
"record",
"values",
"dictionary"
] | python | train |
fastai/fastai | old/fastai/structured.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/structured.py#L45-L68 | def get_sample(df,n):
""" Gets a random sample of n rows from df, without replacement.
Parameters:
-----------
df: A pandas data frame, that you wish to sample from.
n: The number of rows you wish to sample.
Returns:
--------
return value: A random sample of n rows of df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
>>> get_sample(df, 2)
col1 col2
1 2 b
2 3 a
"""
idxs = sorted(np.random.permutation(len(df))[:n])
return df.iloc[idxs].copy() | [
"def",
"get_sample",
"(",
"df",
",",
"n",
")",
":",
"idxs",
"=",
"sorted",
"(",
"np",
".",
"random",
".",
"permutation",
"(",
"len",
"(",
"df",
")",
")",
"[",
":",
"n",
"]",
")",
"return",
"df",
".",
"iloc",
"[",
"idxs",
"]",
".",
"copy",
"("... | Gets a random sample of n rows from df, without replacement.
Parameters:
-----------
df: A pandas data frame, that you wish to sample from.
n: The number of rows you wish to sample.
Returns:
--------
return value: A random sample of n rows of df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
>>> get_sample(df, 2)
col1 col2
1 2 b
2 3 a | [
"Gets",
"a",
"random",
"sample",
"of",
"n",
"rows",
"from",
"df",
"without",
"replacement",
".",
"Parameters",
":",
"-----------",
"df",
":",
"A",
"pandas",
"data",
"frame",
"that",
"you",
"wish",
"to",
"sample",
"from",
".",
"n",
":",
"The",
"number",
... | python | train |
trevisanj/a99 | a99/gui/xmisc.py | https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/xmisc.py#L463-L483 | def __signalReceived(self, *args):
"""Received signal. Cancel previous timer and store args to be forwarded later."""
if self.__disconnecting:
return
with self.__lock:
self.__args = args
if self.__rateLimit == 0:
self.__timer.stop()
self.__timer.start((self.__delay * 1000) + 1)
else:
now = time.time()
if self.__lastFlushTime is None:
leakTime = 0
else:
lastFlush = self.__lastFlushTime
leakTime = max(0, (lastFlush + (1.0 / self.__rateLimit)) - now)
self.__timer.stop()
# Note: original was min() below.
timeout = (max(leakTime, self.__delay) * 1000) + 1
self.__timer.start(timeout) | [
"def",
"__signalReceived",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"__disconnecting",
":",
"return",
"with",
"self",
".",
"__lock",
":",
"self",
".",
"__args",
"=",
"args",
"if",
"self",
".",
"__rateLimit",
"==",
"0",
":",
"self",
... | Received signal. Cancel previous timer and store args to be forwarded later. | [
"Received",
"signal",
".",
"Cancel",
"previous",
"timer",
"and",
"store",
"args",
"to",
"be",
"forwarded",
"later",
"."
] | python | train |
swharden/SWHLab | swhlab/common.py | https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/common.py#L163-L184 | def abfSort(IDs):
"""
given a list of goofy ABF names, return it sorted intelligently.
This places things like 16o01001 after 16901001.
"""
IDs=list(IDs)
monO=[]
monN=[]
monD=[]
good=[]
for ID in IDs:
if ID is None:
continue
if 'o' in ID:
monO.append(ID)
elif 'n' in ID:
monN.append(ID)
elif 'd' in ID:
monD.append(ID)
else:
good.append(ID)
return sorted(good)+sorted(monO)+sorted(monN)+sorted(monD) | [
"def",
"abfSort",
"(",
"IDs",
")",
":",
"IDs",
"=",
"list",
"(",
"IDs",
")",
"monO",
"=",
"[",
"]",
"monN",
"=",
"[",
"]",
"monD",
"=",
"[",
"]",
"good",
"=",
"[",
"]",
"for",
"ID",
"in",
"IDs",
":",
"if",
"ID",
"is",
"None",
":",
"continue... | given a list of goofy ABF names, return it sorted intelligently.
This places things like 16o01001 after 16901001. | [
"given",
"a",
"list",
"of",
"goofy",
"ABF",
"names",
"return",
"it",
"sorted",
"intelligently",
".",
"This",
"places",
"things",
"like",
"16o01001",
"after",
"16901001",
"."
] | python | valid |
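A worked call showing the ordering the docstring promises: plain IDs first, then the 'o'/'n'/'d' variants, with `None` entries dropped.

```python
from swhlab.common import abfSort  # import path taken from the repo layout above

print(abfSort(['16o01001', '16901001', None, '16d01001', '16n01001']))
# -> ['16901001', '16o01001', '16n01001', '16d01001']
```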
rfosterslo/wagtailplus | wagtailplus/wagtailrollbacks/apps.py | https://github.com/rfosterslo/wagtailplus/blob/22cac857175d8a6f77e470751831c14a92ccd768/wagtailplus/wagtailrollbacks/apps.py#L35-L43 | def add_rollback_panels(self):
"""
Adds a rollback panel to each applicable model class's edit handlers.
"""
from wagtailplus.utils.edit_handlers import add_panel_to_edit_handler
from wagtailplus.wagtailrollbacks.edit_handlers import HistoryPanel
for model in self.applicable_models:
add_panel_to_edit_handler(model, HistoryPanel, _(u'History')) | [
"def",
"add_rollback_panels",
"(",
"self",
")",
":",
"from",
"wagtailplus",
".",
"utils",
".",
"edit_handlers",
"import",
"add_panel_to_edit_handler",
"from",
"wagtailplus",
".",
"wagtailrollbacks",
".",
"edit_handlers",
"import",
"HistoryPanel",
"for",
"model",
"in",... | Adds rollback panel to applicable model class's edit handlers. | [
"Adds",
"rollback",
"panel",
"to",
"applicable",
"model",
"class",
"s",
"edit",
"handlers",
"."
] | python | train |
VingtCinq/python-mailchimp | mailchimp3/helpers.py | https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/helpers.py#L103-L125 | def merge_results(x, y):
"""
Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + z[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict`
"""
z = x.copy()
for key, value in y.items():
if isinstance(value, list) and isinstance(z.get(key), list):
z[key] += value
else:
z[key] = value
return z | [
"def",
"merge_results",
"(",
"x",
",",
"y",
")",
":",
"z",
"=",
"x",
".",
"copy",
"(",
")",
"for",
"key",
",",
"value",
"in",
"y",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
"and",
"isinstance",
"(",
"z",
... | Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + z[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict` | [
"Given",
"two",
"dicts",
"x",
"and",
"y",
"merge",
"them",
"into",
"a",
"new",
"dict",
"as",
"a",
"shallow",
"copy",
"."
] | python | valid |
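A worked example of the merge rule described above: shared list keys are concatenated, everything else is overwritten by `y`.

```python
from mailchimp3.helpers import merge_results  # path from the row above

x = {'members': [{'id': 1}], 'total_items': 1}
y = {'members': [{'id': 2}], 'total_items': 2}
print(merge_results(x, y))
# -> {'members': [{'id': 1}, {'id': 2}], 'total_items': 2}
```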
ctuning/ck | ck/kernel.py | https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L3635-L3695 | def parse_cid(i):
"""
Input: {
cid - in format (REPO_UOA:)MODULE_UOA:DATA_UOA
(cur_cid) - output of function 'detect_cid_in_current_path'
(ignore_error) - if 'yes', ignore wrong format
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
data_uoa - data UOA
module_uoa - module UOA
(repo_uoa) - repo UOA
}
"""
r={'return':0}
c=i['cid'].strip()
ie=i.get('ignore_error','')
cc=i.get('cur_cid', {})
a0=cc.get('repo_uoa','')
m0=cc.get('module_uoa','')
d0=cc.get('data_uoa','')
if c.startswith(cfg['detect_cur_cid']) or c.startswith(cfg['detect_cur_cid1']):
c=c[1:]
x=c.split(':')
if len(x)<2 and m0=='':
if ie!='yes':
return {'return':1, 'error':'unknown CID format'}
else:
return r
if c=='':
r['repo_uoa']=a0
r['module_uoa']=m0
r['data_uoa']=d0
elif len(x)==1:
if a0!='': r['repo_uoa']=a0
r['module_uoa']=m0
r['data_uoa']=x[0]
elif len(x)==2:
if a0!='': r['repo_uoa']=a0
r['module_uoa']=x[0]
r['data_uoa']=x[1]
elif len(x)==3:
r['repo_uoa']=x[0]
r['module_uoa']=x[1]
r['data_uoa']=x[2]
else:
if ie!='yes':
return {'return':1, 'error':'unknown CID format'}
return r | [
"def",
"parse_cid",
"(",
"i",
")",
":",
"r",
"=",
"{",
"'return'",
":",
"0",
"}",
"c",
"=",
"i",
"[",
"'cid'",
"]",
".",
"strip",
"(",
")",
"ie",
"=",
"i",
".",
"get",
"(",
"'ignore_error'",
",",
"''",
")",
"cc",
"=",
"i",
".",
"get",
"(",
... | Input: {
cid - in format (REPO_UOA:)MODULE_UOA:DATA_UOA
(cur_cid) - output of function 'detect_cid_in_current_path'
(ignore_error) - if 'yes', ignore wrong format
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
data_uoa - data UOA
module_uoa - module UOA
(repo_uoa) - repo UOA
} | [
"Input",
":",
"{",
"cid",
"-",
"in",
"format",
"(",
"REPO_UOA",
":",
")",
"MODULE_UOA",
":",
"DATA_UOA",
"(",
"cur_cid",
")",
"-",
"output",
"of",
"function",
"detect_cid_in_current_path",
"(",
"ignore_error",
")",
"-",
"if",
"yes",
"ignore",
"wrong",
"for... | python | train |
aquatix/python-utilkit | utilkit/fileutil.py | https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/fileutil.py#L72-L88 | def list_files(dirname, extension=None):
"""
List all files in directory `dirname`, option to filter on file extension
"""
f = []
for (dirpath, dirnames, filenames) in os.walk(dirname):
f.extend(filenames)
break
if extension is not None:
# Filter on extension
filtered = []
for filename in f:
fn, ext = os.path.splitext(filename)
if ext.lower() == '.' + extension.lower():
filtered.append(filename)
f = filtered
return f | [
"def",
"list_files",
"(",
"dirname",
",",
"extension",
"=",
"None",
")",
":",
"f",
"=",
"[",
"]",
"for",
"(",
"dirpath",
",",
"dirnames",
",",
"filenames",
")",
"in",
"os",
".",
"walk",
"(",
"dirname",
")",
":",
"f",
".",
"extend",
"(",
"filenames"... | List all files in directory `dirname`, option to filter on file extension | [
"List",
"all",
"files",
"in",
"directory",
"dirname",
"option",
"to",
"filter",
"on",
"file",
"extension"
] | python | train |
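Typical call (the directory path is illustrative); the extension match is case-insensitive and expects no leading dot.

```python
from utilkit.fileutil import list_files

# Only files directly inside the directory; subdirectories are not walked.
for name in list_files('/tmp/data', extension='csv'):
    print(name)
```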
gwastro/pycbc | pycbc/transforms.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L520-L550 | def transform(self, maps):
"""This function transforms from chirp distance to luminosity distance,
given the chirp mass.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy as np
>>> from pycbc import transforms
>>> t = transforms.ChirpDistanceToDistance()
>>> t.transform({'chirp_distance': np.array([40.]), 'mchirp': np.array([1.2])})
{'mchirp': array([ 1.2]), 'chirp_distance': array([ 40.]), 'distance': array([ 39.48595679])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.distance] = \
conversions.distance_from_chirp_distance_mchirp(
maps[parameters.chirp_distance],
maps[parameters.mchirp],
ref_mass=self.ref_mass)
return self.format_output(maps, out) | [
"def",
"transform",
"(",
"self",
",",
"maps",
")",
":",
"out",
"=",
"{",
"}",
"out",
"[",
"parameters",
".",
"distance",
"]",
"=",
"conversions",
".",
"distance_from_chirp_distance_mchirp",
"(",
"maps",
"[",
"parameters",
".",
"chirp_distance",
"]",
",",
"... | This function transforms from chirp distance to luminosity distance,
given the chirp mass.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy as np
>>> from pycbc import transforms
>>> t = transforms.ChirpDistanceToDistance()
>>> t.transform({'chirp_distance': np.array([40.]), 'mchirp': np.array([1.2])})
{'mchirp': array([ 1.2]), 'chirp_distance': array([ 40.]), 'distance': array([ 39.48595679])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values. | [
"This",
"function",
"transforms",
"from",
"chirp",
"distance",
"to",
"luminosity",
"distance",
"given",
"the",
"chirp",
"mass",
"."
] | python | train |
radjkarl/appBase | appbase/Session.py | https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L461-L469 | def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path) | [
"def",
"_autoSave",
"(",
"self",
")",
":",
"a",
"=",
"'autoSave'",
"path",
"=",
"self",
".",
"path",
"if",
"not",
"path",
":",
"path",
"=",
"self",
".",
"dir",
".",
"join",
"(",
"'%s.%s'",
"%",
"(",
"a",
",",
"self",
".",
"FTYPE",
")",
")",
"se... | save state into 'autosave' | [
"save",
"state",
"into",
"autosave"
] | python | train |
DecBayComp/RWA-python | rwa/generic.py | https://github.com/DecBayComp/RWA-python/blob/734a52e15a0e8c244d84d74acf3fd64721074732/rwa/generic.py#L276-L309 | def pokeVisited(self, objname, obj, record, existing, visited=None, _stack=None, **kwargs):
"""
Serialize an already serialized object.
If the underlying store supports linking, this is the place where to make links.
The default implementation delegates to :meth:`pokeStorable` or :meth:`pokeNative`.
Arguments:
objname (any): record reference.
obj (any): object to be serialized.
existing (any): absolute reference of the record which the object
was already serialized into.
visited (dict): already serialized objects.
_stack (CallStack): stack of parent object names.
"""
if self.hasPythonType(obj):
storable = self.byPythonType(obj).asVersion()
self.pokeStorable(storable, objname, obj, record, visited=visited, \
_stack=_stack, **kwargs)
else:
try:
self.pokeNative(objname, obj, record)
except (SystemExit, KeyboardInterrupt):
raise
except:
self.dump_stack(_stack)
raise | [
"def",
"pokeVisited",
"(",
"self",
",",
"objname",
",",
"obj",
",",
"record",
",",
"existing",
",",
"visited",
"=",
"None",
",",
"_stack",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"hasPythonType",
"(",
"obj",
")",
":",
"st... | Serialize an already serialized object.
If the underlying store supports linking, this is the place where to make links.
The default implementation delegates to :meth:`pokeStorable` or :meth:`pokeNative`.
Arguments:
objname (any): record reference.
obj (any): object to be serialized.
existing (any): absolute reference of the record which the object
was already serialized into.
visited (dict): already serialized objects.
_stack (CallStack): stack of parent object names. | [
"Serialize",
"an",
"already",
"serialized",
"object",
"."
] | python | train |
delph-in/pydelphin | delphin/lib/pegre.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/lib/pegre.py#L154-L165 | def not_next(e):
"""
Create a PEG function for negative lookahead.
"""
def match_not_next(s, grm=None, pos=0):
try:
e(s, grm, pos)
except PegreError as ex:
return PegreResult(s, Ignore, (pos, pos))
else:
raise PegreError('Negative lookahead failed', pos)
return match_not_next | [
"def",
"not_next",
"(",
"e",
")",
":",
"def",
"match_not_next",
"(",
"s",
",",
"grm",
"=",
"None",
",",
"pos",
"=",
"0",
")",
":",
"try",
":",
"e",
"(",
"s",
",",
"grm",
",",
"pos",
")",
"except",
"PegreError",
"as",
"ex",
":",
"return",
"Pegre... | Create a PEG function for negative lookahead. | [
"Create",
"a",
"PEG",
"function",
"for",
"negative",
"lookahead",
"."
] | python | train |
scikit-hep/probfit | probfit/plotting.py | https://github.com/scikit-hep/probfit/blob/de3593798ea3877dd2785062bed6877dd9058a02/probfit/plotting.py#L519-L550 | def draw_pdf(f, arg, bound, bins=100, scale=1.0, density=True,
normed_pdf=False, ax=None, **kwds):
"""
draw pdf with given argument and bounds.
**Arguments**
* **f** your pdf. The first argument is assumed to be independent
variable
* **arg** argument can be tuple or list
* **bound** tuple(xmin,xmax)
* **bins** number of bins to plot pdf. Default 100.
* **scale** multiply pdf by given number. Default 1.0.
* **density** plot density instead of expected count in each bin
(pdf*bin width). Default True.
* **normed_pdf** Normalize pdf in given bound. Default False
* The rest of the keyword arguments will be passed to pyplot.plot
**Returns**
x, y of what's being plotted
"""
edges = np.linspace(bound[0], bound[1], bins)
return draw_pdf_with_edges(f, arg, edges, ax=ax, scale=scale, density=density,
normed_pdf=normed_pdf, **kwds) | [
"def",
"draw_pdf",
"(",
"f",
",",
"arg",
",",
"bound",
",",
"bins",
"=",
"100",
",",
"scale",
"=",
"1.0",
",",
"density",
"=",
"True",
",",
"normed_pdf",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"edges",
"=",
"np",... | draw pdf with given argument and bounds.
**Arguments**
* **f** your pdf. The first argument is assumed to be independent
variable
* **arg** argument can be tuple or list
* **bound** tuple(xmin,xmax)
* **bins** number of bins to plot pdf. Default 100.
* **scale** multiply pdf by given number. Default 1.0.
* **density** plot density instead of expected count in each bin
(pdf*bin width). Default True.
* **normed_pdf** Normalize pdf in given bound. Default False
* The rest of the keyword arguments will be passed to pyplot.plot
**Returns**
x, y of what's being plotted | [
"draw",
"pdf",
"with",
"given",
"argument",
"and",
"bounds",
"."
] | python | train |
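A usage sketch with a hand-written Gaussian pdf; the `probfit.plotting` import path is taken from the row above.

```python
import math
import matplotlib.pyplot as plt
from probfit.plotting import draw_pdf  # path from the row above

def gaussian(x, mu, sigma):
    # first argument is the independent variable, as draw_pdf requires
    return math.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * math.sqrt(2 * math.pi))

x, y = draw_pdf(gaussian, arg=(0.0, 1.0), bound=(-3.0, 3.0), bins=200)
plt.show()
```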
graphite-project/carbonate | carbonate/stale.py | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/stale.py#L11-L22 | def data(path, hours, offset=0):
"""
Return ``True`` if the metric at ``path`` has no whisper data newer than ``hours``, i.e. it is stale.
If ``offset`` is not None, view the ``hours`` prior to ``offset`` hours
ago, instead of from right now.
"""
now = time.time()
end = now - _to_sec(offset) # Will default to now
start = end - _to_sec(hours)
_data = whisper.fetch(path, start, end)
return all(x is None for x in _data[-1]) | [
"def",
"data",
"(",
"path",
",",
"hours",
",",
"offset",
"=",
"0",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"end",
"=",
"now",
"-",
"_to_sec",
"(",
"offset",
")",
"# Will default to now",
"start",
"=",
"end",
"-",
"_to_sec",
"(",
"hours... | Does the metric at ``path`` have any whisper data newer than ``hours``?
If ``offset`` is not None, view the ``hours`` prior to ``offset`` hours
ago, instead of from right now. | [
"Does",
"the",
"metric",
"at",
"path",
"have",
"any",
"whisper",
"data",
"newer",
"than",
"hours",
"?"
] | python | train |
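Usage sketch (the whisper file path is illustrative); the function returns `True` only when the window holds *no* datapoints, which is how carbonate flags stale metrics.

```python
from carbonate.stale import data

wsp = '/opt/graphite/storage/whisper/app/requests.wsp'  # illustrative path
if data(wsp, hours=24):
    print('no datapoints in the last 24h -> stale')
```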
Neurita/boyle | boyle/nifti/roi.py | https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/roi.py#L63-L90 | def pick_rois(rois_img, roi_values, bg_val=0):
""" Return the `rois_img` only with the ROI values from `roi_values`.
Parameters
----------
rois_img: niimg-like
roi_values: list of int or float
The list of values from rois_img.
bg_val: int or float
The background value of `rois_img`.
Returns
-------
subset_rois_img: nibabel.Nifti2Image
"""
img = read_img(rois_img)
img_data = img.get_data()
if bg_val == 0:
out = np.zeros(img_data.shape, dtype=img_data.dtype)
else:
out = np.ones(img_data.shape, dtype=img_data.dtype) * bg_val
for r in roi_values:
out[img_data == r] = r
return nib.Nifti2Image(out, affine=img.affine, header=img.header) | [
"def",
"pick_rois",
"(",
"rois_img",
",",
"roi_values",
",",
"bg_val",
"=",
"0",
")",
":",
"img",
"=",
"read_img",
"(",
"rois_img",
")",
"img_data",
"=",
"img",
".",
"get_data",
"(",
")",
"if",
"bg_val",
"==",
"0",
":",
"out",
"=",
"np",
".",
"zero... | Return the `rois_img` only with the ROI values from `roi_values`.
Parameters
----------
rois_img: niimg-like
roi_values: list of int or float
The list of values from rois_img.
bg_val: int or float
The background value of `rois_img`.
Returns
-------
subset_rois_img: nibabel.Nifti2Image | [
"Return",
"the",
"rois_img",
"only",
"with",
"the",
"ROI",
"values",
"from",
"roi_values",
".",
"Parameters",
"----------",
"rois_img",
":",
"niimg",
"-",
"like"
] | python | valid |
saltstack/salt | salt/modules/iptables.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/iptables.py#L774-L800 | def new_chain(table='filter', chain=None, family='ipv4'):
'''
.. versionadded:: 2014.1.0
Create new custom chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' iptables.new_chain filter CUSTOM_CHAIN
IPv6:
salt '*' iptables.new_chain filter CUSTOM_CHAIN family=ipv6
'''
if not chain:
return 'Error: Chain needs to be specified'
wait = '--wait' if _has_option('--wait', family) else ''
cmd = '{0} {1} -t {2} -N {3}'.format(
_iptables_cmd(family), wait, table, chain)
out = __salt__['cmd.run'](cmd)
if not out:
out = True
return out | [
"def",
"new_chain",
"(",
"table",
"=",
"'filter'",
",",
"chain",
"=",
"None",
",",
"family",
"=",
"'ipv4'",
")",
":",
"if",
"not",
"chain",
":",
"return",
"'Error: Chain needs to be specified'",
"wait",
"=",
"'--wait'",
"if",
"_has_option",
"(",
"'--wait'",
... | .. versionadded:: 2014.1.0
Create new custom chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' iptables.new_chain filter CUSTOM_CHAIN
IPv6:
salt '*' iptables.new_chain filter CUSTOM_CHAIN family=ipv6 | [
"..",
"versionadded",
"::",
"2014",
".",
"1",
".",
"0"
] | python | train |
20c/vodka | vodka/bartender.py | https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L105-L121 | def newapp(path):
"""
Generates all files for a new vodka app at the specified location.
Will generate to current directory if no path is specified
"""
app_path = os.path.join(VODKA_INSTALL_DIR, "resources", "blank_app")
if not os.path.exists(path):
os.makedirs(path)
elif os.path.exists(os.path.join(path, "application.py")):
raise click.ClickException("There already exists a vodka app at %s, please specify a different path" % path)  # click has no click.error(); raise to abort with an error message
os.makedirs(os.path.join(path, "plugins"))
shutil.copy(os.path.join(app_path, "application.py"), os.path.join(path, "application.py"))
shutil.copy(os.path.join(app_path, "__init__.py"), os.path.join(path, "__init__.py"))
shutil.copy(os.path.join(app_path, "plugins", "example.py"), os.path.join(path, "plugins", "example.py"))
shutil.copy(os.path.join(app_path, "plugins", "__init__.py"), os.path.join(path, "plugins", "__init__.py")) | [
"def",
"newapp",
"(",
"path",
")",
":",
"app_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"VODKA_INSTALL_DIR",
",",
"\"resources\"",
",",
"\"blank_app\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"make... | Generates all files for a new vodka app at the specified location.
Will generate to current directory if no path is specified | [
"Generates",
"all",
"files",
"for",
"a",
"new",
"vodka",
"app",
"at",
"the",
"specified",
"location",
"."
] | python | train |
smira/py-numa | numa.py | https://github.com/smira/py-numa/blob/eb38979c61028eb9422a4ad1eda0387cd93ea390/numa.py#L262-L274 | def bind(nodemask):
"""
Binds the current thread and its children to the nodes specified in nodemask.
They will only run on the CPUs of the specified nodes and only be able to allocate memory from them.
@param nodemask: node mask
@type nodemask: C{set}
"""
mask = set_to_numa_nodemask(nodemask)
bitmask = libnuma.numa_allocate_nodemask()
libnuma.copy_nodemask_to_bitmask(byref(mask), bitmask)
libnuma.numa_bind(bitmask)
libnuma.numa_bitmask_free(bitmask) | [
"def",
"bind",
"(",
"nodemask",
")",
":",
"mask",
"=",
"set_to_numa_nodemask",
"(",
"nodemask",
")",
"bitmask",
"=",
"libnuma",
".",
"numa_allocate_nodemask",
"(",
")",
"libnuma",
".",
"copy_nodemask_to_bitmask",
"(",
"byref",
"(",
"mask",
")",
",",
"bitmask",... | Binds the current thread and its children to the nodes specified in nodemask.
They will only run on the CPUs of the specified nodes and only be able to allocate memory from them.
@param nodemask: node mask
@type nodemask: C{set} | [
"Binds",
"the",
"current",
"thread",
"and",
"its",
"children",
"to",
"the",
"nodes",
"specified",
"in",
"nodemask",
".",
"They",
"will",
"only",
"run",
"on",
"the",
"CPUs",
"of",
"the",
"specified",
"nodes",
"and",
"only",
"be",
"able",
"to",
"allocate",
... | python | train |
osrg/ryu | ryu/services/protocols/bgp/processor.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/processor.py#L178-L190 | def _compare_by_version(path1, path2):
"""Returns the current/latest learned path.
Checks if given paths are from same source/peer and then compares their
version number to determine which path is received later. If paths are from
different source/peer return None.
"""
if path1.source == path2.source:
if path1.source_version_num > path2.source_version_num:
return path1
else:
return path2
return None | [
"def",
"_compare_by_version",
"(",
"path1",
",",
"path2",
")",
":",
"if",
"path1",
".",
"source",
"==",
"path2",
".",
"source",
":",
"if",
"path1",
".",
"source_version_num",
">",
"path2",
".",
"source_version_num",
":",
"return",
"path1",
"else",
":",
"re... | Returns the current/latest learned path.
Checks if given paths are from same source/peer and then compares their
version number to determine which path is received later. If paths are from
different source/peer return None. | [
"Returns",
"the",
"current",
"/",
"latest",
"learned",
"path",
"."
] | python | train |
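A toy illustration with stand-in path objects (the real BGP `Path` class carries far more state); only the two attribute names the comparison reads are modelled.

```python
from collections import namedtuple
from ryu.services.protocols.bgp.processor import _compare_by_version  # private helper

Path = namedtuple('Path', ['source', 'source_version_num'])  # stand-in object
old, new = Path('peer-1', 3), Path('peer-1', 7)
assert _compare_by_version(old, new) is new                 # same peer: newer version wins
assert _compare_by_version(old, Path('peer-2', 9)) is None  # different peers: undecided
```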
PMEAL/OpenPNM | openpnm/io/MAT.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/io/MAT.py#L29-L60 | def save(cls, network, phases=[], filename=''):
r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
network = network[0]
# Write to file
if filename == '':
filename = project.name
filename = cls._parse_filename(filename=filename, ext='mat')
d = Dict.to_dict(network=network, phases=phases, interleave=True)
d = FlatDict(d, delimiter='|')
d = sanitize_dict(d)
new_d = {}
for key in list(d.keys()):
new_key = key.replace('|', '_').replace('.', '_')
new_d[new_key] = d.pop(key)
spio.savemat(file_name=filename, mdict=new_d) | [
"def",
"save",
"(",
"cls",
",",
"network",
",",
"phases",
"=",
"[",
"]",
",",
"filename",
"=",
"''",
")",
":",
"project",
",",
"network",
",",
"phases",
"=",
"cls",
".",
"_parse_args",
"(",
"network",
"=",
"network",
",",
"phases",
"=",
"phases",
"... | r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file | [
"r",
"Write",
"Network",
"to",
"a",
"Mat",
"file",
"for",
"exporting",
"to",
"Matlab",
"."
] | python | train |
kstaniek/condoor | condoor/connection.py | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/connection.py#L226-L230 | def _chain_indices(self):
"""Get the deque of chain indices starting with last successful index."""
chain_indices = deque(range(len(self.connection_chains)))
chain_indices.rotate(self._last_chain_index)
return chain_indices | [
"def",
"_chain_indices",
"(",
"self",
")",
":",
"chain_indices",
"=",
"deque",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"connection_chains",
")",
")",
")",
"chain_indices",
".",
"rotate",
"(",
"self",
".",
"_last_chain_index",
")",
"return",
"chain_indice... | Get the deque of chain indices starting with last successful index. | [
"Get",
"the",
"deque",
"of",
"chain",
"indices",
"starting",
"with",
"last",
"successful",
"index",
"."
] | python | train |
zwischenloesung/ardu-report-lib | libardurep/datareporter.py | https://github.com/zwischenloesung/ardu-report-lib/blob/51bd4a07e036065aafcb1273b151bea3fdfa50fa/libardurep/datareporter.py#L68-L87 | def log_post(self, url=None, credentials=None, do_verify_certificate=True):
"""
Write to a remote host via HTTP POST
"""
if url is None:
url = self.url
if credentials is None:
credentials = self.credentials
if do_verify_certificate is None:
do_verify_certificate = self.do_verify_certificate
if credentials and "base64" in credentials:
headers = {"Content-Type": "application/json", \
'Authorization': 'Basic %s' % credentials["base64"]}
else:
headers = {"Content-Type": "application/json"}
try:
request = requests.post(url, headers=headers, \
data=self.store.get_json(), verify=do_verify_certificate)
except httplib.IncompleteRead as e:
request = e.partial | [
"def",
"log_post",
"(",
"self",
",",
"url",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"do_verify_certificate",
"=",
"True",
")",
":",
"if",
"url",
"is",
"None",
":",
"url",
"=",
"self",
".",
"url",
"if",
"credentials",
"is",
"None",
":",
"cr... | Write to a remote host via HTTP POST | [
"Write",
"to",
"a",
"remote",
"host",
"via",
"HTTP",
"POST"
] | python | valid |
inspirehep/harvesting-kit | harvestingkit/utils.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/utils.py#L213-L220 | def fix_dashes(string):
"""Fix bad Unicode special dashes in string."""
string = string.replace(u'\u05BE', '-')
string = string.replace(u'\u1806', '-')
string = string.replace(u'\u2E3A', '-')
string = string.replace(u'\u2E3B', '-')
string = unidecode(string)
return re.sub(r'--+', '-', string) | [
"def",
"fix_dashes",
"(",
"string",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"u'\\u05BE'",
",",
"'-'",
")",
"string",
"=",
"string",
".",
"replace",
"(",
"u'\\u1806'",
",",
"'-'",
")",
"string",
"=",
"string",
".",
"replace",
"(",
"u'\\u... | Fix bad Unicode special dashes in string. | [
"Fix",
"bad",
"Unicode",
"special",
"dashes",
"in",
"string",
"."
] | python | valid |
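A quick demonstration: the input mixes U+05BE and U+2E3A/U+2E3B dashes, which are normalised to ASCII hyphens before runs are collapsed.

```python
from harvestingkit.utils import fix_dashes

print(fix_dashes(u'foo\u05BEbar\u2E3A\u2E3Bbaz'))
# -> 'foo-bar-baz'
```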
pvlib/pvlib-python | pvlib/clearsky.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/clearsky.py#L304-L325 | def _linearly_scale(inputmatrix, inputmin, inputmax, outputmin, outputmax):
"""linearly scale input to output, used by Linke turbidity lookup"""
inputrange = inputmax - inputmin
outputrange = outputmax - outputmin
delta = outputrange/inputrange # number of indices per input unit
inputmin = inputmin + 1.0 / delta / 2.0 # shift to center of index
outputmax = outputmax - 1 # shift index to zero indexing
outputmatrix = (inputmatrix - inputmin) * delta + outputmin
err = IndexError('Input, %g, is out of range (%g, %g).' %
(inputmatrix, inputmax - inputrange, inputmax))
# round down if input is within half an index or else raise index error
if outputmatrix > outputmax:
if np.around(outputmatrix - outputmax, 1) <= 0.5:
outputmatrix = outputmax
else:
raise err
elif outputmatrix < outputmin:
if np.around(outputmin - outputmatrix, 1) <= 0.5:
outputmatrix = outputmin
else:
raise err
return outputmatrix | [
"def",
"_linearly_scale",
"(",
"inputmatrix",
",",
"inputmin",
",",
"inputmax",
",",
"outputmin",
",",
"outputmax",
")",
":",
"inputrange",
"=",
"inputmax",
"-",
"inputmin",
"outputrange",
"=",
"outputmax",
"-",
"outputmin",
"delta",
"=",
"outputrange",
"/",
"... | linearly scale input to output, used by Linke turbidity lookup | [
"linearly",
"scale",
"input",
"to",
"output",
"used",
"by",
"Linke",
"turbidity",
"lookup"
] | python | train |
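A worked call (the numbers are illustrative): mapping a latitude in [0, 90] onto a 2160-row lookup axis gives delta = 24 indices per degree.

```python
from pvlib.clearsky import _linearly_scale  # private helper, shown above

idx = _linearly_scale(45.0, 0.0, 90.0, 0.0, 2160.0)
# inputrange=90, outputrange=2160, delta=24; the half-index shift centres
# each input unit on its row, giving idx == (45 - 1/48) * 24 == 1079.5
print(idx)
```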
yinkaisheng/Python-UIAutomation-for-Windows | uiautomation/uiautomation.py | https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L6038-L6054 | def ShowWindow(self, cmdShow: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
Get a native handle from self or ancestors until valid and call native `ShowWindow` with cmdShow.
cmdShow: int, a value in class `SW`.
waitTime: float.
Return bool, True if succeed otherwise False.
"""
handle = self.NativeWindowHandle
if not handle:
control = self
while not handle:
control = control.GetParentControl()
handle = control.NativeWindowHandle
if handle:
ret = ShowWindow(handle, cmdShow)
time.sleep(waitTime)
return ret | [
"def",
"ShowWindow",
"(",
"self",
",",
"cmdShow",
":",
"int",
",",
"waitTime",
":",
"float",
"=",
"OPERATION_WAIT_TIME",
")",
"->",
"bool",
":",
"handle",
"=",
"self",
".",
"NativeWindowHandle",
"if",
"not",
"handle",
":",
"control",
"=",
"self",
"while",
... | Get a native handle from self or ancestors until valid and call native `ShowWindow` with cmdShow.
cmdShow: int, a value in class `SW`.
waitTime: float.
Return bool, True if succeed otherwise False. | [
"Get",
"a",
"native",
"handle",
"from",
"self",
"or",
"ancestors",
"until",
"valid",
"and",
"call",
"native",
"ShowWindow",
"with",
"cmdShow",
".",
"cmdShow",
":",
"int",
"a",
"value",
"in",
"in",
"class",
"SW",
".",
"waitTime",
":",
"float",
".",
"Retur... | python | valid |
zeth/inputs | inputs.py | https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L2421-L2426 | def _get_path_infomation(self):
"""Get useful infomation from the device path."""
long_identifier = self._device_path.split('/')[4]
protocol, remainder = long_identifier.split('-', 1)
identifier, _, device_type = remainder.rsplit('-', 2)
return (protocol, identifier, device_type) | [
"def",
"_get_path_infomation",
"(",
"self",
")",
":",
"long_identifier",
"=",
"self",
".",
"_device_path",
".",
"split",
"(",
"'/'",
")",
"[",
"4",
"]",
"protocol",
",",
"remainder",
"=",
"long_identifier",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"ident... | Get useful infomation from the device path. | [
"Get",
"useful",
"infomation",
"from",
"the",
"device",
"path",
"."
] | python | train |
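A standalone walkthrough of the same split logic on a typical `/dev/input/by-id` path (the device name is illustrative):

```python
device_path = '/dev/input/by-id/usb-Logitech_USB_Keyboard-event-kbd'  # illustrative
long_identifier = device_path.split('/')[4]            # 'usb-Logitech_USB_Keyboard-event-kbd'
protocol, remainder = long_identifier.split('-', 1)    # 'usb', rest
identifier, _, device_type = remainder.rsplit('-', 2)  # 'Logitech_USB_Keyboard', 'event', 'kbd'
print(protocol, identifier, device_type)               # usb Logitech_USB_Keyboard kbd
```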
ryan-roemer/django-cloud-browser | cloud_browser/common.py | https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/common.py#L146-L163 | def path_parts(path):
"""Split path into container, object.
:param path: Path to resource (including container).
:type path: `string`
:return: Container, storage object tuple.
:rtype: `tuple` of `string`, `string`
"""
path = path if path is not None else ''
container_path = object_path = ''
parts = path_list(path)
if len(parts) >= 1:
container_path = parts[0]
if len(parts) > 1:
object_path = path_join(*parts[1:])
return container_path, object_path | [
"def",
"path_parts",
"(",
"path",
")",
":",
"path",
"=",
"path",
"if",
"path",
"is",
"not",
"None",
"else",
"''",
"container_path",
"=",
"object_path",
"=",
"''",
"parts",
"=",
"path_list",
"(",
"path",
")",
"if",
"len",
"(",
"parts",
")",
">=",
"1",... | Split path into container, object.
:param path: Path to resource (including container).
:type path: `string`
:return: Container, storage object tuple.
:rtype: `tuple` of `string`, `string` | [
"Split",
"path",
"into",
"container",
"object",
"."
] | python | train |
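Example splits, assuming `path_list`/`path_join` separate on '/' (consistent with the cloud-path helpers in this module): the first segment names the container, the rest is the object key.

```python
from cloud_browser.common import path_parts

print(path_parts('my-container/photos/2019/cat.jpg'))
# -> ('my-container', 'photos/2019/cat.jpg')
print(path_parts('my-container'))
# -> ('my-container', '')
```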
edx/ease | ease/util_functions.py | https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L193-L233 | def get_vocab(text, score, max_feats=750, max_feats2=200):
"""
Uses a Fisher test to find words that are significant in that they separate
high scoring essays from low scoring essays.
text is a list of input essays.
score is a list of scores, with score[n] corresponding to text[n]
max_feats is the maximum number of features to consider in the first pass
max_feats2 is the maximum number of features to consider in the second (final) pass
Returns a list of words that constitute the significant vocabulary
"""
dict = CountVectorizer(ngram_range=(1,2), max_features=max_feats)
dict_mat = dict.fit_transform(text)
set_score = numpy.asarray(score, dtype=numpy.int)
med_score = numpy.median(set_score)
new_score = set_score
if(med_score == 0):
med_score = 1
new_score[set_score < med_score] = 0
new_score[set_score >= med_score] = 1
fish_vals = []
for col_num in range(0, dict_mat.shape[1]):
loop_vec = dict_mat.getcol(col_num).toarray()
good_loop_vec = loop_vec[new_score == 1]
bad_loop_vec = loop_vec[new_score == 0]
good_loop_present = len(good_loop_vec[good_loop_vec > 0])
good_loop_missing = len(good_loop_vec[good_loop_vec == 0])
bad_loop_present = len(bad_loop_vec[bad_loop_vec > 0])
bad_loop_missing = len(bad_loop_vec[bad_loop_vec == 0])
fish_val = pvalue(good_loop_present, bad_loop_present, good_loop_missing, bad_loop_missing).two_tail
fish_vals.append(fish_val)
cutoff = 1
if(len(fish_vals) > max_feats2):
cutoff = sorted(fish_vals)[max_feats2]
good_cols = numpy.asarray([num for num in range(0, dict_mat.shape[1]) if fish_vals[num] <= cutoff])
getVar = lambda searchList, ind: [searchList[i] for i in ind]
vocab = getVar(dict.get_feature_names(), good_cols)
return vocab | [
"def",
"get_vocab",
"(",
"text",
",",
"score",
",",
"max_feats",
"=",
"750",
",",
"max_feats2",
"=",
"200",
")",
":",
"dict",
"=",
"CountVectorizer",
"(",
"ngram_range",
"=",
"(",
"1",
",",
"2",
")",
",",
"max_features",
"=",
"max_feats",
")",
"dict_ma... | Uses a fisher test to find words that are significant in that they separate
high scoring essays from low scoring essays.
text is a list of input essays.
score is a list of scores, with score[n] corresponding to text[n]
max_feats is the maximum number of features to consider in the first pass
max_feats2 is the maximum number of features to consider in the second (final) pass
Returns a list of words that constitute the significant vocabulary | [
"Uses",
"a",
"fisher",
"test",
"to",
"find",
"words",
"that",
"are",
"significant",
"in",
"that",
"they",
"separate",
"high",
"scoring",
"essays",
"from",
"low",
"scoring",
"essays",
".",
"text",
"is",
"a",
"list",
"of",
"input",
"essays",
".",
"score",
... | python | valid |
napalm-automation/napalm-nxos | napalm_nxos_ssh/nxos_ssh.py | https://github.com/napalm-automation/napalm-nxos/blob/936d641c99e068817abf247e0e5571fc31b3a92a/napalm_nxos_ssh/nxos_ssh.py#L657-L660 | def _save_to_checkpoint(self, filename):
"""Save the current running config to the given file."""
command = 'checkpoint file {}'.format(filename)
self.device.send_command(command) | [
"def",
"_save_to_checkpoint",
"(",
"self",
",",
"filename",
")",
":",
"command",
"=",
"'checkpoint file {}'",
".",
"format",
"(",
"filename",
")",
"self",
".",
"device",
".",
"send_command",
"(",
"command",
")"
] | Save the current running config to the given file. | [
"Save",
"the",
"current",
"running",
"config",
"to",
"the",
"given",
"file",
"."
] | python | train |
jmbhughes/suvi-trainer | suvitrainer/gui.py | https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/gui.py#L447-L457 | def make_options_frame(self):
""" make the frame that allows for configuration and classification"""
self.tab_frame = ttk.Notebook(self.option_frame, width=800)
self.tab_configure = tk.Frame(self.tab_frame)
self.tab_classify = tk.Frame(self.tab_frame)
self.make_configure_tab()
self.make_classify_tab()
self.tab_frame.add(self.tab_configure, text="Configure")
self.tab_frame.add(self.tab_classify, text="Classify")
self.tab_frame.pack(fill=tk.BOTH, expand=True) | [
"def",
"make_options_frame",
"(",
"self",
")",
":",
"self",
".",
"tab_frame",
"=",
"ttk",
".",
"Notebook",
"(",
"self",
".",
"option_frame",
",",
"width",
"=",
"800",
")",
"self",
".",
"tab_configure",
"=",
"tk",
".",
"Frame",
"(",
"self",
".",
"tab_fr... | make the frame that allows for configuration and classification | [
"make",
"the",
"frame",
"that",
"allows",
"for",
"configuration",
"and",
"classification"
] | python | train |
hydraplatform/hydra-base | hydra_base/util/__init__.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/__init__.py#L132-L218 | def get_val(dataset, timestamp=None):
"""
Turn the string value of a dataset into an appropriate
value, be it a decimal value, array or time series.
If a timestamp is passed to this function,
return the values appropriate to the requested times.
If the timestamp is *before* the start of the timeseries data, return None
If the timestamp is *after* the end of the timeseries data, return the last
value.
The raw flag indicates whether timeseries should be returned raw -- exactly
as they are in the DB (a timeseries being a list of timeseries data objects,
for example) or as a single python dictionary
"""
if dataset.type == 'array':
        #TODO: design a mechanism to retrieve this data if it's stored externally
return json.loads(dataset.value)
elif dataset.type == 'descriptor':
return str(dataset.value)
elif dataset.type == 'scalar':
return Decimal(str(dataset.value))
elif dataset.type == 'timeseries':
        #TODO: design a mechanism to retrieve this data if it's stored externally
        val = dataset.value
        seasonal_year = config.get('DEFAULT', 'seasonal_year', '1678')
seasonal_key = config.get('DEFAULT', 'seasonal_key', '9999')
val = dataset.value.replace(seasonal_key, seasonal_year)
timeseries = pd.read_json(val, convert_axes=True)
if timestamp is None:
return timeseries
else:
try:
idx = timeseries.index
#Seasonal timeseries are stored in the year
#1678 (the lowest year pandas allows for valid times).
#Therefore if the timeseries is seasonal,
#the request must be a seasonal request, not a
#standard request
if type(idx) == pd.DatetimeIndex:
if set(idx.year) == set([int(seasonal_year)]):
if isinstance(timestamp, list):
seasonal_timestamp = []
for t in timestamp:
t_1900 = t.replace(year=int(seasonal_year))
seasonal_timestamp.append(t_1900)
timestamp = seasonal_timestamp
else:
timestamp = [timestamp.replace(year=int(seasonal_year))]
pandas_ts = timeseries.reindex(timestamp, method='ffill')
#If there are no values at all, just return None
if len(pandas_ts.dropna()) == 0:
return None
#Replace all numpy NAN values with None
pandas_ts = pandas_ts.where(pandas_ts.notnull(), None)
val_is_array = False
if len(pandas_ts.columns) > 1:
val_is_array = True
if val_is_array:
if type(timestamp) is list and len(timestamp) == 1:
ret_val = pandas_ts.loc[timestamp[0]].values.tolist()
else:
ret_val = pandas_ts.loc[timestamp].values.tolist()
else:
col_name = pandas_ts.loc[timestamp].columns[0]
if type(timestamp) is list and len(timestamp) == 1:
ret_val = pandas_ts.loc[timestamp[0]].loc[col_name]
else:
ret_val = pandas_ts.loc[timestamp][col_name].values.tolist()
return ret_val
except Exception as e:
log.critical("Unable to retrive data. Check timestamps.")
log.critical(e) | [
"def",
"get_val",
"(",
"dataset",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"dataset",
".",
"type",
"==",
"'array'",
":",
"#TODO: design a mechansim to retrieve this data if it's stored externally",
"return",
"json",
".",
"loads",
"(",
"dataset",
".",
"value",
... | Turn the string value of a dataset into an appropriate
value, be it a decimal value, array or time series.
If a timestamp is passed to this function,
return the values appropriate to the requested times.
If the timestamp is *before* the start of the timeseries data, return None
If the timestamp is *after* the end of the timeseries data, return the last
value.
The raw flag indicates whether timeseries should be returned raw -- exactly
as they are in the DB (a timeseries being a list of timeseries data objects,
for example) or as a single python dictionary | [
"Turn",
"the",
"string",
"value",
"of",
"a",
"dataset",
"into",
"an",
"appropriate",
"value",
"be",
"it",
"a",
"decimal",
"value",
"array",
"or",
"time",
"series",
"."
] | python | train |
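The timestamp branch of `get_val` resolves requested times against the stored series with a forward-fill reindex. A self-contained sketch of that lookup with invented data, showing the before-start/None and mid-interval behaviour described in the docstring:

```python
import pandas as pd

timeseries = pd.DataFrame(
    {"flow": [1.0, 2.0, 3.0]},
    index=pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"]),
)
timestamps = pd.to_datetime(["2019-12-31", "2020-01-02 12:00"])

# forward-fill: a mid-interval time resolves to the most recent earlier value,
# a time before the series start stays NaN
looked_up = timeseries.reindex(timestamps, method="ffill")
looked_up = looked_up.where(looked_up.notnull(), None)  # NaN -> None, as in get_val
print(looked_up)  # None for 2019-12-31, 2.0 for 2020-01-02 12:00
```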
eventable/vobject | vobject/behavior.py | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/behavior.py#L144-L169 | def serialize(cls, obj, buf, lineLength, validate=True):
"""
Set implicit parameters, do encoding, return unicode string.
If validate is True, raise VObjectError if the line doesn't validate
after implicit parameters are generated.
Default is to call base.defaultSerialize.
"""
cls.generateImplicitParameters(obj)
if validate:
cls.validate(obj, raiseException=True)
if obj.isNative:
transformed = obj.transformFromNative()
undoTransform = True
else:
transformed = obj
undoTransform = False
out = base.defaultSerialize(transformed, buf, lineLength)
if undoTransform:
obj.transformToNative()
return out | [
"def",
"serialize",
"(",
"cls",
",",
"obj",
",",
"buf",
",",
"lineLength",
",",
"validate",
"=",
"True",
")",
":",
"cls",
".",
"generateImplicitParameters",
"(",
"obj",
")",
"if",
"validate",
":",
"cls",
".",
"validate",
"(",
"obj",
",",
"raiseException"... | Set implicit parameters, do encoding, return unicode string.
If validate is True, raise VObjectError if the line doesn't validate
after implicit parameters are generated.
Default is to call base.defaultSerialize. | [
"Set",
"implicit",
"parameters",
"do",
"encoding",
"return",
"unicode",
"string",
"."
] | python | train |
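The serialize classmethod above follows a transform/serialize/undo pattern so the caller's object is left in its native state. A generic sketch of that pattern with hypothetical stand-ins (`Component` and the `repr` call are not part of vobject):

```python
class Component:
    """Hypothetical stand-in for a vobject component."""
    def __init__(self):
        self.isNative = True
        self.value = {"dt": "2024-01-01"}

    def transformFromNative(self):
        self.isNative = False
        return self

    def transformToNative(self):
        self.isNative = True
        return self


def serialize(obj):
    undoTransform = False
    if obj.isNative:
        transformed = obj.transformFromNative()
        undoTransform = True
    else:
        transformed = obj
    out = repr(transformed.value)  # stand-in for base.defaultSerialize
    if undoTransform:
        obj.transformToNative()    # restore the caller's object
    return out


obj = Component()
print(serialize(obj), obj.isNative)  # serialized text, True (state restored)
```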
vmlaker/mpipe | src/UnorderedWorker.py | https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/UnorderedWorker.py#L42-L73 | def assemble(
cls,
args,
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
):
"""Create, assemble and start workers.
        Workers of class *cls* are created, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
*do_stop_task* indicates whether doTask() will be called for "stop" request.
"""
# Create the workers.
workers = []
for ii in range(size):
worker = cls(**args)
worker.init2(
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
)
workers.append(worker)
# Start the workers.
for worker in workers:
worker.start() | [
"def",
"assemble",
"(",
"cls",
",",
"args",
",",
"input_tube",
",",
"output_tubes",
",",
"size",
",",
"disable_result",
",",
"do_stop_task",
",",
")",
":",
"# Create the workers.",
"workers",
"=",
"[",
"]",
"for",
"ii",
"in",
"range",
"(",
"size",
")",
"... | Create, assemble and start workers.
Workers of class *cls* are created, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
*do_stop_task* indicates whether doTask() will be called for "stop" request. | [
"Create",
"assemble",
"and",
"start",
"workers",
".",
"Workers",
"are",
"created",
"of",
"class",
"*",
"cls",
"*",
"initialized",
"with",
"*",
"args",
"*",
"and",
"given",
"task",
"/",
"result",
"communication",
"channels",
"*",
"input_tube",
"*",
"and",
"... | python | train |
bioidiap/bob.ip.facedetect | bob/ip/facedetect/train/TrainingSet.py | https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/train/TrainingSet.py#L294-L438 | def sample(self, model = None, maximum_number_of_positives = None, maximum_number_of_negatives = None, positive_indices = None, negative_indices = None):
"""sample([model], [maximum_number_of_positives], [maximum_number_of_negatives], [positive_indices], [negative_indices]) -> positives, negatives
Returns positive and negative samples from the set of positives and negatives.
This reads the previously extracted feature file (or all of them, in case features were extracted in parallel) and returns features.
If the ``model`` is not specified, a random sub-selection of positive and negative features is returned.
When the ``model`` is given, all patches are first classified with the given ``model``, and the ones that are mis-classified most are returned.
The number of returned positives and negatives can be limited by specifying the ``maximum_number_of_positives`` and ``maximum_number_of_negatives``.
This function keeps track of the positives and negatives that it once has returned, so it does not return the same positive or negative feature twice.
However, when you have to restart training from a given point, you can set the ``positive_indices`` and ``negative_indices`` parameters, to retrieve the features for the given indices.
In this case, no additional features are selected, but the given sets of indices are stored internally.
.. note::
The ``positive_indices`` and ``negative_indices`` only have an effect, when ``model`` is ``None``.
**Parameters:**
``model`` : :py:class:`bob.learn.boosting.BoostedMachine` or ``None``
If given, the ``model`` is used to predict the training features, and the highest mis-predicted features are returned
``maximum_number_of_positives, maximum_number_of_negatives`` : int
The maximum number of positive and negative features to be returned
``positive_indices, negative_indices`` : set(int) or ``None``
The set of positive and negative indices to extract features for, instead of randomly choosing indices; only considered when ``model = None``
**Returns:**
``positives, negatives`` : array_like(2D, uint16)
The new set of training features for the positive class (faces) and negative class (background).
"""
# get all existing feature files
feature_file = self._feature_file(index = 0)
if os.path.exists(feature_file):
feature_files = [feature_file]
else:
feature_files = []
i = 1
while True:
feature_file = self._feature_file(index = i)
if not os.path.exists(feature_file):
break
feature_files.append(feature_file)
i += 1
features = []
labels = []
# make a first iteration through the feature files and count the number of positives and negatives
positive_count, negative_count = 0, 0
logger.info("Reading %d feature files", len(feature_files))
for feature_file in feature_files:
logger.debug(".. Loading file %s", feature_file)
hdf5 = bob.io.base.HDF5File(feature_file)
positive_count += hdf5.get("TotalPositives")
negative_count += hdf5.get("TotalNegatives")
del hdf5
if model is None:
# get a list of indices and store them, so that we don't re-use them next time
if positive_indices is None:
positive_indices = set(quasi_random_indices(positive_count, maximum_number_of_positives))
if negative_indices is None:
negative_indices = set(quasi_random_indices(negative_count, maximum_number_of_negatives))
self.positive_indices |= positive_indices
self.negative_indices |= negative_indices
# now, iterate through the files again and sample
positive_indices = collections.deque(sorted(positive_indices))
negative_indices = collections.deque(sorted(negative_indices))
logger.info("Extracting %d of %d positive and %d of %d negative samples" % (len(positive_indices), positive_count, len(negative_indices), negative_count))
positive_count, negative_count = 0, 0
for feature_file in feature_files:
hdf5 = bob.io.base.HDF5File(feature_file)
for image in sorted(hdf5.sub_groups(recursive=False, relative=True)):
hdf5.cd(image)
for scale in sorted(hdf5.keys(relative=True)):
read = hdf5.get(scale)
size = read.shape[0]
if scale.startswith("Positives"):
# copy positive data
while positive_indices and positive_count <= positive_indices[0] and positive_count + size > positive_indices[0]:
assert positive_indices[0] >= positive_count
features.append(read[positive_indices.popleft() - positive_count, :])
labels.append(1)
positive_count += size
else:
# copy negative data
while negative_indices and negative_count <= negative_indices[0] and negative_count + size > negative_indices[0]:
assert negative_indices[0] >= negative_count
features.append(read[negative_indices.popleft() - negative_count, :])
labels.append(-1)
negative_count += size
hdf5.cd("..")
# return features and labels
return numpy.array(features), numpy.array(labels)
else:
positive_count -= len(self.positive_indices)
negative_count -= len(self.negative_indices)
logger.info("Getting worst %d of %d positive and worst %d of %d negative examples", min(maximum_number_of_positives, positive_count), positive_count, min(maximum_number_of_negatives, negative_count), negative_count)
# compute the worst features based on the current model
worst_positives, worst_negatives = [], []
positive_count, negative_count = 0, 0
for feature_file in feature_files:
hdf5 = bob.io.base.HDF5File(feature_file)
for image in sorted(hdf5.sub_groups(recursive=False, relative=True)):
hdf5.cd(image)
for scale in sorted(hdf5.keys(relative=True)):
read = hdf5.get(scale)
size = read.shape[0]
prediction = bob.blitz.array((size,), numpy.float64)
# forward features through the model
result = model.forward(read, prediction)
if scale.startswith("Positives"):
indices = [i for i in range(size) if positive_count + i not in self.positive_indices]
worst_positives.extend([(prediction[i], positive_count + i, read[i]) for i in indices if prediction[i] <= 0])
positive_count += size
else:
indices = [i for i in range(size) if negative_count + i not in self.negative_indices]
worst_negatives.extend([(prediction[i], negative_count + i, read[i]) for i in indices if prediction[i] >= 0])
negative_count += size
hdf5.cd("..")
# cut off good results
if maximum_number_of_positives is not None and len(worst_positives) > maximum_number_of_positives:
# keep only the positives with the low predictions (i.e., the worst)
worst_positives = sorted(worst_positives, key=lambda k: k[0])[:maximum_number_of_positives]
if maximum_number_of_negatives is not None and len(worst_negatives) > maximum_number_of_negatives:
# keep only the negatives with the high predictions (i.e., the worst)
worst_negatives = sorted(worst_negatives, reverse=True, key=lambda k: k[0])[:maximum_number_of_negatives]
# mark all indices to be used
self.positive_indices |= set(k[1] for k in worst_positives)
self.negative_indices |= set(k[1] for k in worst_negatives)
# finally, collect features and labels
return numpy.array([f[2] for f in worst_positives] + [f[2] for f in worst_negatives]), numpy.array([1]*len(worst_positives) + [-1]*len(worst_negatives)) | [
"def",
"sample",
"(",
"self",
",",
"model",
"=",
"None",
",",
"maximum_number_of_positives",
"=",
"None",
",",
"maximum_number_of_negatives",
"=",
"None",
",",
"positive_indices",
"=",
"None",
",",
"negative_indices",
"=",
"None",
")",
":",
"# get all existing fea... | sample([model], [maximum_number_of_positives], [maximum_number_of_negatives], [positive_indices], [negative_indices]) -> positives, negatives
Returns positive and negative samples from the set of positives and negatives.
This reads the previously extracted feature file (or all of them, in case features were extracted in parallel) and returns features.
If the ``model`` is not specified, a random sub-selection of positive and negative features is returned.
When the ``model`` is given, all patches are first classified with the given ``model``, and the ones that are mis-classified most are returned.
The number of returned positives and negatives can be limited by specifying the ``maximum_number_of_positives`` and ``maximum_number_of_negatives``.
This function keeps track of the positives and negatives that it once has returned, so it does not return the same positive or negative feature twice.
However, when you have to restart training from a given point, you can set the ``positive_indices`` and ``negative_indices`` parameters, to retrieve the features for the given indices.
In this case, no additional features are selected, but the given sets of indices are stored internally.
.. note::
The ``positive_indices`` and ``negative_indices`` only have an effect, when ``model`` is ``None``.
**Parameters:**
``model`` : :py:class:`bob.learn.boosting.BoostedMachine` or ``None``
If given, the ``model`` is used to predict the training features, and the highest mis-predicted features are returned
``maximum_number_of_positives, maximum_number_of_negatives`` : int
The maximum number of positive and negative features to be returned
``positive_indices, negative_indices`` : set(int) or ``None``
The set of positive and negative indices to extract features for, instead of randomly choosing indices; only considered when ``model = None``
**Returns:**
``positives, negatives`` : array_like(2D, uint16)
The new set of training features for the positive class (faces) and negative class (background). | [
"sample",
"(",
"[",
"model",
"]",
"[",
"maximum_number_of_positives",
"]",
"[",
"maximum_number_of_negatives",
"]",
"[",
"positive_indices",
"]",
"[",
"negative_indices",
"]",
")",
"-",
">",
"positives",
"negatives"
] | python | train |
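The inner loops of `sample` stream sorted global indices from a deque while reading the data chunk by chunk, so each chunk is visited only once. A self-contained sketch of that idiom with invented chunks:

```python
import collections
import numpy

chunks = [numpy.arange(0, 5), numpy.arange(5, 12), numpy.arange(12, 20)]
wanted = collections.deque(sorted([1, 6, 11, 17]))   # global row indices to extract

picked = []
count = 0
for chunk in chunks:
    size = chunk.shape[0]
    # consume indices that fall inside the current chunk's global range
    while wanted and count <= wanted[0] < count + size:
        picked.append(chunk[wanted.popleft() - count])
    count += size
print(picked)   # [1, 6, 11, 17]
```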
albertz/py_better_exchook | better_exchook.py | https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1382-L1387 | def from_frame_summary(cls, f):
"""
:param FrameSummary f:
:rtype: DummyFrame
"""
return cls(filename=f.filename, lineno=f.lineno, name=f.name, f_locals=f.locals) | [
"def",
"from_frame_summary",
"(",
"cls",
",",
"f",
")",
":",
"return",
"cls",
"(",
"filename",
"=",
"f",
".",
"filename",
",",
"lineno",
"=",
"f",
".",
"lineno",
",",
"name",
"=",
"f",
".",
"name",
",",
"f_locals",
"=",
"f",
".",
"locals",
")"
] | :param FrameSummary f:
:rtype: DummyFrame | [
":",
"param",
"FrameSummary",
"f",
":",
":",
"rtype",
":",
"DummyFrame"
] | python | train |
acorg/dark-matter | dark/fasta.py | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/fasta.py#L17-L30 | def dedupFasta(reads):
"""
Remove sequence duplicates (based on sequence) from FASTA.
@param reads: a C{dark.reads.Reads} instance.
@return: a generator of C{dark.reads.Read} instances with no duplicates.
"""
seen = set()
add = seen.add
for read in reads:
hash_ = md5(read.sequence.encode('UTF-8')).digest()
if hash_ not in seen:
add(hash_)
yield read | [
"def",
"dedupFasta",
"(",
"reads",
")",
":",
"seen",
"=",
"set",
"(",
")",
"add",
"=",
"seen",
".",
"add",
"for",
"read",
"in",
"reads",
":",
"hash_",
"=",
"md5",
"(",
"read",
".",
"sequence",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
".",
"digest... | Remove sequence duplicates (based on sequence) from FASTA.
@param reads: a C{dark.reads.Reads} instance.
@return: a generator of C{dark.reads.Read} instances with no duplicates. | [
"Remove",
"sequence",
"duplicates",
"(",
"based",
"on",
"sequence",
")",
"from",
"FASTA",
"."
] | python | train |
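A self-contained sketch of the digest-based dedup idiom in `dedupFasta`, hashing plain strings instead of read objects; the input records are invented:

```python
from hashlib import md5


def dedup(sequences):
    seen = set()
    add = seen.add
    for seq in sequences:
        # store only the 16-byte digest, not the sequence itself
        hash_ = md5(seq.encode("UTF-8")).digest()
        if hash_ not in seen:
            add(hash_)
            yield seq


print(list(dedup(["ACGT", "TTTT", "ACGT"])))   # ['ACGT', 'TTTT']
```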
CloverHealth/temple | temple/check.py | https://github.com/CloverHealth/temple/blob/d7b75da2459f72ba74d6f3b6e1ab95c3d1b92ccd/temple/check.py#L54-L60 | def _has_branch(branch):
"""Return True if the target branch exists."""
ret = temple.utils.shell('git rev-parse --verify {}'.format(branch),
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
check=False)
return ret.returncode == 0 | [
"def",
"_has_branch",
"(",
"branch",
")",
":",
"ret",
"=",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git rev-parse --verify {}'",
".",
"format",
"(",
"branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
",",
"stdout",
"=",
"subprocess",
".",... | Return True if the target branch exists. | [
"Return",
"True",
"if",
"the",
"target",
"branch",
"exists",
"."
] | python | valid |
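A minimal sketch of the same branch check using the standard library directly, in case the `temple.utils.shell` wrapper is not available; it assumes `git` is on PATH and the working directory is a repository:

```python
import subprocess


def has_branch(branch):
    # a zero return code from `git rev-parse --verify <ref>` means the ref exists
    ret = subprocess.run(
        ["git", "rev-parse", "--verify", branch],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        check=False,
    )
    return ret.returncode == 0


print(has_branch("master"))
```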
nerdynick/PySQLPool | src/PySQLPool/connection.py | https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/connection.py#L117-L122 | def lock(self, block=True):
"""
        Lock connection from being used elsewhere
"""
self._locked = True
return self._lock.acquire(block) | [
"def",
"lock",
"(",
"self",
",",
"block",
"=",
"True",
")",
":",
"self",
".",
"_locked",
"=",
"True",
"return",
"self",
".",
"_lock",
".",
"acquire",
"(",
"block",
")"
] | Lock connection from being used elsewhere | [
"Lock",
"connection",
"from",
"being",
"used",
"else",
"where"
] | python | train |
androguard/androguard | androguard/gui/sourcewindow.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/gui/sourcewindow.py#L383-L395 | def keyPressEvent(self, event):
"""Keyboard shortcuts"""
key = event.key()
if key == QtCore.Qt.Key_X:
self.actionXref()
elif key == QtCore.Qt.Key_G:
self.actionGoto()
elif key == QtCore.Qt.Key_I:
self.actionInfo()
elif key == QtCore.Qt.Key_R:
self.reload_java_sources() | [
"def",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
":",
"key",
"=",
"event",
".",
"key",
"(",
")",
"if",
"key",
"==",
"QtCore",
".",
"Qt",
".",
"Key_X",
":",
"self",
".",
"actionXref",
"(",
")",
"elif",
"key",
"==",
"QtCore",
".",
"Qt",
".",... | Keyboard shortcuts | [
"Keyboard",
"shortcuts"
] | python | train |
pantsbuild/pants | src/python/pants/base/parse_context.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/parse_context.py#L73-L93 | def create_object_if_not_exists(self, alias, name=None, *args, **kwargs):
"""Constructs the type with the given alias using the given args and kwargs.
NB: aliases may be the alias' object type itself if that type is known.
:API: public
:param alias: Either the type alias or the type itself.
:type alias: string|type
:param *args: These pass through to the underlying callable object.
:param **kwargs: These pass through to the underlying callable object.
:returns: The created object, or an existing object with the same `name`.
"""
if name is None:
raise ValueError("Method requires an object `name`.")
obj_creator = functools.partial(self.create_object,
alias,
name=name,
*args,
**kwargs)
return self._storage.add_if_not_exists(name, obj_creator) | [
"def",
"create_object_if_not_exists",
"(",
"self",
",",
"alias",
",",
"name",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Method requires an object `name`.\"",
")",
"obj_cr... | Constructs the type with the given alias using the given args and kwargs.
NB: aliases may be the alias' object type itself if that type is known.
:API: public
:param alias: Either the type alias or the type itself.
:type alias: string|type
:param *args: These pass through to the underlying callable object.
:param **kwargs: These pass through to the underlying callable object.
:returns: The created object, or an existing object with the same `name`. | [
"Constructs",
"the",
"type",
"with",
"the",
"given",
"alias",
"using",
"the",
"given",
"args",
"and",
"kwargs",
"."
] | python | train |
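A self-contained sketch of the defer-construction idiom in `create_object_if_not_exists`: bind the arguments with `functools.partial` and invoke the partial only when the name is unseen. The `Storage` class here is a hypothetical stand-in for the parse context's internal storage:

```python
import functools


class Storage:
    def __init__(self):
        self._objects = {}

    def add_if_not_exists(self, name, creator):
        # the creator partial is only called on a cache miss
        if name not in self._objects:
            self._objects[name] = creator()
        return self._objects[name]


def create_object(alias, name):
    print("constructing", alias, name)
    return {"alias": alias, "name": name}


storage = Storage()
creator = functools.partial(create_object, "target", name="lib")
print(storage.add_if_not_exists("lib", creator))   # constructs the object
print(storage.add_if_not_exists("lib", creator))   # reuses it, no second "constructing"
```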
jgorset/django-respite | respite/formats.py | https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/formats.py#L69-L79 | def find_by_extension(extension):
"""
Find and return a format by extension.
:param extension: A string describing the extension of the format.
"""
for format in FORMATS:
if extension in format.extensions:
return format
raise UnknownFormat('No format found with extension "%s"' % extension) | [
"def",
"find_by_extension",
"(",
"extension",
")",
":",
"for",
"format",
"in",
"FORMATS",
":",
"if",
"extension",
"in",
"format",
".",
"extensions",
":",
"return",
"format",
"raise",
"UnknownFormat",
"(",
"'No format found with extension \"%s\"'",
"%",
"extension",
... | Find and return a format by extension.
:param extension: A string describing the extension of the format. | [
"Find",
"and",
"return",
"a",
"format",
"by",
"extension",
"."
] | python | train |
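A self-contained sketch of the extension lookup with an invented format registry standing in for respite's `FORMATS` table (`LookupError` stands in for `UnknownFormat`):

```python
from collections import namedtuple

Format = namedtuple("Format", ["name", "extensions"])
FORMATS = [Format("JSON", ["json"]), Format("XML", ["xml", "xhtml"])]


def find_by_extension(extension):
    for format in FORMATS:
        if extension in format.extensions:
            return format
    raise LookupError('No format found with extension "%s"' % extension)


print(find_by_extension("xhtml").name)   # XML
```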
bpannier/simpletr64 | simpletr64/discover.py | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/discover.py#L114-L265 | def discoverParticularHost(host, service="ssdp:all", deviceDefinitionURL=None, timeout=1, retries=2,
ipAddress="239.255.255.250", port=1900, proxies=None):
"""Discover a particular host and find the best response.
This tries to find the most specific discovery result for the given host. Only the discovery result contains
    the URL to the XML tree which initializes the device definition. If a URL is already known it should be
provided to avoid additional latency for a broader first device discovery.
    This method also does some magic to find the best result for the given host, as UPnP devices sometimes behave
    strangely. This call is costly; the result, if any, should be cached.
:param str host: the host to find
:param service: the service type or list of service types if known to search for
:type service: str or list[str]
:param str deviceDefinitionURL: if provided it is used to skip a first device discovery
:param float timeout: the time to wait for each retry
:param int retries: the amount of times how often the device is tried to discover
:param str ipAddress: the multicast ip address to discover devices
:param int port: the port to discover devices
:param str proxies: proxy definition as defined here:
`Proxy definition <http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
    :return: If the device has been found the response is returned, otherwise None
:rtype: DiscoveryResponse
    :raises ValueError: if problems with reading or parsing the xml device definition occur
:raises requests.exceptions.ConnectionError: when the device definitions can not be downloaded
:raises requests.exceptions.ConnectTimeout: when download time out
Example:
::
proxies = {"http": "http://localhost:8888"}
result = discoverParticularHost("192.168.0.1", proxies=proxies)
if result is not None:
print("Host: " + result.locationHost + " Port: " + result.locationPort + " Device definitions: " + \\
result.location)
.. seealso::
:class:`~simpletr64.DiscoveryResponse`, :meth:`~simpletr64.Discover.discover`
"""
# get all IP addresses for the given host
ipResults = socket.getaddrinfo(host, 80)
if len(ipResults) == 0:
return None
ipAddresses = []
# remember all ip addresses for the given host
for ipAdrTupple in ipResults:
ipAddresses.append(ipAdrTupple[4][0])
bestPick = None
services = []
if deviceDefinitionURL is None:
# no xml definition given, so lets search for one
# search for all devices first
discoverResults = Discover.discover(service=service, timeout=timeout, retries=retries,
ipAddress=ipAddress, port=port)
for result in discoverResults:
if result.locationHost in ipAddresses:
# now we found a result for that host, pick the best service type if multiple results for the host
# are found
if Discover.rateServiceTypeInResult(result) > Discover.rateServiceTypeInResult(bestPick):
bestPick = result
# remember all services
if result.service not in services:
services.append(result.service)
if bestPick is None:
return None
else:
# create response with given parameter
bestPick = DiscoveryResponse.create(deviceDefinitionURL, service=service)
    # some routers do not advertise their TR64 capabilities but only their UPnP, which is a subset of actions.
# Try to find out if the given XML definition path will give us a better service type.
# load xml definition
    # some devices respond differently without a User-Agent
headers = {"User-Agent": "Mozilla/5.0; SimpleTR64-3"}
request = requests.get(bestPick.location, proxies=proxies, headers=headers, timeout=float(timeout))
if request.status_code != 200:
errorStr = DeviceTR64._extractErrorString(request)
raise ValueError('Could not get CPE definitions for "' + bestPick.location + '": ' +
str(request.status_code) + ' - ' + request.reason + " -- " + errorStr)
# parse xml
try:
root = ET.fromstring(request.text.encode('utf-8'))
except Exception as e:
raise ValueError("Could not parse CPE definitions for '" + bestPick.location + "': " + str(e))
# find the first deviceType in the document tree
for element in root.getiterator():
# check if element tag name ends on deviceType, skip XML namespace
if element.tag.lower().endswith("devicetype"):
serviceFound = element.text
# remember the service found if it does not exist yet
if serviceFound not in services:
services.append(serviceFound)
# create a specific service just to check if we found it already
serviceFound = serviceFound.replace("schemas-upnp-org", "dslforum-org")
            # test if we already have the best service type; then we don't need to do another discovery request
if serviceFound == bestPick.service:
return bestPick
for service in services:
                # we search for the specific device type version as specified in the TR64 protocol.
                # some devices return different results depending on the given service type, so let's be
# very specific
specificService = service.replace("schemas-upnp-org", "dslforum-org")
if specificService not in services:
services.append(specificService)
            # we do another discovery request with a more specific service/device type
discoverResultsSpecific = Discover.discover(service=services, timeout=float(timeout), retries=retries,
ipAddress=ipAddress, port=port)
# iterate through all results to find the most specific one
evenBetterPick = None
for specificResult in discoverResultsSpecific:
if specificResult.locationHost in ipAddresses:
if Discover.rateServiceTypeInResult(specificResult) > \
Discover.rateServiceTypeInResult(evenBetterPick):
evenBetterPick = specificResult
if evenBetterPick is not None:
# best we could find
return evenBetterPick
            # we found the first deviceType tag in the XML structure, no need to go further
break
if deviceDefinitionURL is not None:
# we created our own response, so no result found
return None
# we found only an unspecific result, return it anyway
return bestPick | [
"def",
"discoverParticularHost",
"(",
"host",
",",
"service",
"=",
"\"ssdp:all\"",
",",
"deviceDefinitionURL",
"=",
"None",
",",
"timeout",
"=",
"1",
",",
"retries",
"=",
"2",
",",
"ipAddress",
"=",
"\"239.255.255.250\"",
",",
"port",
"=",
"1900",
",",
"prox... | Discover a particular host and find the best response.
This tries to find the most specific discovery result for the given host. Only the discovery result contains
the URL to the XML tree which initializes the device definition. If a URL is already known it should be
provided to avoid additional latency for a broader first device discovery.
This method also does some magic to find the best result for the given host, as UPnP devices sometimes behave
strangely. This call is costly; the result, if any, should be cached.
:param str host: the host to find
:param service: the service type or list of service types if known to search for
:type service: str or list[str]
:param str deviceDefinitionURL: if provided it is used to skip a first device discovery
:param float timeout: the time to wait for each retry
:param int retries: the amount of times how often the device is tried to discover
:param str ipAddress: the multicast ip address to discover devices
:param int port: the port to discover devices
:param str proxies: proxy definition as defined here:
`Proxy definition <http://docs.python-requests.org/en/latest/user/advanced/#proxies>`_
:return: If the device has been found the response is returned, otherwise None
:rtype: DiscoveryResponse
:raises ValueError: if problems with reading or parsing the xml device definition occur
:raises requests.exceptions.ConnectionError: when the device definitions can not be downloaded
:raises requests.exceptions.ConnectTimeout: when download time out
Example:
::
proxies = {"http": "http://localhost:8888"}
result = discoverParticularHost("192.168.0.1", proxies=proxies)
if result is not None:
print("Host: " + result.locationHost + " Port: " + result.locationPort + " Device definitions: " + \\
result.location)
.. seealso::
:class:`~simpletr64.DiscoveryResponse`, :meth:`~simpletr64.Discover.discover` | [
"Discover",
"a",
"particular",
"host",
"and",
"find",
"the",
"best",
"response",
"."
] | python | train |
frnsys/broca | broca/tokenize/keyword/apriori.py | https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/tokenize/keyword/apriori.py#L21-L62 | def tokenize(self, docs):
"""
The first pass consists of converting documents
into "transactions" (sets of their tokens)
and the initial frequency/support filtering.
Then iterate until we close in on a final set.
`docs` can be any iterator or generator so long as it yields lists.
Each list represents a document (i.e. is a list of tokens).
For example, it can be a list of lists of nouns and noun phrases if trying
to identify aspects, where each list represents a sentence or document.
`min_sup` defines the minimum frequency (as a ratio over the total) necessary to
keep a candidate.
"""
if self.min_sup < 1/len(docs):
raise Exception('`min_sup` must be greater than or equal to `1/len(docs)`.')
# First pass
candidates = set()
transactions = []
# Use nouns and noun phrases.
for doc in POSTokenizer().tokenize(docs):
transaction = set(doc)
candidates = candidates.union({(t,) for t in transaction})
transactions.append(transaction)
freq_set = filter_support(candidates, transactions, self.min_sup)
# Iterate
k = 2
last_set = set()
    while freq_set:  # loop until no candidates survive the support filter
last_set = freq_set
cands = generate_candidates(freq_set, k)
freq_set = filter_support(cands, transactions, self.min_sup)
k += 1
# Map documents to their keywords.
keywords = flatten(last_set)
return prune([[kw for kw in keywords if kw in doc] for doc in docs]) | [
"def",
"tokenize",
"(",
"self",
",",
"docs",
")",
":",
"if",
"self",
".",
"min_sup",
"<",
"1",
"/",
"len",
"(",
"docs",
")",
":",
"raise",
"Exception",
"(",
"'`min_sup` must be greater than or equal to `1/len(docs)`.'",
")",
"# First pass",
"candidates",
"=",
... | The first pass consists of converting documents
into "transactions" (sets of their tokens)
and the initial frequency/support filtering.
Then iterate until we close in on a final set.
`docs` can be any iterator or generator so long as it yields lists.
Each list represents a document (i.e. is a list of tokens).
For example, it can be a list of lists of nouns and noun phrases if trying
to identify aspects, where each list represents a sentence or document.
`min_sup` defines the minimum frequency (as a ratio over the total) necessary to
keep a candidate. | [
"The",
"first",
"pass",
"consists",
"of",
"converting",
"documents",
"into",
"transactions",
"(",
"sets",
"of",
"their",
"tokens",
")",
"and",
"the",
"initial",
"frequency",
"/",
"support",
"filtering",
"."
] | python | train |
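`filter_support` is an imported helper not shown in this row; under a straightforward reading it keeps an itemset only if the fraction of transactions containing it reaches `min_sup`. A self-contained sketch of what such a filter could look like, with invented transactions:

```python
def filter_support(candidates, transactions, min_sup):
    kept = set()
    for cand in candidates:
        # support = fraction of transactions that contain the whole itemset
        support = sum(1 for t in transactions if set(cand) <= t) / len(transactions)
        if support >= min_sup:
            kept.add(cand)
    return kept


transactions = [{"battery", "screen"}, {"battery", "price"}, {"battery"}]
candidates = {("battery",), ("screen",), ("price",)}
print(filter_support(candidates, transactions, min_sup=2 / 3))  # {('battery',)}
```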
shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py#L69-L84 | def visitValueSetValue(self, ctx: ShExDocParser.ValueSetValueContext):
""" valueSetValue: iriRange | literalRange | languageRange |
'.' (iriExclusion+ | literalExclusion+ | languageExclusion+) """
if ctx.iriRange() or ctx.literalRange() or ctx.languageRange():
self.visitChildren(ctx)
else: # '.' branch - wild card with exclusions
if ctx.iriExclusion():
vs_value = IriStemRange(Wildcard(), [])
self._iri_exclusions(vs_value, ctx.iriExclusion())
elif ctx.literalExclusion():
vs_value = LiteralStemRange(Wildcard(), [])
self._literal_exclusions(vs_value, ctx.literalExclusion())
else:
vs_value = LanguageStemRange(Wildcard(), [])
self._language_exclusions(vs_value, ctx.languageExclusion())
self.nodeconstraint.values.append(vs_value) | [
"def",
"visitValueSetValue",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"ValueSetValueContext",
")",
":",
"if",
"ctx",
".",
"iriRange",
"(",
")",
"or",
"ctx",
".",
"literalRange",
"(",
")",
"or",
"ctx",
".",
"languageRange",
"(",
")",
":",
"self... | valueSetValue: iriRange | literalRange | languageRange |
'.' (iriExclusion+ | literalExclusion+ | languageExclusion+) | [
"valueSetValue",
":",
"iriRange",
"|",
"literalRange",
"|",
"languageRange",
"|",
".",
"(",
"iriExclusion",
"+",
"|",
"literalExclusion",
"+",
"|",
"languageExclusion",
"+",
")"
] | python | train |
aparo/pyes | pyes/mappings.py | https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/mappings.py#L489-L520 | def get_properties_by_type(self, type, recursive=True, parent_path=""):
"""
Returns a sorted list of fields that match the type.
        :param type: the type of the field, "string", "integer", or a list of types
        :param recursive: recurse to sub objects
        :returns: a sorted list of fields that match the type
"""
if parent_path:
parent_path += "."
if isinstance(type, str):
if type == "*":
type = set(MAPPING_NAME_TYPE.keys()) - set(["nested", "multi_field", "multifield"])
else:
type = [type]
properties = []
for prop in list(self.properties.values()):
if prop.type in type:
properties.append((parent_path + prop.name, prop))
continue
elif prop.type == "multi_field" and prop.name in prop.fields and prop.fields[prop.name].type in type:
properties.append((parent_path + prop.name, prop))
continue
if not recursive:
continue
if prop.type in ["nested", "object"]:
properties.extend(
prop.get_properties_by_type(type, recursive=recursive, parent_path=parent_path + prop.name))
return sorted(properties) | [
"def",
"get_properties_by_type",
"(",
"self",
",",
"type",
",",
"recursive",
"=",
"True",
",",
"parent_path",
"=",
"\"\"",
")",
":",
"if",
"parent_path",
":",
"parent_path",
"+=",
"\".\"",
"if",
"isinstance",
"(",
"type",
",",
"str",
")",
":",
"if",
"typ... | Returns a sorted list of fields that match the type.
:param type: the type of the field, "string", "integer", or a list of types
:param recursive: recurse to sub objects
:returns: a sorted list of fields that match the type | [
"Returns",
"a",
"sorted",
"list",
"of",
"fields",
"that",
"match",
"the",
"type",
"."
] | python | train |
tanghaibao/jcvi | jcvi/annotation/ahrd.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/ahrd.py#L248-L528 | def fix_text(s, ignore_sym_pat=False):
if not ignore_sym_pat:
# Fix descriptions like D7TDB1 (
s = re.sub("([A-Z0-9]){6} \(", "", s)
s = s.split(";")[0]
# Fix parantheses containing names
s = s.translate(None, "[]")
s = s.replace("(-)", "[-]")
s = s.replace("(+)", "[+]")
s = s.replace("(Uncharacterized protein)", "")
if not ignore_sym_pat:
s = s.translate(None, "()")
# fix minor typos, seen in `autonaming` output
# change 'protei ' to 'protein '
# change 'hypthetical' to 'hypothetical'
# fix string starting with 'ytochrome'
if 'protei ' in s: s = s.replace('protei ', 'protein ')
if 'hypthetical' in s: s = s.replace('hypthetical', 'hypothetical')
if s.startswith('ytochrome'): s = s.replace('ytochrome', 'cytochrome')
# before trimming off at the first ";", check if name has glycosidic
# linkage information (e.g 1,3 or 1,4). If so, also check if multiple
# linkages are separated by ";". If so, replace ";" by "-"
m = re.findall(glycosidic_link_pat, s)
if m and ";" in s:
s = re.sub(";\s*", "-", s)
# remove underscore from description
s = re.sub("_", " ", s)
# Cellular locations
# Any word that matches e.g. AT5G54690
# Any word that matches e.g. Os02g0234800
# (fragment)
# UPF
# Remove 'DDB_G\d+' ID
# '_At[0-9]+g[0-9]+' to ''
for pat in (loc_pat, osg_pat, frag_pat, upf_pat, ddb_pat):
# below is a hack since word boundaries don't work on /
s = s.strip() + " "
s = re.sub(pat, "", s)
    # ’ => '
s = re.sub(apos_pat, "'", s)
# > => none
s = re.sub(gt_pat, "", s)
# reduce runs such as -- '''
s = re.sub(r"[-]+", "-", s)
s = re.sub(r"[']+", "'", s)
s = s.strip()
# -like to -like protein
s = re.sub(like_pat, "-like protein", s)
# 'repeat$' to 'repeat protein'
if re.search(repeat_pat, s):
s += "-containing protein"
# 'binding$' to 'binding protein'
if re.search(binding_pat, s):
s += " protein"
if re.match(Protein_pat, s):
s = re.sub(Protein_pat, "", s)
# 'domain$' to 'domain-containing protein'
if re.search(domain_pat, s):
s += "-containing protein"
if re.search(r"-domain", s):
s = re.sub(r"-domain", " domain", s)
if re.match(Protein_pat, s):
s = re.sub(Protein_pat, "", s)
# 'related$' to '-like protein'
if re.search(related_pat, s):
s = re.sub(related_pat, "-like protein", s)
if re.match(Protein_pat, s) and not re.match(r"Protein kinase", s):
s = re.sub(Protein_pat, "", s)
# '[0-9]+ homolog' to '-like protein'
if re.search(homolog_pat1, s):
s = re.sub(homolog_pat1, "-like protein", s)
if re.match(Protein_pat, s):
s = re.sub(Protein_pat, "", s)
# 'Protein\s+(.*)\s+homolog' to '$1-like protein'
match = re.search(homolog_pat2, s)
if match and not re.match(r"Protein kinase", s):
ret = match.group(1)
s = re.sub(homolog_pat2, ret + "-like protein", s)
s = re.sub(r"^\s+", "", s)
s = s.capitalize()
# 'homolog protein' to '-like protein'
# 'homologue$' to '-like protein'
# 'homolog$' to '-like protein'
for pat in (homolog_pat3, homolog_pat5, homolog_pat6):
if re.search(pat, s):
s = re.sub(pat, "-like protein", s)
# 'Agenet domain-containing protein / bromo-adjacent homology (BAH) domain-containing protein'
# to 'Agenet and bromo-adjacent homology (BAH) domain-containing protein'
if re.search(agenet_pat, s):
s = re.sub(agenet_pat, "Agenet and ", s)
# plural to singular
if re.search(plural_pat, s):
if (s.find('biogenesis') == -1 and s.find('Topors') == -1) or (not re.search(with_and_pat, s)):
s = re.sub(r"s$", "", s)
# 'like_TBP' or 'likeTBP' to 'like TBP'
if re.search(tbp_pat, s):
s = re.sub(tbp_pat, "like TBP", s)
# 'protein protein' to 'protein'
if re.search(prot_pat, s):
s = re.sub(prot_pat, "protein", s)
# 'dimerisation' to 'dimerization'
if re.search(dimer_pat, s):
s = re.sub(dimer_pat, "dimerization", s)
# Any AHRD that matches e.g. "AT5G54690-like protein"
# Any AHRD that contains the words '^Belongs|^Encoded|^Expression|^highly'
for pat in (atg_pat, athila_pat1):
if re.search(pat, s):
s = Unknown
# remove 'arabidopsis[ thaliana]' and/or embedded Atg IDs
for pat in (atg_id_pat, athila_pat2, athila_pat3, athila_pat4):
# below is a hack since word boundaries don't work on /
s = s.strip() + " "
s = re.sub(pat, "", s)
# remove "\s+LENGTH=\d+" from TAIR deflines
if re.search(length_pat, s):
s = re.sub(length_pat, "", s)
# if name has a dot followed by a space (". ") in it and contains multiple
# parts separated by a comma, strip name starting from first occurrence of ","
if re.search(r"\. ", s):
if re.search(r",", s):
s = s.split(",")[0]
# if name contains any of the disallowed words,
# remove word occurrence from name
# if name contains references to any other organism, trim name upto
# that occurrence
for pat in (disallow_pat, organism_pat):
if re.search(pat, s):
s = re.sub(pat, "", s)
s = s.strip()
if not ignore_sym_pat:
# 'homolog \d+' to '-like protein'
if re.search(homolog_pat4, s):
s = re.sub(homolog_pat4, "", s)
# Trailing protein numeric copy (e.g. Myb 1)
if re.search(trail_pat, s):
s = re.sub(trail_pat, "", s)
# if name is entirely a gene symbol-like (all capital letters, maybe followed by numbers)
# add a "-like protein" at the end
if (re.search(sym_pat, s) or re.search(lc_sym_pat, s)) \
and not re.search(spada_pat, s):
s = s + "-like protein"
# if gene symbol in parantheses at EOL, remove symbol
if re.search(eol_sym_pat, s):
s = re.sub(eol_sym_pat, "", s)
# if name terminates at a symbol([^A-Za-z0-9_]), trim it off
if re.search(r"\W{1,}$", s) and not re.search(r"\)$", s):
s = re.sub("\W{1,}$", "", s)
if "uncharacterized" in s:
s = "uncharacterized protein"
# change sulfer to sulfur
if re.search(sulfer_pat, s):
s = re.sub(sulfer_pat, "sulfur", s)
# change sulph to sulf
if re.search(sulph_pat, s):
s = re.sub(sulph_pat, "sulf", s)
# change monoxy to monooxy
if re.search(monoxy_pat, s):
s = re.sub(monoxy_pat, "monooxy", s)
# change proteine to protein
if re.search(proteine_pat, s):
s = re.sub(proteine_pat, "protein", s)
# change signalling to signaling
if re.search(signalling_pat, s):
s = re.sub(signalling_pat, "signaling", s)
# change aluminium to aluminum
if re.search(aluminium_pat, s):
s = re.sub(aluminium_pat, "aluminum", s)
# change haem to heme
if re.search(haem_pat, s):
s = re.sub(haem_pat, "heme", s)
    # change haemo to hemo
if re.search(haemo_pat, s):
s = re.sub(haemo_pat, "hemo", s)
# change assessory to accessory
if re.search(assessory_pat, s):
s = re.sub(assessory_pat, "accessory", s)
# change -ise/-ised/-isation to -ize/-ized/-ization
match = re.search(ise_pat, s)
if match:
ret = match.group(1)
if match.group(2):
suff = match.group(2)
s = re.sub(ise_pat, "{0}ize{1}".format(ret, suff), s)
else:
s = re.sub(ise_pat, "{0}ize".format(ret), s)
match = re.search(isation_pat, s)
if match:
ret = match.group(1)
s = re.sub(isation_pat, "{0}ization".format(ret), s)
# change -bre to -ber
match = re.search(bre_pat, s)
if match:
ret = match.group(1)
s = re.sub(bre_pat, "{0}ber".format(ret), s)
if not s.startswith(Hypothetical):
# 'Candidate|Hypothetical|Novel|Predicted|Possible|Probable|Uncharacterized' to 'Putative'
if s.startswith('Uncharacterized') and any(pat in s for pat in ('UCP', 'UPF', 'protein')):
pass
else:
if re.search(put_pat, s):
s = re.sub(put_pat, "Putative", s)
"""
case (qr/^Histone-lysine/) { $ahrd =~ s/,\s+H\d{1}\s+lysine\-\d+//gs; }
"""
sl = s.lower()
# Any mention of `clone` or `contig` is not informative
if "clone" in sl or "contig" in sl:
s = Unknown
# All that's left is `protein` is not informative
if sl in ("protein", "protein, putative", ""):
s = Unknown
if Unknown.lower() in sl:
s = Unknown
if "FUNCTIONS IN".lower() in sl and "unknown" in sl:
s = Unknown
if "LOCATED IN".lower() in sl:
s = Unknown
s = re.sub(r"[,]*\s+putative$", "", s)
if s == Unknown or s.strip() == "protein":
s = Hypothetical
# Compact all spaces
s = ' '.join(s.split())
assert s.strip()
return s | [
"def",
"fix_text",
"(",
"s",
",",
"ignore_sym_pat",
"=",
"False",
")",
":",
"if",
"not",
"ignore_sym_pat",
":",
"# Fix descriptions like D7TDB1 (",
"s",
"=",
"re",
".",
"sub",
"(",
"\"([A-Z0-9]){6} \\(\"",
",",
"\"\"",
",",
"s",
")",
"s",
"=",
"s",
".",
... | case (qr/^Histone-lysine/) { $ahrd =~ s/,\s+H\d{1}\s+lysine\-\d+//gs; } | [
"case",
"(",
"qr",
"/",
"^Histone",
"-",
"lysine",
"/",
")",
"{",
"$ahrd",
"=",
"~",
"s",
"/",
"\\",
"s",
"+",
"H",
"\\",
"d",
"{",
"1",
"}",
"\\",
"s",
"+",
"lysine",
"\\",
"-",
"\\",
"d",
"+",
"//",
"gs",
";",
"}"
] | python | train |
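`fix_text` leans on dozens of module-level patterns (`sulfer_pat`, `like_pat`, ...) defined elsewhere in the file. A tiny self-contained sketch of two of its normalizations, with locally defined stand-in patterns:

```python
import re

sulfer_pat = re.compile(r"sulfer")        # stand-in for the module-level pattern
like_pat = re.compile(r"-like$")          # stand-in for the module-level pattern

s = "sulfer transferase-like"
s = re.sub(sulfer_pat, "sulfur", s)       # spelling normalization
s = re.sub(like_pat, "-like protein", s)  # '-like' suffix -> '-like protein'
print(s)   # sulfur transferase-like protein
```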
timothyb0912/pylogit | pylogit/bootstrap_sampler.py | https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L207-L242 | def create_deepcopied_groupby_dict(orig_df, obs_id_col):
"""
Will create a dictionary where each key corresponds to a unique value in
`orig_df[obs_id_col]` and each value corresponds to all of the rows of
`orig_df` where `orig_df[obs_id_col] == key`.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
Returns
-------
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
"""
# Get the observation id values
obs_id_vals = orig_df[obs_id_col].values
# Get the unique observation ids
unique_obs_ids = np.unique(obs_id_vals)
# Initialize the dictionary to be returned.
groupby_dict = {}
# Populate the dictionary with dataframes for each individual.
for obs_id in unique_obs_ids:
# Filter out only the rows corresponding to the current observation id.
desired_rows = obs_id_vals == obs_id
# Add the desired dataframe to the dictionary.
groupby_dict[obs_id] = orig_df.loc[desired_rows].copy(deep=True)
# Return the desired object.
return groupby_dict | [
"def",
"create_deepcopied_groupby_dict",
"(",
"orig_df",
",",
"obs_id_col",
")",
":",
"# Get the observation id values",
"obs_id_vals",
"=",
"orig_df",
"[",
"obs_id_col",
"]",
".",
"values",
"# Get the unique observation ids",
"unique_obs_ids",
"=",
"np",
".",
"unique",
... | Will create a dictionary where each key corresponds to a unique value in
`orig_df[obs_id_col]` and each value corresponds to all of the rows of
`orig_df` where `orig_df[obs_id_col] == key`.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
Returns
-------
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. | [
"Will",
"create",
"a",
"dictionary",
"where",
"each",
"key",
"corresponds",
"to",
"a",
"unique",
"value",
"in",
"orig_df",
"[",
"obs_id_col",
"]",
"and",
"each",
"value",
"corresponds",
"to",
"all",
"of",
"the",
"rows",
"of",
"orig_df",
"where",
"orig_df",
... | python | train |
saltstack/salt | salt/states/zabbix_template.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zabbix_template.py#L375-L699 | def present(name, params, static_host_list=True, **kwargs):
'''
    Creates a Zabbix Template object or, if it differs, updates it according to the defined parameters. See Zabbix API documentation.
Zabbix API version: >3.0
:param name: Zabbix Template name
:param params: Additional parameters according to Zabbix API documentation
:param static_host_list: If hosts assigned to the template are controlled
    only by this state or can also be assigned externally
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
.. note::
If there is a need to get a value from current zabbix online (e.g. ids of host groups you want the template
to be associated with), put a dictionary with two keys "query_object" and "query_name" instead of the value.
In this example we want to create template named "Testing Template", assign it to hostgroup Templates,
link it to two ceph nodes and create a macro.
.. note::
IMPORTANT NOTE:
Objects (except for template name) are identified by name (or by other key in some exceptional cases)
    so changing the name of an object means deleting the old one and creating a new one with a new ID !!!
.. note::
NOT SUPPORTED FEATURES:
- linked templates
- trigger dependencies
- groups and group prototypes for host prototypes
SLS Example:
.. code-block:: yaml
zabbix-template-present:
zabbix_template.present:
- name: Testing Template
# Do not touch existing assigned hosts
# True will detach all other hosts than defined here
- static_host_list: False
- params:
description: Template for Ceph nodes
groups:
# groups must already exist
            # template must be in at least one hostgroup
- groupid:
query_object: hostgroup
query_name: Templates
macros:
- macro: "{$CEPH_CLUSTER_NAME}"
value: ceph
hosts:
# hosts must already exist
- hostid:
query_object: host
query_name: ceph-osd-01
- hostid:
query_object: host
query_name: ceph-osd-02
# templates:
# Linked templates - not supported by state module but can be linked manually (will not be touched)
applications:
- name: Ceph OSD
items:
- name: Ceph OSD avg fill item
key_: ceph.osd_avg_fill
type: 2
value_type: 0
delay: 60
units: '%'
description: 'Average fill of OSD'
applications:
- applicationid:
query_object: application
query_name: Ceph OSD
triggers:
- description: "Ceph OSD filled more that 90%"
expression: "{{'{'}}Testing Template:ceph.osd_avg_fill.last(){{'}'}}>90"
priority: 4
discoveries:
- name: Mounted filesystem discovery
key_: vfs.fs.discovery
type: 0
delay: 60
itemprototypes:
- name: Free disk space on {{'{#'}}FSNAME}
key_: vfs.fs.size[{{'{#'}}FSNAME},free]
type: 0
value_type: 3
delay: 60
applications:
- applicationid:
query_object: application
query_name: Ceph OSD
triggerprototypes:
- description: "Free disk space is less than 20% on volume {{'{#'}}FSNAME{{'}'}}"
expression: "{{'{'}}Testing Template:vfs.fs.size[{{'{#'}}FSNAME},free].last(){{'}'}}<20"
graphs:
- name: Ceph OSD avg fill graph
width: 900
height: 200
graphtype: 0
gitems:
- color: F63100
itemid:
query_object: item
query_name: Ceph OSD avg fill item
screens:
- name: Ceph
hsize: 1
vsize: 1
screenitems:
- x: 0
y: 0
resourcetype: 0
resourceid:
query_object: graph
query_name: Ceph OSD avg fill graph
'''
zabbix_id_mapper = __salt__['zabbix.get_zabbix_id_mapper']()
dry_run = __opts__['test']
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
params['host'] = name
del CHANGE_STACK[:]
# Divide template yaml definition into parts
# - template definition itself
# - simple template components
# - components that have other sub-components
# (e.g. discoveries - where parent ID is needed in advance for sub-component manipulation)
template_definition = {}
template_components = {}
discovery_components = []
for attr in params:
if attr in TEMPLATE_COMPONENT_ORDER and six.text_type(attr) != 'discoveries':
template_components[attr] = params[attr]
elif six.text_type(attr) == 'discoveries':
d_rules = []
for d_rule in params[attr]:
d_rule_components = {'query_pid': {'component': attr,
'filter_val': d_rule[TEMPLATE_COMPONENT_DEF[attr]['filter']]}}
for proto_name in DISCOVERYRULE_COMPONENT_ORDER:
if proto_name in d_rule:
d_rule_components[proto_name] = d_rule[proto_name]
del d_rule[proto_name]
discovery_components.append(d_rule_components)
d_rules.append(d_rule)
template_components[attr] = d_rules
else:
template_definition[attr] = params[attr]
# if a component is not defined, it means to remove existing items during update (empty list)
for attr in TEMPLATE_COMPONENT_ORDER:
if attr not in template_components:
template_components[attr] = []
# if a component is not defined, it means to remove existing items during update (empty list)
for attr in TEMPLATE_RELATIONS:
template_definition[attr] = params[attr] if attr in params and params[attr] else []
defined_obj = __salt__['zabbix.substitute_params'](template_definition, **kwargs)
log.info('SUBSTITUTED template_definition: %s', six.text_type(json.dumps(defined_obj, indent=4)))
tmpl_get = __salt__['zabbix.run_query']('template.get',
{'output': 'extend', 'selectGroups': 'groupid', 'selectHosts': 'hostid',
'selectTemplates': 'templateid', 'selectMacros': 'extend',
'filter': {'host': name}},
**kwargs)
log.info('TEMPLATE get result: %s', six.text_type(json.dumps(tmpl_get, indent=4)))
existing_obj = __salt__['zabbix.substitute_params'](tmpl_get[0], **kwargs) \
if tmpl_get and len(tmpl_get) == 1 else False
if existing_obj:
template_id = existing_obj[zabbix_id_mapper['template']]
if not static_host_list:
# Prepare objects for comparison
defined_wo_hosts = defined_obj
if 'hosts' in defined_obj:
defined_hosts = defined_obj['hosts']
del defined_wo_hosts['hosts']
else:
defined_hosts = []
existing_wo_hosts = existing_obj
if 'hosts' in existing_obj:
existing_hosts = existing_obj['hosts']
del existing_wo_hosts['hosts']
else:
existing_hosts = []
# Compare host list separately from the rest of the object comparison since the merged list is needed for
# update
hosts_list = _diff_and_merge_host_list(defined_hosts, existing_hosts)
# Compare objects without hosts
diff_params = __salt__['zabbix.compare_params'](defined_wo_hosts, existing_wo_hosts, True)
# Merge comparison results together
if ('new' in diff_params and 'hosts' in diff_params['new']) or hosts_list:
diff_params['new']['hosts'] = hosts_list
else:
diff_params = __salt__['zabbix.compare_params'](defined_obj, existing_obj, True)
if diff_params['new']:
diff_params['new'][zabbix_id_mapper['template']] = template_id
diff_params['old'][zabbix_id_mapper['template']] = template_id
log.info('TEMPLATE: update params: %s', six.text_type(json.dumps(diff_params, indent=4)))
CHANGE_STACK.append({'component': 'template', 'action': 'update', 'params': diff_params['new']})
if not dry_run:
tmpl_update = __salt__['zabbix.run_query']('template.update', diff_params['new'], **kwargs)
log.info('TEMPLATE update result: %s', six.text_type(tmpl_update))
else:
CHANGE_STACK.append({'component': 'template', 'action': 'create', 'params': defined_obj})
if not dry_run:
tmpl_create = __salt__['zabbix.run_query']('template.create', defined_obj, **kwargs)
log.info('TEMPLATE create result: %s', tmpl_create)
if tmpl_create:
template_id = tmpl_create['templateids'][0]
log.info('\n\ntemplate_components: %s', json.dumps(template_components, indent=4))
log.info('\n\ndiscovery_components: %s', json.dumps(discovery_components, indent=4))
log.info('\n\nCurrent CHANGE_STACK: %s', six.text_type(json.dumps(CHANGE_STACK, indent=4)))
if existing_obj or not dry_run:
for component in TEMPLATE_COMPONENT_ORDER:
log.info('\n\n\n\n\nCOMPONENT: %s\n\n', six.text_type(json.dumps(component)))
# 1) query for components which belongs to the template
existing_c_list = _get_existing_template_c_list(component, template_id, **kwargs)
existing_c_list_subs = __salt__['zabbix.substitute_params'](existing_c_list, **kwargs) \
if existing_c_list else []
if component in template_components:
defined_c_list_subs = __salt__['zabbix.substitute_params'](
template_components[component],
extend_params={TEMPLATE_COMPONENT_DEF[component]['qselectpid']: template_id},
filter_key=TEMPLATE_COMPONENT_DEF[component]['filter'],
**kwargs)
else:
defined_c_list_subs = []
# 2) take lists of particular component and compare -> do create, update and delete actions
_manage_component(component, template_id, defined_c_list_subs, existing_c_list_subs, **kwargs)
log.info('\n\nCurrent CHANGE_STACK: %s', six.text_type(json.dumps(CHANGE_STACK, indent=4)))
for d_rule_component in discovery_components:
# query for parent id -> "query_pid": {"filter_val": "vfs.fs.discovery", "component": "discoveries"}
q_def = d_rule_component['query_pid']
c_def = TEMPLATE_COMPONENT_DEF[q_def['component']]
q_object = c_def['qtype']
q_params = dict(c_def['output'])
q_params.update({c_def['qselectpid']: template_id})
q_params.update({'filter': {c_def['filter']: q_def['filter_val']}})
parent_id = __salt__['zabbix.get_object_id_by_params'](q_object, q_params, **kwargs)
for proto_name in DISCOVERYRULE_COMPONENT_ORDER:
log.info('\n\n\n\n\nPROTOTYPE_NAME: %s\n\n', six.text_type(json.dumps(proto_name)))
existing_p_list = _get_existing_template_c_list(proto_name, parent_id, **kwargs)
existing_p_list_subs = __salt__['zabbix.substitute_params'](existing_p_list, **kwargs)\
if existing_p_list else []
if proto_name in d_rule_component:
defined_p_list_subs = __salt__['zabbix.substitute_params'](
d_rule_component[proto_name],
extend_params={c_def['qselectpid']: template_id},
**kwargs)
else:
defined_p_list_subs = []
_manage_component(proto_name,
parent_id,
defined_p_list_subs,
existing_p_list_subs,
template_id=template_id,
**kwargs)
log.info('\n\nCurrent CHANGE_STACK: %s', six.text_type(json.dumps(CHANGE_STACK, indent=4)))
if not CHANGE_STACK:
ret['result'] = True
ret['comment'] = 'Zabbix Template "{0}" already exists and corresponds to a definition.'.format(name)
else:
tmpl_action = next((item for item in CHANGE_STACK
if item['component'] == 'template' and item['action'] == 'create'), None)
if tmpl_action:
ret['result'] = True
if dry_run:
ret['comment'] = 'Zabbix Template "{0}" would be created.'.format(name)
ret['changes'] = {name: {'old': 'Zabbix Template "{0}" does not exist.'.format(name),
'new': 'Zabbix Template "{0}" would be created '
'according definition.'.format(name)}}
else:
ret['comment'] = 'Zabbix Template "{0}" created.'.format(name)
ret['changes'] = {name: {'old': 'Zabbix Template "{0}" did not exist.'.format(name),
'new': 'Zabbix Template "{0}" created according definition.'.format(name)}}
else:
ret['result'] = True
if dry_run:
ret['comment'] = 'Zabbix Template "{0}" would be updated.'.format(name)
ret['changes'] = {name: {'old': 'Zabbix Template "{0}" differs.'.format(name),
'new': 'Zabbix Template "{0}" would be updated '
'according definition.'.format(name)}}
else:
ret['comment'] = 'Zabbix Template "{0}" updated.'.format(name)
ret['changes'] = {name: {'old': 'Zabbix Template "{0}" differed.'.format(name),
'new': 'Zabbix Template "{0}" updated according definition.'.format(name)}}
return ret | [
"def",
"present",
"(",
"name",
",",
"params",
",",
"static_host_list",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"zabbix_id_mapper",
"=",
"__salt__",
"[",
"'zabbix.get_zabbix_id_mapper'",
"]",
"(",
")",
"dry_run",
"=",
"__opts__",
"[",
"'test'",
"]",
... | Creates a Zabbix Template object, or updates it if it differs from the defined parameters. See Zabbix API documentation.
Zabbix API version: >3.0
:param name: Zabbix Template name
:param params: Additional parameters according to Zabbix API documentation
:param static_host_list: If hosts assigned to the template are controlled
only by this state or can also be assigned externally
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
.. note::
If there is a need to get a value from current zabbix online (e.g. ids of host groups you want the template
to be associated with), put a dictionary with two keys "query_object" and "query_name" instead of the value.
In this example we want to create template named "Testing Template", assign it to hostgroup Templates,
link it to two ceph nodes and create a macro.
.. note::
IMPORTANT NOTE:
Objects (except for template name) are identified by name (or by other key in some exceptional cases)
so changing name of object means deleting old one and creating new one with new ID !!!
.. note::
NOT SUPPORTED FEATURES:
- linked templates
- trigger dependencies
- groups and group prototypes for host prototypes
SLS Example:
.. code-block:: yaml
zabbix-template-present:
zabbix_template.present:
- name: Testing Template
# Do not touch existing assigned hosts
# True will detach all other hosts than defined here
- static_host_list: False
- params:
description: Template for Ceph nodes
groups:
# groups must already exist
# template must be at least in one hostgroup
- groupid:
query_object: hostgroup
query_name: Templates
macros:
- macro: "{$CEPH_CLUSTER_NAME}"
value: ceph
hosts:
# hosts must already exist
- hostid:
query_object: host
query_name: ceph-osd-01
- hostid:
query_object: host
query_name: ceph-osd-02
# templates:
# Linked templates - not supported by state module but can be linked manually (will not be touched)
applications:
- name: Ceph OSD
items:
- name: Ceph OSD avg fill item
key_: ceph.osd_avg_fill
type: 2
value_type: 0
delay: 60
units: '%'
description: 'Average fill of OSD'
applications:
- applicationid:
query_object: application
query_name: Ceph OSD
triggers:
- description: "Ceph OSD filled more that 90%"
expression: "{{'{'}}Testing Template:ceph.osd_avg_fill.last(){{'}'}}>90"
priority: 4
discoveries:
- name: Mounted filesystem discovery
key_: vfs.fs.discovery
type: 0
delay: 60
itemprototypes:
- name: Free disk space on {{'{#'}}FSNAME}
key_: vfs.fs.size[{{'{#'}}FSNAME},free]
type: 0
value_type: 3
delay: 60
applications:
- applicationid:
query_object: application
query_name: Ceph OSD
triggerprototypes:
- description: "Free disk space is less than 20% on volume {{'{#'}}FSNAME{{'}'}}"
expression: "{{'{'}}Testing Template:vfs.fs.size[{{'{#'}}FSNAME},free].last(){{'}'}}<20"
graphs:
- name: Ceph OSD avg fill graph
width: 900
height: 200
graphtype: 0
gitems:
- color: F63100
itemid:
query_object: item
query_name: Ceph OSD avg fill item
screens:
- name: Ceph
hsize: 1
vsize: 1
screenitems:
- x: 0
y: 0
resourcetype: 0
resourceid:
query_object: graph
query_name: Ceph OSD avg fill graph | [
"Creates",
"Zabbix",
"Template",
"object",
"or",
"if",
"differs",
"update",
"it",
"according",
"defined",
"parameters",
".",
"See",
"Zabbix",
"API",
"documentation",
"."
] | python | train |
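The `query_object`/`query_name` lookup described in the docstring above amounts to a recursive walk over the defined parameters. A minimal standalone sketch of that idea — `lookup` is a hypothetical stand-in for the real `zabbix.get_object_id_by_params` call, and the data is illustrative:

```python
# Replace any {"query_object": ..., "query_name": ...} dict with the ID a
# lookup function returns; every other part of the structure is copied as-is.
def substitute_params(obj, lookup):
    if isinstance(obj, dict):
        if set(obj) == {"query_object", "query_name"}:
            return lookup(obj["query_object"], obj["query_name"])
        return {k: substitute_params(v, lookup) for k, v in obj.items()}
    if isinstance(obj, list):
        return [substitute_params(v, lookup) for v in obj]
    return obj

defined = {"groups": [{"groupid": {"query_object": "hostgroup",
                                   "query_name": "Templates"}}]}
print(substitute_params(defined, lambda obj, name: 42))
# {'groups': [{'groupid': 42}]}
```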
Jajcus/pyxmpp2 | pyxmpp2/ext/muc/muc.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muc.py#L848-L895 | def join(self, room, nick, handler, password = None, history_maxchars = None,
history_maxstanzas = None, history_seconds = None, history_since = None):
"""
Create and return a new room state object and request joining
to a MUC room.
:Parameters:
- `room`: the name of a room to be joined
- `nick`: the nickname to be used in the room
- `handler`: an object to handle room events.
- `password`: password for the room, if any
- `history_maxchars`: limit of the total number of characters in
history.
- `history_maxstanzas`: limit of the total number of messages in
history.
- `history_seconds`: send only messages received in the last
`history_seconds` seconds.
- `history_since`: Send only the messages received since the
dateTime specified (UTC).
:Types:
- `room`: `JID`
- `nick`: `unicode`
- `handler`: `MucRoomHandler`
- `password`: `unicode`
- `history_maxchars`: `int`
- `history_maxstanzas`: `int`
- `history_seconds`: `int`
- `history_since`: `datetime.datetime`
:return: the room state object created.
:returntype: `MucRoomState`
"""
if not room.node or room.resource:
raise ValueError("Invalid room JID")
room_jid = JID(room.node, room.domain, nick)
cur_rs = self.rooms.get(room_jid.bare().as_unicode())
if cur_rs and cur_rs.joined:
raise RuntimeError("Room already joined")
rs=MucRoomState(self, self.stream.me, room_jid, handler)
self.rooms[room_jid.bare().as_unicode()]=rs
rs.join(password, history_maxchars, history_maxstanzas,
history_seconds, history_since)
return rs | [
"def",
"join",
"(",
"self",
",",
"room",
",",
"nick",
",",
"handler",
",",
"password",
"=",
"None",
",",
"history_maxchars",
"=",
"None",
",",
"history_maxstanzas",
"=",
"None",
",",
"history_seconds",
"=",
"None",
",",
"history_since",
"=",
"None",
")",
... | Create and return a new room state object and request joining
to a MUC room.
:Parameters:
- `room`: the name of a room to be joined
- `nick`: the nickname to be used in the room
- `handler`: an object to handle room events.
- `password`: password for the room, if any
- `history_maxchars`: limit of the total number of characters in
history.
- `history_maxstanzas`: limit of the total number of messages in
history.
- `history_seconds`: send only messages received in the last
`history_seconds` seconds.
- `history_since`: Send only the messages received since the
dateTime specified (UTC).
:Types:
- `room`: `JID`
- `nick`: `unicode`
- `handler`: `MucRoomHandler`
- `password`: `unicode`
- `history_maxchars`: `int`
- `history_maxstanzas`: `int`
- `history_seconds`: `int`
- `history_since`: `datetime.datetime`
:return: the room state object created.
:returntype: `MucRoomState` | [
"Create",
"and",
"return",
"a",
"new",
"room",
"state",
"object",
"and",
"request",
"joining",
"to",
"a",
"MUC",
"room",
"."
] | python | valid |
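A hedged usage sketch for the `join()` call above — everything beyond the documented signature is an assumption: the import paths, the `message_received` callback name, and the presumption that `manager` is a `MucRoomManager` already bound to a connected stream.

```python
from pyxmpp2.jid import JID                     # import path assumed
from pyxmpp2.ext.muc import MucRoomHandler      # import path assumed

class EchoHandler(MucRoomHandler):
    """Illustrative handler; real code overrides only the callbacks it needs."""
    def message_received(self, user, stanza):   # callback name assumed
        print(user.nick, "said:", stanza.get_body())

# Join as "mybot" without requesting any room history.
state = manager.join(JID("room@conference.example.org"), "mybot",
                     EchoHandler(), history_maxstanzas=0)
```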
PmagPy/PmagPy | programs/demag_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L7637-L7685 | def on_zijd_mark(self, event):
"""
Get the mouse position on a double right click, find the interpretation
in range of the mouse position, then mark that interpretation bad or good.
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
"""
if not array(self.CART_rot).any():
return
pos = event.GetPosition()
width, height = self.canvas1.get_width_height()
pos[1] = height - pos[1]
xpick_data, ypick_data = pos
xdata_org = list(self.CART_rot[:, 0]) + list(self.CART_rot[:, 0])
ydata_org = list(-1*self.CART_rot[:, 1]) + list(-1*self.CART_rot[:, 2])
data_corrected = self.zijplot.transData.transform(
vstack([xdata_org, ydata_org]).T)
xdata, ydata = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4e0
index = None
for i, (x, y) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index != None:
steps = self.Data[self.s]['zijdblock']
if self.Data[self.s]['measurement_flag'][index % len(steps)] == "g":
self.mark_meas_bad(index % len(steps))
else:
self.mark_meas_good(index % len(steps))
pmag.magic_write(os.path.join(
self.WD, "magic_measurements.txt"), self.mag_meas_data, "magic_measurements")
self.recalculate_current_specimen_interpreatations()
if self.ie_open:
self.ie.update_current_fit_data()
self.calculate_high_levels_data()
self.update_selection() | [
"def",
"on_zijd_mark",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"array",
"(",
"self",
".",
"CART_rot",
")",
".",
"any",
"(",
")",
":",
"return",
"pos",
"=",
"event",
".",
"GetPosition",
"(",
")",
"width",
",",
"height",
"=",
"self",
".",
... | Get mouse position on double right click find the interpretation in
range of the mouse position, then mark that interpretation bad or good.
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit | [
"Get",
"mouse",
"position",
"on",
"double",
"right",
"click",
"find",
"the",
"interpretation",
"in",
"range",
"of",
"mouse",
"position",
"then",
"mark",
"that",
"interpretation",
"bad",
"or",
"good"
] | python | train |
chriso/gauged | gauged/writer.py | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L210-L223 | def parse_query(self, query):
"""Parse a query string and return an iterator which yields
(key, value)"""
writer = self.writer
if writer is None:
raise GaugedUseAfterFreeError
Gauged.writer_parse_query(writer, query)
position = 0
writer_contents = writer.contents
size = writer_contents.buffer_size
pointers = writer_contents.buffer
while position < size:
yield pointers[position], pointers[position+1]
position += 2 | [
"def",
"parse_query",
"(",
"self",
",",
"query",
")",
":",
"writer",
"=",
"self",
".",
"writer",
"if",
"writer",
"is",
"None",
":",
"raise",
"GaugedUseAfterFreeError",
"Gauged",
".",
"writer_parse_query",
"(",
"writer",
",",
"query",
")",
"position",
"=",
... | Parse a query string and return an iterator which yields
(key, value) | [
"Parse",
"a",
"query",
"string",
"and",
"return",
"an",
"iterator",
"which",
"yields",
"(",
"key",
"value",
")"
] | python | train |
mwhooker/jones | jones/jones.py | https://github.com/mwhooker/jones/blob/121e89572ca063f456b8e94cbb8cbee26c307a8f/jones/jones.py#L153-L171 | def set_config(self, env, conf, version):
"""
Set conf to env under service.
pass None to env for root.
"""
if not isinstance(conf, collections.Mapping):
raise ValueError("conf must be a collections.Mapping")
self._set(
self._get_env_path(env),
conf,
version
)
path = self._get_env_path(env)
"""Update env's children with new config."""
for child in zkutil.walk(self.zk, path):
self._update_view(Env(child[len(self.conf_path)+1:])) | [
"def",
"set_config",
"(",
"self",
",",
"env",
",",
"conf",
",",
"version",
")",
":",
"if",
"not",
"isinstance",
"(",
"conf",
",",
"collections",
".",
"Mapping",
")",
":",
"raise",
"ValueError",
"(",
"\"conf must be a collections.Mapping\"",
")",
"self",
".",... | Set conf to env under service.
pass None to env for root. | [
"Set",
"conf",
"to",
"env",
"under",
"service",
"."
] | python | train |
aestrivex/bctpy | bct/algorithms/physical_connectivity.py | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/physical_connectivity.py#L65-L162 | def rentian_scaling(A, xyz, n, seed=None):
'''
Physical Rentian scaling (or more simply Rentian scaling) is a property
of systems that are cost-efficiently embedded into physical space. It is
what is called a "topo-physical" property because it combines information
regarding the topological organization of the graph with information
about the physical placement of connections. Rentian scaling is present
in very large scale integrated circuits, the C. elegans neuronal network,
and morphometric and diffusion-based graphs of human anatomical networks.
Rentian scaling is determined by partitioning the system into cubes,
counting the number of nodes inside of each cube (N), and the number of
edges traversing the boundary of each cube (E). If the system displays
Rentian scaling, these two variables N and E will scale with one another
in loglog space. The Rent's exponent is given by the slope of log10(E)
vs. log10(N), and can be reported alone or can be compared to the
theoretical minimum Rent's exponent to determine how cost efficiently the
network has been embedded into physical space. Note: if a system displays
Rentian scaling, it does not automatically mean that the system is
cost-efficiently embedded (although it does suggest that). Validation
occurs when comparing to the theoretical minimum Rent's exponent for that
system.
Parameters
----------
A : NxN np.ndarray
unweighted, binary, symmetric adjacency matrix
xyz : Nx3 np.ndarray
vector of node placement coordinates
n : int
Number of partitions to compute. Each partition is a data point; you
want a large enough number to adequately compute Rent's exponent.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
N : Mx1 np.ndarray
Number of nodes in each of the M partitions
E : Mx1 np.ndarray
Number of edges traversing the boundary of each of the M partitions
Notes
-----
Subsequent Analysis:
Rentian scaling plots are then created by: figure; loglog(E,N,'*');
To determine the Rent's exponent, p, it is important not to use
partitions which may
be affected by boundary conditions. In Bassett et al. 2010 PLoS CB, only
partitions with N<M/2 were used in the estimation of the Rent's exponent.
Thus, we can define N_prime = N(find(N<M/2)) and
E_prime = E(find(N<M/2)).
Next we need to determine the slope of Eprime vs. Nprime in loglog space,
which is the Rent's
exponent. There are many ways of doing this with more or less statistical
rigor. Robustfit in MATLAB is one such option:
[b,stats] = robustfit(log10(N_prime),log10(E_prime))
Then the Rent's exponent is b(1,2) and the standard error of the
estimation is given by stats.se(1,2).
Note: n=5000 was used in Bassett et al. 2010 in PLoS CB.
'''
rng = get_rng(seed)
m = np.size(xyz, axis=0) # find number of nodes in system
# rescale coordinates so they are all greater than unity
xyzn = xyz - np.tile(np.min(xyz, axis=0) - 1, (m, 1))
# find the absolute minimum and maximum over all directions
nmax = np.max(xyzn)
nmin = np.min(xyzn)
count = 0
N = np.zeros((n,))
E = np.zeros((n,))
# create partitions and count the number of nodes inside the partition (n)
# and the number of edges traversing the boundary of the partition (e)
while count < n:
# define cube endpoints
randx = np.sort((1 + nmax - nmin) * rng.random_sample((2,)))
# find nodes in cube
l1 = xyzn[:, 0] > randx[0]
l2 = xyzn[:, 0] < randx[1]
l3 = xyzn[:, 1] > randx[0]
l4 = xyzn[:, 1] < randx[1]
l5 = xyzn[:, 2] > randx[0]
l6 = xyzn[:, 2] < randx[1]
L, = np.where((l1 & l2 & l3 & l4 & l5 & l6).flatten())
if np.size(L):
# count edges crossing at the boundary of the cube
E[count] = np.sum(A[np.ix_(L, np.setdiff1d(range(m), L))])
# count nodes inside of the cube
N[count] = np.size(L)
count += 1
return N, E | [
"def",
"rentian_scaling",
"(",
"A",
",",
"xyz",
",",
"n",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"m",
"=",
"np",
".",
"size",
"(",
"xyz",
",",
"axis",
"=",
"0",
")",
"# find number of nodes in system",
"# rescale... | Physical Rentian scaling (or more simply Rentian scaling) is a property
of systems that are cost-efficiently embedded into physical space. It is
what is called a "topo-physical" property because it combines information
regarding the topological organization of the graph with information
about the physical placement of connections. Rentian scaling is present
in very large scale integrated circuits, the C. elegans neuronal network,
and morphometric and diffusion-based graphs of human anatomical networks.
Rentian scaling is determined by partitioning the system into cubes,
counting the number of nodes inside of each cube (N), and the number of
edges traversing the boundary of each cube (E). If the system displays
Rentian scaling, these two variables N and E will scale with one another
in loglog space. The Rent's exponent is given by the slope of log10(E)
vs. log10(N), and can be reported alone or can be compared to the
theoretical minimum Rent's exponent to determine how cost efficiently the
network has been embedded into physical space. Note: if a system displays
Rentian scaling, it does not automatically mean that the system is
cost-efficiently embedded (although it does suggest that). Validation
occurs when comparing to the theoretical minimum Rent's exponent for that
system.
Parameters
----------
A : NxN np.ndarray
unweighted, binary, symmetric adjacency matrix
xyz : Nx3 np.ndarray
vector of node placement coordinates
n : int
Number of partitions to compute. Each partition is a data point; you
want a large enough number to adequately compute Rent's exponent.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
N : Mx1 np.ndarray
Number of nodes in each of the M partitions
E : Mx1 np.ndarray
Number of edges traversing the boundary of each of the M partitions
Notes
-----
Subsequent Analysis:
Rentian scaling plots are then created by: figure; loglog(E,N,'*');
To determine the Rent's exponent, p, it is important not to use
partitions which may
be affected by boundary conditions. In Bassett et al. 2010 PLoS CB, only
partitions with N<M/2 were used in the estimation of the Rent's exponent.
Thus, we can define N_prime = N(find(N<M/2)) and
E_prime = E(find(N<M/2)).
Next we need to determine the slope of Eprime vs. Nprime in loglog space,
which is the Rent's
exponent. There are many ways of doing this with more or less statistical
rigor. Robustfit in MATLAB is one such option:
[b,stats] = robustfit(log10(N_prime),log10(E_prime))
Then the Rent's exponent is b(1,2) and the standard error of the
estimation is given by stats.se(1,2).
Note: n=5000 was used in Bassett et al. 2010 in PLoS CB. | [
"Physical",
"Rentian",
"scaling",
"(",
"or",
"more",
"simply",
"Rentian",
"scaling",
")",
"is",
"a",
"property",
"of",
"systems",
"that",
"are",
"cost",
"-",
"efficiently",
"embedded",
"into",
"physical",
"space",
".",
"It",
"is",
"what",
"is",
"called",
"... | python | train |
nickjj/ansigenome | ansigenome/scan.py | https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L324-L337 | def tally_role_columns(self):
"""
Sum up all of the stat columns.
"""
totals = self.report["totals"]
roles = self.report["roles"]
totals["dependencies"] = sum(roles[item]
["total_dependencies"] for item in roles)
totals["defaults"] = sum(roles[item]
["total_defaults"] for item in roles)
totals["facts"] = sum(roles[item]["total_facts"] for item in roles)
totals["files"] = sum(roles[item]["total_files"] for item in roles)
totals["lines"] = sum(roles[item]["total_lines"] for item in roles) | [
"def",
"tally_role_columns",
"(",
"self",
")",
":",
"totals",
"=",
"self",
".",
"report",
"[",
"\"totals\"",
"]",
"roles",
"=",
"self",
".",
"report",
"[",
"\"roles\"",
"]",
"totals",
"[",
"\"dependencies\"",
"]",
"=",
"sum",
"(",
"roles",
"[",
"item",
... | Sum up all of the stat columns. | [
"Sum",
"up",
"all",
"of",
"the",
"stat",
"columns",
"."
] | python | train |
resync/resync | resync/list_base_with_index.py | https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/list_base_with_index.py#L136-L178 | def read_component_sitemap(
self, sitemapindex_uri, sitemap_uri, sitemap, sitemapindex_is_file):
"""Read a component sitemap of a Resource List with index.
Each component must be a sitemap with the
"""
if (sitemapindex_is_file):
if (not self.is_file_uri(sitemap_uri)):
# Attempt to map URI to local file
remote_uri = sitemap_uri
sitemap_uri = self.mapper.src_to_dst(remote_uri)
self.logger.info(
"Mapped %s to local file %s" %
(remote_uri, sitemap_uri))
else:
# The individual sitemaps should be at a URL (scheme/server/path)
# that the sitemapindex URL can speak authoritatively about
if (self.check_url_authority and
not UrlAuthority(sitemapindex_uri).has_authority_over(sitemap_uri)):
raise ListBaseIndexError(
"The sitemapindex (%s) refers to sitemap at a location it does not have authority over (%s)" %
(sitemapindex_uri, sitemap_uri))
try:
fh = URLopener().open(sitemap_uri)
self.num_files += 1
except IOError as e:
raise ListBaseIndexError(
"Failed to load sitemap from %s listed in sitemap index %s (%s)" %
(sitemap_uri, sitemapindex_uri, str(e)))
# Get the Content-Length if we can (works fine for local files)
try:
self.content_length = int(fh.info()['Content-Length'])
self.bytes_read += self.content_length
except KeyError:
# If we don't get a length then c'est la vie
pass
self.logger.info(
"Reading sitemap from %s (%d bytes)" %
(sitemap_uri, self.content_length))
component = sitemap.parse_xml(fh=fh, sitemapindex=False)
# Copy resources into self, check any metadata
for r in component:
self.resources.add(r) | [
"def",
"read_component_sitemap",
"(",
"self",
",",
"sitemapindex_uri",
",",
"sitemap_uri",
",",
"sitemap",
",",
"sitemapindex_is_file",
")",
":",
"if",
"(",
"sitemapindex_is_file",
")",
":",
"if",
"(",
"not",
"self",
".",
"is_file_uri",
"(",
"sitemap_uri",
")",
... | Read a component sitemap of a Resource List with index.
Each component must be a sitemap with the | [
"Read",
"a",
"component",
"sitemap",
"of",
"a",
"Resource",
"List",
"with",
"index",
"."
] | python | train |
ascribe/pyspool | spool/spoolex.py | https://github.com/ascribe/pyspool/blob/f8b10df1e7d2ea7950dde433c1cb6d5225112f4f/spool/spoolex.py#L121-L135 | def chain(tree, edition_number):
"""
Args:
tree (dict): Tree history of all editions of a piece.
edition_number (int): The edition number to check for.
In the case of a piece (master edition), an empty
string (``''``) or zero (``0``) can be passed.
Returns:
list: The chain of ownership of a particular
edition of the piece ordered by time.
"""
# return the chain for an edition_number sorted by the timestamp
return sorted(tree.get(edition_number, []), key=lambda d: d['timestamp_utc']) | [
"def",
"chain",
"(",
"tree",
",",
"edition_number",
")",
":",
"# return the chain for an edition_number sorted by the timestamp",
"return",
"sorted",
"(",
"tree",
".",
"get",
"(",
"edition_number",
",",
"[",
"]",
")",
",",
"key",
"=",
"lambda",
"d",
":",
"d",
... | Args:
tree (dict): Tree history of all editions of a piece.
edition_number (int): The edition number to check for.
In the case of a piece (master edition), an empty
string (``''``) or zero (``0``) can be passed.
Returns:
list: The chain of ownership of a particular
edition of the piece ordered by time. | [
"Args",
":",
"tree",
"(",
"dict",
")",
":",
"Tree",
"history",
"of",
"all",
"editions",
"of",
"a",
"piece",
".",
"edition_number",
"(",
"int",
")",
":",
"The",
"edition",
"number",
"to",
"check",
"for",
".",
"In",
"the",
"case",
"of",
"a",
"piece",
... | python | train |
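The sort at the heart of `chain()` is easy to verify in isolation; a self-contained toy tree (field names other than `timestamp_utc` are made up):

```python
tree = {
    1: [{"action": "transfer", "timestamp_utc": 300},
        {"action": "register", "timestamp_utc": 100},
        {"action": "consign",  "timestamp_utc": 200}],
}
chain = sorted(tree.get(1, []), key=lambda d: d["timestamp_utc"])
print([e["action"] for e in chain])   # ['register', 'consign', 'transfer']
# A missing edition number simply yields an empty chain:
print(sorted(tree.get(0, []), key=lambda d: d["timestamp_utc"]))   # []
```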
abilian/abilian-core | abilian/web/tags/extension.py | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/tags/extension.py#L90-L97 | def entity_tags_form(self, entity, ns=None):
"""Construct a form class with a field for tags in namespace `ns`."""
if ns is None:
ns = self.entity_default_ns(entity)
field = TagsField(label=_l("Tags"), ns=ns)
cls = type("EntityNSTagsForm", (_TagsForm,), {"tags": field})
return cls | [
"def",
"entity_tags_form",
"(",
"self",
",",
"entity",
",",
"ns",
"=",
"None",
")",
":",
"if",
"ns",
"is",
"None",
":",
"ns",
"=",
"self",
".",
"entity_default_ns",
"(",
"entity",
")",
"field",
"=",
"TagsField",
"(",
"label",
"=",
"_l",
"(",
"\"Tags\... | Construct a form class with a field for tags in namespace `ns`. | [
"Construct",
"a",
"form",
"class",
"with",
"a",
"field",
"for",
"tags",
"in",
"namespace",
"ns",
"."
] | python | train |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L10221-L10238 | def reclat(rectan):
"""
Convert from rectangular coordinates to latitudinal coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reclat_c.html
:param rectan: Rectangular coordinates of a point.
:type rectan: 3-Element Array of floats
:return: Distance from the origin, Longitude in radians, Latitude in radians
:rtype: tuple
"""
rectan = stypes.toDoubleVector(rectan)
radius = ctypes.c_double(0)
longitude = ctypes.c_double(0)
latitude = ctypes.c_double(0)
libspice.reclat_c(rectan, ctypes.byref(radius), ctypes.byref(longitude),
ctypes.byref(latitude))
return radius.value, longitude.value, latitude.value | [
"def",
"reclat",
"(",
"rectan",
")",
":",
"rectan",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"rectan",
")",
"radius",
"=",
"ctypes",
".",
"c_double",
"(",
"0",
")",
"longitude",
"=",
"ctypes",
".",
"c_double",
"(",
"0",
")",
"latitude",
"=",
"ctypes"... | Convert from rectangular coordinates to latitudinal coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reclat_c.html
:param rectan: Rectangular coordinates of a point.
:type rectan: 3-Element Array of floats
:return: Distance from the origin, Longitude in radians, Latitude in radians
:rtype: tuple | [
"Convert",
"from",
"rectangular",
"coordinates",
"to",
"latitudinal",
"coordinates",
"."
] | python | train |
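For reference, the conversion that `reclat` delegates to the CSPICE library reduces to a few standard-library calls; a pure-Python sketch of the same math (not the SPICE implementation itself):

```python
import math

def reclat_py(rectan):
    x, y, z = rectan
    radius = math.sqrt(x * x + y * y + z * z)
    longitude = math.atan2(y, x)                  # radians in (-pi, pi]
    latitude = math.atan2(z, math.hypot(x, y))    # radians in [-pi/2, pi/2]
    return radius, longitude, latitude

print(reclat_py([1.0, 1.0, 0.0]))   # (1.4142..., 0.7853... i.e. 45 deg, 0.0)
```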
sammchardy/python-kucoin | kucoin/client.py | https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L377-L455 | def get_account_activity(self, account_id, start=None, end=None, page=None, limit=None):
"""Get list of account activity
https://docs.kucoin.com/#get-account-history
:param account_id: ID for account - from list_accounts()
:type account_id: string
:param start: (optional) Start time as unix timestamp
:type start: string
:param end: (optional) End time as unix timestamp
:type end: string
:param page: (optional) Current page - default 1
:type page: int
:param limit: (optional) Number of results to return - default 50
:type limit: int
.. code:: python
history = client.get_account_activity('5bd6e9216d99522a52e458d6')
history = client.get_account_activity('5bd6e9216d99522a52e458d6', start='1540296039000')
history = client.get_account_activity('5bd6e9216d99522a52e458d6', page=2, limit=10)
:returns: API Response
.. code-block:: python
{
"currentPage": 1,
"pageSize": 10,
"totalNum": 2,
"totalPage": 1,
"items": [
{
"currency": "KCS",
"amount": "0.0998",
"fee": "0",
"balance": "1994.040596",
"bizType": "withdraw",
"direction": "in",
"createdAt": 1540296039000,
"context": {
"orderId": "5bc7f080b39c5c03286eef8a",
"currency": "BTC"
}
},
{
"currency": "KCS",
"amount": "0.0998",
"fee": "0",
"balance": "1994.140396",
"bizType": "trade exchange",
"direction": "in",
"createdAt": 1540296039000,
"context": {
"orderId": "5bc7f080b39c5c03286eef8e",
"tradeId": "5bc7f080b3949c03286eef8a",
"symbol": "BTC-USD"
}
}
]
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {}
if start:
data['startAt'] = start
if end:
data['endAt'] = end
if page:
data['currentPage'] = page
if limit:
data['pageSize'] = limit
return self._get('accounts/{}/ledgers'.format(account_id), True, data=data) | [
"def",
"get_account_activity",
"(",
"self",
",",
"account_id",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"page",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"data",
"=",
"{",
"}",
"if",
"start",
":",
"data",
"[",
"'startAt'",
"... | Get list of account activity
https://docs.kucoin.com/#get-account-history
:param account_id: ID for account - from list_accounts()
:type account_id: string
:param start: (optional) Start time as unix timestamp
:type start: string
:param end: (optional) End time as unix timestamp
:type end: string
:param page: (optional) Current page - default 1
:type page: int
:param limit: (optional) Number of results to return - default 50
:type limit: int
.. code:: python
history = client.get_account_activity('5bd6e9216d99522a52e458d6')
history = client.get_account_activity('5bd6e9216d99522a52e458d6', start='1540296039000')
history = client.get_account_activity('5bd6e9216d99522a52e458d6', page=2, limit=10)
:returns: API Response
.. code-block:: python
{
"currentPage": 1,
"pageSize": 10,
"totalNum": 2,
"totalPage": 1,
"items": [
{
"currency": "KCS",
"amount": "0.0998",
"fee": "0",
"balance": "1994.040596",
"bizType": "withdraw",
"direction": "in",
"createdAt": 1540296039000,
"context": {
"orderId": "5bc7f080b39c5c03286eef8a",
"currency": "BTC"
}
},
{
"currency": "KCS",
"amount": "0.0998",
"fee": "0",
"balance": "1994.140396",
"bizType": "trade exchange",
"direction": "in",
"createdAt": 1540296039000,
"context": {
"orderId": "5bc7f080b39c5c03286eef8e",
"tradeId": "5bc7f080b3949c03286eef8a",
"symbol": "BTC-USD"
}
}
]
}
:raises: KucoinResponseException, KucoinAPIException | [
"Get",
"list",
"of",
"account",
"activity"
] | python | train |
studionow/pybrightcove | pybrightcove/video.py | https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/video.py#L623-L630 | def get_status(video_id, _connection=None):
"""
Get the status of a video given the ``video_id`` parameter.
"""
c = _connection
if not c:
c = connection.APIConnection()
return c.post('get_upload_status', video_id=video_id) | [
"def",
"get_status",
"(",
"video_id",
",",
"_connection",
"=",
"None",
")",
":",
"c",
"=",
"_connection",
"if",
"not",
"c",
":",
"c",
"=",
"connection",
".",
"APIConnection",
"(",
")",
"return",
"c",
".",
"post",
"(",
"'get_upload_status'",
",",
"video_i... | Get the status of a video given the ``video_id`` parameter. | [
"Get",
"the",
"status",
"of",
"a",
"video",
"given",
"the",
"video_id",
"parameter",
"."
] | python | train |
glormph/msstitch | src/app/writers/tsv.py | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/writers/tsv.py#L1-L12 | def write_tsv(headerfields, features, outfn):
"""Writes header and generator of lines to tab separated file.
headerfields - list of field names in header in correct order
features - generates 1 list per line that belong to header
outfn - filename to output to. Overwritten if exists
"""
with open(outfn, 'w') as fp:
write_tsv_line_from_list(headerfields, fp)
for line in features:
write_tsv_line_from_list([str(line[field]) for field
in headerfields], fp) | [
"def",
"write_tsv",
"(",
"headerfields",
",",
"features",
",",
"outfn",
")",
":",
"with",
"open",
"(",
"outfn",
",",
"'w'",
")",
"as",
"fp",
":",
"write_tsv_line_from_list",
"(",
"headerfields",
",",
"fp",
")",
"for",
"line",
"in",
"features",
":",
"writ... | Writes header and generator of lines to tab separated file.
headerfields - list of field names in header in correct order
features - generates 1 list per line that belong to header
outfn - filename to output to. Overwritten if exists | [
"Writes",
"header",
"and",
"generator",
"of",
"lines",
"to",
"tab",
"separated",
"file",
"."
] | python | train |
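A small end-to-end sketch of calling `write_tsv` above; the field names and rows are illustrative, and the import path is a guess based on the repository layout shown.

```python
# Hypothetical import based on the repository layout above.
from app.writers.tsv import write_tsv

headerfields = ['peptide', 'score', 'protein']
features = ({'peptide': p, 'score': s, 'protein': prot}
            for p, s, prot in [('AAK', 0.9, 'P1'), ('GGR', 0.4, 'P2')])
write_tsv(headerfields, features, 'out.tsv')
# out.tsv then contains (tab-separated):
# peptide  score  protein
# AAK      0.9    P1
# GGR      0.4    P2
```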
delfick/harpoon | harpoon/actions.py | https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/actions.py#L172-L192 | def make_all(collector, **kwargs):
"""Creates all the images in layered order"""
configuration = collector.configuration
push = configuration["harpoon"].do_push
only_pushable = configuration["harpoon"].only_pushable
if push:
only_pushable = True
tag = kwargs.get("artifact", NotSpecified)
if tag is NotSpecified:
tag = configuration["harpoon"].tag
images = configuration["images"]
for layer in Builder().layered(images, only_pushable=only_pushable):
for _, image in layer:
if tag is not NotSpecified:
image.tag = tag
Builder().make_image(image, images, ignore_deps=True, ignore_parent=True)
print("Created image {0}".format(image.image_name))
if push and image.image_index:
Syncer().push(image) | [
"def",
"make_all",
"(",
"collector",
",",
"*",
"*",
"kwargs",
")",
":",
"configuration",
"=",
"collector",
".",
"configuration",
"push",
"=",
"configuration",
"[",
"\"harpoon\"",
"]",
".",
"do_push",
"only_pushable",
"=",
"configuration",
"[",
"\"harpoon\"",
"... | Creates all the images in layered order | [
"Creates",
"all",
"the",
"images",
"in",
"layered",
"order"
] | python | train |
cs50/lib50 | lib50/_api.py | https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L129-L202 | def files(patterns,
require_tags=("require",),
include_tags=("include",),
exclude_tags=("exclude",),
root=".",
always_exclude=("**/.git*", "**/.lfs*", "**/.c9*", "**/.~c9*")):
"""
Takes a list of lib50._config.TaggedValue and returns which files should be included and excluded from `root`.
Any pattern tagged with a tag
from include_tags will be included
from require_tags must be a single file, which will then be included; MissingFilesError is raised if it is missing
from exclude_tags will be excluded
Any pattern in always_exclude will always be excluded.
"""
require_tags = list(require_tags)
include_tags = list(include_tags)
exclude_tags = list(exclude_tags)
# Ensure every tag starts with !
for tags in [require_tags, include_tags, exclude_tags]:
for i, tag in enumerate(tags):
tags[i] = tag if tag.startswith("!") else "!" + tag
with cd(root):
# Include everything by default
included = _glob("*")
excluded = set()
if patterns:
missing_files = []
# Per line in files
for pattern in patterns:
# Include all files that are tagged with !require
if pattern.tag in require_tags:
file = str(Path(pattern.value))
if not Path(file).exists():
missing_files.append(file)
else:
try:
excluded.remove(file)
except KeyError:
pass
else:
included.add(file)
# Include all files that are tagged with !include
elif pattern.tag in include_tags:
new_included = _glob(pattern.value)
excluded -= new_included
included.update(new_included)
# Exclude all files that are tagged with !exclude
elif pattern.tag in exclude_tags:
new_excluded = _glob(pattern.value)
included -= new_excluded
excluded.update(new_excluded)
if missing_files:
raise MissingFilesError(missing_files)
# Exclude all files that match a pattern from always_exclude
for line in always_exclude:
included -= _glob(line)
# Exclude any files that are not valid utf8
invalid = set()
for file in included:
try:
file.encode("utf8")
except UnicodeEncodeError:
excluded.add(file.encode("utf8", "replace").decode())
invalid.add(file)
included -= invalid
return included, excluded | [
"def",
"files",
"(",
"patterns",
",",
"require_tags",
"=",
"(",
"\"require\"",
",",
")",
",",
"include_tags",
"=",
"(",
"\"include\"",
",",
")",
",",
"exclude_tags",
"=",
"(",
"\"exclude\"",
",",
")",
",",
"root",
"=",
"\".\"",
",",
"always_exclude",
"="... | Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`.
Any pattern tagged with a tag
from include_tags will be included
from require_tags must be a single file, which will then be included; MissingFilesError is raised if it is missing
from exclude_tags will be excluded
Any pattern in always_exclude will always be excluded. | [
"Takes",
"a",
"list",
"of",
"lib50",
".",
"_config",
".",
"TaggedValue",
"returns",
"which",
"files",
"should",
"be",
"included",
"and",
"excluded",
"from",
"root",
".",
"Any",
"pattern",
"tagged",
"with",
"a",
"tag",
"from",
"include_tags",
"will",
"be",
... | python | train |
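The tag handling above is set algebra over glob matches. A standalone miniature using `fnmatch` in place of the module's `_glob` helper (file names invented):

```python
import fnmatch

files = {"a.py", "b.py", "notes.txt", ".gitignore"}
included, excluded = set(files), set()   # everything included by default

for tag, pattern in [("!exclude", "*"), ("!include", "*.py"), ("!exclude", "b.*")]:
    matches = set(fnmatch.filter(files, pattern))
    if tag == "!include":
        included |= matches
        excluded -= matches
    else:                                # "!exclude"
        excluded |= matches
        included -= matches

print(sorted(included))                  # ['a.py']
```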
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_misc.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_misc.py#L140-L145 | def cmd_reboot(self, args):
'''reboot autopilot'''
if len(args) > 0 and args[0] == 'bootloader':
self.master.reboot_autopilot(True)
else:
self.master.reboot_autopilot() | [
"def",
"cmd_reboot",
"(",
"self",
",",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"0",
"and",
"args",
"[",
"0",
"]",
"==",
"'bootloader'",
":",
"self",
".",
"master",
".",
"reboot_autopilot",
"(",
"True",
")",
"else",
":",
"self",
".",
... | reboot autopilot | [
"reboot",
"autopilot"
] | python | train |
kislyuk/aegea | aegea/packages/github3/repos/repo.py | https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L600-L618 | def create_hook(self, name, config, events=['push'], active=True):
"""Create a hook on this repository.
:param str name: (required), name of the hook
:param dict config: (required), key-value pairs which act as settings
for this hook
:param list events: (optional), events the hook is triggered for
:param bool active: (optional), whether the hook is actually
triggered
:returns: :class:`Hook <github3.repos.hook.Hook>` if successful,
otherwise None
"""
json = None
if name and config and isinstance(config, dict):
url = self._build_url('hooks', base_url=self._api)
data = {'name': name, 'config': config, 'events': events,
'active': active}
json = self._json(self._post(url, data=data), 201)
return Hook(json, self) if json else None | [
"def",
"create_hook",
"(",
"self",
",",
"name",
",",
"config",
",",
"events",
"=",
"[",
"'push'",
"]",
",",
"active",
"=",
"True",
")",
":",
"json",
"=",
"None",
"if",
"name",
"and",
"config",
"and",
"isinstance",
"(",
"config",
",",
"dict",
")",
"... | Create a hook on this repository.
:param str name: (required), name of the hook
:param dict config: (required), key-value pairs which act as settings
for this hook
:param list events: (optional), events the hook is triggered for
:param bool active: (optional), whether the hook is actually
triggered
:returns: :class:`Hook <github3.repos.hook.Hook>` if successful,
otherwise None | [
"Create",
"a",
"hook",
"on",
"this",
"repository",
"."
] | python | train |
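A hypothetical call to `create_hook` above: the `'web'` hook name and the `url`/`content_type` config keys follow GitHub's webhook conventions, and `repo` is assumed to be a `Repository` obtained from an authenticated client.

```python
hook = repo.create_hook(
    name='web',
    config={'url': 'https://ci.example.org/github', 'content_type': 'json'},
    events=['push', 'pull_request'])
if hook is None:
    print('hook creation failed')
```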
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L98-L121 | def getNextSample(self, V):
"""
Generate the next sample by randomly shuffling candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
"""
positions = range(0, len(self.wmg))
randPoss = random.sample(positions, self.shuffleSize)
flipSet = copy.deepcopy(randPoss)
randPoss.sort()
W = copy.deepcopy(V)
for j in range(0, self.shuffleSize):
W[randPoss[j]] = V[flipSet[j]]
# Check whether we should change to the new ranking.
prMW = 1.0
prMV = 1.0
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0,(prMW/prMV)*acceptanceRatio)
if random.random() <= prob:
V = W
return V | [
"def",
"getNextSample",
"(",
"self",
",",
"V",
")",
":",
"positions",
"=",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"wmg",
")",
")",
"randPoss",
"=",
"random",
".",
"sample",
"(",
"positions",
",",
"self",
".",
"shuffleSize",
")",
"flipSet",
... | Generate the next sample by randomly shuffling candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample. | [
"Generate",
"the",
"next",
"sample",
"by",
"randomly",
"shuffling",
"candidates",
"."
] | python | train |
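`getNextSample` above is a Metropolis step: propose a shuffled ranking `W`, then accept it with probability `min(1, ratio)`. A generic, self-contained sketch of that accept/reject core (the toy target density is illustrative, not the voting model):

```python
import math
import random

def metropolis_step(current, propose, acceptance_ratio, rng=random):
    proposal = propose(current)
    if rng.random() <= min(1.0, acceptance_ratio(current, proposal)):
        return proposal          # accept the proposal
    return current               # reject it and keep the old sample

# Toy target pi(x) ~ exp(-x*x); ratio pi(y)/pi(x) = exp(x*x - y*y)
state = 3.0
for _ in range(1000):
    state = metropolis_step(state,
                            propose=lambda x: x + random.uniform(-1, 1),
                            acceptance_ratio=lambda x, y: math.exp(x*x - y*y))
print(round(state, 1))           # typically settles near 0
```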
CivicSpleen/ambry | ambry/exporters/ckan/core.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/exporters/ckan/core.py#L141-L147 | def is_exported(bundle):
""" Returns True if dataset is already exported to CKAN. Otherwise returns False. """
if not ckan:
raise EnvironmentError(MISSING_CREDENTIALS_MSG)
params = {'q': 'name:{}'.format(bundle.dataset.vid.lower())}
resp = ckan.action.package_search(**params)
return len(resp['results']) > 0 | [
"def",
"is_exported",
"(",
"bundle",
")",
":",
"if",
"not",
"ckan",
":",
"raise",
"EnvironmentError",
"(",
"MISSING_CREDENTIALS_MSG",
")",
"params",
"=",
"{",
"'q'",
":",
"'name:{}'",
".",
"format",
"(",
"bundle",
".",
"dataset",
".",
"vid",
".",
"lower",
... | Returns True if dataset is already exported to CKAN. Otherwise returns False. | [
"Returns",
"True",
"if",
"dataset",
"is",
"already",
"exported",
"to",
"CKAN",
".",
"Otherwise",
"returns",
"False",
"."
] | python | train |
scanny/python-pptx | pptx/oxml/chart/series.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/series.py#L168-L178 | def get_or_add_dPt_for_point(self, idx):
"""
Return the `c:dPt` child representing the visual properties of the
data point at index *idx*.
"""
matches = self.xpath('c:dPt[c:idx[@val="%d"]]' % idx)
if matches:
return matches[0]
dPt = self._add_dPt()
dPt.idx.val = idx
return dPt | [
"def",
"get_or_add_dPt_for_point",
"(",
"self",
",",
"idx",
")",
":",
"matches",
"=",
"self",
".",
"xpath",
"(",
"'c:dPt[c:idx[@val=\"%d\"]]'",
"%",
"idx",
")",
"if",
"matches",
":",
"return",
"matches",
"[",
"0",
"]",
"dPt",
"=",
"self",
".",
"_add_dPt",
... | Return the `c:dPt` child representing the visual properties of the
data point at index *idx*. | [
"Return",
"the",
"c",
":",
"dPt",
"child",
"representing",
"the",
"visual",
"properties",
"of",
"the",
"data",
"point",
"at",
"index",
"*",
"idx",
"*",
"."
] | python | train |
Danielhiversen/pyTibber | tibber/__init__.py | https://github.com/Danielhiversen/pyTibber/blob/114ebc3dd49f6affd93665b0862d4cbdea03e9ef/tibber/__init__.py#L541-L546 | async def rt_unsubscribe(self):
"""Unsubscribe to Tibber rt subscription."""
if self._subscription_id is None:
_LOGGER.error("Not subscribed.")
return
await self._tibber_control.sub_manager.unsubscribe(self._subscription_id) | [
"async",
"def",
"rt_unsubscribe",
"(",
"self",
")",
":",
"if",
"self",
".",
"_subscription_id",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"\"Not subscribed.\"",
")",
"return",
"await",
"self",
".",
"_tibber_control",
".",
"sub_manager",
".",
"unsubscrib... | Unsubscribe to Tibber rt subscription. | [
"Unsubscribe",
"to",
"Tibber",
"rt",
"subscription",
"."
] | python | valid |
insilichem/ommprotocol | ommprotocol/io.py | https://github.com/insilichem/ommprotocol/blob/7283fddba7203e5ac3542fdab41fc1279d3b444e/ommprotocol/io.py#L284-L309 | def from_charmm(cls, path, positions=None, forcefield=None, strict=True, **kwargs):
"""
Loads PSF Charmm structure from `path`. Requires `forcefield`.
Parameters
----------
path : str
Path to PSF file
forcefield : list of str
Paths to Charmm parameters files, such as *.par or *.str. REQUIRED
Returns
-------
psf : SystemHandler
SystemHandler with topology. Charmm parameters are embedded in
the `master` attribute.
"""
psf = CharmmPsfFile(path)
if strict and forcefield is None:
raise ValueError('PSF files require key `forcefield`.')
if strict and positions is None:
raise ValueError('PSF files require key `positions`.')
psf.parmset = CharmmParameterSet(*forcefield)
psf.loadParameters(psf.parmset)
return cls(master=psf, topology=psf.topology, positions=positions, path=path,
**kwargs) | [
"def",
"from_charmm",
"(",
"cls",
",",
"path",
",",
"positions",
"=",
"None",
",",
"forcefield",
"=",
"None",
",",
"strict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"psf",
"=",
"CharmmPsfFile",
"(",
"path",
")",
"if",
"strict",
"and",
"forcef... | Loads PSF Charmm structure from `path`. Requires `charmm_parameters`.
Parameters
----------
path : str
Path to PSF file
forcefield : list of str
Paths to Charmm parameters files, such as *.par or *.str. REQUIRED
Returns
-------
psf : SystemHandler
SystemHandler with topology. Charmm parameters are embedded in
the `master` attribute. | [
"Loads",
"PSF",
"Charmm",
"structure",
"from",
"path",
".",
"Requires",
"charmm_parameters",
"."
] | python | train |
dropbox/pygerduty | pygerduty/events.py | https://github.com/dropbox/pygerduty/blob/11b28bfb66306aa7fc2b95ab9df65eb97ea831cf/pygerduty/events.py#L57-L65 | def resolve_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
"""
return self.create_event(description, "resolve",
details, incident_key) | [
"def",
"resolve_incident",
"(",
"self",
",",
"incident_key",
",",
"description",
"=",
"None",
",",
"details",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_event",
"(",
"description",
",",
"\"resolve\"",
",",
"details",
",",
"incident_key",
")"
] | Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed. | [
"Causes",
"the",
"referenced",
"incident",
"to",
"enter",
"resolved",
"state",
".",
"Send",
"a",
"resolve",
"event",
"when",
"the",
"problem",
"that",
"caused",
"the",
"initial",
"trigger",
"has",
"been",
"fixed",
"."
] | python | train |
svinota/mdns | mdns/zeroconf.py | https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L425-L429 | def reset_ttl(self, other):
"""Sets this record's TTL and created time to that of
another record."""
self.created = other.created
self.ttl = other.ttl | [
"def",
"reset_ttl",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"created",
"=",
"other",
".",
"created",
"self",
".",
"ttl",
"=",
"other",
".",
"ttl"
] | Sets this record's TTL and created time to that of
another record. | [
"Sets",
"this",
"record",
"s",
"TTL",
"and",
"created",
"time",
"to",
"that",
"of",
"another",
"record",
"."
] | python | train |
dslackw/slpkg | slpkg/binary/install.py | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/binary/install.py#L208-L216 | def clear_masters(self):
"""Clear master packages if already exist in dependencies
or if added to install two or more times
"""
packages = []
for mas in Utils().remove_dbs(self.packages):
if mas not in self.dependencies:
packages.append(mas)
self.packages = packages | [
"def",
"clear_masters",
"(",
"self",
")",
":",
"packages",
"=",
"[",
"]",
"for",
"mas",
"in",
"Utils",
"(",
")",
".",
"remove_dbs",
"(",
"self",
".",
"packages",
")",
":",
"if",
"mas",
"not",
"in",
"self",
".",
"dependencies",
":",
"packages",
".",
... | Clear master packages if they already exist in dependencies
or if added to install two or more times | [
"Clear",
"master",
"packages",
"if",
"already",
"exist",
"in",
"dependencies",
"or",
"if",
"added",
"to",
"install",
"two",
"or",
"more",
"times"
] | python | train |
freevoid/django-datafilters | datafilters/views.py | https://github.com/freevoid/django-datafilters/blob/99051b3b3e97946981c0e9697576b0100093287c/datafilters/views.py#L33-L43 | def get_context_data(self, **kwargs):
"""
Add filter form to the context.
TODO: Currently we construct the filter form object twice - in
get_queryset and here, in get_context_data. Will need to figure out a
good way to eliminate extra initialization.
"""
context = super(FilterFormMixin, self).get_context_data(**kwargs)
context[self.context_filterform_name] = self.get_filter()
return context | [
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
"FilterFormMixin",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"context",
"[",
"self",
".",
"context_filterform_name",
"]"... | Add filter form to the context.
TODO: Currently we construct the filter form object twice - in
get_queryset and here, in get_context_data. Will need to figure out a
good way to eliminate extra initialization. | [
"Add",
"filter",
"form",
"to",
"the",
"context",
"."
] | python | train |
ontio/ontology-python-sdk | ontology/io/binary_writer.py | https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L337-L354 | def write_fixed_str(self, value, length):
"""
Write a string value to the stream.
Args:
value (str): value to write to the stream.
length (int): length of the string to write.
"""
towrite = value.encode('utf-8')
slen = len(towrite)
if slen > length:
raise SDKException(ErrorCode.param_err('string longer than fixed length: %s' % length))
self.write_bytes(towrite)
diff = length - slen
while diff > 0:
self.write_byte(0)
diff -= 1 | [
"def",
"write_fixed_str",
"(",
"self",
",",
"value",
",",
"length",
")",
":",
"towrite",
"=",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
"slen",
"=",
"len",
"(",
"towrite",
")",
"if",
"slen",
">",
"length",
":",
"raise",
"SDKException",
"(",
"ErrorCo... | Write a string value to the stream.
Args:
value (str): value to write to the stream.
length (int): length of the string to write. | [
"Write",
"a",
"string",
"value",
"to",
"the",
"stream",
"."
] | python | train |
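The pad-to-length loop in `write_fixed_str` above is equivalent to encoding once and zero-padding with `b'\x00'`; a standalone sketch of the same framing using `bytes.ljust`:

```python
def fixed_str_bytes(value, length):
    """Encode `value` as UTF-8 and zero-pad it to exactly `length` bytes."""
    raw = value.encode('utf-8')
    if len(raw) > length:
        raise ValueError('string longer than fixed length: %s' % length)
    return raw.ljust(length, b'\x00')

print(fixed_str_bytes('ONT', 8))   # b'ONT\x00\x00\x00\x00\x00'
```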
phn/angles | angles.py | https://github.com/phn/angles/blob/5c30ed7c3a7412177daaed180bf3b2351b287589/angles.py#L406-L477 | def sexa2deci(sign, hd, mm, ss, todeg=False):
"""Combine sexagesimal components into a decimal number.
Parameters
----------
sign : int
Sign of the number: 1 for +ve, -1 for negative.
hd : float
The hour or degree like part.
mm : float
The minute or arc-minute like part.
ss : float
The second or arc-second like part.
todeg : bool
If True then convert to degrees, assuming that the input value
is in hours. Default is False.
Returns
-------
d : float
The decimal equivalent of the sexagesimal number.
Raises
------
ValueError
This exception is raised if `sign` is not -1 or 1.
Notes
-----
The angle returned is::
sign * (hd + mm / 60.0 + ss / 3600.0)
In sexagesimal notation the sign applies to the whole quantity and
not to each part separately. So the `sign` is asked separately, and
applied to the whole quantity.
If the sexagesimal quantity is in hours, then we frequently want to
convert it into degrees. If the `todeg == True` then the given
value is assumed to be in hours, and the returned value will be in
degrees.
Examples
--------
>>> d = sexa2deci(1,12,0,0.0)
>>> d
12.0
>>> d = sexa2deci(1,12,0,0.0,todeg=True)
>>> d
180.0
>>> x = sexa2deci(1,9,12.456,0.0)
>>> assert round(x,4) == 9.2076
>>> x = sexa2deci(1,11,30,27.0)
>>> assert round(x, 4) == 11.5075
"""
divisors = [1.0, 60.0, 3600.0]
d = 0.0
# sexages[0] is sign.
if sign not in (-1, 1):
raise ValueError("Sign has to be -1 or 1.")
sexages = [sign, hd, mm, ss]
for i, divis in zip(sexages[1:], divisors):
d += i / divis
# Add proper sign.
d *= sexages[0]
if todeg:
d = h2d(d)
return d | [
"def",
"sexa2deci",
"(",
"sign",
",",
"hd",
",",
"mm",
",",
"ss",
",",
"todeg",
"=",
"False",
")",
":",
"divisors",
"=",
"[",
"1.0",
",",
"60.0",
",",
"3600.0",
"]",
"d",
"=",
"0.0",
"# sexages[0] is sign.",
"if",
"sign",
"not",
"in",
"(",
"-",
"... | Combine sexagesimal components into a decimal number.
Parameters
----------
sign : int
Sign of the number: 1 for +ve, -1 for negative.
hd : float
The hour or degree like part.
mm : float
The minute or arc-minute like part.
ss : float
The second or arc-second like part.
todeg : bool
If True then convert to degrees, assuming that the input value
is in hours. Default is False.
Returns
-------
d : float
The decimal equivalent of the sexagesimal number.
Raises
------
ValueError
This exception is raised if `sign` is not -1 or 1.
Notes
-----
The angle returned is::
sign * (hd + mm / 60.0 + ss / 3600.0)
In sexagesimal notation the sign applies to the whole quantity and
not to each part separately. So the `sign` is asked separately, and
applied to the whole quantity.
If the sexagesimal quantity is in hours, then we frequently want to
convert it into degrees. If the `todeg == True` then the given
value is assumed to be in hours, and the returned value will be in
degrees.
Examples
--------
>>> d = sexa2deci(1,12,0,0.0)
>>> d
12.0
>>> d = sexa2deci(1,12,0,0.0,todeg=True)
>>> d
180.0
>>> x = sexa2deci(1,9,12.456,0.0)
>>> assert round(x,4) == 9.2076
>>> x = sexa2deci(1,11,30,27.0)
>>> assert round(x, 4) == 11.5075 | [
"Combine",
"sexagesimal",
"components",
"into",
"a",
"decimal",
"number",
"."
] | python | train |
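For completeness, the inverse split is two truncation steps; a hypothetical `deci2sexa` helper (not part of the module above) that round-trips with the documented formula `sign * (hd + mm/60.0 + ss/3600.0)`:

```python
def deci2sexa(d):
    """Split a decimal value into (sign, hd, mm, ss); hypothetical inverse."""
    sign = -1 if d < 0 else 1
    d = abs(d)
    hd = int(d)
    frac = (d - hd) * 60.0
    mm = int(frac)
    ss = (frac - mm) * 60.0
    return sign, hd, mm, ss

sign, hd, mm, ss = deci2sexa(-11.5075)
print(sign, hd, mm, round(ss, 6))   # -1 11 30 27.0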
osrg/ryu | ryu/services/protocols/vrrp/api.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/vrrp/api.py#L48-L54 | def vrrp_list(app, instance_name=None):
"""list instances.
returns EventVRRPListReply([VRRPInstance]).
"""
list_request = vrrp_event.EventVRRPListRequest(instance_name)
list_request.dst = vrrp_event.VRRP_MANAGER_NAME
return app.send_request(list_request) | [
"def",
"vrrp_list",
"(",
"app",
",",
"instance_name",
"=",
"None",
")",
":",
"list_request",
"=",
"vrrp_event",
".",
"EventVRRPListRequest",
"(",
"instance_name",
")",
"list_request",
".",
"dst",
"=",
"vrrp_event",
".",
"VRRP_MANAGER_NAME",
"return",
"app",
".",... | list instances.
returns EventVRRPListReply([VRRPInstance]). | [
"list",
"instances",
".",
"returns",
"EventVRRPListReply",
"(",
"[",
"VRRPInstance",
"]",
")",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/profitbricks.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/profitbricks.py#L1146-L1179 | def _get_system_volume(vm_):
'''
Construct VM system volume list from cloud profile config
'''
# Override system volume size if 'disk_size' is defined in cloud profile
disk_size = get_size(vm_)['disk']
if 'disk_size' in vm_:
disk_size = vm_['disk_size']
# Construct the system volume
volume = Volume(
name='{0} Storage'.format(vm_['name']),
size=disk_size,
disk_type=get_disk_type(vm_)
)
if 'image_password' in vm_:
image_password = vm_['image_password']
volume.image_password = image_password
# Retrieve list of SSH public keys
ssh_keys = get_public_keys(vm_)
volume.ssh_keys = ssh_keys
if 'image_alias' in vm_.keys():
volume.image_alias = vm_['image_alias']
else:
volume.image = get_image(vm_)['id']
# Set volume availability zone if defined in the cloud profile
if 'disk_availability_zone' in vm_:
volume.availability_zone = vm_['disk_availability_zone']
return volume | [
"def",
"_get_system_volume",
"(",
"vm_",
")",
":",
"# Override system volume size if 'disk_size' is defined in cloud profile",
"disk_size",
"=",
"get_size",
"(",
"vm_",
")",
"[",
"'disk'",
"]",
"if",
"'disk_size'",
"in",
"vm_",
":",
"disk_size",
"=",
"vm_",
"[",
"'d... | Construct VM system volume list from cloud profile config | [
"Construct",
"VM",
"system",
"volume",
"list",
"from",
"cloud",
"profile",
"config"
] | python | train |
torfsen/service | src/service/__init__.py | https://github.com/torfsen/service/blob/d0dd824fce9237825c1943b30cd14f7b0f5957a6/src/service/__init__.py#L267-L277 | def _get_signal_event(self, s):
'''
Get the event for a signal.
Checks if the signal has been enabled and raises a
``ValueError`` if not.
'''
try:
return self._signal_events[int(s)]
except KeyError:
raise ValueError('Signal {} has not been enabled'.format(s)) | [
"def",
"_get_signal_event",
"(",
"self",
",",
"s",
")",
":",
"try",
":",
"return",
"self",
".",
"_signal_events",
"[",
"int",
"(",
"s",
")",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'Signal {} has not been enabled'",
".",
"format",
"(",
... | Get the event for a signal.
Checks if the signal has been enabled and raises a
``ValueError`` if not. | [
"Get",
"the",
"event",
"for",
"a",
"signal",
"."
] | python | train |
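A stripped-down sketch of the enable/lookup pattern this helper belongs to (the class and `enable_signal` are assumptions; only `_get_signal_event` is taken from the source):

```python
import signal
import threading

class SignalService:
    """Sketch: one threading.Event per enabled signal number."""

    def __init__(self):
        self._signal_events = {}

    def enable_signal(self, s):
        # Hypothetical counterpart that registers the event.
        self._signal_events[int(s)] = threading.Event()

    def _get_signal_event(self, s):
        try:
            return self._signal_events[int(s)]
        except KeyError:
            raise ValueError('Signal {} has not been enabled'.format(s))

svc = SignalService()
svc.enable_signal(signal.SIGTERM)
svc._get_signal_event(signal.SIGTERM).set()   # OK
svc._get_signal_event(signal.SIGHUP)          # raises ValueError
```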
Tanganelli/CoAPthon3 | coapthon/layers/forwardLayer.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/forwardLayer.py#L44-L80 | def receive_request_reverse(self, transaction):
"""
Setup the transaction for forwarding purposes on Reverse Proxies.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction
"""
path = str("/" + transaction.request.uri_path)
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
if path == defines.DISCOVERY_URL:
transaction = self._server.resourceLayer.discover(transaction)
else:
new = False
if transaction.request.code == defines.Codes.POST.number:
new_paths = self._server.root.with_prefix(path)
new_path = "/"
for tmp in new_paths:
if len(tmp) > len(new_path):
new_path = tmp
if path != new_path:
new = True
path = new_path
try:
resource = self._server.root[path]
except KeyError:
resource = None
if resource is None or path == '/':
# Not Found
transaction.response.code = defines.Codes.NOT_FOUND.number
else:
transaction.resource = resource
transaction = self._handle_request(transaction, new)
return transaction | [
"def",
"receive_request_reverse",
"(",
"self",
",",
"transaction",
")",
":",
"path",
"=",
"str",
"(",
"\"/\"",
"+",
"transaction",
".",
"request",
".",
"uri_path",
")",
"transaction",
".",
"response",
"=",
"Response",
"(",
")",
"transaction",
".",
"response"... | Setup the transaction for forwarding purposes on Reverse Proxies.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction | [
"Setup",
"the",
"transaction",
"for",
"forwarding",
"purposes",
"on",
"Reverse",
"Proxies",
".",
":",
"type",
"transaction",
":",
"Transaction",
":",
"param",
"transaction",
":",
"the",
"transaction",
"that",
"owns",
"the",
"request",
":",
"rtype",
":",
"Trans... | python | train |
allianceauth/allianceauth | allianceauth/services/modules/teamspeak3/util/ts3.py | https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/services/modules/teamspeak3/util/ts3.py#L190-L202 | def _unescape_str(value):
"""
Unescape a TS3 compatible string into a normal string
@param value: Value
@type value: string/int
"""
if isinstance(value, int):
return "%d" % value
value = value.replace(r"\\", "\\")
for i, j in ts3_escape.items():
value = value.replace(j, i)
return value | [
"def",
"_unescape_str",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"\"%d\"",
"%",
"value",
"value",
"=",
"value",
".",
"replace",
"(",
"r\"\\\\\"",
",",
"\"\\\\\"",
")",
"for",
"i",
",",
"j",
"in",
"ts3_... | Unescape a TS3 compatible string into a normal string
@param value: Value
@type value: string/int | [
"Unescape",
"a",
"TS3",
"compatible",
"string",
"into",
"a",
"normal",
"string"
] | python | train |
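The `ts3_escape` table the loop iterates over is imported elsewhere in the module; the standard TeamSpeak 3 server-query escapes, which it presumably mirrors (an assumption, not copied from the source), are:

```python
# Raw character -> escaped form, per the TS3 server-query convention.
# The literal backslash is handled separately in _unescape_str, which
# restores it first and then walks this map in reverse.
ts3_escape = {
    '/':  r'\/',
    ' ':  r'\s',
    '|':  r'\p',
    '\a': r'\a',
    '\b': r'\b',
    '\f': r'\f',
    '\n': r'\n',
    '\r': r'\r',
    '\t': r'\t',
    '\v': r'\v',
}

assert _unescape_str(r'server\sname\p1') == 'server name|1'
```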
guaix-ucm/numina | numina/core/pipeline.py | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L294-L323 | def select_profile(self, obresult):
"""Select instrument profile based on OB"""
logger = logging.getLogger(__name__)
logger.debug('calling default profile selector')
# check configuration
insconf = obresult.configuration
if insconf != 'default':
key = insconf
date_obs = None
keyname = 'uuid'
else:
# get first possible image
ref = obresult.get_sample_frame()
if ref is None:
key = obresult.instrument
date_obs = None
keyname = 'name'
else:
extr = self.datamodel.extractor_map['fits']
date_obs = extr.extract('observation_date', ref)
key = extr.extract('insconf', ref)
if key is not None:
keyname = 'uuid'
else:
key = extr.extract('instrument', ref)
keyname = 'name'
return key, date_obs, keyname | [
"def",
"select_profile",
"(",
"self",
",",
"obresult",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'calling default profile selector'",
")",
"# check configuration",
"insconf",
"=",
"obresult",
".",
"... | Select instrument profile based on OB | [
"Select",
"instrument",
"profile",
"based",
"on",
"OB"
] | python | train |
h2oai/datatable | datatable/utils/misc.py | https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/misc.py#L130-L166 | def normalize_range(e, n):
"""
Return the range tuple normalized for an ``n``-element object.
The semantics of a range is slightly different than that of a slice.
In particular, a range is similar to a list in meaning (and on Py2 it was
eagerly expanded into a list). Thus we do not allow the range to generate
indices that would be invalid for an ``n``-array. Furthermore, we restrict
the range to produce only positive or only negative indices. For example,
``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing
to treat the last "-1" as the last element in the list.
:param e: a range object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``, or None
if the range is invalid.
"""
if e.step > 0:
count = max(0, (e.stop - e.start - 1) // e.step + 1)
else:
count = max(0, (e.start - e.stop - 1) // -e.step + 1)
if count == 0:
return (0, 0, e.step)
start = e.start
finish = e.start + (count - 1) * e.step
if start >= 0:
if start >= n or finish < 0 or finish >= n:
return None
else:
start += n
finish += n
if start < 0 or start >= n or finish < 0 or finish >= n:
return None
assert count >= 0
return (start, count, e.step) | [
"def",
"normalize_range",
"(",
"e",
",",
"n",
")",
":",
"if",
"e",
".",
"step",
">",
"0",
":",
"count",
"=",
"max",
"(",
"0",
",",
"(",
"e",
".",
"stop",
"-",
"e",
".",
"start",
"-",
"1",
")",
"//",
"e",
".",
"step",
"+",
"1",
")",
"else"... | Return the range tuple normalized for an ``n``-element object.
The semantics of a range is slightly different than that of a slice.
In particular, a range is similar to a list in meaning (and on Py2 it was
eagerly expanded into a list). Thus we do not allow the range to generate
indices that would be invalid for an ``n``-array. Furthermore, we restrict
the range to produce only positive or only negative indices. For example,
``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing
to treat the last "-1" as the last element in the list.
:param e: a range object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``, or None
if the range is invalid. | [
"Return",
"the",
"range",
"tuple",
"normalized",
"for",
"an",
"n",
"-",
"element",
"object",
"."
] | python | train |
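A few quick checks against the rules the docstring spells out:

```python
# Mixed positive/negative indices (range(2, -2, -1) -> [2, 1, 0, -1])
# are rejected outright:
assert normalize_range(range(2, -2, -1), 5) is None
# Purely negative indices are shifted into [0, n):
assert normalize_range(range(-3, -1), 5) == (2, 2, 1)
# An empty range normalizes to a zero count:
assert normalize_range(range(3, 3), 5) == (0, 0, 1)
# Indices out of bounds for n invalidate the whole range:
assert normalize_range(range(0, 10), 5) is None
```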
probcomp/crosscat | src/utils/data_utils.py | https://github.com/probcomp/crosscat/blob/4a05bddb06a45f3b7b3e05e095720f16257d1535/src/utils/data_utils.py#L341-L356 | def convert_code_to_value(M_c, cidx, code):
"""
For a column with categorical data, this function takes the 'code':
the integer used to represent a specific value, and returns the corresponding
raw value (e.g. 'Joe' or 234.23409), which is always encoded as a string.
Note that the underlying store 'value_to_code' is unfortunately named backwards.
TODO: fix the backwards naming.
"""
if M_c['column_metadata'][cidx]['modeltype'] == 'normal_inverse_gamma':
return float(code)
else:
try:
return M_c['column_metadata'][cidx]['value_to_code'][int(code)]
except KeyError:
return M_c['column_metadata'][cidx]['value_to_code'][str(int(code))] | [
"def",
"convert_code_to_value",
"(",
"M_c",
",",
"cidx",
",",
"code",
")",
":",
"if",
"M_c",
"[",
"'column_metadata'",
"]",
"[",
"cidx",
"]",
"[",
"'modeltype'",
"]",
"==",
"'normal_inverse_gamma'",
":",
"return",
"float",
"(",
"code",
")",
"else",
":",
... | For a column with categorical data, this function takes the 'code':
the integer used to represent a specific value, and returns the corresponding
raw value (e.g. 'Joe' or 234.23409), which is always encoded as a string.
Note that the underlying store 'value_to_code' is unfortunately named backwards.
TODO: fix the backwards naming. | [
"For",
"a",
"column",
"with",
"categorical",
"data",
"this",
"function",
"takes",
"the",
"code",
":",
"the",
"integer",
"used",
"to",
"represent",
"a",
"specific",
"value",
"and",
"returns",
"the",
"corresponding",
"raw",
"value",
"(",
"e",
".",
"g",
".",
... | python | train |
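A minimal, hypothetical `M_c` exercising both branches (real crosscat metadata carries more fields, and the categorical modeltype name here is an assumption):

```python
M_c = {
    'column_metadata': [
        # Column 0: continuous, so codes pass through as floats.
        {'modeltype': 'normal_inverse_gamma', 'value_to_code': {}},
        # Column 1: categorical; despite its name, 'value_to_code'
        # maps code -> raw value.
        {'modeltype': 'symmetric_dirichlet_discrete',
         'value_to_code': {0: 'Joe', 1: 'Jane'}},
    ],
}

assert convert_code_to_value(M_c, 0, 3.5) == 3.5
assert convert_code_to_value(M_c, 1, 1.0) == 'Jane'
```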
CellProfiler/centrosome | centrosome/cpmorphology.py | https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L1972-L1996 | def lines_intersect(pt1_p, pt2_p, pt1_q, pt2_q):
'''Return true if two line segments intersect
pt1_p, pt2_p - endpoints of first line segment
pt1_q, pt2_q - endpoints of second line segment
'''
#
# The idea here is to do the cross-product of the vector from
# point 1 to point 2 of one segment against the cross products from
# both points of the other segment. If any of the cross products are zero,
# the point is colinear with the line. If the cross products differ in
# sign, then one point is on one side of the line and the other is on
# the other. If that happens for both, then the lines must cross.
#
for pt1_a, pt2_a, pt1_b, pt2_b in ((pt1_p, pt2_p, pt1_q, pt2_q),
(pt1_q, pt2_q, pt1_p, pt2_p)):
v_a = pt2_a-pt1_a
cross_a_1b = np.cross(v_a, pt1_b-pt2_a)
if cross_a_1b == 0 and colinear_intersection_test(pt1_a, pt2_a, pt1_b):
return True
cross_a_2b = np.cross(v_a, pt2_b-pt2_a)
if cross_a_2b == 0 and colinear_intersection_test(pt1_a, pt2_a, pt2_b):
return True
if (cross_a_1b < 0) == (cross_a_2b < 0):
return False
return True | [
"def",
"lines_intersect",
"(",
"pt1_p",
",",
"pt2_p",
",",
"pt1_q",
",",
"pt2_q",
")",
":",
"#",
"# The idea here is to do the cross-product of the vector from",
"# point 1 to point 2 of one segment against the cross products from ",
"# both points of the other segment. If any of the c... | Return true if two line segments intersect
pt1_p, pt2_p - endpoints of first line segment
pt1_q, pt2_q - endpoints of second line segment | [
"Return",
"true",
"if",
"two",
"line",
"segments",
"intersect",
"pt1_p",
"pt2_p",
"-",
"endpoints",
"of",
"first",
"line",
"segment",
"pt1_q",
"pt2_q",
"-",
"endpoints",
"of",
"second",
"line",
"segment"
] | python | train |
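Endpoints must be NumPy arrays so the `np.cross` calls work on 2-vectors; for example:

```python
import numpy as np

p1, p2 = np.array([0, 0]), np.array([2, 2])
q1, q2 = np.array([0, 2]), np.array([2, 0])   # crosses the diagonal at (1, 1)
r1, r2 = np.array([3, 0]), np.array([3, 2])   # lies entirely to the right

assert lines_intersect(p1, p2, q1, q2)
assert not lines_intersect(p1, p2, r1, r2)
```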
MartinThoma/memtop | memtop/__init__.py | https://github.com/MartinThoma/memtop/blob/504d251f1951922db84883c2e660ba7e754d1546/memtop/__init__.py#L68-L73 | def signal_handler(signal_name, frame):
"""Quit signal handler."""
sys.stdout.flush()
print("\nSIGINT in frame signal received. Quitting...")
sys.stdout.flush()
sys.exit(0) | [
"def",
"signal_handler",
"(",
"signal_name",
",",
"frame",
")",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"print",
"(",
"\"\\nSIGINT in frame signal received. Quitting...\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"exit",
"("... | Quit signal handler. | [
"Quit",
"signal",
"handler",
"."
] | python | train |
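Elsewhere in memtop the handler is presumably installed with the standard library call:

```python
import signal

# Route Ctrl-C (SIGINT) through the handler above so the program
# flushes its output and exits cleanly instead of raising
# KeyboardInterrupt mid-print.
signal.signal(signal.SIGINT, signal_handler)
```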
datascopeanalytics/scrubadub | scrubadub/__init__.py | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/__init__.py#L10-L16 | def clean(text, cls=None, **kwargs):
"""Public facing function to clean ``text`` using the scrubber ``cls`` by
replacing all personal information with ``{{PLACEHOLDERS}}``.
"""
cls = cls or Scrubber
scrubber = cls()
return scrubber.clean(text, **kwargs) | [
"def",
"clean",
"(",
"text",
",",
"cls",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
"=",
"cls",
"or",
"Scrubber",
"scrubber",
"=",
"cls",
"(",
")",
"return",
"scrubber",
".",
"clean",
"(",
"text",
",",
"*",
"*",
"kwargs",
")"
] | Public facing function to clean ``text`` using the scrubber ``cls`` by
replacing all personal information with ``{{PLACEHOLDERS}}``. | [
"Public",
"facing",
"function",
"to",
"clean",
"text",
"using",
"the",
"scrubber",
"cls",
"by",
"replacing",
"all",
"personal",
"information",
"with",
"{{",
"PLACEHOLDERS",
"}}",
"."
] | python | train |
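Typical use goes through this module-level helper; with the default detectors an email address, for instance, is swapped for a placeholder:

```python
import scrubadub

text = "Write to john.doe@example.com for details."
print(scrubadub.clean(text))
# Expected output (placeholder names depend on the detectors enabled):
# Write to {{EMAIL}} for details.
```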
castelao/oceansdb | oceansdb/utils.py | https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/utils.py#L61-L102 | def dbsource(dbname, var, resolution=None, tscale=None):
"""Return which file(s) to use according to dbname, var, etc
"""
db_cfg = {}
cfg_dir = 'datasource'
cfg_files = pkg_resources.resource_listdir('oceansdb', cfg_dir)
cfg_files = [f for f in cfg_files if f[-5:] == '.json']
for src_cfg in cfg_files:
text = pkg_resources.resource_string(
'oceansdb', os.path.join(cfg_dir, src_cfg))
text = text.decode('UTF-8', 'replace')
cfg = json.loads(text)
for c in cfg:
            assert c not in db_cfg, "Trying to overwrite %s" % c
db_cfg[c] = cfg[c]
dbpath = oceansdb_dir()
datafiles = []
cfg = db_cfg[dbname]
if (resolution is None):
resolution = cfg['vars'][var]['default_resolution']
if (tscale is None):
tscale = cfg['vars'][var][resolution]["default_tscale"]
for c in cfg['vars'][var][resolution][tscale]:
download_file(outputdir=dbpath, **c)
if 'filename' in c:
filename = os.path.join(dbpath, c['filename'])
else:
filename = os.path.join(dbpath,
os.path.basename(urlparse(c['url']).path))
if 'varnames' in cfg['vars'][var][resolution]:
datafiles.append(Dataset_flex(filename,
aliases=cfg['vars'][var][resolution]['varnames']))
else:
datafiles.append(Dataset_flex(filename))
return datafiles | [
"def",
"dbsource",
"(",
"dbname",
",",
"var",
",",
"resolution",
"=",
"None",
",",
"tscale",
"=",
"None",
")",
":",
"db_cfg",
"=",
"{",
"}",
"cfg_dir",
"=",
"'datasource'",
"cfg_files",
"=",
"pkg_resources",
".",
"resource_listdir",
"(",
"'oceansdb'",
",",... | Return which file(s) to use according to dbname, var, etc | [
"Return",
"which",
"file",
"(",
"s",
")",
"to",
"use",
"according",
"to",
"dbname",
"var",
"etc"
] | python | train |
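Working backwards from the lookups above, each JSON file under `datasource/` must decode to a structure like this (a hypothetical entry: database, variable, and URL are invented, key names are taken from the code):

```python
db_cfg = {
    "WOA18": {
        "vars": {
            "temperature": {
                "default_resolution": "1deg",
                "1deg": {
                    "default_tscale": "annual",
                    # optional alias map handed to Dataset_flex
                    "varnames": {"temperature": "t_an"},
                    # one dict per file; each is passed to download_file()
                    "annual": [
                        {"url": "https://example.com/woa18_t00_01.nc",
                         "filename": "woa18_t00_01.nc"},
                    ],
                },
            },
        },
    },
}
```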
watson-developer-cloud/python-sdk | ibm_watson/personality_insights_v3.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/personality_insights_v3.py#L454-L479 | def _from_dict(cls, _dict):
"""Initialize a ConsumptionPreferencesCategory object from a json dictionary."""
args = {}
if 'consumption_preference_category_id' in _dict:
args['consumption_preference_category_id'] = _dict.get(
'consumption_preference_category_id')
else:
raise ValueError(
'Required property \'consumption_preference_category_id\' not present in ConsumptionPreferencesCategory JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in ConsumptionPreferencesCategory JSON'
)
if 'consumption_preferences' in _dict:
args['consumption_preferences'] = [
ConsumptionPreferences._from_dict(x)
for x in (_dict.get('consumption_preferences'))
]
else:
raise ValueError(
'Required property \'consumption_preferences\' not present in ConsumptionPreferencesCategory JSON'
)
return cls(**args) | [
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'consumption_preference_category_id'",
"in",
"_dict",
":",
"args",
"[",
"'consumption_preference_category_id'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'consumption_preference_cate... | Initialize a ConsumptionPreferencesCategory object from a json dictionary. | [
"Initialize",
"a",
"ConsumptionPreferencesCategory",
"object",
"from",
"a",
"json",
"dictionary",
"."
] | python | train |
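Round-tripping a minimal payload (values hypothetical; nested preference entries elided to an empty list):

```python
payload = {
    'consumption_preference_category_id': 'consumption_preferences_shopping',
    'name': 'Purchasing Preferences',
    'consumption_preferences': [],  # each entry would be parsed recursively
}
category = ConsumptionPreferencesCategory._from_dict(payload)

# Omitting any required key raises immediately, e.g.:
#   ConsumptionPreferencesCategory._from_dict({'name': 'x'})  -> ValueError
```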
saltstack/salt | salt/modules/aix_group.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aix_group.py#L78-L96 | def info(name):
'''
Return information about a group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
grinfo = grp.getgrnam(name)
except KeyError:
return {}
else:
return {'name': grinfo.gr_name,
'passwd': grinfo.gr_passwd,
'gid': grinfo.gr_gid,
'members': grinfo.gr_mem} | [
"def",
"info",
"(",
"name",
")",
":",
"try",
":",
"grinfo",
"=",
"grp",
".",
"getgrnam",
"(",
"name",
")",
"except",
"KeyError",
":",
"return",
"{",
"}",
"else",
":",
"return",
"{",
"'name'",
":",
"grinfo",
".",
"gr_name",
",",
"'passwd'",
":",
"gr... | Return information about a group
CLI Example:
.. code-block:: bash
salt '*' group.info foo | [
"Return",
"information",
"about",
"a",
"group"
] | python | train |
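The module is a thin shim over the standard `grp` database, so the same lookup works in plain Python:

```python
import grp

try:
    g = grp.getgrnam('staff')  # any group name; 'staff' is just an example
except KeyError:
    result = {}
else:
    result = {'name': g.gr_name, 'passwd': g.gr_passwd,
              'gid': g.gr_gid, 'members': g.gr_mem}
```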
wummel/linkchecker | linkcheck/i18n.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/i18n.py#L70-L94 | def init (domain, directory, loc=None):
"""Initialize this gettext i18n module. Searches for supported languages
and installs the gettext translator class."""
global default_language, default_encoding, default_domain, default_directory
default_directory = directory
default_domain = domain
if os.path.isdir(directory):
# get supported languages
for lang in os.listdir(directory):
path = os.path.join(directory, lang, 'LC_MESSAGES')
mo_file = os.path.join(path, '%s.mo' % domain)
if os.path.exists(mo_file):
supported_languages.add(lang)
if loc is None:
loc, encoding = get_locale()
else:
encoding = get_locale()[1]
if loc in supported_languages:
default_language = loc
else:
default_language = "en"
# Even if the default language is not supported, the encoding should
# be installed. Otherwise the Python installation is borked.
default_encoding = encoding
install_language(default_language) | [
"def",
"init",
"(",
"domain",
",",
"directory",
",",
"loc",
"=",
"None",
")",
":",
"global",
"default_language",
",",
"default_encoding",
",",
"default_domain",
",",
"default_directory",
"default_directory",
"=",
"directory",
"default_domain",
"=",
"domain",
"if",... | Initialize this gettext i18n module. Searches for supported languages
and installs the gettext translator class. | [
"Initialize",
"this",
"gettext",
"i18n",
"module",
".",
"Searches",
"for",
"supported",
"languages",
"and",
"installs",
"the",
"gettext",
"translator",
"class",
"."
] | python | train |
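`init()` expects the classic gettext layout under `directory`, one compiled catalog per language (the paths and domain below are illustrative, not taken from the source):

```python
# Expected layout for init('linkcheck', '/usr/share/mylocale'):
#
#   /usr/share/mylocale/de/LC_MESSAGES/linkcheck.mo
#   /usr/share/mylocale/fr/LC_MESSAGES/linkcheck.mo
#
# Every language subdirectory containing <domain>.mo joins
# supported_languages; if the current locale is not among them the
# module falls back to "en" while keeping the locale's encoding.
init('linkcheck', '/usr/share/mylocale')
```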
monarch-initiative/dipper | dipper/models/Evidence.py | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Evidence.py#L63-L81 | def add_data_individual(self, data_curie, label=None, ind_type=None):
"""
Add data individual
:param data_curie: str either curie formatted or long string,
long strings will be converted to bnodes
        :param ind_type: str curie
:param label: str
:return: None
"""
part_length = len(data_curie.split(':'))
        if part_length == 1:
curie = "_:{}".format(data_curie)
elif part_length > 2:
raise ValueError("Misformatted curie {}".format(data_curie))
else:
curie = data_curie
self.model.addIndividualToGraph(curie, label, ind_type)
return | [
"def",
"add_data_individual",
"(",
"self",
",",
"data_curie",
",",
"label",
"=",
"None",
",",
"ind_type",
"=",
"None",
")",
":",
"part_length",
"=",
"len",
"(",
"data_curie",
".",
"split",
"(",
"':'",
")",
")",
"if",
"part_length",
"==",
"0",
":",
"cur... | Add data individual
:param data_curie: str either curie formatted or long string,
long strings will be converted to bnodes
:param ind_type: str curie
:param label: str
:return: None | [
"Add",
"data",
"individual",
":",
"param",
"data_curie",
":",
"str",
"either",
"curie",
"formatted",
"or",
"long",
"string",
"long",
"strings",
"will",
"be",
"converted",
"to",
"bnodes",
":",
"param",
"type",
":",
"str",
"curie",
":",
"param",
"label",
":"... | python | train |