repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
quadrismegistus/prosodic | prosodic/entity.py | https://github.com/quadrismegistus/prosodic/blob/8af66ed9be40c922d03a0b09bc11c87d2061b618/prosodic/entity.py#L972-L1115 | def genfsms(self,meter=None):
"""Generate FSM images. Requires networkx and GraphViz."""
if (hasattr(self,'allParses')):
name=self.getName()
import networkx as nx
m2int={'w':'0','s':'1'}
gs={}
gs['weight']=['str_weight']
gs['stress']=['str_stress']
gs['stressweight']=['str_stress','str_weight']
gs['meterweight']=['str_meter','str_weight']
gs['meterstress']=['str_meter','str_stress']
#gs['numseg']=['str_numseg']
#gs['metershape']=['str_meter','getShape']
gs['meter']=['str_meter']
#gs['shape']=['str_shape']
#gs['weightshape']=['str_weight','str_shape']
Gs={}
if len(self.bestParses(meter=meter))==1: # if this is a direct input or a single line
parses=[]
for _parses in self.allParses():
parses+=_parses
use_labels = True
else:
parses=self.bestParses()
use_labels = False
for gtype in gs.keys():
G=nx.DiGraph()
sumweight={}
nodetypes=[]
linelens=[]
for parse in parses:
node1=None
node2=None
posnum=0
linelens.append(len(parse.positions))
for pos in parse.positions:
has_viol = bool(sum(pos.constraintScores.values()))
for unit in pos.slots:
spelling=unit.children[0].str_orth()
posnum+=1
if hasattr(being,'line_maxsylls'):
if posnum>int(being.line_maxsylls):
break
nodestr=""
unit.meter=pos.meterVal
for strcaller in sorted(gs[gtype]):
z=unit.findattr(strcaller,'children')
if type(z)==type([]):
nodestr+="".join( [str(x()) for x in z] )
else:
nodestr+=str(z())
if not nodestr: continue
if use_labels:
nodestr+='_'+ (spelling.upper() if unit.meter=='s' else spelling.lower())
nodestr=str(posnum)+"_"+str(nodestr)
if (not nodestr in nodetypes):
nodetypes.append(nodestr)
#node=str(posnum)+"_"+str(nodestr)
node=nodestr
if not node1:
node1=node
continue
node2=node
if G.has_edge(node1,node2):
G[node1][node2]['weight']+=1
else:
G.add_edge(node1,node2,weight=1)
try:
sumweight[(str(node1)[0],str(node2)[0])]+=1
except KeyError:
sumweight[(str(node1)[0],str(node2)[0])]=1
node1=node2
if not linelens: continue
maxlinesize=6
nodes=G.nodes()
#for n1,nbrs in G.adjacency_iter():
# for n2,eattr in nbrs.items():
for n1,n2,eattr in G.edges(data=True):
count=G[n1][n2]['weight']
G[n1][n2]['weight']=count/sumweight[(n1[0],n2[0])]
strfreq=str(G[n1][n2]['weight']*100)
if len(strfreq)>3:
strfreq=strfreq[0:3]
G[n1][n2]['label']=str(count)+" ["+str(strfreq)+"%]"
G[n1][n2]['fontsize']=10
G[n1][n2]['penwidth']=G[n1][n2]['weight']*maxlinesize
G.node[n1]['width']=1
G.node[n2]['width']=1
G[n1][n2]['weight']=0
#G[n1][n2]['style']="setlinewidth("+str(int(G[n1][n2]['weight']*maxlinesize)+1)+")"
#print G[n1][n2]['style']
#G[n1][n2]['arrowhead']='none'
import math
"""avglinelen=int(max(linelens))
for n in range(2,avglinelen):
for ntype in nodetypes:
node1=str(n-1)+"_"+ntype
node2=str(n)+"_"+ntype
if not G.has_edge(node1,node2):
G.add_edge(node1,node2,weight=0,penwidth=0,color='white')
"""
fn='results/fsms/'+str(gtype)+"."+name+'.png'
print ">> saved: "+fn+""
#plt.savefig(fn)
#nx.write_dot(G,fn)
pyd=nx.to_pydot(G)
pyd.set_rankdir('LR')
for node in pyd.get_node_list():
node.set_orientation('portrait')
import prosodic as p
fnfn=os.path.join(p.dir_prosodic,fn)
_path=os.path.split(fnfn)[0]
if not os.path.exists(_path):
os.makedirs(_path)
pyd.write_png(fnfn, prog='dot')
else:
if not self.children:
return ""
elif type(self.children[0])==type([]):
return []
else:
[child.genfsms() for child in self.children] | [
"def",
"genfsms",
"(",
"self",
",",
"meter",
"=",
"None",
")",
":",
"if",
"(",
"hasattr",
"(",
"self",
",",
"'allParses'",
")",
")",
":",
"name",
"=",
"self",
".",
"getName",
"(",
")",
"import",
"networkx",
"as",
"nx",
"m2int",
"=",
"{",
"'w'",
"... | Generate FSM images. Requires networkx and GraphViz. | [
"Generate",
"FSM",
"images",
".",
"Requires",
"networkx",
"and",
"GraphViz",
"."
] | python | train |
openai/universe | universe/vncdriver/vnc_client.py | https://github.com/openai/universe/blob/cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c/universe/vncdriver/vnc_client.py#L398-L402 | def send_ClientCutText(self, message):
"""The client has new text in its clipboard.
"""
self.sendMessage(struct.pack("!BxxxI", 6, len(message)))
self.sendMessage(message) | [
"def",
"send_ClientCutText",
"(",
"self",
",",
"message",
")",
":",
"self",
".",
"sendMessage",
"(",
"struct",
".",
"pack",
"(",
"\"!BxxxI\"",
",",
"6",
",",
"len",
"(",
"message",
")",
")",
")",
"self",
".",
"sendMessage",
"(",
"message",
")"
] | The client has new text in its clipboard. | [
"The",
"client",
"has",
"new",
"text",
"in",
"its",
"clipboard",
"."
] | python | train |
spotify/luigi | luigi/execution_summary.py | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L257-L268 | def _ranging_attributes(attributes, param_class):
"""
Checks if there is a continuous range
"""
next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
in_first = attributes.difference(next_attributes)
in_second = next_attributes.difference(attributes)
if len(in_first) == 1 and len(in_second) == 1:
for x in attributes:
if {param_class.next_in_enumeration(x)} == in_second:
return next(iter(in_first)), x
return None, None | [
"def",
"_ranging_attributes",
"(",
"attributes",
",",
"param_class",
")",
":",
"next_attributes",
"=",
"{",
"param_class",
".",
"next_in_enumeration",
"(",
"attribute",
")",
"for",
"attribute",
"in",
"attributes",
"}",
"in_first",
"=",
"attributes",
".",
"differen... | Checks if there is a continuous range | [
"Checks",
"if",
"there",
"is",
"a",
"continuous",
"range"
] | python | train |
fabioz/PyDev.Debugger | _pydev_bundle/pydev_umd.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_bundle/pydev_umd.py#L102-L123 | def _get_globals():
"""Return current Python interpreter globals namespace"""
if _get_globals_callback is not None:
return _get_globals_callback()
else:
try:
from __main__ import __dict__ as namespace
except ImportError:
try:
# The import fails on IronPython
import __main__
namespace = __main__.__dict__
except:
namespace
shell = namespace.get('__ipythonshell__')
if shell is not None and hasattr(shell, 'user_ns'):
# IPython 0.12+ kernel
return shell.user_ns
else:
# Python interpreter
return namespace
return namespace | [
"def",
"_get_globals",
"(",
")",
":",
"if",
"_get_globals_callback",
"is",
"not",
"None",
":",
"return",
"_get_globals_callback",
"(",
")",
"else",
":",
"try",
":",
"from",
"__main__",
"import",
"__dict__",
"as",
"namespace",
"except",
"ImportError",
":",
"try... | Return current Python interpreter globals namespace | [
"Return",
"current",
"Python",
"interpreter",
"globals",
"namespace"
] | python | train |
google/grr | grr/server/grr_response_server/databases/mysql_clients.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_clients.py#L542-L562 | def ListClientsForKeywords(self, keywords, start_time=None, cursor=None):
"""Lists the clients associated with keywords."""
keywords = set(keywords)
hash_to_kw = {mysql_utils.Hash(kw): kw for kw in keywords}
result = {kw: [] for kw in keywords}
query = """
SELECT keyword_hash, client_id
FROM client_keywords
FORCE INDEX (client_index_by_keyword_hash)
WHERE keyword_hash IN ({})
""".format(", ".join(["%s"] * len(result)))
args = list(iterkeys(hash_to_kw))
if start_time:
query += " AND timestamp >= FROM_UNIXTIME(%s)"
args.append(mysql_utils.RDFDatetimeToTimestamp(start_time))
cursor.execute(query, args)
for kw_hash, cid in cursor.fetchall():
result[hash_to_kw[kw_hash]].append(db_utils.IntToClientID(cid))
return result | [
"def",
"ListClientsForKeywords",
"(",
"self",
",",
"keywords",
",",
"start_time",
"=",
"None",
",",
"cursor",
"=",
"None",
")",
":",
"keywords",
"=",
"set",
"(",
"keywords",
")",
"hash_to_kw",
"=",
"{",
"mysql_utils",
".",
"Hash",
"(",
"kw",
")",
":",
... | Lists the clients associated with keywords. | [
"Lists",
"the",
"clients",
"associated",
"with",
"keywords",
"."
] | python | train |
stanfordnlp/stanza | stanza/nlp/corenlp.py | https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/corenlp.py#L51-L77 | def _request(self, text, properties, retries=0):
"""Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:return: request result
"""
text = to_unicode(text) # ensures unicode
try:
r = requests.post(self.server, params={'properties': str(properties)}, data=text.encode('utf-8'))
r.raise_for_status()
return r
except requests.ConnectionError as e:
if retries > 5:
logging.critical('Max retries exceeded!')
raise e
else:
logging.critical(repr(e))
logging.critical("It seems like we've temporarily ran out of ports. Taking a 30s break...")
time.sleep(30)
logging.critical("Retrying...")
return self._request(text, properties, retries=retries+1)
except requests.HTTPError:
if r.text == "CoreNLP request timed out. Your document may be too long.":
raise TimeoutException(r.text)
else:
raise AnnotationException(r.text) | [
"def",
"_request",
"(",
"self",
",",
"text",
",",
"properties",
",",
"retries",
"=",
"0",
")",
":",
"text",
"=",
"to_unicode",
"(",
"text",
")",
"# ensures unicode",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"server",
",",
"par... | Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:return: request result | [
"Send",
"a",
"request",
"to",
"the",
"CoreNLP",
"server",
"."
] | python | train |
google/openhtf | examples/phase_groups.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/phase_groups.py#L97-L117 | def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute() | [
"def",
"run_nested_groups",
"(",
")",
":",
"test",
"=",
"htf",
".",
"Test",
"(",
"htf",
".",
"PhaseGroup",
"(",
"main",
"=",
"[",
"main_phase",
",",
"htf",
".",
"PhaseGroup",
".",
"with_teardown",
"(",
"inner_teardown_phase",
")",
"(",
"inner_main_phase",
... | Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase | [
"Run",
"the",
"nested",
"groups",
"example",
"."
] | python | train |
callowayproject/django-staff | staff/views.py | https://github.com/callowayproject/django-staff/blob/248e2b69be36ee9f702ff5ba92cd1fb58597cd41/staff/views.py#L24-L46 | def userinfo_json(request, user_id):
"""
Return the user's information in a json object
"""
data = {'first_name': '',
'last_name': '',
'email': '',
'slug': '',
'bio': '',
'phone': '',
'is_active': False}
try:
member = StaffMember.objects.get(pk=user_id)
for key in data.keys():
if hasattr(member, key):
data[key] = getattr(member, key, '')
except StaffMember.DoesNotExist:
pass
return HttpResponse(json.dumps(data),
mimetype='application/json') | [
"def",
"userinfo_json",
"(",
"request",
",",
"user_id",
")",
":",
"data",
"=",
"{",
"'first_name'",
":",
"''",
",",
"'last_name'",
":",
"''",
",",
"'email'",
":",
"''",
",",
"'slug'",
":",
"''",
",",
"'bio'",
":",
"''",
",",
"'phone'",
":",
"''",
"... | Return the user's information in a json object | [
"Return",
"the",
"user",
"s",
"information",
"in",
"a",
"json",
"object"
] | python | train |
jjgomera/iapws | iapws/_iapws.py | https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/_iapws.py#L1027-L1077 | def _Kw(rho, T):
"""Equation for the ionization constant of ordinary water
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
Returns
-------
pKw : float
Ionization constant in -log10(kw), [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 0 ≤ ρ ≤ 1250
* 273.15 ≤ T ≤ 1073.15
Examples
--------
>>> _Kw(1000, 300)
13.906565
References
----------
IAPWS, Release on the Ionization Constant of H2O,
http://www.iapws.org/relguide/Ionization.pdf
"""
# Check input parameters
if rho < 0 or rho > 1250 or T < 273.15 or T > 1073.15:
raise NotImplementedError("Incoming out of bound")
# The internal method of calculation use rho in g/cm³
d = rho/1000.
# Water molecular weight different
Mw = 18.015268
gamma = [6.1415e-1, 4.825133e4, -6.770793e4, 1.01021e7]
pKg = 0
for i, g in enumerate(gamma):
pKg += g/T**i
Q = d*exp(-0.864671+8659.19/T-22786.2/T**2*d**(2./3))
pKw = -12*(log10(1+Q)-Q/(Q+1)*d*(0.642044-56.8534/T-0.375754*d)) + \
pKg+2*log10(Mw/1000)
return pKw | [
"def",
"_Kw",
"(",
"rho",
",",
"T",
")",
":",
"# Check input parameters",
"if",
"rho",
"<",
"0",
"or",
"rho",
">",
"1250",
"or",
"T",
"<",
"273.15",
"or",
"T",
">",
"1073.15",
":",
"raise",
"NotImplementedError",
"(",
"\"Incoming out of bound\"",
")",
"#... | Equation for the ionization constant of ordinary water
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
Returns
-------
pKw : float
Ionization constant in -log10(kw), [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 0 ≤ ρ ≤ 1250
* 273.15 ≤ T ≤ 1073.15
Examples
--------
>>> _Kw(1000, 300)
13.906565
References
----------
IAPWS, Release on the Ionization Constant of H2O,
http://www.iapws.org/relguide/Ionization.pdf | [
"Equation",
"for",
"the",
"ionization",
"constant",
"of",
"ordinary",
"water"
] | python | train |
roclark/sportsreference | sportsreference/nba/boxscore.py | https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/boxscore.py#L1115-L1124 | def home_wins(self):
"""
Returns an ``int`` of the number of games the home team won after the
conclusion of the game.
"""
try:
wins, losses = re.findall(r'\d+', self._home_record)
return wins
except ValueError:
return 0 | [
"def",
"home_wins",
"(",
"self",
")",
":",
"try",
":",
"wins",
",",
"losses",
"=",
"re",
".",
"findall",
"(",
"r'\\d+'",
",",
"self",
".",
"_home_record",
")",
"return",
"wins",
"except",
"ValueError",
":",
"return",
"0"
] | Returns an ``int`` of the number of games the home team won after the
conclusion of the game. | [
"Returns",
"an",
"int",
"of",
"the",
"number",
"of",
"games",
"the",
"home",
"team",
"won",
"after",
"the",
"conclusion",
"of",
"the",
"game",
"."
] | python | train |
psd-tools/psd-tools | src/psd_tools/api/psd_image.py | https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/psd_image.py#L294-L300 | def viewbox(self):
"""
Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`.
"""
return self.left, self.top, self.right, self.bottom | [
"def",
"viewbox",
"(",
"self",
")",
":",
"return",
"self",
".",
"left",
",",
"self",
".",
"top",
",",
"self",
".",
"right",
",",
"self",
".",
"bottom"
] | Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`. | [
"Return",
"bounding",
"box",
"of",
"the",
"viewport",
"."
] | python | train |
singularityhub/sregistry-cli | sregistry/client/build.py | https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/client/build.py#L173-L186 | def list_logs(args, container_name=None):
'''list a specific log for a builder, or the latest log if none provided
Parameters
==========
args: the argparse object to look for a container name
container_name: a default container name set to be None (show latest log)
'''
from sregistry.main import Client as cli
if len(args.commands) > 0:
container_name = args.commands.pop(0)
cli.logs(container_name)
sys.exit(0) | [
"def",
"list_logs",
"(",
"args",
",",
"container_name",
"=",
"None",
")",
":",
"from",
"sregistry",
".",
"main",
"import",
"Client",
"as",
"cli",
"if",
"len",
"(",
"args",
".",
"commands",
")",
">",
"0",
":",
"container_name",
"=",
"args",
".",
"comman... | list a specific log for a builder, or the latest log if none provided
Parameters
==========
args: the argparse object to look for a container name
container_name: a default container name set to be None (show latest log) | [
"list",
"a",
"specific",
"log",
"for",
"a",
"builder",
"or",
"the",
"latest",
"log",
"if",
"none",
"provided"
] | python | test |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L886-L897 | def nas_auto_qos_set_dscp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nas = ET.SubElement(config, "nas", xmlns="urn:brocade.com:mgmt:brocade-qos")
auto_qos = ET.SubElement(nas, "auto-qos")
set = ET.SubElement(auto_qos, "set")
dscp = ET.SubElement(set, "dscp")
dscp.text = kwargs.pop('dscp')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"nas_auto_qos_set_dscp",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"nas",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"nas\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:broc... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/models/research/autoencoders.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1157-L1164 | def autoencoder_residual_discrete_big():
"""Residual discrete autoencoder model, big version."""
hparams = autoencoder_residual_discrete()
hparams.hidden_size = 128
hparams.max_hidden_size = 4096
hparams.bottleneck_noise = 0.1
hparams.residual_dropout = 0.4
return hparams | [
"def",
"autoencoder_residual_discrete_big",
"(",
")",
":",
"hparams",
"=",
"autoencoder_residual_discrete",
"(",
")",
"hparams",
".",
"hidden_size",
"=",
"128",
"hparams",
".",
"max_hidden_size",
"=",
"4096",
"hparams",
".",
"bottleneck_noise",
"=",
"0.1",
"hparams"... | Residual discrete autoencoder model, big version. | [
"Residual",
"discrete",
"autoencoder",
"model",
"big",
"version",
"."
] | python | train |
CartoDB/cartoframes | cartoframes/datasets.py | https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/datasets.py#L213-L219 | def get_columns(context, query):
"""Get list of cartoframes.columns.Column"""
table_info = context.sql_client.send(query)
if 'fields' in table_info:
return Column.from_sql_api_fields(table_info['fields'])
return None | [
"def",
"get_columns",
"(",
"context",
",",
"query",
")",
":",
"table_info",
"=",
"context",
".",
"sql_client",
".",
"send",
"(",
"query",
")",
"if",
"'fields'",
"in",
"table_info",
":",
"return",
"Column",
".",
"from_sql_api_fields",
"(",
"table_info",
"[",
... | Get list of cartoframes.columns.Column | [
"Get",
"list",
"of",
"cartoframes",
".",
"columns",
".",
"Column"
] | python | train |
Azure/azure-cosmos-python | azure/cosmos/cosmos_client.py | https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L639-L662 | def ReadPermission(self, permission_link, options=None):
"""Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Read(path,
'permissions',
permission_id,
None,
options) | [
"def",
"ReadPermission",
"(",
"self",
",",
"permission_link",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"{",
"}",
"path",
"=",
"base",
".",
"GetPathFromLink",
"(",
"permission_link",
")",
"permission_id",
"=... | Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict | [
"Reads",
"a",
"permission",
"."
] | python | train |
zimeon/iiif | iiif/manipulator_gen.py | https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_gen.py#L62-L73 | def do_region(self, x, y, w, h):
"""Record region."""
if (x is None):
self.rx = 0
self.ry = 0
self.rw = self.width
self.rh = self.height
else:
self.rx = x
self.ry = y
self.rw = w
self.rh = h | [
"def",
"do_region",
"(",
"self",
",",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
":",
"if",
"(",
"x",
"is",
"None",
")",
":",
"self",
".",
"rx",
"=",
"0",
"self",
".",
"ry",
"=",
"0",
"self",
".",
"rw",
"=",
"self",
".",
"width",
"self",
"."... | Record region. | [
"Record",
"region",
"."
] | python | train |
Qiskit/qiskit-terra | qiskit/pulse/ops.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/pulse/ops.py#L22-L36 | def union(*schedules: List[Union[ScheduleComponent, Tuple[int, ScheduleComponent]]],
name: str = None) -> Schedule:
"""Create a union (and also shift if desired) of all input `Schedule`s.
Args:
*schedules: Schedules to take the union of
name: Name of the new schedule. Defaults to first element of `schedules`
"""
if name is None and schedules:
sched = schedules[0]
if isinstance(sched, (list, tuple)):
name = sched[1].name
else:
name = sched.name
return Schedule(*schedules, name=name) | [
"def",
"union",
"(",
"*",
"schedules",
":",
"List",
"[",
"Union",
"[",
"ScheduleComponent",
",",
"Tuple",
"[",
"int",
",",
"ScheduleComponent",
"]",
"]",
"]",
",",
"name",
":",
"str",
"=",
"None",
")",
"->",
"Schedule",
":",
"if",
"name",
"is",
"None... | Create a union (and also shift if desired) of all input `Schedule`s.
Args:
*schedules: Schedules to take the union of
name: Name of the new schedule. Defaults to first element of `schedules` | [
"Create",
"a",
"union",
"(",
"and",
"also",
"shift",
"if",
"desired",
")",
"of",
"all",
"input",
"Schedule",
"s",
"."
] | python | test |
Crunch-io/crunch-cube | src/cr/cube/crunch_cube.py | https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1361-L1378 | def is_weighted(self):
"""True if weights have been applied to the measure(s) for this cube.
Unweighted counts are available for all cubes. Weighting applies to
any other measures provided by the cube.
"""
cube_dict = self._cube_dict
if cube_dict.get("query", {}).get("weight") is not None:
return True
if cube_dict.get("weight_var") is not None:
return True
if cube_dict.get("weight_url") is not None:
return True
unweighted_counts = cube_dict["result"]["counts"]
count_data = cube_dict["result"]["measures"].get("count", {}).get("data")
if unweighted_counts != count_data:
return True
return False | [
"def",
"is_weighted",
"(",
"self",
")",
":",
"cube_dict",
"=",
"self",
".",
"_cube_dict",
"if",
"cube_dict",
".",
"get",
"(",
"\"query\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"weight\"",
")",
"is",
"not",
"None",
":",
"return",
"True",
"if",
"cube... | True if weights have been applied to the measure(s) for this cube.
Unweighted counts are available for all cubes. Weighting applies to
any other measures provided by the cube. | [
"True",
"if",
"weights",
"have",
"been",
"applied",
"to",
"the",
"measure",
"(",
"s",
")",
"for",
"this",
"cube",
"."
] | python | train |
nathforge/validatesns | src/validatesns/__init__.py | https://github.com/nathforge/validatesns/blob/39f7f7d1fae215746bb9763856045b501fae05f4/src/validatesns/__init__.py#L28-L69 | def validate(
message,
get_certificate=lambda url: urlopen(url).read(),
certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX,
max_age=DEFAULT_MAX_AGE
):
"""
Validate a decoded SNS message.
Parameters:
message:
Decoded SNS message.
get_certificate:
Function that receives a URL, and returns the certificate from that
URL as a string. The default doesn't implement caching.
certificate_url_regex:
Regex that validates the signing certificate URL. Default value
checks it's hosted on an AWS-controlled domain, in the format
"https://sns.<data-center>.amazonaws.com/"
max_age:
Maximum age of an SNS message before it fails validation, expressed
as a `datetime.timedelta`. Defaults to one hour, the max. lifetime
of an SNS message.
"""
# Check the signing certicate URL.
SigningCertURLValidator(certificate_url_regex).validate(message)
# Check the message age.
if not isinstance(max_age, datetime.timedelta):
raise ValueError("max_age must be None or a timedelta object")
MessageAgeValidator(max_age).validate(message)
# Passed the basic checks, let's download the cert.
# We've validated the URL, so aren't worried about a malicious server.
certificate = get_certificate(message["SigningCertURL"])
# Check the cryptographic signature.
SignatureValidator(certificate).validate(message) | [
"def",
"validate",
"(",
"message",
",",
"get_certificate",
"=",
"lambda",
"url",
":",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
",",
"certificate_url_regex",
"=",
"DEFAULT_CERTIFICATE_URL_REGEX",
",",
"max_age",
"=",
"DEFAULT_MAX_AGE",
")",
":",
"# Ch... | Validate a decoded SNS message.
Parameters:
message:
Decoded SNS message.
get_certificate:
Function that receives a URL, and returns the certificate from that
URL as a string. The default doesn't implement caching.
certificate_url_regex:
Regex that validates the signing certificate URL. Default value
checks it's hosted on an AWS-controlled domain, in the format
"https://sns.<data-center>.amazonaws.com/"
max_age:
Maximum age of an SNS message before it fails validation, expressed
as a `datetime.timedelta`. Defaults to one hour, the max. lifetime
of an SNS message. | [
"Validate",
"a",
"decoded",
"SNS",
"message",
"."
] | python | test |
LogicalDash/LiSE | ELiDE/ELiDE/board/board.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/board.py#L147-L201 | def on_touch_down(self, touch):
"""Check for collisions and select an appropriate entity."""
if hasattr(self, '_lasttouch') and self._lasttouch == touch:
return
if not self.collide_point(*touch.pos):
return
touch.push()
touch.apply_transform_2d(self.to_local)
if self.app.selection:
if self.app.selection.collide_point(*touch.pos):
Logger.debug("Board: hit selection")
touch.grab(self.app.selection)
pawns = list(self.pawns_at(*touch.pos))
if pawns:
Logger.debug("Board: hit {} pawns".format(len(pawns)))
self.selection_candidates = pawns
if self.app.selection in self.selection_candidates:
self.selection_candidates.remove(self.app.selection)
touch.pop()
return True
spots = list(self.spots_at(*touch.pos))
if spots:
Logger.debug("Board: hit {} spots".format(len(spots)))
self.selection_candidates = spots
if self.adding_portal:
self.origspot = self.selection_candidates.pop(0)
self.protodest = Dummy(
name='protodest',
pos=touch.pos,
size=(0, 0)
)
self.add_widget(self.protodest)
self.protodest.on_touch_down(touch)
self.protoportal = self.proto_arrow_cls(
origin=self.origspot,
destination=self.protodest
)
self.add_widget(self.protoportal)
if self.reciprocal_portal:
self.protoportal2 = self.proto_arrow_cls(
destination=self.origspot,
origin=self.protodest
)
self.add_widget(self.protoportal2)
touch.pop()
return True
arrows = list(self.arrows_at(*touch.pos))
if arrows:
Logger.debug("Board: hit {} arrows".format(len(arrows)))
self.selection_candidates = arrows
if self.app.selection in self.selection_candidates:
self.selection_candidates.remove(self.app.selection)
touch.pop()
return True
touch.pop() | [
"def",
"on_touch_down",
"(",
"self",
",",
"touch",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_lasttouch'",
")",
"and",
"self",
".",
"_lasttouch",
"==",
"touch",
":",
"return",
"if",
"not",
"self",
".",
"collide_point",
"(",
"*",
"touch",
".",
"pos... | Check for collisions and select an appropriate entity. | [
"Check",
"for",
"collisions",
"and",
"select",
"an",
"appropriate",
"entity",
"."
] | python | train |
sirfoga/pyhal | hal/charts/plotter.py | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/charts/plotter.py#L159-L301 | def plot_type(self, func, mins, maxs, precision, kind):
"""Plots function
:param func: function to plot
:param mins: minimum of values (x, y ...)
:param maxs: maximum of values (x, y ...)
:param precision: precision to plot
:param kind: kind of plot, "slice", "countour"
"""
min_x, min_y, min_z = mins[0], mins[1], mins[2]
max_x, max_y, max_z = maxs[0], maxs[1], maxs[2]
def set_labels(graph, label_x, label_y, label_z):
"""Sets given labels to axes of graph
:param graph: plot
:param label_x: new label on x axis
:param label_y: new label on y axis
:param label_z: new label on z axis
"""
graph.set_xlabel(label_x)
graph.set_ylabel(label_y)
graph.set_zlabel(label_z)
def set_limits(graph):
"""Set chart limits to axes of graph
:param graph: plot
"""
graph.set_xlim(min_x, max_x)
graph.set_ylim(min_y, max_y)
graph.set_zlim(min_z, max_z)
def get_precision(min_val, max_val):
"""Calculates precision
:param min_val: minimum
:param max_val: maximum
:return: precision: prevision of values
"""
return int((max_val - min_val) * (1 + precision))
def get_precision_delta(min_val, max_val):
"""Calculates precision delta
:param min_val: minimum
:param max_val: maximum
:return: delta: Precision delta
"""
return float(max_val - min_val) / float(10 * precision)
def plot_slice():
""" Plots slice
:return: shows plot
"""
chart = plt.axes(projection="3d") # general settings
points_x = get_precision(min_x, max_x)
points_y = get_precision(min_y, max_z)
x_axis = numpy.outer(linspace(min_x, max_x, points_x), points_x)
y_axis = numpy.outer(
linspace(min_y, max_y, points_y).flatten(), points_y
).T
def update(val):
"""Updates chart with value
:param val: value
"""
chart.clear()
x_const = slider.val
z_axis = func(x_const, x_axis, y_axis)
chart.plot_surface(
x_axis, y_axis, z_axis, alpha=0.3, linewidth=2.0
)
set_labels(chart, "y", "z", "w")
# slider
axis_slider = plt.axes([0.12, 0.03, 0.78, 0.03], axisbg="white")
slider = Slider(axis_slider, "x", min_x, max_x, valinit=min_x)
slider.on_changed(update)
set_limits(chart)
self.show_plot()
slider.on_changed(update)
set_labels(chart, "y", "z", "w")
def plot_countour():
    """Interactively plot contour projections of w = func(x, y, z).

    A slider fixes the value of x; contours of func(x_const, y, z) are
    projected onto each coordinate plane plus drawn in 3D.  Reads func,
    precision, min_*/max_* from the enclosing plot_type() scope.
    """
    # general settings
    fig = plt.figure()
    chart = fig.gca(projection="3d")
    # create axes
    x_axis = numpy.arange(min_x, max_x, get_precision_delta(
        min_x, max_x)).tolist()
    y_axis = numpy.arange(min_y, max_y, get_precision_delta(
        min_y, max_y)).tolist()
    # meshgrid turns the 1D ranges into 2D coordinate matrices
    x_axis, y_axis = numpy.meshgrid(x_axis, y_axis)

    def update(val):
        """Recompute and redraw all contours for the slider's current x.

        :param val: new slider value (unused; read back via slider.val)
        """
        chart.clear()  # re-plot
        x_const = slider.val
        z_axis = []
        # add new points -- evaluate func row by row over the mesh
        for i, _ in enumerate(x_axis):
            z_axis.append(func(x_const, x_axis[i], y_axis[i]))
        # show -- one projected contour per coordinate plane, plus 3D
        chart.contour(
            x_axis, y_axis, z_axis, zdir="x", offset=min_x
        )
        chart.contour(
            x_axis, y_axis, z_axis, zdir="y", offset=min_y
        )
        chart.contour(
            x_axis, y_axis, z_axis, zdir="z", offset=min_z
        )
        chart.contour(x_axis, y_axis, z_axis, extend3d=True)
        set_labels(chart, "y", "z", "w")

    # slider
    # NOTE(review): `axisbg` is the legacy matplotlib (<2.0) spelling of
    # `facecolor` -- confirm against the targeted matplotlib version.
    axis_slider = plt.axes([0.12, 0.03, 0.78, 0.03], axisbg="white")
    slider = Slider(axis_slider, "x", min_x, max_x, valinit=min_x)
    slider.on_changed(update)
    set_limits(chart)
if kind == "slice":
plot_slice()
elif kind == "countour":
plot_countour()
self.show_plot() | [
"def",
"plot_type",
"(",
"self",
",",
"func",
",",
"mins",
",",
"maxs",
",",
"precision",
",",
"kind",
")",
":",
"min_x",
",",
"min_y",
",",
"min_z",
"=",
"mins",
"[",
"0",
"]",
",",
"mins",
"[",
"1",
"]",
",",
"mins",
"[",
"2",
"]",
"max_x",
... | Plots function
:param func: function to plot
:param mins: minimum of values (x, y ...)
:param maxs: maximum of values (x, y ...)
:param precision: precision to plot
:param kind: kind of plot, "slice", "countour" | [
"Plots",
"function"
] | python | train |
# Source: CGATOxford/UMI-tools -- umi_tools/Utilities.py
def getHeader():
    """Build a header string recording the UMI-tools version, the command
    line used, a timestamp, and host/system information.
    """
    uname_info = os.uname()
    command_line = " ".join(sys.argv)
    started_at = time.asctime(time.localtime(time.time()))
    return "# UMI-tools version: %s\n# output generated by %s\n# job started at %s on %s -- %s\n# pid: %i, system: %s %s %s %s" %\
        (__version__,
         command_line,
         started_at,
         uname_info[1],
         global_id,
         os.getpid(),
         uname_info[0], uname_info[2], uname_info[3], uname_info[4])
"def",
"getHeader",
"(",
")",
":",
"system",
",",
"host",
",",
"release",
",",
"version",
",",
"machine",
"=",
"os",
".",
"uname",
"(",
")",
"return",
"\"# UMI-tools version: %s\\n# output generated by %s\\n# job started at %s on %s -- %s\\n# pid: %i, system: %s %s %s %s\""... | return a header string with command line options and timestamp | [
"return",
"a",
"header",
"string",
"with",
"command",
"line",
"options",
"and",
"timestamp"
] | python | train |
# Source: deepmind/pysc2 -- pysc2/lib/remote_controller.py
def _connect(self, host, port, proc, timeout_seconds):
    """Connect to the websocket, retrying as needed. Returns the socket.

    :param host: hostname or IP; bare ipv6 addresses are bracketed.
    :param port: SC2 API port to connect to.
    :param proc: handle of the launched SC2 process (or None); its
        ``running`` flag is polled to bail out early if SC2 dies.
    :param timeout_seconds: number of 1s retry attempts; also used as the
        per-attempt websocket connection timeout.
    :raises ConnectError: if no connection could be established.
    """
    if ":" in host and not host.startswith("["):  # Support ipv6 addresses.
        host = "[%s]" % host
    url = "ws://%s:%s/sc2api" % (host, port)
    was_running = False
    for i in range(timeout_seconds):
        is_running = proc and proc.running
        was_running = was_running or is_running
        # Bail early once the process is confirmed dead, but grant a grace
        # period of timeout_seconds // 4 attempts in case it never reported
        # as running (slow start, or no proc handle at all).
        if (i >= timeout_seconds // 4 or was_running) and not is_running:
            logging.warning(
                "SC2 isn't running, so bailing early on the websocket connection.")
            break
        logging.info("Connecting to: %s, attempt: %s, running: %s", url, i,
                     is_running)
        try:
            return websocket.create_connection(url, timeout=timeout_seconds)
        except socket.error:
            pass  # SC2 hasn't started listening yet.
        except websocket.WebSocketBadStatusException as err:
            if err.status_code == 404:
                pass  # SC2 is listening, but hasn't set up the /sc2api endpoint yet.
            else:
                raise
        time.sleep(1)
    raise ConnectError("Failed to connect to the SC2 websocket. Is it up?")
"def",
"_connect",
"(",
"self",
",",
"host",
",",
"port",
",",
"proc",
",",
"timeout_seconds",
")",
":",
"if",
"\":\"",
"in",
"host",
"and",
"not",
"host",
".",
"startswith",
"(",
"\"[\"",
")",
":",
"# Support ipv6 addresses.",
"host",
"=",
"\"[%s]\"",
"... | Connect to the websocket, retrying as needed. Returns the socket. | [
"Connect",
"to",
"the",
"websocket",
"retrying",
"as",
"needed",
".",
"Returns",
"the",
"socket",
"."
] | python | train |
# Source: saltstack/salt -- salt/log/setup.py
def setup_extended_logging(opts):
    '''
    Setup any additional logging handlers, internal or external

    :param opts: salt options dict, passed to the loader to discover and
        configure ``log_handlers`` provider modules.
    '''
    if is_extended_logging_configured() is True:
        # Don't re-configure external loggers
        return

    # Explicit late import of salt's loader
    import salt.loader

    # Let's keep a reference to the current logging handlers
    initial_handlers = logging.root.handlers[:]

    # Load any additional logging handlers
    providers = salt.loader.log_handlers(opts)

    # Let's keep track of the new logging handlers so we can sync the stored
    # log records with them
    additional_handlers = []

    for name, get_handlers_func in six.iteritems(providers):
        logging.getLogger(__name__).info('Processing `log_handlers.%s`', name)
        # Keep a reference to the logging handlers count before getting the
        # possible additional ones.
        initial_handlers_count = len(logging.root.handlers)

        handlers = get_handlers_func()
        if isinstance(handlers, types.GeneratorType):
            # A provider may yield handlers lazily; materialize them now.
            handlers = list(handlers)
        elif handlers is False or handlers == [False]:
            # A false return value means not configuring any logging handler on
            # purpose
            logging.getLogger(__name__).info(
                'The `log_handlers.%s.setup_handlers()` function returned '
                '`False` which means no logging handler was configured on '
                'purpose. Continuing...', name
            )
            continue
        else:
            # Make sure we have an iterable
            handlers = [handlers]

        for handler in handlers:
            if not handler and \
                    len(logging.root.handlers) == initial_handlers_count:
                # Falsy handler AND nothing registered itself on the root
                # logger either -- likely a broken provider.
                logging.getLogger(__name__).info(
                    'The `log_handlers.%s`, did not return any handlers '
                    'and the global handlers count did not increase. This '
                    'could be a sign of `log_handlers.%s` not working as '
                    'supposed', name, name
                )
                continue

            logging.getLogger(__name__).debug(
                'Adding the \'%s\' provided logging handler: \'%s\'',
                name, handler
            )
            additional_handlers.append(handler)
            logging.root.addHandler(handler)

    # Also pick up handlers that providers attached directly to the root
    # logger instead of returning them.
    # NOTE(review): handlers added via logging.root.addHandler() above also
    # match this sweep, so they end up in additional_handlers twice --
    # confirm sync_with_handlers() tolerates duplicates.
    for handler in logging.root.handlers:
        if handler in initial_handlers:
            continue
        additional_handlers.append(handler)

    # Sync the null logging handler messages with the temporary handler
    if LOGGING_STORE_HANDLER is not None:
        LOGGING_STORE_HANDLER.sync_with_handlers(additional_handlers)
    else:
        logging.getLogger(__name__).debug(
            'LOGGING_STORE_HANDLER is already None, can\'t sync messages '
            'with it'
        )

    # Remove the temporary queue logging handler
    __remove_queue_logging_handler()

    # Remove the temporary null logging handler (if it exists)
    __remove_null_logging_handler()

    global __EXTERNAL_LOGGERS_CONFIGURED
    __EXTERNAL_LOGGERS_CONFIGURED = True
"def",
"setup_extended_logging",
"(",
"opts",
")",
":",
"if",
"is_extended_logging_configured",
"(",
")",
"is",
"True",
":",
"# Don't re-configure external loggers",
"return",
"# Explicit late import of salt's loader",
"import",
"salt",
".",
"loader",
"# Let's keep a referenc... | Setup any additional logging handlers, internal or external | [
"Setup",
"any",
"additional",
"logging",
"handlers",
"internal",
"or",
"external"
] | python | train |
# Source: MostAwesomeDude/gentleman -- gentleman/base.py
def RenameGroup(r, group, new_name):
    """
    Changes the name of a node group.

    @type group: string
    @param group: current node group name
    @type new_name: string
    @param new_name: new name to assign to the group

    @rtype: int
    @return: job id
    """
    return r.request("put", "/2/groups/%s/rename" % group,
                     content={"new_name": new_name})
"def",
"RenameGroup",
"(",
"r",
",",
"group",
",",
"new_name",
")",
":",
"body",
"=",
"{",
"\"new_name\"",
":",
"new_name",
",",
"}",
"return",
"r",
".",
"request",
"(",
"\"put\"",
",",
"\"/2/groups/%s/rename\"",
"%",
"group",
",",
"content",
"=",
"body"... | Changes the name of a node group.
@type group: string
@param group: Node group name
@type new_name: string
@param new_name: New node group name
@rtype: int
@return: job id | [
"Changes",
"the",
"name",
"of",
"a",
"node",
"group",
"."
] | python | train |
# Source: PyCQA/pylint -- pylint/message/message_handler_mix_in.py
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
    """Return the scope at which a message was enabled or disabled."""
    allowed_confidences = self.config.confidence
    if allowed_confidences and confidence.name not in allowed_confidences:
        return MSG_STATE_CONFIDENCE
    try:
        # Unknown msgid (KeyError) or an unusable state table / unhashable
        # line (TypeError) both mean the state comes from the config.
        module_line_state = self.file_state._module_msgs_state[msgid]
        if line in module_line_state:
            return MSG_STATE_SCOPE_MODULE
    except (KeyError, TypeError):
        return MSG_STATE_SCOPE_CONFIG
    return None
"def",
"get_message_state_scope",
"(",
"self",
",",
"msgid",
",",
"line",
"=",
"None",
",",
"confidence",
"=",
"UNDEFINED",
")",
":",
"if",
"self",
".",
"config",
".",
"confidence",
"and",
"confidence",
".",
"name",
"not",
"in",
"self",
".",
"config",
".... | Returns the scope at which a message was enabled/disabled. | [
"Returns",
"the",
"scope",
"at",
"which",
"a",
"message",
"was",
"enabled",
"/",
"disabled",
"."
] | python | test |
# Source: intake/intake -- intake/gui/base.py
def source(self, source):
    """Normalize and store a newly assigned source.

    A list is collapsed to its first element, or to None when empty, so
    that the select widget always tracks a single source.
    """
    if isinstance(source, list):
        if source:
            source = source[0]
        else:
            source = None
    self._source = source
"def",
"source",
"(",
"self",
",",
"source",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"list",
")",
":",
"# if source is a list, get first item or None",
"source",
"=",
"source",
"[",
"0",
"]",
"if",
"len",
"(",
"source",
")",
">",
"0",
"else",
"... | When the source gets updated, update the select widget | [
"When",
"the",
"source",
"gets",
"updated",
"update",
"the",
"select",
"widget"
] | python | train |
# Source: aiidateam/aiida-ase -- aiida_ase/calculations/ase.py
def _prepare_for_submission(self,tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    Validates the input nodes, writes the structure to file, generates the
    ASE python script to be executed, and assembles the CalcInfo/CodeInfo
    describing the job.

    :param tempfolder: a aiida.common.folders.Folder subclass where
        the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
        be returned by get_inputdata_dict (without the Code!)
    :return: a CalcInfo instance describing the calculation
    :raises InputValidationError: on missing or wrongly-typed inputs
    """
    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError("No code specified for this "
                                   "calculation")
    try:
        parameters = inputdict.pop(self.get_linkname('parameters'))
    except KeyError:
        raise InputValidationError("No parameters specified for this "
                                   "calculation")
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters is not of type "
                                   "ParameterData")
    try:
        structure = inputdict.pop(self.get_linkname('structure'))
    except KeyError:
        raise InputValidationError("No structure specified for this "
                                   "calculation")
    if not isinstance(structure,StructureData):
        raise InputValidationError("structure node is not of type"
                                   "StructureData")
    # NOTE: pop() with a default never raises KeyError; the try/except is
    # kept for backward compatibility of the control flow.
    try:
        settings = inputdict.pop(self.get_linkname('settings'),None)
    except KeyError:
        pass
    if settings is not None:
        # BUG FIX: this used to re-check `parameters` instead of `settings`,
        # so a wrongly-typed settings node was never rejected.
        if not isinstance(settings, ParameterData):
            raise InputValidationError("settings is not of type "
                                       "ParameterData")
    try:
        kpoints = inputdict.pop(self.get_linkname('kpoints'),None)
    except KeyError:
        pass
    if kpoints is not None:
        if not isinstance(kpoints, KpointsData):
            raise InputValidationError("kpoints is not of type KpointsData")

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    # default atom getter: I will always retrieve the total energy at least
    default_atoms_getters = [ ["total_energy",""] ]

    # ================================
    # save the structure in ase format
    atoms = structure.get_ase()
    atoms.write(tempfolder.get_abs_path(self._input_aseatoms))

    # ================== prepare the arguments of functions ================
    parameters_dict = parameters.get_dict()
    settings_dict = settings.get_dict() if settings is not None else {}

    # ==================== fix the args of the optimizer
    optimizer = parameters_dict.pop("optimizer",None)
    if optimizer is not None:
        # Validation
        if not isinstance(optimizer,dict):
            raise InputValidationError("optimizer key must contain a dictionary")
        # get the name of the optimizer
        optimizer_name = optimizer.pop("name",None)
        if optimizer_name is None:
            raise InputValidationError("Don't have access to the optimizer name")
        # prepare the arguments to be passed to the optimizer class
        optimizer_argsstr = "atoms, " + convert_the_args(optimizer.pop("args",[]))
        # prepare the arguments to be passed to optimizer.run()
        optimizer_runargsstr = convert_the_args(optimizer.pop("run_args",[]))
        # prepare the import string
        optimizer_import_string = get_optimizer_impstr(optimizer_name)

    # ================= determine the calculator name and its import ====
    calculator = parameters_dict.pop("calculator",{})
    calculator_import_string = get_calculator_impstr(calculator.pop("name",None))

    # =================== prepare the arguments for the calculator call
    read_calc_args = calculator.pop("args",[])
    #calc_args = calculator.pop("args",None)
    if read_calc_args is None:
        calc_argsstr = ""
    else:
        # transform a in "a" if a is a string (needed for formatting)
        calc_args = {}
        for k,v in read_calc_args.iteritems():
            if isinstance(v, basestring):
                the_v = '"{}"'.format(v)
            else:
                the_v = v
            calc_args[k] = the_v

        def return_a_function(v):
            # Render dicts of the form {"@function": name, "args": {...}} as a
            # literal function call in the generated script; anything else is
            # passed through unchanged.
            try:
                has_magic = "@function" in v.keys()
            except AttributeError:
                has_magic = False

            if has_magic:
                args_dict = {}
                for k2,v2 in v['args'].iteritems():
                    if isinstance(v2,basestring):
                        the_v = '"{}"'.format(v2)
                    else:
                        the_v = v2
                    args_dict[k2] = the_v
                v2 = "{}({})".format(v['@function'],
                                     ", ".join(["{}={}".format(k_,v_)
                                                for k_,v_ in args_dict.iteritems()]))
                return v2
            else:
                return v

        tmp_list = [ "{}={}".format(k,return_a_function(v))
                     for k,v in calc_args.iteritems() ]
        calc_argsstr = ", ".join( tmp_list )

    # add kpoints if present
    if kpoints:
        #TODO: here only the mesh is supported
        # maybe kpoint lists are supported as well in ASE calculators
        try:
            mesh = kpoints.get_kpoints_mesh()[0]
        except AttributeError:
            raise InputValidationError("Coudn't find a mesh of kpoints"
                                       " in the KpointsData")
        calc_argsstr = ", ".join( [calc_argsstr] + ["kpts=({},{},{})".format( *mesh )] )

    # =============== prepare the methods of atoms.get(), to save results
    atoms_getters = default_atoms_getters + convert_the_getters( parameters_dict.pop("atoms_getters",[]) )

    # =============== prepare the methods of calculator.get(), to save results
    calculator_getters = convert_the_getters( parameters_dict.pop("calculator_getters",[]) )

    # ===================== build the strings with the module imports
    all_imports = ["import ase", 'import ase.io', "import json",
                   "import numpy", calculator_import_string]

    if optimizer is not None:
        all_imports.append(optimizer_import_string)

    # NOTE(review): if read_calc_args is None, `calc_args` is undefined here
    # and this raises NameError (only KeyError is caught) -- confirm whether
    # a None args value is reachable in practice.
    try:
        if "PW" in calc_args['mode'].values():
            all_imports.append("from gpaw import PW")
    except KeyError:
        pass

    extra_imports = parameters_dict.pop("extra_imports",[])
    for i in extra_imports:
        if isinstance(i,basestring):
            all_imports.append("import {}".format(i))
        elif isinstance(i,(list,tuple)):
            if not all( [isinstance(j,basestring) for j in i] ):
                raise ValueError("extra import must contain strings")
            if len(i)==2:
                all_imports.append("from {} import {}".format(*i))
            elif len(i)==3:
                all_imports.append("from {} import {} as {}".format(*i))
            else:
                raise ValueError("format for extra imports not recognized")
        else:
            raise ValueError("format for extra imports not recognized")

    if self.get_withmpi():
        all_imports.append( "from ase.parallel import paropen" )

    all_imports_string = "\n".join(all_imports) + "\n"

    # =================== prepare the python script ========================
    input_txt = ""
    input_txt += get_file_header()
    input_txt += "# calculation pk: {}\n".format(self.pk)
    input_txt += "\n"
    input_txt += all_imports_string
    input_txt += "\n"

    pre_lines = parameters_dict.pop("pre_lines",None)
    if pre_lines is not None:
        if not isinstance(pre_lines,(list,tuple)):
            raise ValueError("Prelines must be a list of strings")
        if not all( [isinstance(_,basestring) for _ in pre_lines] ):
            raise ValueError("Prelines must be a list of strings")
        input_txt += "\n".join(pre_lines) + "\n\n"

    input_txt += "atoms = ase.io.read('{}')\n".format(self._input_aseatoms)
    input_txt += "\n"
    input_txt += "calculator = custom_calculator({})\n".format(calc_argsstr)
    input_txt += "atoms.set_calculator(calculator)\n"
    input_txt += "\n"

    if optimizer is not None:
        # here block the trajectory file name: trajectory = 'aiida.traj'
        input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr)
        input_txt += "optimizer.run({})\n".format(optimizer_runargsstr)
        input_txt += "\n"

    # now dump / calculate the results
    input_txt += "results = {}\n"

    for getter,getter_args in atoms_getters:
        input_txt += "results['{}'] = atoms.get_{}({})\n".format(getter,
                                                                 getter,
                                                                 getter_args)
    input_txt += "\n"

    for getter,getter_args in calculator_getters:
        input_txt += "results['{}'] = calculator.get_{}({})\n".format(getter,
                                                                      getter,
                                                                      getter_args)
    input_txt += "\n"

    # Convert to lists
    input_txt += "for k,v in results.iteritems():\n"
    input_txt += "    if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n"
    input_txt += "        results[k] = results[k].tolist()\n"
    input_txt += "\n"

    post_lines = parameters_dict.pop("post_lines",None)
    if post_lines is not None:
        if not isinstance(post_lines,(list,tuple)):
            raise ValueError("Postlines must be a list of strings")
        if not all( [isinstance(_,basestring) for _ in post_lines] ):
            raise ValueError("Postlines must be a list of strings")
        input_txt += "\n".join(post_lines) + "\n\n"

    # Dump results to file
    right_open = "paropen" if self.get_withmpi() else "open"
    input_txt += "with {}('{}', 'w') as f:\n".format(right_open, self._OUTPUT_FILE_NAME)
    input_txt += "    json.dump(results,f)"
    input_txt += "\n"

    # Dump trajectory if present
    if optimizer is not None:
        input_txt += "atoms.write('{}')\n".format(self._output_aseatoms)
        input_txt += "\n"

    # write all the input script to a file
    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    with open(input_filename,'w') as infile:
        infile.write(input_txt)

    # ============================ calcinfo ================================

    # TODO: look at the qmmm infoL: it might be necessary to put
    # some singlefiles in the directory.
    # right now it has to be taken care in the pre_lines

    local_copy_list = []
    remote_copy_list = []
    additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])

    calcinfo = CalcInfo()

    calcinfo.uuid = self.uuid
    # Empty command line by default
    # calcinfo.cmdline_params = settings_dict.pop('CMDLINE', [])
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = [self._INPUT_FILE_NAME]
    #calcinfo.stdin_name = self._INPUT_FILE_NAME
    codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    # Retrieve files
    calcinfo.retrieve_list = []
    calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
    calcinfo.retrieve_list.append(self._output_aseatoms)
    calcinfo.retrieve_list += additional_retrieve_list

    # TODO: I should have two ways of running it: with gpaw-python in parallel
    # and executing python if in serial
    return calcinfo
"def",
"_prepare_for_submission",
"(",
"self",
",",
"tempfolder",
",",
"inputdict",
")",
":",
"try",
":",
"code",
"=",
"inputdict",
".",
"pop",
"(",
"self",
".",
"get_linkname",
"(",
"'code'",
")",
")",
"except",
"KeyError",
":",
"raise",
"InputValidationErr... | This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!) | [
"This",
"is",
"the",
"routine",
"to",
"be",
"called",
"when",
"you",
"want",
"to",
"create",
"the",
"input",
"files",
"and",
"related",
"stuff",
"with",
"a",
"plugin",
".",
":",
"param",
"tempfolder",
":",
"a",
"aiida",
".",
"common",
".",
"folders",
"... | python | train |
# Source: yjzhang/uncurl_python -- uncurl/ensemble.py
def poisson_consensus_se(data, k, n_runs=10, **se_params):
    """Initialize Poisson State Estimation from a consensus Poisson clustering.

    Runs Poisson clustering ``n_runs`` times, combines the assignments into
    a consensus via cluster ensembles, derives NMF-based initial means and
    weights from the consensus, and runs state estimation from there.

    Returns the (M, W, ll) tuple from poisson_estimate_state.
    """
    run_assignments = [poisson_cluster(data, k)[0] for _ in range(n_runs)]
    consensus = CE.cluster_ensembles(
        np.vstack(run_assignments), verbose=False, N_clusters_max=k
    )
    init_m, init_w = nmf_init(data, consensus, k, 'basic')
    return poisson_estimate_state(
        data, k, init_means=init_m, init_weights=init_w, **se_params
    )
"def",
"poisson_consensus_se",
"(",
"data",
",",
"k",
",",
"n_runs",
"=",
"10",
",",
"*",
"*",
"se_params",
")",
":",
"clusters",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_runs",
")",
":",
"assignments",
",",
"means",
"=",
"poisson_cluster",
... | Initializes Poisson State Estimation using a consensus Poisson clustering. | [
"Initializes",
"Poisson",
"State",
"Estimation",
"using",
"a",
"consensus",
"Poisson",
"clustering",
"."
] | python | train |
# Source: AkihikoITOH/capybara -- site-packages/flask/config.py
def from_object(self, obj):
    """Pull uppercase attributes from *obj* into the config.

    *obj* may be given either as an import string (the named object is
    imported first) or as an actual object reference such as a module or
    class.  Only uppercase attributes are copied into the config.

    Example usage::

        app.config.from_object('yourapplication.default_config')

        from yourapplication import default_config
        app.config.from_object(default_config)

    Intended for loading configuration *defaults*; the actual config
    should be loaded with :meth:`from_pyfile`, ideally from a location
    outside the package (which may be installed system wide).

    :param obj: an import name or object
    """
    if isinstance(obj, string_types):
        obj = import_string(obj)
    for attr_name in dir(obj):
        if attr_name.isupper():
            self[attr_name] = getattr(obj, attr_name)
"def",
"from_object",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
":",
"obj",
"=",
"import_string",
"(",
"obj",
")",
"for",
"key",
"in",
"dir",
"(",
"obj",
")",
":",
"if",
"key",
".",
"isupper",
"(",... | Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object | [
"Updates",
"the",
"values",
"from",
"the",
"given",
"object",
".",
"An",
"object",
"can",
"be",
"of",
"one",
"of",
"the",
"following",
"two",
"types",
":"
] | python | test |
# Source: Esri/ArcREST -- src/arcrest/manageorg/_content.py
def __init(self, folder='/'):
    """loads the property data into the class

    Pages through self.search() results for the given folder, accumulates
    them into a single result dict, then mirrors the dict's keys onto
    matching private instance attributes.
    """
    # NOTE(review): `params` is built but never used below.
    params = {
        "f" : "json"
    }
    if folder is None or folder == "/":
        folder = 'root'
    # Template accumulating the merged, paged search results.
    result_template = {
        "username": "",
        "total": 0,
        "start": 1,
        "num": 0,
        "nextStart": 0,
        "currentFolder": None,
        "items": [],
        "folders": []
    }
    nextStart = 1
    self._json_dict = {}
    self._json = ""
    # search() pages 100 results at a time; nextStart == -1 means done.
    while nextStart > -1:
        res = self.search(start=nextStart, num=100)
        # Restore the username's original casing inside the stored
        # location/url (the service lowercases it).
        if self._location.find(res['username'].lower()) > -1:
            self._location = self._location.replace(res['username'].lower(), res['username'])
            self._url = self._url.replace(res['username'].lower(), res['username'])
        nextStart = int(res['nextStart'])
        result_template['username'] = res['username']
        result_template["total"] = res["total"]
        result_template['nextStart'] = res['nextStart']
        result_template['start'] = res['start']
        result_template['num'] = res['num']
        #Added so the root has content to match when in a folder,
        #not sure if this is best practice or not. Did not add
        #username and created
        #if res['currentFolder'] is None:
        #result_template['currentFolder'] = {
        #'title': 'root',
        #'id': None,
        #'created' : None,
        #'username' : None
        #}
        #result_template['folders'].insert(0, result_template['currentFolder'])
        #else:
        result_template['currentFolder'] = res['currentFolder']
        # Deduplicate items across pages.
        for item in res['items']:
            if item not in result_template['items']:
                result_template['items'].append(item)
        #if 'folders' in res and \
        #folder.lower() == 'root':
        #for folder in res['folders']:
        #if folder not in result_template['folders']:
        #result_template['folders'].append(folder)
    self._json_dict = result_template
    self._json = json.dumps(result_template)
    # Mirror result keys onto the matching _<name> instance attributes;
    # only keys with a same-named public attribute are applied.
    attributes = [attr for attr in dir(self)
                  if not attr.startswith('__') and \
                  not attr.startswith('_')]
    for k,v in result_template.items():
        if k in attributes:
            setattr(self, "_"+ k, result_template[k])
        else:
            print( k, " - attribute not implemented in Content.User class.")
    self._loadFolders()
"def",
"__init",
"(",
"self",
",",
"folder",
"=",
"'/'",
")",
":",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"if",
"folder",
"is",
"None",
"or",
"folder",
"==",
"\"/\"",
":",
"folder",
"=",
"'root'",
"result_template",
"=",
"{",
"\"username\""... | loads the property data into the class | [
"loads",
"the",
"property",
"data",
"into",
"the",
"class"
] | python | train |
# Source: liip/taxi -- taxi/aliases.py
def filter_from_mapping(self, mapping, backend=None):
    """
    Return the aliases whose mapping matches the given `mapping` tuple.

    An alias matches when its mapping equals `mapping` exactly, or, when
    the second item of `mapping` is `None`, when only the first items
    agree (useful to show all mappings for a given project).  When
    `backend` is given, only aliases of that backend are considered.
    The result is an OrderedDict sorted by mapping tuple.
    """
    def _matches(pair):
        _alias, entry = pair
        if backend is not None and entry.backend != backend:
            return False
        if mapping is None or entry.mapping == mapping:
            return True
        return (mapping[1] is None and entry.mapping is not None
                and entry.mapping[0] == mapping[0])

    selected = [pair for pair in six.iteritems(self) if _matches(pair)]
    selected.sort(
        key=lambda pair: pair[1].mapping if pair[1] is not None else (0, 0)
    )
    return collections.OrderedDict(selected)
"def",
"filter_from_mapping",
"(",
"self",
",",
"mapping",
",",
"backend",
"=",
"None",
")",
":",
"def",
"mapping_filter",
"(",
"key_item",
")",
":",
"key",
",",
"item",
"=",
"key_item",
"return",
"(",
"(",
"mapping",
"is",
"None",
"or",
"item",
".",
"... | Return mappings that either exactly correspond to the given `mapping`
tuple, or, if the second item of `mapping` is `None`, include mappings
that only match the first item of `mapping` (useful to show all
mappings for a given project). | [
"Return",
"mappings",
"that",
"either",
"exactly",
"correspond",
"to",
"the",
"given",
"mapping",
"tuple",
"or",
"if",
"the",
"second",
"item",
"of",
"mapping",
"is",
"None",
"include",
"mappings",
"that",
"only",
"match",
"the",
"first",
"item",
"of",
"mapp... | python | train |
kivy/python-for-android | pythonforandroid/bootstrap.py | https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstrap.py#L224-L229 | def distribute_javaclasses(self, javaclass_dir, dest_dir="src"):
'''Copy existing javaclasses from build dir to current dist dir.'''
info('Copying java files')
ensure_dir(dest_dir)
for filename in glob.glob(javaclass_dir):
shprint(sh.cp, '-a', filename, dest_dir) | [
"def",
"distribute_javaclasses",
"(",
"self",
",",
"javaclass_dir",
",",
"dest_dir",
"=",
"\"src\"",
")",
":",
"info",
"(",
"'Copying java files'",
")",
"ensure_dir",
"(",
"dest_dir",
")",
"for",
"filename",
"in",
"glob",
".",
"glob",
"(",
"javaclass_dir",
")"... | Copy existing javaclasses from build dir to current dist dir. | [
"Copy",
"existing",
"javaclasses",
"from",
"build",
"dir",
"to",
"current",
"dist",
"dir",
"."
] | python | train |
dwkim78/upsilon | upsilon/extract_features/extract_features.py | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L564-L600 | def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values | [
"def",
"get_features2",
"(",
"self",
")",
":",
"feature_names",
"=",
"[",
"]",
"feature_values",
"=",
"[",
"]",
"# Get all the names of features.",
"all_vars",
"=",
"vars",
"(",
"self",
")",
"for",
"name",
"in",
"all_vars",
".",
"keys",
"(",
")",
":",
"# O... | Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values | [
"Return",
"all",
"features",
"with",
"its",
"names",
"."
] | python | train |
Qiskit/qiskit-terra | qiskit/quantum_info/operators/operator.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/operator.py#L402-L416 | def _format_state(self, state):
"""Format input state so it is statevector or density matrix"""
state = np.array(state)
shape = state.shape
ndim = state.ndim
if ndim > 2:
raise QiskitError('Input state is not a vector or matrix.')
# Flatten column-vector to vector
if ndim == 2:
if shape[1] != 1 and shape[1] != shape[0]:
raise QiskitError('Input state is not a vector or matrix.')
if shape[1] == 1:
# flatten colum-vector to vector
state = np.reshape(state, shape[0])
return state | [
"def",
"_format_state",
"(",
"self",
",",
"state",
")",
":",
"state",
"=",
"np",
".",
"array",
"(",
"state",
")",
"shape",
"=",
"state",
".",
"shape",
"ndim",
"=",
"state",
".",
"ndim",
"if",
"ndim",
">",
"2",
":",
"raise",
"QiskitError",
"(",
"'In... | Format input state so it is statevector or density matrix | [
"Format",
"input",
"state",
"so",
"it",
"is",
"statevector",
"or",
"density",
"matrix"
] | python | test |
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_example.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_example.py#L59-L67 | def status(self):
'''returns information about module'''
self.status_callcount += 1
self.last_bored = time.time() # status entertains us
return("status called %(status_callcount)d times. My target positions=%(packets_mytarget)u Other target positions=%(packets_mytarget)u" %
{"status_callcount": self.status_callcount,
"packets_mytarget": self.packets_mytarget,
"packets_othertarget": self.packets_othertarget,
}) | [
"def",
"status",
"(",
"self",
")",
":",
"self",
".",
"status_callcount",
"+=",
"1",
"self",
".",
"last_bored",
"=",
"time",
".",
"time",
"(",
")",
"# status entertains us",
"return",
"(",
"\"status called %(status_callcount)d times. My target positions=%(packets_mytarg... | returns information about module | [
"returns",
"information",
"about",
"module"
] | python | train |
Sanji-IO/sanji | sanji/core.py | https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L271-L292 | def stop(self, *args, **kwargs):
"""
exit
"""
_logger.debug("Bundle [%s] has been shutting down" %
self.bundle.profile["name"])
if hasattr(self, 'before_stop') and \
hasattr(self.before_stop, '__call__'):
_logger.debug("Invoking before_stop...")
self.before_stop()
self._conn.disconnect()
self._session.stop()
self.stop_event.set()
# TODO: shutdown all threads
for thread, stop in self.thread_list:
stop()
for thread, stop in self.thread_list:
thread.join()
self.is_ready.clear() | [
"def",
"stop",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Bundle [%s] has been shutting down\"",
"%",
"self",
".",
"bundle",
".",
"profile",
"[",
"\"name\"",
"]",
")",
"if",
"hasattr",
"(",
"self",
... | exit | [
"exit"
] | python | train |
NoneGG/aredis | aredis/connection.py | https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/connection.py#L651-L663 | async def on_connect(self):
"""
Initialize the connection, authenticate and select a database and send READONLY if it is
set during object initialization.
"""
if self.db:
warnings.warn('SELECT DB is not allowed in cluster mode')
self.db = ''
await super(ClusterConnection, self).on_connect()
if self.readonly:
await self.send_command('READONLY')
if nativestr(await self.read_response()) != 'OK':
raise ConnectionError('READONLY command failed') | [
"async",
"def",
"on_connect",
"(",
"self",
")",
":",
"if",
"self",
".",
"db",
":",
"warnings",
".",
"warn",
"(",
"'SELECT DB is not allowed in cluster mode'",
")",
"self",
".",
"db",
"=",
"''",
"await",
"super",
"(",
"ClusterConnection",
",",
"self",
")",
... | Initialize the connection, authenticate and select a database and send READONLY if it is
set during object initialization. | [
"Initialize",
"the",
"connection",
"authenticate",
"and",
"select",
"a",
"database",
"and",
"send",
"READONLY",
"if",
"it",
"is",
"set",
"during",
"object",
"initialization",
"."
] | python | train |
fredrike/pypoint | pypoint/__init__.py | https://github.com/fredrike/pypoint/blob/b5c9a701d2b7e24d796aa7f8c410360b61d8ec8a/pypoint/__init__.py#L295-L302 | def device_status(self):
"""Status of device."""
return {
'active': self.device['active'],
'offline': self.device['offline'],
'last_update': self.last_update,
'battery_level': self.battery_level,
} | [
"def",
"device_status",
"(",
"self",
")",
":",
"return",
"{",
"'active'",
":",
"self",
".",
"device",
"[",
"'active'",
"]",
",",
"'offline'",
":",
"self",
".",
"device",
"[",
"'offline'",
"]",
",",
"'last_update'",
":",
"self",
".",
"last_update",
",",
... | Status of device. | [
"Status",
"of",
"device",
"."
] | python | train |
internetarchive/doublethink | doublethink/rethinker.py | https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/rethinker.py#L141-L155 | def _server_whitelist(self):
'''
Returns list of servers that have not errored in the last five minutes.
If all servers have errored in the last five minutes, returns list with
one item, the server that errored least recently.
'''
whitelist = []
for server in self.servers:
if (server not in self.last_error
or self.last_error[server] < time.time() - self.PENALTY_BOX_TIME):
whitelist.append(server)
if not whitelist:
whitelist.append(sorted(
self.last_error.items(), key=lambda kv: kv[1])[0][0])
return whitelist | [
"def",
"_server_whitelist",
"(",
"self",
")",
":",
"whitelist",
"=",
"[",
"]",
"for",
"server",
"in",
"self",
".",
"servers",
":",
"if",
"(",
"server",
"not",
"in",
"self",
".",
"last_error",
"or",
"self",
".",
"last_error",
"[",
"server",
"]",
"<",
... | Returns list of servers that have not errored in the last five minutes.
If all servers have errored in the last five minutes, returns list with
one item, the server that errored least recently. | [
"Returns",
"list",
"of",
"servers",
"that",
"have",
"not",
"errored",
"in",
"the",
"last",
"five",
"minutes",
".",
"If",
"all",
"servers",
"have",
"errored",
"in",
"the",
"last",
"five",
"minutes",
"returns",
"list",
"with",
"one",
"item",
"the",
"server",... | python | train |
dtmilano/AndroidViewClient | src/com/dtmilano/android/adb/adbclient.py | https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/adb/adbclient.py#L994-L1011 | def isLocked(self):
'''
Checks if the device screen is locked.
@return True if the device screen is locked
'''
self.__checkTransport()
lockScreenRE = re.compile('mShowingLockscreen=(true|false)')
dwp = self.shell('dumpsys window policy')
m = lockScreenRE.search(dwp)
if m:
return m.group(1) == 'true'
dreamingLockscreenRE = re.compile('mDreamingLockscreen=(true|false)')
m = dreamingLockscreenRE.search(dwp)
if m:
return m.group(1) == 'true'
raise RuntimeError("Couldn't determine screen lock state") | [
"def",
"isLocked",
"(",
"self",
")",
":",
"self",
".",
"__checkTransport",
"(",
")",
"lockScreenRE",
"=",
"re",
".",
"compile",
"(",
"'mShowingLockscreen=(true|false)'",
")",
"dwp",
"=",
"self",
".",
"shell",
"(",
"'dumpsys window policy'",
")",
"m",
"=",
"l... | Checks if the device screen is locked.
@return True if the device screen is locked | [
"Checks",
"if",
"the",
"device",
"screen",
"is",
"locked",
"."
] | python | train |
soasme/dogeon | dson/decoder.py | https://github.com/soasme/dogeon/blob/496b9a5b099946d14434ed0cd7a94a270f607207/dson/decoder.py#L357-L370 | def raw_decode(self, s, idx=0):
r"""Decode a DSON document from ``s`` (a ``str`` or ``unicode``
beginning with a DSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a DSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No DSON object could be decoded")
return obj, end | [
"def",
"raw_decode",
"(",
"self",
",",
"s",
",",
"idx",
"=",
"0",
")",
":",
"try",
":",
"obj",
",",
"end",
"=",
"self",
".",
"scan_once",
"(",
"s",
",",
"idx",
")",
"except",
"StopIteration",
":",
"raise",
"ValueError",
"(",
"\"No DSON object could be ... | r"""Decode a DSON document from ``s`` (a ``str`` or ``unicode``
beginning with a DSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a DSON document from a string that may
have extraneous data at the end. | [
"r",
"Decode",
"a",
"DSON",
"document",
"from",
"s",
"(",
"a",
"str",
"or",
"unicode",
"beginning",
"with",
"a",
"DSON",
"document",
")",
"and",
"return",
"a",
"2",
"-",
"tuple",
"of",
"the",
"Python",
"representation",
"and",
"the",
"index",
"in",
"s"... | python | train |
Kunstmord/datalib | src/dataset.py | https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L408-L427 | def return_single_features_base(dbpath, set_object, object_id):
"""
Generic function which returns the features of an object specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
features : dict containing the features
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
session.close()
return tmp_object.features | [
"def",
"return_single_features_base",
"(",
"dbpath",
",",
"set_object",
",",
"object_id",
")",
":",
"engine",
"=",
"create_engine",
"(",
"'sqlite:////'",
"+",
"dbpath",
")",
"session_cl",
"=",
"sessionmaker",
"(",
"bind",
"=",
"engine",
")",
"session",
"=",
"s... | Generic function which returns the features of an object specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
features : dict containing the features | [
"Generic",
"function",
"which",
"returns",
"the",
"features",
"of",
"an",
"object",
"specified",
"by",
"the",
"object_id"
] | python | train |
google/apitools | apitools/base/py/encoding_helper.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/encoding_helper.py#L464-L485 | def _EncodeUnknownFields(message):
"""Remap unknown fields in message out of message.source."""
source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if source is None:
return message
# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
# the vanilla protojson-based copy function to avoid infinite recursion.
result = _CopyProtoMessageVanillaProtoJson(message)
pairs_field = message.field_by_name(source)
if not isinstance(pairs_field, messages.MessageField):
raise exceptions.InvalidUserInputError(
'Invalid pairs field %s' % pairs_field)
pairs_type = pairs_field.message_type
value_field = pairs_type.field_by_name('value')
value_variant = value_field.variant
pairs = getattr(message, source)
codec = _ProtoJsonApiTools.Get()
for pair in pairs:
encoded_value = codec.encode_field(value_field, pair.value)
result.set_unrecognized_field(pair.key, encoded_value, value_variant)
setattr(result, source, [])
return result | [
"def",
"_EncodeUnknownFields",
"(",
"message",
")",
":",
"source",
"=",
"_UNRECOGNIZED_FIELD_MAPPINGS",
".",
"get",
"(",
"type",
"(",
"message",
")",
")",
"if",
"source",
"is",
"None",
":",
"return",
"message",
"# CopyProtoMessage uses _ProtoJsonApiTools, which uses t... | Remap unknown fields in message out of message.source. | [
"Remap",
"unknown",
"fields",
"in",
"message",
"out",
"of",
"message",
".",
"source",
"."
] | python | train |
jldantas/libmft | libmft/attribute.py | https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L756-L792 | def _from_binary_stdinfo(cls, binary_stream):
"""See base class."""
'''
TIMESTAMPS(32)
Creation time - 8
File altered time - 8
MFT/Metadata altered time - 8
Accessed time - 8
Flags - 4 (FileInfoFlags)
Maximum number of versions - 4
Version number - 4
Class id - 4
Owner id - 4 (NTFS 3+)
Security id - 4 (NTFS 3+)
Quota charged - 8 (NTFS 3+)
Update Sequence Number (USN) - 8 (NTFS 3+)
'''
if len(binary_stream) == cls._REPR.size: #check if it is v3 by size of the stram
t_created, t_changed, t_mft_changed, t_accessed, flags, m_ver, ver, \
c_id, o_id, s_id, quota_charged, usn = cls._REPR.unpack(binary_stream)
nw_obj = cls(
( Timestamps((convert_filetime(t_created), convert_filetime(t_changed),
convert_filetime(t_mft_changed), convert_filetime(t_accessed))
), FileInfoFlags(flags), m_ver, ver, c_id, o_id, s_id, quota_charged, usn))
else:
#if the content is not using v3 extension, added the missing stuff for consistency
t_created, t_changed, t_mft_changed, t_accessed, flags, m_ver, ver, \
c_id = cls._REPR_NO_NFTS_3_EXTENSION.unpack(binary_stream)
nw_obj = cls(
( Timestamps((convert_filetime(t_created), convert_filetime(t_changed),
convert_filetime(t_mft_changed), convert_filetime(t_accessed))
), FileInfoFlags(flags), m_ver, ver, c_id, None, None, None, None))
_MOD_LOGGER.debug("Attempted to unpack STANDARD_INFORMATION from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj | [
"def",
"_from_binary_stdinfo",
"(",
"cls",
",",
"binary_stream",
")",
":",
"'''\n TIMESTAMPS(32)\n Creation time - 8\n File altered time - 8\n MFT/Metadata altered time - 8\n Accessed time - 8\n Flags - 4 (FileInfoFlags)\n Maximum numb... | See base class. | [
"See",
"base",
"class",
"."
] | python | train |
coldfix/udiskie | udiskie/mount.py | https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/mount.py#L576-L608 | async def losetup(self, image, read_only=True, offset=None, size=None,
no_part_scan=None):
"""
Setup a loop device.
:param str image: path of the image file
:param bool read_only:
:param int offset:
:param int size:
:param bool no_part_scan:
:returns: the device object for the loop device
"""
try:
device = self.udisks.find(image)
except FileNotFoundError:
pass
else:
self._log.info(_('not setting up {0}: already up', device))
return device
if not os.path.isfile(image):
self._log.error(_('not setting up {0}: not a file', image))
return None
self._log.debug(_('setting up {0}', image))
fd = os.open(image, os.O_RDONLY)
device = await self.udisks.loop_setup(fd, {
'offset': offset,
'size': size,
'read-only': read_only,
'no-part-scan': no_part_scan,
})
self._log.info(_('set up {0} as {1}', image,
device.device_presentation))
return device | [
"async",
"def",
"losetup",
"(",
"self",
",",
"image",
",",
"read_only",
"=",
"True",
",",
"offset",
"=",
"None",
",",
"size",
"=",
"None",
",",
"no_part_scan",
"=",
"None",
")",
":",
"try",
":",
"device",
"=",
"self",
".",
"udisks",
".",
"find",
"(... | Setup a loop device.
:param str image: path of the image file
:param bool read_only:
:param int offset:
:param int size:
:param bool no_part_scan:
:returns: the device object for the loop device | [
"Setup",
"a",
"loop",
"device",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/rnaseq/sailfish.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/sailfish.py#L170-L176 | def _libtype_string(fq1, fq2, strandedness):
"""
supports just the Tophat unstranded/firstrand/secondstrand
"""
libtype = "-l I" if fq2 else "-l "
strand = _sailfish_strand_string(strandedness)
return libtype + strand | [
"def",
"_libtype_string",
"(",
"fq1",
",",
"fq2",
",",
"strandedness",
")",
":",
"libtype",
"=",
"\"-l I\"",
"if",
"fq2",
"else",
"\"-l \"",
"strand",
"=",
"_sailfish_strand_string",
"(",
"strandedness",
")",
"return",
"libtype",
"+",
"strand"
] | supports just the Tophat unstranded/firstrand/secondstrand | [
"supports",
"just",
"the",
"Tophat",
"unstranded",
"/",
"firstrand",
"/",
"secondstrand"
] | python | train |
jopohl/urh | src/urh/awre/FormatFinder.py | https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/awre/FormatFinder.py#L77-L106 | def cluster_lengths(self):
"""
This method clusters some bitvectors based on their length. An example output is
2: [0.5, 1]
4: [1, 0.75, 1, 1]
Meaning there were two message lengths: 2 and 4 bit.
(0.5, 1) means, the first bit was equal in 50% of cases (meaning maximum difference) and bit 2 was equal in all messages
A simple XOR would not work as it would be error prone.
:rtype: dict[int, tuple[np.ndarray, int]]
"""
number_ones = dict() # dict of tuple. 0 = number ones vector, 1 = number of blocks for this vector
for vector in self.bitvectors:
vec_len = 4 * (len(vector) // 4)
if vec_len == 0:
continue
if vec_len not in number_ones:
number_ones[vec_len] = [np.zeros(vec_len, dtype=int), 0]
number_ones[vec_len][0] += vector[0:vec_len]
number_ones[vec_len][1] += 1
# Calculate the relative numbers and normalize the equalness so e.g. 0.3 becomes 0.7
return {vl: (np.vectorize(lambda x: x if x >= 0.5 else 1 - x)(number_ones[vl][0] / number_ones[vl][1]))
for vl in number_ones if number_ones[vl][1] >= self.MIN_MESSAGES_PER_CLUSTER} | [
"def",
"cluster_lengths",
"(",
"self",
")",
":",
"number_ones",
"=",
"dict",
"(",
")",
"# dict of tuple. 0 = number ones vector, 1 = number of blocks for this vector",
"for",
"vector",
"in",
"self",
".",
"bitvectors",
":",
"vec_len",
"=",
"4",
"*",
"(",
"len",
"(",
... | This method clusters some bitvectors based on their length. An example output is
2: [0.5, 1]
4: [1, 0.75, 1, 1]
Meaning there were two message lengths: 2 and 4 bit.
(0.5, 1) means, the first bit was equal in 50% of cases (meaning maximum difference) and bit 2 was equal in all messages
A simple XOR would not work as it would be error prone.
:rtype: dict[int, tuple[np.ndarray, int]] | [
"This",
"method",
"clusters",
"some",
"bitvectors",
"based",
"on",
"their",
"length",
".",
"An",
"example",
"output",
"is"
] | python | train |
CalebBell/fpi | fpi/drag.py | https://github.com/CalebBell/fpi/blob/6e6da3b9d0c17e10cc0886c97bc1bb8aeba2cca5/fpi/drag.py#L105-L153 | def Barati_high(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_.
.. math::
C_D = 8\times 10^{-6}\left[(Re/6530)^2 + \tanh(Re) - 8\ln(Re)/\ln(10)\right]
- 0.4119\exp(-2.08\times10^{43}/[Re + Re^2]^4)
-2.1344\exp(-\{[\ln(Re^2 + 10.7563)/\ln(10)]^2 + 9.9867\}/Re)
+0.1357\exp(-[(Re/1620)^2 + 10370]/Re)
- 8.5\times 10^{-3}\{2\ln[\tanh(\tanh(Re))]/\ln(10) - 2825.7162\}/Re
+ 2.4795
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 1E6
This model is the wider-range model the authors developed.
At sufficiently low diameters or Re values, drag is no longer a phenomena.
Examples
--------
Maching example in [1]_, in a table of calculated values.
>>> Barati_high(200.)
0.7730544082789523
References
----------
.. [1] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
Cd = (8E-6*((Re/6530.)**2 + tanh(Re) - 8*log(Re)/log(10.))
- 0.4119*exp(-2.08E43/(Re+Re**2)**4)
- 2.1344*exp(-((log(Re**2 + 10.7563)/log(10))**2 + 9.9867)/Re)
+ 0.1357*exp(-((Re/1620.)**2 + 10370.)/Re)
- 8.5E-3*(2*log(tanh(tanh(Re)))/log(10) - 2825.7162)/Re + 2.4795)
return Cd | [
"def",
"Barati_high",
"(",
"Re",
")",
":",
"Cd",
"=",
"(",
"8E-6",
"*",
"(",
"(",
"Re",
"/",
"6530.",
")",
"**",
"2",
"+",
"tanh",
"(",
"Re",
")",
"-",
"8",
"*",
"log",
"(",
"Re",
")",
"/",
"log",
"(",
"10.",
")",
")",
"-",
"0.4119",
"*",... | r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_.
.. math::
C_D = 8\times 10^{-6}\left[(Re/6530)^2 + \tanh(Re) - 8\ln(Re)/\ln(10)\right]
- 0.4119\exp(-2.08\times10^{43}/[Re + Re^2]^4)
-2.1344\exp(-\{[\ln(Re^2 + 10.7563)/\ln(10)]^2 + 9.9867\}/Re)
+0.1357\exp(-[(Re/1620)^2 + 10370]/Re)
- 8.5\times 10^{-3}\{2\ln[\tanh(\tanh(Re))]/\ln(10) - 2825.7162\}/Re
+ 2.4795
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 1E6
This model is the wider-range model the authors developed.
At sufficiently low diameters or Re values, drag is no longer a phenomena.
Examples
--------
Maching example in [1]_, in a table of calculated values.
>>> Barati_high(200.)
0.7730544082789523
References
----------
.. [1] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045. | [
"r",
"Calculates",
"drag",
"coefficient",
"of",
"a",
"smooth",
"sphere",
"using",
"the",
"method",
"in",
"[",
"1",
"]",
"_",
"."
] | python | train |
aleju/imgaug | imgaug/external/poly_point_isect_py2py3.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/external/poly_point_isect_py2py3.py#L902-L941 | def prev_item(self, key, default=_sentinel):
"""Get predecessor (k,v) pair of key, raises KeyError if key is min key
or key does not exist. optimized for pypy.
"""
# removed graingets version, because it was little slower on CPython and much slower on pypy
# this version runs about 4x faster with pypy than the Cython version
# Note: Code sharing of prev_item() and floor_item() is possible, but has always a speed penalty.
node = self._root
prev_node = None
while node is not None:
cmp = self._cmp(self._cmp_data, key, node.key)
if cmp == 0:
break
elif cmp < 0:
node = node.left
else:
if (prev_node is None) or self._cmp(self._cmp_data, prev_node.key, node.key) < 0:
prev_node = node
node = node.right
if node is None: # stay at dead end (None)
if default is _sentinel:
raise KeyError(str(key))
return default
# found node of key
if node.left is not None:
# find biggest node of left subtree
node = node.left
while node.right is not None:
node = node.right
if prev_node is None:
prev_node = node
elif self._cmp(self._cmp_data, prev_node.key, node.key) < 0:
prev_node = node
elif prev_node is None: # given key is smallest in tree
if default is _sentinel:
raise KeyError(str(key))
return default
return prev_node.key, prev_node.value | [
"def",
"prev_item",
"(",
"self",
",",
"key",
",",
"default",
"=",
"_sentinel",
")",
":",
"# removed graingets version, because it was little slower on CPython and much slower on pypy",
"# this version runs about 4x faster with pypy than the Cython version",
"# Note: Code sharing of prev_... | Get predecessor (k,v) pair of key, raises KeyError if key is min key
or key does not exist. optimized for pypy. | [
"Get",
"predecessor",
"(",
"k",
"v",
")",
"pair",
"of",
"key",
"raises",
"KeyError",
"if",
"key",
"is",
"min",
"key",
"or",
"key",
"does",
"not",
"exist",
".",
"optimized",
"for",
"pypy",
"."
] | python | valid |
xolox/python-vcs-repo-mgr | vcs_repo_mgr/backends/hg.py | https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/backends/hg.py#L208-L223 | def get_commit_command(self, message, author=None):
"""
Get the command to commit changes to tracked files in the working tree.
This method uses the ``hg remove --after`` to match the semantics of
``git commit --all`` (which is _not_ the same as ``hg commit
--addremove``) however ``hg remove --after`` is _very_ verbose (it
comments on every existing file in the repository) and it ignores the
``--quiet`` option. This explains why I've decided to silence the
standard error stream (though I feel I may regret this later).
"""
tokens = ['hg remove --after 2>/dev/null; hg commit']
if author:
tokens.append('--user=%s' % quote(author.combined))
tokens.append('--message=%s' % quote(message))
return [' '.join(tokens)] | [
"def",
"get_commit_command",
"(",
"self",
",",
"message",
",",
"author",
"=",
"None",
")",
":",
"tokens",
"=",
"[",
"'hg remove --after 2>/dev/null; hg commit'",
"]",
"if",
"author",
":",
"tokens",
".",
"append",
"(",
"'--user=%s'",
"%",
"quote",
"(",
"author"... | Get the command to commit changes to tracked files in the working tree.
This method uses the ``hg remove --after`` to match the semantics of
``git commit --all`` (which is _not_ the same as ``hg commit
--addremove``) however ``hg remove --after`` is _very_ verbose (it
comments on every existing file in the repository) and it ignores the
``--quiet`` option. This explains why I've decided to silence the
standard error stream (though I feel I may regret this later). | [
"Get",
"the",
"command",
"to",
"commit",
"changes",
"to",
"tracked",
"files",
"in",
"the",
"working",
"tree",
"."
] | python | train |
minio/minio-py | minio/api.py | https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L1151-L1197 | def _list_incomplete_uploads(self, bucket_name, prefix='',
recursive=False, is_aggregate_size=True):
"""
List incomplete uploads list all previously uploaded incomplete multipart objects.
:param bucket_name: Bucket name to list uploaded objects.
:param prefix: String specifying objects returned must begin with.
:param recursive: If yes, returns all incomplete objects for a specified prefix.
:return: An generator of incomplete uploads in alphabetical order.
"""
is_valid_bucket_name(bucket_name)
# If someone explicitly set prefix to None convert it to empty string.
if prefix is None:
prefix = ''
# Initialize query parameters.
query = {
'uploads': '',
'max-uploads': '1000',
'prefix': prefix
}
if not recursive:
query['delimiter'] = '/'
key_marker, upload_id_marker = '', ''
is_truncated = True
while is_truncated:
if key_marker:
query['key-marker'] = key_marker
if upload_id_marker:
query['upload-id-marker'] = upload_id_marker
response = self._url_open('GET',
bucket_name=bucket_name,
query=query)
(uploads, is_truncated, key_marker,
upload_id_marker) = parse_list_multipart_uploads(response.data,
bucket_name)
for upload in uploads:
if is_aggregate_size:
upload.size = self._get_total_multipart_upload_size(
upload.bucket_name,
upload.object_name,
upload.upload_id)
yield upload | [
"def",
"_list_incomplete_uploads",
"(",
"self",
",",
"bucket_name",
",",
"prefix",
"=",
"''",
",",
"recursive",
"=",
"False",
",",
"is_aggregate_size",
"=",
"True",
")",
":",
"is_valid_bucket_name",
"(",
"bucket_name",
")",
"# If someone explicitly set prefix to None ... | List incomplete uploads list all previously uploaded incomplete multipart objects.
:param bucket_name: Bucket name to list uploaded objects.
:param prefix: String specifying objects returned must begin with.
:param recursive: If yes, returns all incomplete objects for a specified prefix.
:return: An generator of incomplete uploads in alphabetical order. | [
"List",
"incomplete",
"uploads",
"list",
"all",
"previously",
"uploaded",
"incomplete",
"multipart",
"objects",
"."
] | python | train |
praw-dev/prawtools | prawtools/stats.py | https://github.com/praw-dev/prawtools/blob/571d5c28c2222f6f8dbbca8c815b8da0a776ab85/prawtools/stats.py#L184-L218 | def process_commenters(self):
"""Group comments by author."""
for index, submission in enumerate(self.submissions.values()):
if submission.num_comments == 0:
continue
real_submission = self.reddit.submission(id=submission.id)
real_submission.comment_sort = 'top'
for i in range(3):
try:
real_submission.comments.replace_more(limit=0)
break
except RequestException:
if i >= 2:
raise
logger.debug('Failed to fetch submission {}, retrying'
.format(submission.id))
self.comments.extend(MiniComment(comment, submission)
for comment in real_submission.comments.list()
if self.distinguished
or comment.distinguished is None)
if index % 50 == 49:
logger.debug('Completed: {:4d}/{} submissions'
.format(index + 1, len(self.submissions)))
# Clean up to reduce memory usage
submission = None
gc.collect()
self.comments.sort(key=lambda x: x.created_utc)
for comment in self.comments:
if comment.author:
self.commenters[comment.author].append(comment) | [
"def",
"process_commenters",
"(",
"self",
")",
":",
"for",
"index",
",",
"submission",
"in",
"enumerate",
"(",
"self",
".",
"submissions",
".",
"values",
"(",
")",
")",
":",
"if",
"submission",
".",
"num_comments",
"==",
"0",
":",
"continue",
"real_submiss... | Group comments by author. | [
"Group",
"comments",
"by",
"author",
"."
] | python | train |
thomasleese/mo | mo/frontends.py | https://github.com/thomasleese/mo/blob/b757f52b42e51ad19c14724ceb7c5db5d52abaea/mo/frontends.py#L152-L179 | def serialise(self, obj):
"""
Take an object from the project or the runner and serialise it into a
dictionary.
Parameters
----------
obj : object
An object to serialise.
Returns
-------
object
A serialised version of the input object.
"""
if isinstance(obj, (list, VariableCollection, StepCollection)):
return [self.serialise(element) for element in obj]
elif isinstance(obj, dict):
return {k: self.serialise(v) for k, v in obj.items()}
elif isinstance(obj, str):
return obj
elif isinstance(obj, (Event, Task, Variable, Step)):
return self.serialise(obj._asdict())
elif obj is None:
return None
else:
raise TypeError(type(obj)) | [
"def",
"serialise",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"VariableCollection",
",",
"StepCollection",
")",
")",
":",
"return",
"[",
"self",
".",
"serialise",
"(",
"element",
")",
"for",
"element",
"i... | Take an object from the project or the runner and serialise it into a
dictionary.
Parameters
----------
obj : object
An object to serialise.
Returns
-------
object
A serialised version of the input object. | [
"Take",
"an",
"object",
"from",
"the",
"project",
"or",
"the",
"runner",
"and",
"serialise",
"it",
"into",
"a",
"dictionary",
"."
] | python | train |
tempodb/tempodb-python | tempodb/protocol/objects.py | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/protocol/objects.py#L277-L312 | def from_data(self, time, value, series_id=None, key=None, tz=None):
"""Create a DataPoint object from data, rather than a JSON object or
string. This should be used by user code to construct DataPoints from
Python-based data like Datetime objects and floats.
The series_id and key arguments are only necessary if you are doing a
multi write, in which case those arguments can be used to specify which
series the DataPoint belongs to.
If needed, the tz argument should be an Olsen database compliant string
specifying the time zone for this DataPoint. This argument is most
often used internally when reading data from TempoDB.
:param time: the point in time for this reading
:type time: ISO8601 string or Datetime
:param value: the value for this reading
:type value: int or float
:param string series_id: (optional) a series ID for this point
:param string key: (optional) a key for this point
:param string tz: (optional) a timezone for this point
:rtype: :class:`DataPoint`"""
t = check_time_param(time)
if type(value) in [float, int]:
v = value
else:
raise ValueError('Values must be int or float. Got "%s".' %
str(value))
j = {
't': t,
'v': v,
'id': series_id,
'key': key
}
return DataPoint(j, None, tz=tz) | [
"def",
"from_data",
"(",
"self",
",",
"time",
",",
"value",
",",
"series_id",
"=",
"None",
",",
"key",
"=",
"None",
",",
"tz",
"=",
"None",
")",
":",
"t",
"=",
"check_time_param",
"(",
"time",
")",
"if",
"type",
"(",
"value",
")",
"in",
"[",
"flo... | Create a DataPoint object from data, rather than a JSON object or
string. This should be used by user code to construct DataPoints from
Python-based data like Datetime objects and floats.
The series_id and key arguments are only necessary if you are doing a
multi write, in which case those arguments can be used to specify which
series the DataPoint belongs to.
If needed, the tz argument should be an Olsen database compliant string
specifying the time zone for this DataPoint. This argument is most
often used internally when reading data from TempoDB.
:param time: the point in time for this reading
:type time: ISO8601 string or Datetime
:param value: the value for this reading
:type value: int or float
:param string series_id: (optional) a series ID for this point
:param string key: (optional) a key for this point
:param string tz: (optional) a timezone for this point
:rtype: :class:`DataPoint` | [
"Create",
"a",
"DataPoint",
"object",
"from",
"data",
"rather",
"than",
"a",
"JSON",
"object",
"or",
"string",
".",
"This",
"should",
"be",
"used",
"by",
"user",
"code",
"to",
"construct",
"DataPoints",
"from",
"Python",
"-",
"based",
"data",
"like",
"Date... | python | train |
librosa/librosa | librosa/segment.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L563-L655 | def subsegment(data, frames, n_segments=4, axis=-1):
'''Sub-divide a segmentation by feature clustering.
Given a set of frame boundaries (`frames`), and a data matrix (`data`),
each successive interval defined by `frames` is partitioned into
`n_segments` by constrained agglomerative clustering.
.. note::
If an interval spans fewer than `n_segments` frames, then each
frame becomes a sub-segment.
Parameters
----------
data : np.ndarray
Data matrix to use in clustering
frames : np.ndarray [shape=(n_boundaries,)], dtype=int, non-negative]
Array of beat or segment boundaries, as provided by
`librosa.beat.beat_track`,
`librosa.onset.onset_detect`,
or `agglomerative`.
n_segments : int > 0
Maximum number of frames to sub-divide each interval.
axis : int
Axis along which to apply the segmentation.
By default, the last index (-1) is taken.
Returns
-------
boundaries : np.ndarray [shape=(n_subboundaries,)]
List of sub-divided segment boundaries
See Also
--------
agglomerative : Temporal segmentation
librosa.onset.onset_detect : Onset detection
librosa.beat.beat_track : Beat tracking
Notes
-----
This function caches at level 30.
Examples
--------
Load audio, detect beat frames, and subdivide in twos by CQT
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))
>>> subseg = librosa.segment.subsegment(cqt, beats, n_segments=2)
>>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)
>>> subseg
array([ 0, 2, 4, 21, 23, 26, 43, 55, 63, 72, 83,
97, 102, 111, 122, 137, 142, 153, 162, 180, 182, 185,
202, 210, 221, 231, 241, 256, 261, 271, 281, 296, 301,
310, 320, 339, 341, 344, 361, 368, 382, 389, 401, 416,
420, 430, 436, 451, 456, 465, 476, 489, 496, 503, 515,
527, 535, 544, 553, 558, 571, 578, 590, 607, 609, 638])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(cqt,
... ref=np.max),
... y_axis='cqt_hz', x_axis='time')
>>> lims = plt.gca().get_ylim()
>>> plt.vlines(beat_times, lims[0], lims[1], color='lime', alpha=0.9,
... linewidth=2, label='Beats')
>>> plt.vlines(subseg_t, lims[0], lims[1], color='linen', linestyle='--',
... linewidth=1.5, alpha=0.5, label='Sub-beats')
>>> plt.legend(frameon=True, shadow=True)
>>> plt.title('CQT + Beat and sub-beat markers')
>>> plt.tight_layout()
'''
frames = util.fix_frames(frames, x_min=0, x_max=data.shape[axis], pad=True)
if n_segments < 1:
raise ParameterError('n_segments must be a positive integer')
boundaries = []
idx_slices = [slice(None)] * data.ndim
for seg_start, seg_end in zip(frames[:-1], frames[1:]):
idx_slices[axis] = slice(seg_start, seg_end)
boundaries.extend(seg_start + agglomerative(data[tuple(idx_slices)],
min(seg_end - seg_start, n_segments),
axis=axis))
return np.ascontiguousarray(boundaries) | [
"def",
"subsegment",
"(",
"data",
",",
"frames",
",",
"n_segments",
"=",
"4",
",",
"axis",
"=",
"-",
"1",
")",
":",
"frames",
"=",
"util",
".",
"fix_frames",
"(",
"frames",
",",
"x_min",
"=",
"0",
",",
"x_max",
"=",
"data",
".",
"shape",
"[",
"ax... | Sub-divide a segmentation by feature clustering.
Given a set of frame boundaries (`frames`), and a data matrix (`data`),
each successive interval defined by `frames` is partitioned into
`n_segments` by constrained agglomerative clustering.
.. note::
If an interval spans fewer than `n_segments` frames, then each
frame becomes a sub-segment.
Parameters
----------
data : np.ndarray
Data matrix to use in clustering
frames : np.ndarray [shape=(n_boundaries,)], dtype=int, non-negative]
Array of beat or segment boundaries, as provided by
`librosa.beat.beat_track`,
`librosa.onset.onset_detect`,
or `agglomerative`.
n_segments : int > 0
Maximum number of frames to sub-divide each interval.
axis : int
Axis along which to apply the segmentation.
By default, the last index (-1) is taken.
Returns
-------
boundaries : np.ndarray [shape=(n_subboundaries,)]
List of sub-divided segment boundaries
See Also
--------
agglomerative : Temporal segmentation
librosa.onset.onset_detect : Onset detection
librosa.beat.beat_track : Beat tracking
Notes
-----
This function caches at level 30.
Examples
--------
Load audio, detect beat frames, and subdivide in twos by CQT
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))
>>> subseg = librosa.segment.subsegment(cqt, beats, n_segments=2)
>>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)
>>> subseg
array([ 0, 2, 4, 21, 23, 26, 43, 55, 63, 72, 83,
97, 102, 111, 122, 137, 142, 153, 162, 180, 182, 185,
202, 210, 221, 231, 241, 256, 261, 271, 281, 296, 301,
310, 320, 339, 341, 344, 361, 368, 382, 389, 401, 416,
420, 430, 436, 451, 456, 465, 476, 489, 496, 503, 515,
527, 535, 544, 553, 558, 571, 578, 590, 607, 609, 638])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(cqt,
... ref=np.max),
... y_axis='cqt_hz', x_axis='time')
>>> lims = plt.gca().get_ylim()
>>> plt.vlines(beat_times, lims[0], lims[1], color='lime', alpha=0.9,
... linewidth=2, label='Beats')
>>> plt.vlines(subseg_t, lims[0], lims[1], color='linen', linestyle='--',
... linewidth=1.5, alpha=0.5, label='Sub-beats')
>>> plt.legend(frameon=True, shadow=True)
>>> plt.title('CQT + Beat and sub-beat markers')
>>> plt.tight_layout() | [
"Sub",
"-",
"divide",
"a",
"segmentation",
"by",
"feature",
"clustering",
"."
] | python | test |
tanghaibao/goatools | goatools/obo_parser.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L198-L203 | def has_child(self, term):
"""Return True if this GO object has a child GO ID."""
for parent in self.children:
if parent.item_id == term or parent.has_child(term):
return True
return False | [
"def",
"has_child",
"(",
"self",
",",
"term",
")",
":",
"for",
"parent",
"in",
"self",
".",
"children",
":",
"if",
"parent",
".",
"item_id",
"==",
"term",
"or",
"parent",
".",
"has_child",
"(",
"term",
")",
":",
"return",
"True",
"return",
"False"
] | Return True if this GO object has a child GO ID. | [
"Return",
"True",
"if",
"this",
"GO",
"object",
"has",
"a",
"child",
"GO",
"ID",
"."
] | python | train |
bsolomon1124/pyfinance | pyfinance/options.py | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/options.py#L351-L383 | def summary(self, St=None):
"""Tabular summary of strategy composition, broken out by option.
Returns
-------
pd.DataFrame
Columns: kind, position, strike, price, St, payoff, profit.
"""
St = self.St if St is None else St
if self.options:
payoffs = [op.payoff(St=St) for op in self.options]
profits = [op.profit(St=St) for op in self.options]
strikes = [op.K for op in self.options]
prices = [op.price for op in self.options]
exprs = [St] * len(self.options)
kinds = [op.kind for op in self.options]
poss = [op.pos for op in self.options]
res = OrderedDict(
[
("kind", kinds),
("position", poss),
("strike", strikes),
("price", prices),
("St", exprs),
("payoff", payoffs),
("profit", profits),
]
)
return DataFrame(res)
else:
return None | [
"def",
"summary",
"(",
"self",
",",
"St",
"=",
"None",
")",
":",
"St",
"=",
"self",
".",
"St",
"if",
"St",
"is",
"None",
"else",
"St",
"if",
"self",
".",
"options",
":",
"payoffs",
"=",
"[",
"op",
".",
"payoff",
"(",
"St",
"=",
"St",
")",
"fo... | Tabular summary of strategy composition, broken out by option.
Returns
-------
pd.DataFrame
Columns: kind, position, strike, price, St, payoff, profit. | [
"Tabular",
"summary",
"of",
"strategy",
"composition",
"broken",
"out",
"by",
"option",
".",
"Returns",
"-------",
"pd",
".",
"DataFrame",
"Columns",
":",
"kind",
"position",
"strike",
"price",
"St",
"payoff",
"profit",
"."
] | python | train |
google/grr | grr/core/grr_response_core/lib/rdfvalues/structs.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L2026-L2063 | def FromDict(self, dictionary):
"""Initializes itself from a given dictionary."""
dynamic_fields = []
for key, value in iteritems(dictionary):
field_type_info = self.type_infos.get(key)
if isinstance(field_type_info, ProtoEmbedded):
nested_value = field_type_info.GetDefault(container=self)
nested_value.FromDict(value)
self.Set(key, nested_value)
elif isinstance(field_type_info, ProtoList):
if isinstance(field_type_info.delegate, ProtoEmbedded):
nested_values = []
for v in value:
nested_value = field_type_info.delegate.GetDefault(container=self)
nested_value.FromDict(v)
nested_values.append(nested_value)
self.Set(key, nested_values)
else:
self.Set(key, value)
elif isinstance(field_type_info, ProtoDynamicEmbedded):
dynamic_fields.append(field_type_info)
elif field_type_info.proto_type_name == "bytes":
self.Set(key, base64.decodestring(value or ""))
else:
self.Set(key, value)
# Process dynamic fields after all other fields, because most probably
# their class is determined by one of the previously set fields.
for dynamic_field in dynamic_fields:
nested_value = dynamic_field.GetDefault(container=self)
if nested_value is None:
raise RuntimeError("Can't initialize dynamic field %s, probably some "
"necessary fields weren't supplied." %
dynamic_field.name)
nested_value.FromDict(dictionary[dynamic_field.name])
self.Set(dynamic_field.name, nested_value) | [
"def",
"FromDict",
"(",
"self",
",",
"dictionary",
")",
":",
"dynamic_fields",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"dictionary",
")",
":",
"field_type_info",
"=",
"self",
".",
"type_infos",
".",
"get",
"(",
"key",
")",
"i... | Initializes itself from a given dictionary. | [
"Initializes",
"itself",
"from",
"a",
"given",
"dictionary",
"."
] | python | train |
opencobra/cobrapy | cobra/core/gene.py | https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/gene.py#L218-L277 | def remove_from_model(self, model=None,
make_dependent_reactions_nonfunctional=True):
"""Removes the association
Parameters
----------
model : cobra model
The model to remove the gene from
make_dependent_reactions_nonfunctional : bool
If True then replace the gene with 'False' in the gene
association, else replace the gene with 'True'
.. deprecated :: 0.4
Use cobra.manipulation.delete_model_genes to simulate knockouts
and cobra.manipulation.remove_genes to remove genes from
the model.
"""
warn("Use cobra.manipulation.remove_genes instead")
if model is not None:
if model != self._model:
raise Exception("%s is a member of %s, not %s" %
(repr(self), repr(self._model), repr(model)))
if self._model is None:
raise Exception('%s is not in a model' % repr(self))
if make_dependent_reactions_nonfunctional:
gene_state = 'False'
else:
gene_state = 'True'
the_gene_re = re.compile('(^|(?<=( |\()))%s(?=( |\)|$))' %
re.escape(self.id))
# remove reference to the gene in all groups
associated_groups = self._model.get_associated_groups(self)
for group in associated_groups:
group.remove_members(self)
self._model.genes.remove(self)
self._model = None
for the_reaction in list(self._reaction):
the_reaction._gene_reaction_rule = the_gene_re.sub(
gene_state, the_reaction.gene_reaction_rule)
the_reaction._genes.remove(self)
# Now, deactivate the reaction if its gene association evaluates
# to False
the_gene_reaction_relation = the_reaction.gene_reaction_rule
for other_gene in the_reaction._genes:
other_gene_re = re.compile('(^|(?<=( |\()))%s(?=( |\)|$))' %
re.escape(other_gene.id))
the_gene_reaction_relation = other_gene_re.sub(
'True',
the_gene_reaction_relation)
if not eval(the_gene_reaction_relation):
the_reaction.lower_bound = 0
the_reaction.upper_bound = 0
self._reaction.clear() | [
"def",
"remove_from_model",
"(",
"self",
",",
"model",
"=",
"None",
",",
"make_dependent_reactions_nonfunctional",
"=",
"True",
")",
":",
"warn",
"(",
"\"Use cobra.manipulation.remove_genes instead\"",
")",
"if",
"model",
"is",
"not",
"None",
":",
"if",
"model",
"... | Removes the association
Parameters
----------
model : cobra model
The model to remove the gene from
make_dependent_reactions_nonfunctional : bool
If True then replace the gene with 'False' in the gene
association, else replace the gene with 'True'
.. deprecated :: 0.4
Use cobra.manipulation.delete_model_genes to simulate knockouts
and cobra.manipulation.remove_genes to remove genes from
the model. | [
"Removes",
"the",
"association"
] | python | valid |
DLR-RM/RAFCON | source/rafcon/gui/backup/session.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/backup/session.py#L76-L186 | def restore_session_from_runtime_config():
""" Restore stored tabs from runtime config
The method checks if the last status of a state machine is in the backup or in tis original path and loads it
from there. The original path of these state machines are also insert into the recently opened state machines
list.
"""
# TODO add a dirty lock for a crashed rafcon instance also into backup session feature
# TODO in case a dialog is needed to give the user control
# TODO combine this and auto-backup in one structure/controller/observer
from rafcon.gui.singleton import state_machine_manager_model, global_runtime_config, global_gui_config
from rafcon.gui.models.auto_backup import recover_state_machine_from_backup
from rafcon.gui.singleton import main_window_controller
# check if session storage exists
open_tabs = global_runtime_config.get_config_value('open_tabs', None)
if open_tabs is None:
logger.info("No session found for recovery")
return
# load and restore state machines like they were opened before
open_sm = []
for idx, tab_meta_dict in enumerate(open_tabs):
start_time = time.time()
backup_meta_dict = tab_meta_dict['backup_meta']
from_backup_path = None
open_sm.append(None)
# TODO do this decision before storing or maybe store the last stored time in the auto backup?!
# pick folder name dependent on time, and backup meta data existence
# problem is that the backup time is maybe not the best choice
if 'last_backup' in backup_meta_dict:
last_backup_time = storage_utils.get_float_time_for_string(backup_meta_dict['last_backup']['time'])
if 'last_saved' in backup_meta_dict:
last_save_time = storage_utils.get_float_time_for_string(backup_meta_dict['last_saved']['time'])
backup_marked_dirty = backup_meta_dict['last_backup']['marked_dirty']
if last_backup_time > last_save_time and backup_marked_dirty:
from_backup_path = backup_meta_dict['last_backup']['file_system_path']
else:
from_backup_path = backup_meta_dict['last_backup']['file_system_path']
elif 'last_saved' in backup_meta_dict:
# print("### open last saved", sm_meta_dict['last_saved']['file_system_path'])
pass
else:
logger.error("A tab was stored into session storage dictionary {0} without any recovery path"
"".format(backup_meta_dict))
continue
# check in case that the backup folder is valid or use last saved path
if from_backup_path is not None and not os.path.isdir(from_backup_path):
logger.warning("The backup of tab {0} from backup path {1} was not possible. "
"The last saved path will be used for recovery, which could result is loss of changes."
"".format(idx, from_backup_path))
from_backup_path = None
# open state machine
if from_backup_path is not None:
# open state machine, recover mark dirty flags, cleans dirty lock files
logger.info("Restoring from backup {0}".format(from_backup_path))
state_machine_m = recover_state_machine_from_backup(from_backup_path)
else:
if 'last_saved' not in backup_meta_dict or backup_meta_dict['last_saved']['file_system_path'] is None:
continue
path = backup_meta_dict['last_saved']['file_system_path']
if not os.path.isdir(path):
logger.warning("The tab can not be open. The backup of tab {0} from common path {1} was not "
"possible.".format(idx, path))
continue
# logger.info("backup from last saved", path, sm_meta_dict)
state_machine = storage.load_state_machine_from_path(path)
state_machine_manager_model.state_machine_manager.add_state_machine(state_machine)
wait_for_gui()
state_machine_m = state_machine_manager_model.state_machines[state_machine.state_machine_id]
duration = time.time() - start_time
stat = state_machine_m.state_machine.root_state.get_states_statistics(0)
logger.info("It took {0:.3}s to restore {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1]))
open_sm[idx] = state_machine_m
global_runtime_config.extend_recently_opened_by_current_open_state_machines()
wait_for_gui()
# restore all state machine selections separate to avoid states-editor and state editor creation problems
for idx, tab_meta_dict in enumerate(open_tabs):
state_machine_m = open_sm[idx]
if state_machine_m is None: # state machine could not be open
return
# restore state machine selection
selected_model_set = []
for core_element_identifier in tab_meta_dict['selection']:
selected_model_set.append(state_machine_m.get_state_model_by_path(core_element_identifier))
state_machine_m.selection.set(selected_model_set)
# restore backup-ed tab selection
selected_page_number = global_runtime_config.get_config_value('selected_state_machine_page_number', None)
if selected_page_number is not None:
selected_state_machine_page_number = selected_page_number
if selected_state_machine_page_number is None:
return
state_machines_editor_ctrl = main_window_controller.get_controller('state_machines_editor_ctrl')
if not state_machines_editor_ctrl.view['notebook'].get_n_pages() >= selected_page_number:
logger.warning("Page id {0} does not exist so session restore can not re-create selection."
"".format(selected_page_number))
return
notebook = state_machines_editor_ctrl.view['notebook']
page = state_machines_editor_ctrl.on_switch_page(notebook, None, selected_page_number)
selected_sm_id = state_machine_manager_model.selected_state_machine_id
if not selected_sm_id == state_machines_editor_ctrl.get_state_machine_id_for_page(page):
logger.warning("Selection of page was not set correctly so session restore can not re-create selection.")
return | [
"def",
"restore_session_from_runtime_config",
"(",
")",
":",
"# TODO add a dirty lock for a crashed rafcon instance also into backup session feature",
"# TODO in case a dialog is needed to give the user control",
"# TODO combine this and auto-backup in one structure/controller/observer",
"from",
"... | Restore stored tabs from runtime config
The method checks if the last status of a state machine is in the backup or in tis original path and loads it
from there. The original path of these state machines are also insert into the recently opened state machines
list. | [
"Restore",
"stored",
"tabs",
"from",
"runtime",
"config"
] | python | train |
by46/simplekit | simplekit/docker/docker.py | https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/docker/docker.py#L126-L138 | def get_containers_by_name(self, name):
"""
get all task which relative with task name
:param name: :class:`str`, task name
:return: :class:`list`, container list
"""
code, containers = self.get_containers()
if code != httplib.OK:
return []
return [container for container in containers if
any(map(lambda x: x.startswith(name), container.Names))] | [
"def",
"get_containers_by_name",
"(",
"self",
",",
"name",
")",
":",
"code",
",",
"containers",
"=",
"self",
".",
"get_containers",
"(",
")",
"if",
"code",
"!=",
"httplib",
".",
"OK",
":",
"return",
"[",
"]",
"return",
"[",
"container",
"for",
"container... | get all task which relative with task name
:param name: :class:`str`, task name
:return: :class:`list`, container list | [
"get",
"all",
"task",
"which",
"relative",
"with",
"task",
"name",
":",
"param",
"name",
":",
":",
"class",
":",
"str",
"task",
"name",
":",
"return",
":",
":",
"class",
":",
"list",
"container",
"list"
] | python | train |
GNS3/gns3-server | gns3server/controller/link.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/link.py#L152-L175 | def update_filters(self, filters):
"""
Modify the filters list.
Filter with value 0 will be dropped because not active
"""
new_filters = {}
for (filter, values) in filters.items():
new_values = []
for value in values:
if isinstance(value, str):
new_values.append(value.strip("\n "))
else:
new_values.append(int(value))
values = new_values
if len(values) != 0 and values[0] != 0 and values[0] != '':
new_filters[filter] = values
if new_filters != self.filters:
self._filters = new_filters
if self._created:
yield from self.update()
self._project.controller.notification.emit("link.updated", self.__json__())
self._project.dump() | [
"def",
"update_filters",
"(",
"self",
",",
"filters",
")",
":",
"new_filters",
"=",
"{",
"}",
"for",
"(",
"filter",
",",
"values",
")",
"in",
"filters",
".",
"items",
"(",
")",
":",
"new_values",
"=",
"[",
"]",
"for",
"value",
"in",
"values",
":",
... | Modify the filters list.
Filter with value 0 will be dropped because not active | [
"Modify",
"the",
"filters",
"list",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/phonon/dos.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/phonon/dos.py#L346-L362 | def get_element_dos(self):
"""
Get element projected Dos.
Returns:
dict of {Element: Dos}
"""
el_dos = {}
for site, atom_dos in self.pdos.items():
el = site.specie
if el not in el_dos:
el_dos[el] = np.array(atom_dos)
else:
el_dos[el] += np.array(atom_dos)
return {el: PhononDos(self.frequencies, densities)
for el, densities in el_dos.items()} | [
"def",
"get_element_dos",
"(",
"self",
")",
":",
"el_dos",
"=",
"{",
"}",
"for",
"site",
",",
"atom_dos",
"in",
"self",
".",
"pdos",
".",
"items",
"(",
")",
":",
"el",
"=",
"site",
".",
"specie",
"if",
"el",
"not",
"in",
"el_dos",
":",
"el_dos",
... | Get element projected Dos.
Returns:
dict of {Element: Dos} | [
"Get",
"element",
"projected",
"Dos",
"."
] | python | train |
jazzband/django-mongonaut | mongonaut/forms/forms.py | https://github.com/jazzband/django-mongonaut/blob/5485b2e029dff8ae267a4cb39c92d0a72cb5b144/mongonaut/forms/forms.py#L35-L45 | def set_fields(self):
"""Sets existing data to form fields."""
# Get dictionary map of current model
if self.is_initialized:
self.model_map_dict = self.create_document_dictionary(self.model_instance)
else:
self.model_map_dict = self.create_document_dictionary(self.model)
form_field_dict = self.get_form_field_dict(self.model_map_dict)
self.set_form_fields(form_field_dict) | [
"def",
"set_fields",
"(",
"self",
")",
":",
"# Get dictionary map of current model",
"if",
"self",
".",
"is_initialized",
":",
"self",
".",
"model_map_dict",
"=",
"self",
".",
"create_document_dictionary",
"(",
"self",
".",
"model_instance",
")",
"else",
":",
"sel... | Sets existing data to form fields. | [
"Sets",
"existing",
"data",
"to",
"form",
"fields",
"."
] | python | valid |
radjkarl/imgProcessor | imgProcessor/camera/PerspectiveCorrection.py | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/PerspectiveCorrection.py#L506-L514 | def foreground(self, quad=None):
'''return foreground (quad) mask'''
fg = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8)
if quad is None:
quad = self.quad
else:
quad = quad.astype(np.int32)
cv2.fillConvexPoly(fg, quad, 1)
return fg.astype(bool) | [
"def",
"foreground",
"(",
"self",
",",
"quad",
"=",
"None",
")",
":",
"fg",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"self",
".",
"_newBorders",
"[",
":",
":",
"-",
"1",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"if",
"quad",
"is",
... | return foreground (quad) mask | [
"return",
"foreground",
"(",
"quad",
")",
"mask"
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_dataflash_logger.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_dataflash_logger.py#L98-L114 | def start_new_log(self):
'''open a new dataflash log, reset state'''
filename = self.new_log_filepath()
self.block_cnt = 0
self.logfile = open(filename, 'w+b')
print("DFLogger: logging started (%s)" % (filename))
self.prev_cnt = 0
self.download = 0
self.prev_download = 0
self.last_idle_status_printed_time = time.time()
self.last_status_time = time.time()
self.missing_blocks = {}
self.acking_blocks = {}
self.blocks_to_ack_and_nack = []
self.missing_found = 0
self.abandoned = 0 | [
"def",
"start_new_log",
"(",
"self",
")",
":",
"filename",
"=",
"self",
".",
"new_log_filepath",
"(",
")",
"self",
".",
"block_cnt",
"=",
"0",
"self",
".",
"logfile",
"=",
"open",
"(",
"filename",
",",
"'w+b'",
")",
"print",
"(",
"\"DFLogger: logging start... | open a new dataflash log, reset state | [
"open",
"a",
"new",
"dataflash",
"log",
"reset",
"state"
] | python | train |
gem/oq-engine | openquake/hazardlib/gsim/abrahamson_2015.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_2015.py#L178-L198 | def _compute_site_response_term(self, C, sites, pga1000):
"""
Compute and return site response model term
This GMPE adopts the same site response scaling model of
Walling et al (2008) as implemented in the Abrahamson & Silva (2008)
GMPE. The functional form is retained here.
"""
vs_star = sites.vs30.copy()
vs_star[vs_star > 1000.0] = 1000.
arg = vs_star / C["vlin"]
site_resp_term = C["theta12"] * np.log(arg)
# Get linear scaling term
idx = sites.vs30 >= C["vlin"]
site_resp_term[idx] += (C["b"] * self.CONSTS["n"] * np.log(arg[idx]))
# Get nonlinear scaling term
idx = np.logical_not(idx)
site_resp_term[idx] += (
-C["b"] * np.log(pga1000[idx] + self.CONSTS["c"]) +
C["b"] * np.log(pga1000[idx] + self.CONSTS["c"] *
(arg[idx] ** self.CONSTS["n"])))
return site_resp_term | [
"def",
"_compute_site_response_term",
"(",
"self",
",",
"C",
",",
"sites",
",",
"pga1000",
")",
":",
"vs_star",
"=",
"sites",
".",
"vs30",
".",
"copy",
"(",
")",
"vs_star",
"[",
"vs_star",
">",
"1000.0",
"]",
"=",
"1000.",
"arg",
"=",
"vs_star",
"/",
... | Compute and return site response model term
This GMPE adopts the same site response scaling model of
Walling et al (2008) as implemented in the Abrahamson & Silva (2008)
GMPE. The functional form is retained here. | [
"Compute",
"and",
"return",
"site",
"response",
"model",
"term",
"This",
"GMPE",
"adopts",
"the",
"same",
"site",
"response",
"scaling",
"model",
"of",
"Walling",
"et",
"al",
"(",
"2008",
")",
"as",
"implemented",
"in",
"the",
"Abrahamson",
"&",
"Silva",
"... | python | train |
shreyaspotnis/rampage | rampage/daq/daq.py | https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/daq/daq.py#L80-L100 | def set_digital_line_state(line_name, state):
"""Set the state of a single digital line.
line_name (str) - The physical name of the line.
e.g line_name="Dev1/port0/line3"
This should be a single digital line. Specifying more than one would
result in unexpected behaviour. For example "Dev1/port0/line0:5" is
not allowed.
see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/
for details of naming lines.
state (bool) - state=True sets the line to high, state=False sets to low.
"""
# get the line number from the line name. Thats the number of bits to shift
bits_to_shift = int(line_name.split('line')[-1])
dig_data = np.ones(2, dtype="uint32")*bool(state)*(2**bits_to_shift)
# Note here that the number of samples written here are 2, which is the
# minimum required for a buffered write. If we configure a timing for the
# write, it is considered buffered.
# see http://zone.ni.com/reference/en-XX/help/370471Y-01/daqmxcfunc/daqmxwritedigitalu32/
DigitalOutputTask(line_name, dig_data).StartAndWait() | [
"def",
"set_digital_line_state",
"(",
"line_name",
",",
"state",
")",
":",
"# get the line number from the line name. Thats the number of bits to shift",
"bits_to_shift",
"=",
"int",
"(",
"line_name",
".",
"split",
"(",
"'line'",
")",
"[",
"-",
"1",
"]",
")",
"dig_dat... | Set the state of a single digital line.
line_name (str) - The physical name of the line.
e.g line_name="Dev1/port0/line3"
This should be a single digital line. Specifying more than one would
result in unexpected behaviour. For example "Dev1/port0/line0:5" is
not allowed.
see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/
for details of naming lines.
state (bool) - state=True sets the line to high, state=False sets to low. | [
"Set",
"the",
"state",
"of",
"a",
"single",
"digital",
"line",
"."
] | python | train |
desbma/sacad | sacad/cover.py | https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L460-L491 | async def crunch(image_data, format, silent=False):
""" Crunch image data, and return the processed data, or orignal data if operation failed. """
if (((format is CoverImageFormat.PNG) and (not HAS_OPTIPNG)) or
((format is CoverImageFormat.JPEG) and (not HAS_JPEGOPTIM))):
return image_data
with mkstemp_ctx.mkstemp(suffix=".%s" % (format.name.lower())) as tmp_out_filepath:
if not silent:
logging.getLogger("Cover").info("Crunching %s image..." % (format.name.upper()))
with open(tmp_out_filepath, "wb") as tmp_out_file:
tmp_out_file.write(image_data)
size_before = len(image_data)
if format is CoverImageFormat.PNG:
cmd = ["optipng", "-quiet", "-o1"]
elif format is CoverImageFormat.JPEG:
cmd = ["jpegoptim", "-q", "--strip-all"]
cmd.append(tmp_out_filepath)
p = await asyncio.create_subprocess_exec(*cmd,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL)
await p.wait()
if p.returncode != 0:
if not silent:
logging.getLogger("Cover").warning("Crunching image failed")
return image_data
with open(tmp_out_filepath, "rb") as tmp_out_file:
crunched_image_data = tmp_out_file.read()
size_after = len(crunched_image_data)
pct_saved = 100 * (size_before - size_after) / size_before
if not silent:
logging.getLogger("Cover").debug("Crunching image saved %.2f%% filesize" % (pct_saved))
return crunched_image_data | [
"async",
"def",
"crunch",
"(",
"image_data",
",",
"format",
",",
"silent",
"=",
"False",
")",
":",
"if",
"(",
"(",
"(",
"format",
"is",
"CoverImageFormat",
".",
"PNG",
")",
"and",
"(",
"not",
"HAS_OPTIPNG",
")",
")",
"or",
"(",
"(",
"format",
"is",
... | Crunch image data, and return the processed data, or orignal data if operation failed. | [
"Crunch",
"image",
"data",
"and",
"return",
"the",
"processed",
"data",
"or",
"orignal",
"data",
"if",
"operation",
"failed",
"."
] | python | train |
vtkiorg/vtki | vtki/pointset.py | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/pointset.py#L766-L835 | def subdivide(self, nsub, subfilter='linear', inplace=False):
"""
Increase the number of triangles in a single, connected triangular
mesh.
Uses one of the following vtk subdivision filters to subdivide a mesh.
vtkButterflySubdivisionFilter
vtkLoopSubdivisionFilter
vtkLinearSubdivisionFilter
Linear subdivision results in the fastest mesh subdivision, but it
does not smooth mesh edges, but rather splits each triangle into 4
smaller triangles.
Butterfly and loop subdivision perform smoothing when dividing, and may
introduce artifacts into the mesh when dividing.
Subdivision filter appears to fail for multiple part meshes. Should
be one single mesh.
Parameters
----------
nsub : int
Number of subdivisions. Each subdivision creates 4 new triangles,
so the number of resulting triangles is nface*4**nsub where nface
is the current number of faces.
subfilter : string, optional
Can be one of the following: 'butterfly', 'loop', 'linear'
inplace : bool, optional
Updates mesh in-place while returning nothing.
Returns
-------
mesh : Polydata object
vtki polydata object. None when inplace=True
Examples
--------
>>> from vtki import examples
>>> import vtki
>>> mesh = vtki.PolyData(examples.planefile)
>>> submesh = mesh.subdivide(1, 'loop') # doctest:+SKIP
alternatively, update mesh in-place
>>> mesh.subdivide(1, 'loop', inplace=True) # doctest:+SKIP
"""
subfilter = subfilter.lower()
if subfilter == 'linear':
sfilter = vtk.vtkLinearSubdivisionFilter()
elif subfilter == 'butterfly':
sfilter = vtk.vtkButterflySubdivisionFilter()
elif subfilter == 'loop':
sfilter = vtk.vtkLoopSubdivisionFilter()
else:
raise Exception("Subdivision filter must be one of the following: " +
"'butterfly', 'loop', or 'linear'")
# Subdivide
sfilter.SetNumberOfSubdivisions(nsub)
sfilter.SetInputData(self)
sfilter.Update()
submesh = _get_output(sfilter)
if inplace:
self.overwrite(submesh)
else:
return submesh | [
"def",
"subdivide",
"(",
"self",
",",
"nsub",
",",
"subfilter",
"=",
"'linear'",
",",
"inplace",
"=",
"False",
")",
":",
"subfilter",
"=",
"subfilter",
".",
"lower",
"(",
")",
"if",
"subfilter",
"==",
"'linear'",
":",
"sfilter",
"=",
"vtk",
".",
"vtkLi... | Increase the number of triangles in a single, connected triangular
mesh.
Uses one of the following vtk subdivision filters to subdivide a mesh.
vtkButterflySubdivisionFilter
vtkLoopSubdivisionFilter
vtkLinearSubdivisionFilter
Linear subdivision results in the fastest mesh subdivision, but it
does not smooth mesh edges, but rather splits each triangle into 4
smaller triangles.
Butterfly and loop subdivision perform smoothing when dividing, and may
introduce artifacts into the mesh when dividing.
Subdivision filter appears to fail for multiple part meshes. Should
be one single mesh.
Parameters
----------
nsub : int
Number of subdivisions. Each subdivision creates 4 new triangles,
so the number of resulting triangles is nface*4**nsub where nface
is the current number of faces.
subfilter : string, optional
Can be one of the following: 'butterfly', 'loop', 'linear'
inplace : bool, optional
Updates mesh in-place while returning nothing.
Returns
-------
mesh : Polydata object
vtki polydata object. None when inplace=True
Examples
--------
>>> from vtki import examples
>>> import vtki
>>> mesh = vtki.PolyData(examples.planefile)
>>> submesh = mesh.subdivide(1, 'loop') # doctest:+SKIP
alternatively, update mesh in-place
>>> mesh.subdivide(1, 'loop', inplace=True) # doctest:+SKIP | [
"Increase",
"the",
"number",
"of",
"triangles",
"in",
"a",
"single",
"connected",
"triangular",
"mesh",
"."
] | python | train |
ptmcg/littletable | littletable.py | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1349-L1374 | def unique(self, key=None):
"""
Create a new table of objects,containing no duplicate values.
@param key: (default=None) optional callable for computing a representative unique key for each
object in the table. If None, then a key will be composed as a tuple of all the values in the object.
@type key: callable, takes the record as an argument, and returns the key value or tuple to be used
to represent uniqueness.
"""
if isinstance(key, basestring):
key = lambda r, attr=key: getattr(r, attr, None)
ret = self.copy_template()
seen = set()
for ob in self:
if key is None:
try:
ob_dict = vars(ob)
except TypeError:
ob_dict = dict((k, getattr(ob, k)) for k in _object_attrnames(ob))
reckey = tuple(sorted(ob_dict.items()))
else:
reckey = key(ob)
if reckey not in seen:
seen.add(reckey)
ret.insert(ob)
return ret | [
"def",
"unique",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"basestring",
")",
":",
"key",
"=",
"lambda",
"r",
",",
"attr",
"=",
"key",
":",
"getattr",
"(",
"r",
",",
"attr",
",",
"None",
")",
"ret",
"=... | Create a new table of objects,containing no duplicate values.
@param key: (default=None) optional callable for computing a representative unique key for each
object in the table. If None, then a key will be composed as a tuple of all the values in the object.
@type key: callable, takes the record as an argument, and returns the key value or tuple to be used
to represent uniqueness. | [
"Create",
"a",
"new",
"table",
"of",
"objects",
"containing",
"no",
"duplicate",
"values",
"."
] | python | train |
SmokinCaterpillar/pypet | pypet/pypetlogging.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/pypetlogging.py#L550-L574 | def _handle_config_parsing(self, log_config):
""" Checks for filenames within a config file and translates them.
Moreover, directories for the files are created as well.
:param log_config: Config file as a stream (like StringIO)
"""
parser = NoInterpolationParser()
parser.readfp(log_config)
rename_func = lambda string: rename_log_file(string,
env_name=self.env_name,
traj_name=self.traj_name,
set_name=self.set_name,
run_name=self.run_name)
sections = parser.sections()
for section in sections:
options = parser.options(section)
for option in options:
if option == 'args':
self._check_and_replace_parser_args(parser, section, option,
rename_func=rename_func)
return parser | [
"def",
"_handle_config_parsing",
"(",
"self",
",",
"log_config",
")",
":",
"parser",
"=",
"NoInterpolationParser",
"(",
")",
"parser",
".",
"readfp",
"(",
"log_config",
")",
"rename_func",
"=",
"lambda",
"string",
":",
"rename_log_file",
"(",
"string",
",",
"e... | Checks for filenames within a config file and translates them.
Moreover, directories for the files are created as well.
:param log_config: Config file as a stream (like StringIO) | [
"Checks",
"for",
"filenames",
"within",
"a",
"config",
"file",
"and",
"translates",
"them",
"."
] | python | test |
dbcli/cli_helpers | cli_helpers/tabular_output/preprocessors.py | https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/preprocessors.py#L11-L24 | def truncate_string(data, headers, max_field_width=None, **_):
"""Truncate very long strings. Only needed for tabular
representation, because trying to tabulate very long data
is problematic in terms of performance, and does not make any
sense visually.
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param int max_field_width: Width to truncate field for display
:return: The processed data and headers.
:rtype: tuple
"""
return (([utils.truncate_string(v, max_field_width) for v in row] for row in data),
[utils.truncate_string(h, max_field_width) for h in headers]) | [
"def",
"truncate_string",
"(",
"data",
",",
"headers",
",",
"max_field_width",
"=",
"None",
",",
"*",
"*",
"_",
")",
":",
"return",
"(",
"(",
"[",
"utils",
".",
"truncate_string",
"(",
"v",
",",
"max_field_width",
")",
"for",
"v",
"in",
"row",
"]",
"... | Truncate very long strings. Only needed for tabular
representation, because trying to tabulate very long data
is problematic in terms of performance, and does not make any
sense visually.
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param int max_field_width: Width to truncate field for display
:return: The processed data and headers.
:rtype: tuple | [
"Truncate",
"very",
"long",
"strings",
".",
"Only",
"needed",
"for",
"tabular",
"representation",
"because",
"trying",
"to",
"tabulate",
"very",
"long",
"data",
"is",
"problematic",
"in",
"terms",
"of",
"performance",
"and",
"does",
"not",
"make",
"any",
"sens... | python | test |
saltstack/salt | salt/modules/sysmod.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysmod.py#L920-L939 | def state_schema(module=''):
'''
Return a JSON Schema for the given state function(s)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' sys.state_schema
salt '*' sys.state_schema pkg.installed
'''
specs = state_argspec(module)
schemas = []
for state_mod, state_spec in specs.items():
schemas.append(_argspec_to_schema(state_mod, state_spec))
return schemas | [
"def",
"state_schema",
"(",
"module",
"=",
"''",
")",
":",
"specs",
"=",
"state_argspec",
"(",
"module",
")",
"schemas",
"=",
"[",
"]",
"for",
"state_mod",
",",
"state_spec",
"in",
"specs",
".",
"items",
"(",
")",
":",
"schemas",
".",
"append",
"(",
... | Return a JSON Schema for the given state function(s)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' sys.state_schema
salt '*' sys.state_schema pkg.installed | [
"Return",
"a",
"JSON",
"Schema",
"for",
"the",
"given",
"state",
"function",
"(",
"s",
")"
] | python | train |
tornadoweb/tornado | tornado/iostream.py | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/iostream.py#L447-L494 | def read_into(self, buf: bytearray, partial: bool = False) -> Awaitable[int]:
"""Asynchronously read a number of bytes.
``buf`` must be a writable buffer into which data will be read.
If ``partial`` is true, the callback is run as soon as any bytes
have been read. Otherwise, it is run when the ``buf`` has been
entirely filled with read data.
.. versionadded:: 5.0
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
"""
future = self._start_read()
# First copy data already in read buffer
available_bytes = self._read_buffer_size
n = len(buf)
if available_bytes >= n:
end = self._read_buffer_pos + n
buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos : end]
del self._read_buffer[:end]
self._after_user_read_buffer = self._read_buffer
elif available_bytes > 0:
buf[:available_bytes] = memoryview(self._read_buffer)[
self._read_buffer_pos :
]
# Set up the supplied buffer as our temporary read buffer.
# The original (if it had any data remaining) has been
# saved for later.
self._user_read_buffer = True
self._read_buffer = buf
self._read_buffer_pos = 0
self._read_buffer_size = available_bytes
self._read_bytes = n
self._read_partial = partial
try:
self._try_inline_read()
except:
future.add_done_callback(lambda f: f.exception())
raise
return future | [
"def",
"read_into",
"(",
"self",
",",
"buf",
":",
"bytearray",
",",
"partial",
":",
"bool",
"=",
"False",
")",
"->",
"Awaitable",
"[",
"int",
"]",
":",
"future",
"=",
"self",
".",
"_start_read",
"(",
")",
"# First copy data already in read buffer",
"availabl... | Asynchronously read a number of bytes.
``buf`` must be a writable buffer into which data will be read.
If ``partial`` is true, the callback is run as soon as any bytes
have been read. Otherwise, it is run when the ``buf`` has been
entirely filled with read data.
.. versionadded:: 5.0
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead. | [
"Asynchronously",
"read",
"a",
"number",
"of",
"bytes",
"."
] | python | train |
planetarypy/pvl | pvl/decoder.py | https://github.com/planetarypy/pvl/blob/ed92b284c4208439b033d28c9c176534c0faac0e/pvl/decoder.py#L433-L452 | def parse_object(self, stream):
"""Block Name must match Block Name in paired End Object Statement
if Block Name is present in End Object Statement StatementDelim.
BeginObjectStmt ::=
BeginObjectKeywd WSC AssignmentSymbol WSC BlockName StatementDelim
"""
self.expect_in(stream, self.begin_object_tokens)
self.ensure_assignment(stream)
name = self.next_token(stream)
self.skip_statement_delimiter(stream)
statements = self.parse_block(stream, self.has_end_object)
self.expect_in(stream, self.end_object_tokens)
self.parse_end_assignment(stream, name)
self.skip_statement_delimiter(stream)
return name.decode('utf-8'), PVLObject(statements) | [
"def",
"parse_object",
"(",
"self",
",",
"stream",
")",
":",
"self",
".",
"expect_in",
"(",
"stream",
",",
"self",
".",
"begin_object_tokens",
")",
"self",
".",
"ensure_assignment",
"(",
"stream",
")",
"name",
"=",
"self",
".",
"next_token",
"(",
"stream",... | Block Name must match Block Name in paired End Object Statement
if Block Name is present in End Object Statement StatementDelim.
BeginObjectStmt ::=
BeginObjectKeywd WSC AssignmentSymbol WSC BlockName StatementDelim | [
"Block",
"Name",
"must",
"match",
"Block",
"Name",
"in",
"paired",
"End",
"Object",
"Statement",
"if",
"Block",
"Name",
"is",
"present",
"in",
"End",
"Object",
"Statement",
"StatementDelim",
"."
] | python | train |
hendrix/hendrix | hendrix/contrib/concurrency/messaging.py | https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L125-L139 | def send_json_message(address, message, **kwargs):
"""
a shortcut for message sending
"""
data = {
'message': message,
}
if not kwargs.get('subject_id'):
data['subject_id'] = address
data.update(kwargs)
hxdispatcher.send(address, data) | [
"def",
"send_json_message",
"(",
"address",
",",
"message",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"'message'",
":",
"message",
",",
"}",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'subject_id'",
")",
":",
"data",
"[",
"'subject_id'",
"]",
... | a shortcut for message sending | [
"a",
"shortcut",
"for",
"message",
"sending"
] | python | train |
openpermissions/perch | perch/model.py | https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L541-L549 | def update(self, user, **kwargs):
"""If parent resource is not an editable state, should not be able to update"""
yield self.get_parent()
if not self.parent.editable:
err = 'Cannot update child of {} resource'.format(self.parent.state.name)
raise exceptions.Unauthorized(err)
yield super(SubResource, self).update(user, **kwargs) | [
"def",
"update",
"(",
"self",
",",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"yield",
"self",
".",
"get_parent",
"(",
")",
"if",
"not",
"self",
".",
"parent",
".",
"editable",
":",
"err",
"=",
"'Cannot update child of {} resource'",
".",
"format",
"(",
... | If parent resource is not an editable state, should not be able to update | [
"If",
"parent",
"resource",
"is",
"not",
"an",
"editable",
"state",
"should",
"not",
"be",
"able",
"to",
"update"
] | python | train |
flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L8353-L8359 | def tablet(self, x, rot=0):
"""Tablet test objective function"""
if rot and rot is not fcts.tablet:
x = rotate(x)
x = [x] if isscalar(x[0]) else x # scalar into list
f = [1e6 * x[0]**2 + sum(x[1:]**2) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar | [
"def",
"tablet",
"(",
"self",
",",
"x",
",",
"rot",
"=",
"0",
")",
":",
"if",
"rot",
"and",
"rot",
"is",
"not",
"fcts",
".",
"tablet",
":",
"x",
"=",
"rotate",
"(",
"x",
")",
"x",
"=",
"[",
"x",
"]",
"if",
"isscalar",
"(",
"x",
"[",
"0",
... | Tablet test objective function | [
"Tablet",
"test",
"objective",
"function"
] | python | train |
chrislit/abydos | abydos/distance/_typo.py | https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_typo.py#L392-L435 | def dist_typo(
src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
"""Return the normalized typo distance between two strings.
This is a wrapper for :py:meth:`Typo.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and shift, respectively (by default:
(1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless a
log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Normalized typo distance
Examples
--------
>>> round(dist_typo('cat', 'hat'), 12)
0.527046283086
>>> round(dist_typo('Niall', 'Neil'), 12)
0.565028142929
>>> round(dist_typo('Colin', 'Cuilen'), 12)
0.569035609563
>>> dist_typo('ATCG', 'TAGC')
0.625
"""
return Typo().dist(src, tar, metric, cost, layout) | [
"def",
"dist_typo",
"(",
"src",
",",
"tar",
",",
"metric",
"=",
"'euclidean'",
",",
"cost",
"=",
"(",
"1",
",",
"1",
",",
"0.5",
",",
"0.5",
")",
",",
"layout",
"=",
"'QWERTY'",
")",
":",
"return",
"Typo",
"(",
")",
".",
"dist",
"(",
"src",
","... | Return the normalized typo distance between two strings.
This is a wrapper for :py:meth:`Typo.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and shift, respectively (by default:
(1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless a
log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Normalized typo distance
Examples
--------
>>> round(dist_typo('cat', 'hat'), 12)
0.527046283086
>>> round(dist_typo('Niall', 'Neil'), 12)
0.565028142929
>>> round(dist_typo('Colin', 'Cuilen'), 12)
0.569035609563
>>> dist_typo('ATCG', 'TAGC')
0.625 | [
"Return",
"the",
"normalized",
"typo",
"distance",
"between",
"two",
"strings",
"."
] | python | valid |
pandas-dev/pandas | pandas/core/panel.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L991-L1054 | def apply(self, func, axis='major', **kwargs):
"""
Apply function along axis (or axes) of the Panel.
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
result : Panel, DataFrame, or Series
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4, 3, 2)) # doctest: +SKIP
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1) # doctest: +SKIP
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='major') # doctest: +SKIP
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1)) # doctest: +SKIP
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis) | [
"def",
"apply",
"(",
"self",
",",
"func",
",",
"axis",
"=",
"'major'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
"and",
"not",
"isinstance",
"(",
"func",
",",
"np",
".",
"ufunc",
")",
":",
"f",
"=",
"lambda",
"x",
":",
"func",
"(",
"x"... | Apply function along axis (or axes) of the Panel.
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
result : Panel, DataFrame, or Series
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4, 3, 2)) # doctest: +SKIP
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1) # doctest: +SKIP
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='major') # doctest: +SKIP
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1)) # doctest: +SKIP | [
"Apply",
"function",
"along",
"axis",
"(",
"or",
"axes",
")",
"of",
"the",
"Panel",
"."
] | python | train |
Microsoft/nni | examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py#L199-L222 | def receive_trial_result(self, parameter_id, parameters, value):
'''
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
'''
logger.debug('acquiring lock for param {}'.format(parameter_id))
self.thread_lock.acquire()
logger.debug('lock for current acquired')
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Minimize:
reward = -reward
logger.debug('receive trial result is:\n')
logger.debug(str(parameters))
logger.debug(str(reward))
indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]),
graph_cfg=graph_loads(parameters['graph']), result=reward)
self.population.append(indiv)
logger.debug('releasing lock')
self.thread_lock.release()
self.events[indiv.indiv_id].set() | [
"def",
"receive_trial_result",
"(",
"self",
",",
"parameter_id",
",",
"parameters",
",",
"value",
")",
":",
"logger",
".",
"debug",
"(",
"'acquiring lock for param {}'",
".",
"format",
"(",
"parameter_id",
")",
")",
"self",
".",
"thread_lock",
".",
"acquire",
... | Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward | [
"Record",
"an",
"observation",
"of",
"the",
"objective",
"function",
"parameter_id",
":",
"int",
"parameters",
":",
"dict",
"of",
"parameters",
"value",
":",
"final",
"metrics",
"of",
"the",
"trial",
"including",
"reward"
] | python | train |
nion-software/nionswift | nion/swift/model/FileStorageSystem.py | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/FileStorageSystem.py#L601-L658 | def auto_migrate_storage_system(*, persistent_storage_system=None, new_persistent_storage_system=None, data_item_uuids=None, deletions: typing.List[uuid.UUID] = None, utilized_deletions: typing.Set[uuid.UUID] = None, ignore_older_files: bool = True):
"""Migrate items from the storage system to the object context.
Files in data_item_uuids have already been loaded and are ignored (not migrated).
Files in deletes have been deleted in object context and are ignored (not migrated) and then added
to the utilized deletions list.
Data items will have persistent_object_context set upon return, but caller will need to call finish_reading
on each of the data items.
"""
storage_handlers = persistent_storage_system.find_data_items()
ReaderInfo = collections.namedtuple("ReaderInfo", ["properties", "changed_ref", "large_format", "storage_handler", "identifier"])
reader_info_list = list()
for storage_handler in storage_handlers:
try:
large_format = isinstance(storage_handler, HDF5Handler.HDF5Handler)
properties = Migration.transform_to_latest(storage_handler.read_properties())
reader_info = ReaderInfo(properties, [False], large_format, storage_handler, storage_handler.reference)
reader_info_list.append(reader_info)
except Exception as e:
logging.debug("Error reading %s", storage_handler.reference)
import traceback
traceback.print_exc()
traceback.print_stack()
library_storage_properties = persistent_storage_system.library_storage_properties
for deletion in copy.deepcopy(library_storage_properties.get("data_item_deletions", list())):
if not deletion in deletions:
deletions.append(deletion)
preliminary_library_updates = dict()
library_updates = dict()
if not ignore_older_files:
Migration.migrate_to_latest(reader_info_list, preliminary_library_updates)
good_reader_info_list = list()
count = len(reader_info_list)
for index, reader_info in enumerate(reader_info_list):
storage_handler = reader_info.storage_handler
properties = reader_info.properties
try:
version = properties.get("version", 0)
if version == DataItem.DataItem.writer_version:
data_item_uuid = uuid.UUID(properties["uuid"])
if not data_item_uuid in data_item_uuids:
if str(data_item_uuid) in deletions:
utilized_deletions.add(data_item_uuid)
else:
auto_migrate_data_item(reader_info, persistent_storage_system, new_persistent_storage_system, index, count)
good_reader_info_list.append(reader_info)
data_item_uuids.add(data_item_uuid)
library_update = preliminary_library_updates.get(data_item_uuid)
if library_update:
library_updates[data_item_uuid] = library_update
except Exception as e:
logging.debug("Error reading %s", storage_handler.reference)
import traceback
traceback.print_exc()
traceback.print_stack()
return good_reader_info_list, library_updates | [
"def",
"auto_migrate_storage_system",
"(",
"*",
",",
"persistent_storage_system",
"=",
"None",
",",
"new_persistent_storage_system",
"=",
"None",
",",
"data_item_uuids",
"=",
"None",
",",
"deletions",
":",
"typing",
".",
"List",
"[",
"uuid",
".",
"UUID",
"]",
"=... | Migrate items from the storage system to the object context.
Files in data_item_uuids have already been loaded and are ignored (not migrated).
Files in deletes have been deleted in object context and are ignored (not migrated) and then added
to the utilized deletions list.
Data items will have persistent_object_context set upon return, but caller will need to call finish_reading
on each of the data items. | [
"Migrate",
"items",
"from",
"the",
"storage",
"system",
"to",
"the",
"object",
"context",
"."
] | python | train |
kensho-technologies/graphql-compiler | graphql_compiler/schema_generation/schema_properties.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_properties.py#L126-L132 | def _parse_date_default_value(property_name, default_value_string):
"""Parse and return the default value for a date property."""
# OrientDB doesn't use ISO-8601 datetime format, so we have to parse it manually
# and then turn it into a python datetime object. strptime() will raise an exception
# if the provided value cannot be parsed correctly.
parsed_value = time.strptime(default_value_string, ORIENTDB_DATE_FORMAT)
return datetime.date(parsed_value.tm_year, parsed_value.tm_mon, parsed_value.tm_mday) | [
"def",
"_parse_date_default_value",
"(",
"property_name",
",",
"default_value_string",
")",
":",
"# OrientDB doesn't use ISO-8601 datetime format, so we have to parse it manually",
"# and then turn it into a python datetime object. strptime() will raise an exception",
"# if the provided value ca... | Parse and return the default value for a date property. | [
"Parse",
"and",
"return",
"the",
"default",
"value",
"for",
"a",
"date",
"property",
"."
] | python | train |
TUNE-Archive/freight_forwarder | freight_forwarder/container/host_config.py | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container/host_config.py#L568-L592 | def volumes_from(self, value):
"""
:param value:
:return:
"""
volumes_from = []
if isinstance(value, list):
for volume_from in value:
if not isinstance(volume_from, six.string_types):
raise TypeError("each bind must be a str. {0} was passed".format(volume_from))
volumes_from.append(self._convert_volume_from(volume_from))
elif isinstance(value, six.string_types):
volumes_from.append(self._convert_volume_from(value))
elif value is None:
pass
else:
raise ValueError(
"""When passing binds they must be in one of the
following formats: container_path, host_path:container_path,
or host_path:container_path:permissions"""
)
self._volumes_from = volumes_from | [
"def",
"volumes_from",
"(",
"self",
",",
"value",
")",
":",
"volumes_from",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"volume_from",
"in",
"value",
":",
"if",
"not",
"isinstance",
"(",
"volume_from",
",",
"six",
"."... | :param value:
:return: | [
":",
"param",
"value",
":",
":",
"return",
":"
] | python | train |
log2timeline/plaso | plaso/multi_processing/psort.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/psort.py#L222-L344 | def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None):
"""Analyzes events in a plaso storage.
Args:
storage_writer (StorageWriter): storage writer.
analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
should be run and their names.
event_filter (Optional[FilterObject]): event filter.
Returns:
collections.Counter: counter containing information about the events
processed and filtered.
Raises:
RuntimeError: if a non-recoverable situation is encountered.
"""
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_consumed_events = 0
self._number_of_consumed_reports = 0
self._number_of_consumed_sources = 0
self._number_of_consumed_warnings = 0
self._number_of_produced_events = 0
self._number_of_produced_reports = 0
self._number_of_produced_sources = 0
self._number_of_produced_warnings = 0
number_of_filtered_events = 0
logger.debug('Processing events.')
filter_limit = getattr(event_filter, 'limit', None)
for event in storage_writer.GetSortedEvents():
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
event_data = storage_writer.GetEventDataByIdentifier(
event_data_identifier)
if event_data:
for attribute_name, attribute_value in event_data.GetAttributes():
setattr(event, attribute_name, attribute_value)
event_identifier = event.GetIdentifier()
event.tag = self._event_tag_index.GetEventTagByIdentifier(
storage_writer, event_identifier)
if event_filter:
filter_match = event_filter.Match(event)
else:
filter_match = None
# pylint: disable=singleton-comparison
if filter_match == False:
number_of_filtered_events += 1
continue
for event_queue in self._event_queues.values():
# TODO: Check for premature exit of analysis plugins.
event_queue.PushItem(event)
self._number_of_consumed_events += 1
if (event_filter and filter_limit and
filter_limit == self._number_of_consumed_events):
break
logger.debug('Finished pushing events to analysis plugins.')
# Signal that we have finished adding events.
for event_queue in self._event_queues.values():
event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
logger.debug('Processing analysis plugin results.')
# TODO: use a task based approach.
plugin_names = [plugin_name for plugin_name in analysis_plugins.keys()]
while plugin_names:
for plugin_name in list(plugin_names):
if self._abort:
break
# TODO: temporary solution.
task = tasks.Task()
task.identifier = plugin_name
merge_ready = storage_writer.CheckTaskReadyForMerge(task)
if merge_ready:
storage_writer.PrepareMergeTaskStorage(task)
self._status = definitions.STATUS_INDICATOR_MERGING
event_queue = self._event_queues[plugin_name]
del self._event_queues[plugin_name]
event_queue.Close()
storage_merge_reader = storage_writer.StartMergeTaskStorage(task)
storage_merge_reader.MergeAttributeContainers(
callback=self._MergeEventTag)
# TODO: temporary solution.
plugin_names.remove(plugin_name)
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_produced_event_tags = (
storage_writer.number_of_event_tags)
self._number_of_produced_reports = (
storage_writer.number_of_analysis_reports)
try:
storage_writer.StopTaskStorage(abort=self._abort)
except (IOError, OSError) as exception:
logger.error('Unable to stop task storage with error: {0!s}'.format(
exception))
if self._abort:
logger.debug('Processing aborted.')
else:
logger.debug('Processing completed.')
events_counter = collections.Counter()
events_counter['Events filtered'] = number_of_filtered_events
events_counter['Events processed'] = self._number_of_consumed_events
return events_counter | [
"def",
"_AnalyzeEvents",
"(",
"self",
",",
"storage_writer",
",",
"analysis_plugins",
",",
"event_filter",
"=",
"None",
")",
":",
"self",
".",
"_status",
"=",
"definitions",
".",
"STATUS_INDICATOR_RUNNING",
"self",
".",
"_number_of_consumed_events",
"=",
"0",
"sel... | Analyzes events in a plaso storage.
Args:
storage_writer (StorageWriter): storage writer.
analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
should be run and their names.
event_filter (Optional[FilterObject]): event filter.
Returns:
collections.Counter: counter containing information about the events
processed and filtered.
Raises:
RuntimeError: if a non-recoverable situation is encountered. | [
"Analyzes",
"events",
"in",
"a",
"plaso",
"storage",
"."
] | python | train |
sunt05/SuPy | docs/source/proc_var_info/gen_df_state_csv.py | https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L105-L178 | def gen_df_site(
list_csv_in=list_table,
url_base=url_repo_input_site)->pd.DataFrame:
'''Generate description info of supy output results as a dataframe
Parameters
----------
path_csv_out : str, optional
path to the output csv file (the default is 'df_output.csv')
list_csv_in : list, optional
list of file names for csv files with meta info (the default is url_repo_input_site, which is defined at the top of this file)
url_base : URL, optional
URL to the input dir of repo base (the default is url_repo_input, which is defined at the top of this file)
Returns
-------
pd.DataFrame
full path to the output csv file
'''
# list of URLs
list_url_table = [
url_base/table for table in list_csv_in
]
try:
df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table])
# df_var_info = pd.concat(
# [pd.read_csv(f) for f in list_url_table],
# sort=False)
except:
for url in list_url_table:
if not url.get().ok:
print(f'{url} not existing!')
else:
# clean meta info
df_var_info_x = df_var_info\
.drop(['No.', 'Use'], axis=1)\
.set_index('Column Name')
df_var_info_x.index = df_var_info_x.index.map(
lambda x: x.replace('`', ''))
# retrieve SUEWS-related variables
dict_var_full = sp.supy_load.exp_dict_full(
sp.supy_load.dict_var2SiteSelect)
dict_var_ref_suews = {
k: extract_var_suews(dict_var_full, k)
for k in dict_var_full
}
df_var_ref_suews = pd.DataFrame(
{k: ', '.join(dict_var_ref_suews[k])
for k in dict_var_ref_suews},
index=[0]).T.rename({
0: 'SUEWS-related variables'
}, axis=1)
# retrive supy variable description
dict_var_desc = {
k: '\n'.join(df_var_info_x.loc[v].values.flatten())
for k, v in dict_var_ref_suews.items()
}
df_var_desc = pd.DataFrame(dict_var_desc, index=[0]).T\
.rename(columns={0: 'Description'})
# retrieve variable dimensionality
df_var_dim = gen_df_dim(df_init_sample)
df_var_site_raw = pd.concat(
[df_var_dim, df_var_desc, df_var_ref_suews],
axis=1, sort=False)
df_var_site = df_var_site_raw.filter(items=set_input, axis=0).dropna()
return df_var_site | [
"def",
"gen_df_site",
"(",
"list_csv_in",
"=",
"list_table",
",",
"url_base",
"=",
"url_repo_input_site",
")",
"->",
"pd",
".",
"DataFrame",
":",
"# list of URLs",
"list_url_table",
"=",
"[",
"url_base",
"/",
"table",
"for",
"table",
"in",
"list_csv_in",
"]",
... | Generate description info of supy output results as a dataframe
Parameters
----------
path_csv_out : str, optional
path to the output csv file (the default is 'df_output.csv')
list_csv_in : list, optional
list of file names for csv files with meta info (the default is url_repo_input_site, which is defined at the top of this file)
url_base : URL, optional
URL to the input dir of repo base (the default is url_repo_input, which is defined at the top of this file)
Returns
-------
pd.DataFrame
full path to the output csv file | [
"Generate",
"description",
"info",
"of",
"supy",
"output",
"results",
"as",
"a",
"dataframe"
] | python | train |
woolfson-group/isambard | isambard/ampal/ligands.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/ligands.py#L45-L50 | def category_count(self):
"""Returns the number of categories in `categories`."""
category_dict = self.categories
count_dict = {category: len(
category_dict[category]) for category in category_dict}
return count_dict | [
"def",
"category_count",
"(",
"self",
")",
":",
"category_dict",
"=",
"self",
".",
"categories",
"count_dict",
"=",
"{",
"category",
":",
"len",
"(",
"category_dict",
"[",
"category",
"]",
")",
"for",
"category",
"in",
"category_dict",
"}",
"return",
"count_... | Returns the number of categories in `categories`. | [
"Returns",
"the",
"number",
"of",
"categories",
"in",
"categories",
"."
] | python | train |
mathiasertl/django-ca | ca/django_ca/management/base.py | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/management/base.py#L281-L290 | def add_format(self, parser, default=Encoding.PEM, help_text=None, opts=None):
"""Add the --format option."""
if opts is None:
opts = ['-f', '--format']
if help_text is None:
help_text = 'The format to use ("ASN1" is an alias for "DER", default: %(default)s).'
help_text = help_text % {'default': default.name}
parser.add_argument(*opts, metavar='{PEM,ASN1,DER}', default=default,
action=FormatAction, help=help_text) | [
"def",
"add_format",
"(",
"self",
",",
"parser",
",",
"default",
"=",
"Encoding",
".",
"PEM",
",",
"help_text",
"=",
"None",
",",
"opts",
"=",
"None",
")",
":",
"if",
"opts",
"is",
"None",
":",
"opts",
"=",
"[",
"'-f'",
",",
"'--format'",
"]",
"if"... | Add the --format option. | [
"Add",
"the",
"--",
"format",
"option",
"."
] | python | train |
cackharot/suds-py3 | suds/properties.py | https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/properties.py#L173-L184 | def validate(self, value):
"""
Validate the I{value} is of the correct class.
@param value: The value to validate.
@type value: any
@raise AttributeError: When I{value} is invalid.
"""
if value is None:
return
if len(self.classes) and not isinstance(value, self.classes):
msg = '"%s" must be: %s' % (self.name, self.classes)
raise AttributeError(msg) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"if",
"len",
"(",
"self",
".",
"classes",
")",
"and",
"not",
"isinstance",
"(",
"value",
",",
"self",
".",
"classes",
")",
":",
"msg",
"=",
"'\"%s\"... | Validate the I{value} is of the correct class.
@param value: The value to validate.
@type value: any
@raise AttributeError: When I{value} is invalid. | [
"Validate",
"the",
"I",
"{",
"value",
"}",
"is",
"of",
"the",
"correct",
"class",
"."
] | python | train |
sirfoga/pyhal | hal/internet/email/templates.py | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/email/templates.py#L45-L58 | def get_mime_message(self):
"""Gets email MIME message
:return: Email formatted as HTML ready to be sent
"""
message = MIMEText(
"<html>" +
self.get_email_header() +
get_email_content(self.content_file) +
self.get_email_footer() +
"</html>", "html"
)
message["subject"] = self.email_subject
return message | [
"def",
"get_mime_message",
"(",
"self",
")",
":",
"message",
"=",
"MIMEText",
"(",
"\"<html>\"",
"+",
"self",
".",
"get_email_header",
"(",
")",
"+",
"get_email_content",
"(",
"self",
".",
"content_file",
")",
"+",
"self",
".",
"get_email_footer",
"(",
")",
... | Gets email MIME message
:return: Email formatted as HTML ready to be sent | [
"Gets",
"email",
"MIME",
"message"
] | python | train |
UCL-INGI/INGInious | inginious/frontend/pages/api/_api_page.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/api/_api_page.py#L31-L33 | def DELETE(self, *args, **kwargs):
""" DELETE request """
return self._handle_api(self.API_DELETE, args, kwargs) | [
"def",
"DELETE",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_handle_api",
"(",
"self",
".",
"API_DELETE",
",",
"args",
",",
"kwargs",
")"
] | DELETE request | [
"DELETE",
"request"
] | python | train |
kplindegaard/smbus2 | smbus2/smbus2.py | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L292-L308 | def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force | [
"def",
"_set_address",
"(",
"self",
",",
"address",
",",
"force",
"=",
"None",
")",
":",
"force",
"=",
"force",
"if",
"force",
"is",
"not",
"None",
"else",
"self",
".",
"force",
"if",
"self",
".",
"address",
"!=",
"address",
"or",
"self",
".",
"_forc... | Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean | [
"Set",
"i2c",
"slave",
"address",
"to",
"use",
"for",
"subsequent",
"calls",
"."
] | python | train |
pereorga/csvshuf | csvshuf/csvshuf.py | https://github.com/pereorga/csvshuf/blob/70fdd4f512ef980bffe9cc51bfe59fea116d7c2f/csvshuf/csvshuf.py#L19-L24 | def shuffle_sattolo(items):
"""Shuffle items in place using Sattolo's algorithm."""
_randrange = random.randrange
for i in reversed(range(1, len(items))):
j = _randrange(i) # 0 <= j < i
items[j], items[i] = items[i], items[j] | [
"def",
"shuffle_sattolo",
"(",
"items",
")",
":",
"_randrange",
"=",
"random",
".",
"randrange",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"items",
")",
")",
")",
":",
"j",
"=",
"_randrange",
"(",
"i",
")",
"# 0 <= j < i"... | Shuffle items in place using Sattolo's algorithm. | [
"Shuffle",
"items",
"in",
"place",
"using",
"Sattolo",
"s",
"algorithm",
"."
] | python | train |
cocaine/cocaine-tools | cocaine/tools/dispatch.py | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1863-L1876 | def access_storage_edit(name, cid, uid, perm, **kwargs):
"""
Edits ACL for the specified collection.
Creates if necessary.
"""
ctx = Context(**kwargs)
ctx.execute_action('access:storage:edit', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
'cids': cid,
'uids': uid,
'perm': perm,
}) | [
"def",
"access_storage_edit",
"(",
"name",
",",
"cid",
",",
"uid",
",",
"perm",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"Context",
"(",
"*",
"*",
"kwargs",
")",
"ctx",
".",
"execute_action",
"(",
"'access:storage:edit'",
",",
"*",
"*",
"{",
"'... | Edits ACL for the specified collection.
Creates if necessary. | [
"Edits",
"ACL",
"for",
"the",
"specified",
"collection",
"."
] | python | train |
crs4/hl7apy | hl7apy/core.py | https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/core.py#L435-L447 | def _find_name(self, name):
"""
Find the reference of a child having the given name
:type name: ``str``
:param name: the child name (e.g. PID)
:return: the element structure (see :func:`load_reference <hl7apy.load_reference>`) or ``None`` if the
element has not been found
"""
name = name.upper()
element = self.element.find_child_reference(name)
return element['name'] if element is not None else None | [
"def",
"_find_name",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"name",
".",
"upper",
"(",
")",
"element",
"=",
"self",
".",
"element",
".",
"find_child_reference",
"(",
"name",
")",
"return",
"element",
"[",
"'name'",
"]",
"if",
"element",
"is",... | Find the reference of a child having the given name
:type name: ``str``
:param name: the child name (e.g. PID)
:return: the element structure (see :func:`load_reference <hl7apy.load_reference>`) or ``None`` if the
element has not been found | [
"Find",
"the",
"reference",
"of",
"a",
"child",
"having",
"the",
"given",
"name"
] | python | train |
annoviko/pyclustering | pyclustering/cluster/bang.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L1167-L1191 | def __expand_cluster_block(self, block, cluster_index, leaf_blocks, unhandled_block_indexes):
"""!
@brief Expand cluster from specific block that is considered as a central block.
@param[in] block (bang_block): Block that is considered as a central block for cluster.
@param[in] cluster_index (uint): Index of cluster that is assigned to blocks that forms new cluster.
@param[in] leaf_blocks (list): Leaf BANG-blocks that are considered during cluster formation.
@param[in] unhandled_block_indexes (set): Set of candidates (BANG block indexes) to become a cluster member. The
parameter helps to reduce traversing among BANG-block providing only restricted set of block that
should be considered.
"""
block.set_cluster(cluster_index)
self.__update_cluster_dendrogram(cluster_index, [block])
neighbors = self.__find_block_neighbors(block, leaf_blocks, unhandled_block_indexes)
self.__update_cluster_dendrogram(cluster_index, neighbors)
for neighbor in neighbors:
neighbor.set_cluster(cluster_index)
neighbor_neighbors = self.__find_block_neighbors(neighbor, leaf_blocks, unhandled_block_indexes)
self.__update_cluster_dendrogram(cluster_index, neighbor_neighbors)
neighbors += neighbor_neighbors | [
"def",
"__expand_cluster_block",
"(",
"self",
",",
"block",
",",
"cluster_index",
",",
"leaf_blocks",
",",
"unhandled_block_indexes",
")",
":",
"block",
".",
"set_cluster",
"(",
"cluster_index",
")",
"self",
".",
"__update_cluster_dendrogram",
"(",
"cluster_index",
... | !
@brief Expand cluster from specific block that is considered as a central block.
@param[in] block (bang_block): Block that is considered as a central block for cluster.
@param[in] cluster_index (uint): Index of cluster that is assigned to blocks that forms new cluster.
@param[in] leaf_blocks (list): Leaf BANG-blocks that are considered during cluster formation.
@param[in] unhandled_block_indexes (set): Set of candidates (BANG block indexes) to become a cluster member. The
parameter helps to reduce traversing among BANG-block providing only restricted set of block that
should be considered. | [
"!"
] | python | valid |
databricks/spark-sklearn | python/spark_sklearn/converter.py | https://github.com/databricks/spark-sklearn/blob/cbde36f6311b73d967e2ec8a97040dfd71eca579/python/spark_sklearn/converter.py#L131-L163 | def toPandas(self, df):
"""
This is similar to the Spark DataFrame built-in toPandas() method, but it handles
MLlib Vector columns differently. It converts MLlib Vectors into rows of
scipy.sparse.csr_matrix, which is generally friendlier for PyData tools like scikit-learn.
.. note:: Experimental: This will likely be replaced in later releases with improved APIs.
:param df: Spark DataFrame
:return: Pandas dataframe
"""
cols = df.columns
# Convert any MLlib Vector columns to scipy.sparse.csr_matrix
matrixCols = []
def toscipy(v):
if isinstance(v, DenseVector):
return csr_matrix((v.values, np.array(range(v.size)), np.array([0, v.size])),
shape=(1, v.size))
elif isinstance(v, SparseVector):
return csr_matrix((v.values, v.indices, np.array([0, len(v.indices)])),
shape=(1, v.size))
else:
raise TypeError("Converter.toPandas found unknown Vector type: %s" % type(v))
tosparse = udf(lambda v: toscipy(v), CSRVectorUDT())
for i in range(len(cols)):
c = cols[i]
if isinstance(df.schema.fields[i].dataType, VectorUDT):
cols[i] = tosparse(df[c]).alias(c)
matrixCols.append(c)
else:
cols[i] = df[c]
return df.select(*cols).toPandas() | [
"def",
"toPandas",
"(",
"self",
",",
"df",
")",
":",
"cols",
"=",
"df",
".",
"columns",
"# Convert any MLlib Vector columns to scipy.sparse.csr_matrix",
"matrixCols",
"=",
"[",
"]",
"def",
"toscipy",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"De... | This is similar to the Spark DataFrame built-in toPandas() method, but it handles
MLlib Vector columns differently. It converts MLlib Vectors into rows of
scipy.sparse.csr_matrix, which is generally friendlier for PyData tools like scikit-learn.
.. note:: Experimental: This will likely be replaced in later releases with improved APIs.
:param df: Spark DataFrame
:return: Pandas dataframe | [
"This",
"is",
"similar",
"to",
"the",
"Spark",
"DataFrame",
"built",
"-",
"in",
"toPandas",
"()",
"method",
"but",
"it",
"handles",
"MLlib",
"Vector",
"columns",
"differently",
".",
"It",
"converts",
"MLlib",
"Vectors",
"into",
"rows",
"of",
"scipy",
".",
... | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.