text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def channels_voice_greeting_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/voice-api/greetings#create-greetings"
api_path = "/api/v2/channels/voice/greetings.json"
return self.call(api_path, method="POST", data=data, **kwargs) | [
"def",
"channels_voice_greeting_create",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/channels/voice/greetings.json\"",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"POST\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] | 68.75 | 28.75 |
def do_indent(s, width=4, indentfirst=False):
"""Return a copy of the passed string, each line indented by
4 spaces. The first line is not indented. If you want to
change the number of spaces or indent the first line too
you can pass additional parameters to the filter:
.. sourcecode:: jinja
{{ mytext|indent(2, true) }}
indent by two spaces and indent the first line too.
"""
indention = u' ' * width
rv = (u'\n' + indention).join(s.splitlines())
if indentfirst:
rv = indention + rv
return rv | [
"def",
"do_indent",
"(",
"s",
",",
"width",
"=",
"4",
",",
"indentfirst",
"=",
"False",
")",
":",
"indention",
"=",
"u' '",
"*",
"width",
"rv",
"=",
"(",
"u'\\n'",
"+",
"indention",
")",
".",
"join",
"(",
"s",
".",
"splitlines",
"(",
")",
")",
"if",
"indentfirst",
":",
"rv",
"=",
"indention",
"+",
"rv",
"return",
"rv"
] | 34.3125 | 16.375 |
def legacy_write(self, request_id, msg, max_doc_size, with_last_error):
"""Send OP_INSERT, etc., optionally returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, an OP_INSERT, OP_UPDATE, or OP_DELETE message,
perhaps with a getlasterror command appended.
- `max_doc_size`: size in bytes of the largest document in `msg`.
- `with_last_error`: True if a getlasterror command is appended.
"""
self._raise_if_not_writable(not with_last_error)
self.send_message(msg, max_doc_size)
if with_last_error:
reply = self.receive_message(request_id)
return helpers._check_gle_response(reply.command_response()) | [
"def",
"legacy_write",
"(",
"self",
",",
"request_id",
",",
"msg",
",",
"max_doc_size",
",",
"with_last_error",
")",
":",
"self",
".",
"_raise_if_not_writable",
"(",
"not",
"with_last_error",
")",
"self",
".",
"send_message",
"(",
"msg",
",",
"max_doc_size",
")",
"if",
"with_last_error",
":",
"reply",
"=",
"self",
".",
"receive_message",
"(",
"request_id",
")",
"return",
"helpers",
".",
"_check_gle_response",
"(",
"reply",
".",
"command_response",
"(",
")",
")"
] | 44.055556 | 21.611111 |
def download_object(container_name, object_name, destination_path, profile,
overwrite_existing=False, delete_on_failure=True, **libcloud_kwargs):
'''
Download an object to the specified destination path.
:param container_name: Container name
:type container_name: ``str``
:param object_name: Object name
:type object_name: ``str``
:param destination_path: Full path to a file or a directory where the
incoming file will be saved.
:type destination_path: ``str``
:param profile: The profile key
:type profile: ``str``
:param overwrite_existing: True to overwrite an existing file,
defaults to False.
:type overwrite_existing: ``bool``
:param delete_on_failure: True to delete a partially downloaded file if
the download was not successful (hash
mismatch / file size).
:type delete_on_failure: ``bool``
:param libcloud_kwargs: Extra arguments for the driver's download_object method
:type libcloud_kwargs: ``dict``
:return: True if an object has been successfully downloaded, False
otherwise.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.download_object MyFolder me.jpg /tmp/me.jpg profile1
'''
conn = _get_driver(profile=profile)
obj = conn.get_object(container_name, object_name)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
return conn.download_object(obj, destination_path, overwrite_existing, delete_on_failure, **libcloud_kwargs) | [
"def",
"download_object",
"(",
"container_name",
",",
"object_name",
",",
"destination_path",
",",
"profile",
",",
"overwrite_existing",
"=",
"False",
",",
"delete_on_failure",
"=",
"True",
",",
"*",
"*",
"libcloud_kwargs",
")",
":",
"conn",
"=",
"_get_driver",
"(",
"profile",
"=",
"profile",
")",
"obj",
"=",
"conn",
".",
"get_object",
"(",
"container_name",
",",
"object_name",
")",
"libcloud_kwargs",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"clean_kwargs",
"(",
"*",
"*",
"libcloud_kwargs",
")",
"return",
"conn",
".",
"download_object",
"(",
"obj",
",",
"destination_path",
",",
"overwrite_existing",
",",
"delete_on_failure",
",",
"*",
"*",
"libcloud_kwargs",
")"
] | 36.333333 | 26.244444 |
def zharkov_panh(v, temp, v0, a0, m, n, z, t_ref=300.,
three_r=3. * constants.R):
"""
calculate pressure from anharmonicity for Zharkov equation
the equation is from Dorogokupets 2015
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param a0: parameter in K-1 for the Zharkov equation
:param m: parameter for the Zharkov equation
:param n: number of elements in a chemical formula
:param z: number of formula unit in a unit cell
:param three_r: 3 times gas constant
:return: anharmonic contribution for pressure in GPa
"""
v_mol = vol_uc2mol(v, z)
x = v / v0
a = a0 * np.power(x, m)
def f(t):
return three_r * n / 2. * a * m / v_mol * np.power(t, 2.) * 1.e-9
return f(temp) - f(t_ref) | [
"def",
"zharkov_panh",
"(",
"v",
",",
"temp",
",",
"v0",
",",
"a0",
",",
"m",
",",
"n",
",",
"z",
",",
"t_ref",
"=",
"300.",
",",
"three_r",
"=",
"3.",
"*",
"constants",
".",
"R",
")",
":",
"v_mol",
"=",
"vol_uc2mol",
"(",
"v",
",",
"z",
")",
"x",
"=",
"v",
"/",
"v0",
"a",
"=",
"a0",
"*",
"np",
".",
"power",
"(",
"x",
",",
"m",
")",
"def",
"f",
"(",
"t",
")",
":",
"return",
"three_r",
"*",
"n",
"/",
"2.",
"*",
"a",
"*",
"m",
"/",
"v_mol",
"*",
"np",
".",
"power",
"(",
"t",
",",
"2.",
")",
"*",
"1.e-9",
"return",
"f",
"(",
"temp",
")",
"-",
"f",
"(",
"t_ref",
")"
] | 34.208333 | 15.208333 |
def register_file_monitor(filename, target=None):
"""Maps a specific file/directory modification event to a signal.
:param str|unicode filename: File or a directory to watch for its modification.
:param int|Signal|str|unicode target: Existing signal to raise
or Signal Target to register signal implicitly.
Available targets:
* ``workers`` - run the signal handler on all the workers
* ``workerN`` - run the signal handler only on worker N
* ``worker``/``worker0`` - run the signal handler on the first available worker
* ``active-workers`` - run the signal handlers on all the active [non-cheaped] workers
* ``mules`` - run the signal handler on all of the mules
* ``muleN`` - run the signal handler on mule N
* ``mule``/``mule0`` - run the signal handler on the first available mule
* ``spooler`` - run the signal on the first available spooler
* ``farmN/farm_XXX`` - run the signal handler in the mule farm N or named XXX
:raises ValueError: If unable to register monitor.
"""
return _automate_signal(target, func=lambda sig: uwsgi.add_file_monitor(int(sig), filename)) | [
"def",
"register_file_monitor",
"(",
"filename",
",",
"target",
"=",
"None",
")",
":",
"return",
"_automate_signal",
"(",
"target",
",",
"func",
"=",
"lambda",
"sig",
":",
"uwsgi",
".",
"add_file_monitor",
"(",
"int",
"(",
"sig",
")",
",",
"filename",
")",
")"
] | 48.2 | 31.88 |
async def send_event(self, con, name, payload):
"""Send an event to a client connection.
This method will push an event message to the client with the given
name and payload. You need to have access to the the ``connection``
object for the client, which is only available once the client has
connected and passed to self.prepare_conn(connection).
Args:
con (websockets.Connection): The connection to use to send
the event.
name (str): The name of the event to send.
payload (object): The msgpack-serializable object so send
as the event's payload.
"""
message = dict(type="event", name=name, payload=payload)
encoded = pack(message)
await con.send(encoded) | [
"async",
"def",
"send_event",
"(",
"self",
",",
"con",
",",
"name",
",",
"payload",
")",
":",
"message",
"=",
"dict",
"(",
"type",
"=",
"\"event\"",
",",
"name",
"=",
"name",
",",
"payload",
"=",
"payload",
")",
"encoded",
"=",
"pack",
"(",
"message",
")",
"await",
"con",
".",
"send",
"(",
"encoded",
")"
] | 41.578947 | 21.631579 |
async def CharmArchiveSha256(self, urls):
'''
urls : typing.Sequence[~CharmURL]
Returns -> typing.Sequence[~StringResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CharmArchiveSha256',
version=5,
params=_params)
_params['urls'] = urls
reply = await self.rpc(msg)
return reply | [
"async",
"def",
"CharmArchiveSha256",
"(",
"self",
",",
"urls",
")",
":",
"# map input types to rpc msg",
"_params",
"=",
"dict",
"(",
")",
"msg",
"=",
"dict",
"(",
"type",
"=",
"'Uniter'",
",",
"request",
"=",
"'CharmArchiveSha256'",
",",
"version",
"=",
"5",
",",
"params",
"=",
"_params",
")",
"_params",
"[",
"'urls'",
"]",
"=",
"urls",
"reply",
"=",
"await",
"self",
".",
"rpc",
"(",
"msg",
")",
"return",
"reply"
] | 31.571429 | 11.142857 |
def multiChoiceParam(parameters, name, type_converter = str):
""" multi choice parameter values.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str'
:returns dictionary: value -> values
"""
param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
value = param.find('Value')
values = param.find('Values')
return [type_converter(values[int(item.text)].text) for item in value.findall('Item')] | [
"def",
"multiChoiceParam",
"(",
"parameters",
",",
"name",
",",
"type_converter",
"=",
"str",
")",
":",
"param",
"=",
"parameters",
".",
"find",
"(",
"\".//MultiChoiceParam[@Name='{name}']\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"value",
"=",
"param",
".",
"find",
"(",
"'Value'",
")",
"values",
"=",
"param",
".",
"find",
"(",
"'Values'",
")",
"return",
"[",
"type_converter",
"(",
"values",
"[",
"int",
"(",
"item",
".",
"text",
")",
"]",
".",
"text",
")",
"for",
"item",
"in",
"value",
".",
"findall",
"(",
"'Item'",
")",
"]"
] | 53.909091 | 20 |
def role_get(user):
'''
List roles for user
user : string
username
CLI Example:
.. code-block:: bash
salt '*' rbac.role_get leo
'''
user_roles = []
## read user_attr file (user:qualifier:res1:res2:attr)
with salt.utils.files.fopen('/etc/user_attr', 'r') as user_attr:
for role in user_attr:
role = salt.utils.stringutils.to_unicode(role)
role = role.strip().strip().split(':')
# skip comments and non complaint lines
if len(role) != 5:
continue
# skip other users
if role[0] != user:
continue
# parse attr
attrs = {}
for attr in role[4].strip().split(';'):
attr_key, attr_val = attr.strip().split('=')
if attr_key in ['auths', 'profiles', 'roles']:
attrs[attr_key] = attr_val.strip().split(',')
else:
attrs[attr_key] = attr_val
if 'roles' in attrs:
user_roles.extend(attrs['roles'])
return list(set(user_roles)) | [
"def",
"role_get",
"(",
"user",
")",
":",
"user_roles",
"=",
"[",
"]",
"## read user_attr file (user:qualifier:res1:res2:attr)",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'/etc/user_attr'",
",",
"'r'",
")",
"as",
"user_attr",
":",
"for",
"role",
"in",
"user_attr",
":",
"role",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"role",
")",
"role",
"=",
"role",
".",
"strip",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"':'",
")",
"# skip comments and non complaint lines",
"if",
"len",
"(",
"role",
")",
"!=",
"5",
":",
"continue",
"# skip other users",
"if",
"role",
"[",
"0",
"]",
"!=",
"user",
":",
"continue",
"# parse attr",
"attrs",
"=",
"{",
"}",
"for",
"attr",
"in",
"role",
"[",
"4",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"';'",
")",
":",
"attr_key",
",",
"attr_val",
"=",
"attr",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'='",
")",
"if",
"attr_key",
"in",
"[",
"'auths'",
",",
"'profiles'",
",",
"'roles'",
"]",
":",
"attrs",
"[",
"attr_key",
"]",
"=",
"attr_val",
".",
"strip",
"(",
")",
".",
"split",
"(",
"','",
")",
"else",
":",
"attrs",
"[",
"attr_key",
"]",
"=",
"attr_val",
"if",
"'roles'",
"in",
"attrs",
":",
"user_roles",
".",
"extend",
"(",
"attrs",
"[",
"'roles'",
"]",
")",
"return",
"list",
"(",
"set",
"(",
"user_roles",
")",
")"
] | 26.829268 | 21.853659 |
def write_block(self, block_, body):
"""Outputs the boilerplate necessary for code blocks like functions.
Args:
block_: The Block object representing the code block.
body: String containing Go code making up the body of the code block.
"""
self.write('for ; πF.State() >= 0; πF.PopCheckpoint() {')
with self.indent_block():
self.write('switch πF.State() {')
self.write('case 0:')
for checkpoint in block_.checkpoints:
self.write_tmpl('case $state: goto Label$state', state=checkpoint)
self.write('default: panic("unexpected function state")')
self.write('}')
# Assume that body is aligned with goto labels.
with self.indent_block(-1):
self.write(body)
self.write('}') | [
"def",
"write_block",
"(",
"self",
",",
"block_",
",",
"body",
")",
":",
"self",
".",
"write",
"(",
"'for ; πF.State() >= 0; πF.PopCheckpoint() {')",
"",
"with",
"self",
".",
"indent_block",
"(",
")",
":",
"self",
".",
"write",
"(",
"'switch πF.State() {')",
"",
"self",
".",
"write",
"(",
"'case 0:'",
")",
"for",
"checkpoint",
"in",
"block_",
".",
"checkpoints",
":",
"self",
".",
"write_tmpl",
"(",
"'case $state: goto Label$state'",
",",
"state",
"=",
"checkpoint",
")",
"self",
".",
"write",
"(",
"'default: panic(\"unexpected function state\")'",
")",
"self",
".",
"write",
"(",
"'}'",
")",
"# Assume that body is aligned with goto labels.",
"with",
"self",
".",
"indent_block",
"(",
"-",
"1",
")",
":",
"self",
".",
"write",
"(",
"body",
")",
"self",
".",
"write",
"(",
"'}'",
")"
] | 39.157895 | 16.368421 |
def multi_option(*param_decls, **attrs):
"""modify help text and indicate option is permitted multiple times
:param param_decls:
:param attrs:
:return:
"""
attrhelp = attrs.get('help', None)
if attrhelp is not None:
newhelp = attrhelp + " (multiple occurrence permitted)"
attrs['help'] = newhelp
attrs['multiple'] = True
return click.option(*param_decls, **attrs) | [
"def",
"multi_option",
"(",
"*",
"param_decls",
",",
"*",
"*",
"attrs",
")",
":",
"attrhelp",
"=",
"attrs",
".",
"get",
"(",
"'help'",
",",
"None",
")",
"if",
"attrhelp",
"is",
"not",
"None",
":",
"newhelp",
"=",
"attrhelp",
"+",
"\" (multiple occurrence permitted)\"",
"attrs",
"[",
"'help'",
"]",
"=",
"newhelp",
"attrs",
"[",
"'multiple'",
"]",
"=",
"True",
"return",
"click",
".",
"option",
"(",
"*",
"param_decls",
",",
"*",
"*",
"attrs",
")"
] | 28.857143 | 15.142857 |
def _create_request_map(cls, input_map):
"""Create request map."""
mapped = super(Certificate, cls)._create_request_map(input_map)
if mapped.get('service') == CertificateType.developer:
mapped['service'] = CertificateType.bootstrap
return mapped | [
"def",
"_create_request_map",
"(",
"cls",
",",
"input_map",
")",
":",
"mapped",
"=",
"super",
"(",
"Certificate",
",",
"cls",
")",
".",
"_create_request_map",
"(",
"input_map",
")",
"if",
"mapped",
".",
"get",
"(",
"'service'",
")",
"==",
"CertificateType",
".",
"developer",
":",
"mapped",
"[",
"'service'",
"]",
"=",
"CertificateType",
".",
"bootstrap",
"return",
"mapped"
] | 47.333333 | 14.833333 |
def schedule(self, callback, timeout=100):
'''Schedule a function to be called repeated time.
This method can be used to perform animations.
**Example**
This is a typical way to perform an animation, just::
from chemlab.graphics.qt import QtViewer
from chemlab.graphics.renderers import SphereRenderer
v = QtViewer()
sr = v.add_renderer(SphereRenderer, centers, radii, colors)
def update():
# calculate new_positions
sr.update_positions(new_positions)
v.widget.repaint()
v.schedule(update)
v.run()
.. note:: remember to call QtViewer.widget.repaint() each
once you want to update the display.
**Parameters**
callback: function()
A function that takes no arguments that will be
called at intervals.
timeout: int
Time in milliseconds between calls of the *callback*
function.
**Returns**
a `QTimer`, to stop the animation you can use `Qtimer.stop`
'''
timer = QTimer(self)
timer.timeout.connect(callback)
timer.start(timeout)
return timer | [
"def",
"schedule",
"(",
"self",
",",
"callback",
",",
"timeout",
"=",
"100",
")",
":",
"timer",
"=",
"QTimer",
"(",
"self",
")",
"timer",
".",
"timeout",
".",
"connect",
"(",
"callback",
")",
"timer",
".",
"start",
"(",
"timeout",
")",
"return",
"timer"
] | 30.395349 | 20.860465 |
def getAttributeValueData(self, index):
"""
Return the data of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA] | [
"def",
"getAttributeValueData",
"(",
"self",
",",
"index",
")",
":",
"offset",
"=",
"self",
".",
"_get_attribute_offset",
"(",
"index",
")",
"return",
"self",
".",
"m_attributes",
"[",
"offset",
"+",
"const",
".",
"ATTRIBUTE_IX_VALUE_DATA",
"]"
] | 35.75 | 13.25 |
def angvel(target, current, scale):
'''Use sigmoid function to choose a delta that will help smoothly steer from current angle to target angle.'''
delta = target - current
while delta < -180:
delta += 360;
while delta > 180:
delta -= 360;
return (old_div(2.0, (1.0 + math.exp(old_div(-delta,scale))))) - 1.0 | [
"def",
"angvel",
"(",
"target",
",",
"current",
",",
"scale",
")",
":",
"delta",
"=",
"target",
"-",
"current",
"while",
"delta",
"<",
"-",
"180",
":",
"delta",
"+=",
"360",
"while",
"delta",
">",
"180",
":",
"delta",
"-=",
"360",
"return",
"(",
"old_div",
"(",
"2.0",
",",
"(",
"1.0",
"+",
"math",
".",
"exp",
"(",
"old_div",
"(",
"-",
"delta",
",",
"scale",
")",
")",
")",
")",
")",
"-",
"1.0"
] | 42 | 24.5 |
def H3(self):
"Correlation."
multiplied = np.dot(self.levels[:, np.newaxis] + 1,
self.levels[np.newaxis] + 1)
repeated = np.tile(multiplied[np.newaxis], (self.nobjects, 1, 1))
summed = (repeated * self.P).sum(2).sum(1)
h3 = (summed - self.mux * self.muy) / (self.sigmax * self.sigmay)
h3[np.isinf(h3)] = 0
return h3 | [
"def",
"H3",
"(",
"self",
")",
":",
"multiplied",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"levels",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"+",
"1",
",",
"self",
".",
"levels",
"[",
"np",
".",
"newaxis",
"]",
"+",
"1",
")",
"repeated",
"=",
"np",
".",
"tile",
"(",
"multiplied",
"[",
"np",
".",
"newaxis",
"]",
",",
"(",
"self",
".",
"nobjects",
",",
"1",
",",
"1",
")",
")",
"summed",
"=",
"(",
"repeated",
"*",
"self",
".",
"P",
")",
".",
"sum",
"(",
"2",
")",
".",
"sum",
"(",
"1",
")",
"h3",
"=",
"(",
"summed",
"-",
"self",
".",
"mux",
"*",
"self",
".",
"muy",
")",
"/",
"(",
"self",
".",
"sigmax",
"*",
"self",
".",
"sigmay",
")",
"h3",
"[",
"np",
".",
"isinf",
"(",
"h3",
")",
"]",
"=",
"0",
"return",
"h3"
] | 43.444444 | 21.222222 |
def _read_object(self, correlation_id, parameters):
"""
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameters the configuration.
:return: a JSON object with configuration.
"""
path = self.get_path()
if path == None:
raise ConfigException(correlation_id, "NO_PATH", "Missing config file path")
if not os.path.isfile(path):
raise FileException(correlation_id, 'FILE_NOT_FOUND', 'Config file was not found at ' + path)
try:
with open(path, 'r') as file:
config = file.read()
config = self._parameterize(config, parameters)
return yaml.load(config)
except Exception as ex:
raise FileException(
correlation_id,
"READ_FAILED",
"Failed reading configuration " + path + ": " + str(ex)
).with_details("path", path).with_cause(ex) | [
"def",
"_read_object",
"(",
"self",
",",
"correlation_id",
",",
"parameters",
")",
":",
"path",
"=",
"self",
".",
"get_path",
"(",
")",
"if",
"path",
"==",
"None",
":",
"raise",
"ConfigException",
"(",
"correlation_id",
",",
"\"NO_PATH\"",
",",
"\"Missing config file path\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"raise",
"FileException",
"(",
"correlation_id",
",",
"'FILE_NOT_FOUND'",
",",
"'Config file was not found at '",
"+",
"path",
")",
"try",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"file",
":",
"config",
"=",
"file",
".",
"read",
"(",
")",
"config",
"=",
"self",
".",
"_parameterize",
"(",
"config",
",",
"parameters",
")",
"return",
"yaml",
".",
"load",
"(",
"config",
")",
"except",
"Exception",
"as",
"ex",
":",
"raise",
"FileException",
"(",
"correlation_id",
",",
"\"READ_FAILED\"",
",",
"\"Failed reading configuration \"",
"+",
"path",
"+",
"\": \"",
"+",
"str",
"(",
"ex",
")",
")",
".",
"with_details",
"(",
"\"path\"",
",",
"path",
")",
".",
"with_cause",
"(",
"ex",
")"
] | 38.551724 | 22.758621 |
def check_2d(inp):
"""
Check input to be a matrix. Converts lists of lists to np.ndarray.
Also allows the input to be a scipy sparse matrix.
Parameters
----------
inp : obj
Input matrix
Returns
-------
numpy.ndarray, scipy.sparse or None
Input matrix or None
Examples
--------
>>> check_2d([[0, 1], [2, 3]])
[[0, 1], [2, 3]]
>>> check_2d('test')
None
"""
if isinstance(inp, list):
return check_2d(np.array(inp))
if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
if inp.ndim == 2: # input is a dense matrix
return inp
if sps.issparse(inp):
if inp.ndim == 2: # input is a sparse matrix
return inp | [
"def",
"check_2d",
"(",
"inp",
")",
":",
"if",
"isinstance",
"(",
"inp",
",",
"list",
")",
":",
"return",
"check_2d",
"(",
"np",
".",
"array",
"(",
"inp",
")",
")",
"if",
"isinstance",
"(",
"inp",
",",
"(",
"np",
".",
"ndarray",
",",
"np",
".",
"matrixlib",
".",
"defmatrix",
".",
"matrix",
")",
")",
":",
"if",
"inp",
".",
"ndim",
"==",
"2",
":",
"# input is a dense matrix",
"return",
"inp",
"if",
"sps",
".",
"issparse",
"(",
"inp",
")",
":",
"if",
"inp",
".",
"ndim",
"==",
"2",
":",
"# input is a sparse matrix",
"return",
"inp"
] | 22.030303 | 21.727273 |
def ar_path_to_x_path(ar_path, dest_element=None):
# type: (str, typing.Optional[str]) -> str
"""Get path in translation-dictionary."""
ar_path_elements = ar_path.strip('/').split('/')
xpath = "."
for element in ar_path_elements[:-1]:
xpath += "//A:SHORT-NAME[text()='" + element + "']/.."
if dest_element:
xpath += "//A:" + dest_element + "/A:SHORT-NAME[text()='" + ar_path_elements[-1] + "']/.."
else:
xpath += "//A:SHORT-NAME[text()='" + ar_path_elements[-1] + "']/.."
return xpath | [
"def",
"ar_path_to_x_path",
"(",
"ar_path",
",",
"dest_element",
"=",
"None",
")",
":",
"# type: (str, typing.Optional[str]) -> str",
"ar_path_elements",
"=",
"ar_path",
".",
"strip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"xpath",
"=",
"\".\"",
"for",
"element",
"in",
"ar_path_elements",
"[",
":",
"-",
"1",
"]",
":",
"xpath",
"+=",
"\"//A:SHORT-NAME[text()='\"",
"+",
"element",
"+",
"\"']/..\"",
"if",
"dest_element",
":",
"xpath",
"+=",
"\"//A:\"",
"+",
"dest_element",
"+",
"\"/A:SHORT-NAME[text()='\"",
"+",
"ar_path_elements",
"[",
"-",
"1",
"]",
"+",
"\"']/..\"",
"else",
":",
"xpath",
"+=",
"\"//A:SHORT-NAME[text()='\"",
"+",
"ar_path_elements",
"[",
"-",
"1",
"]",
"+",
"\"']/..\"",
"return",
"xpath"
] | 37.785714 | 23.142857 |
def detect_member(row, key):
''' properly detects if a an attribute exists '''
(target, tkey, tvalue) = dict_crawl(row, key)
if target:
return True
return False | [
"def",
"detect_member",
"(",
"row",
",",
"key",
")",
":",
"(",
"target",
",",
"tkey",
",",
"tvalue",
")",
"=",
"dict_crawl",
"(",
"row",
",",
"key",
")",
"if",
"target",
":",
"return",
"True",
"return",
"False"
] | 29.833333 | 17.5 |
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph in self.obj_dict['subgraphs']:
obj_dict_list = self.obj_dict['subgraphs'][sgraph]
sgraph_objs.extend(
[Subgraph(obj_dict=obj_d)
for obj_d in obj_dict_list])
return sgraph_objs | [
"def",
"get_subgraph_list",
"(",
"self",
")",
":",
"sgraph_objs",
"=",
"list",
"(",
")",
"for",
"sgraph",
"in",
"self",
".",
"obj_dict",
"[",
"'subgraphs'",
"]",
":",
"obj_dict_list",
"=",
"self",
".",
"obj_dict",
"[",
"'subgraphs'",
"]",
"[",
"sgraph",
"]",
"sgraph_objs",
".",
"extend",
"(",
"[",
"Subgraph",
"(",
"obj_dict",
"=",
"obj_d",
")",
"for",
"obj_d",
"in",
"obj_dict_list",
"]",
")",
"return",
"sgraph_objs"
] | 28.875 | 18.0625 |
def portgroups_configured(name, dvs, portgroups):
'''
Configures portgroups on a DVS.
Creates/updates/removes portgroups in a provided DVS
dvs
Name of the DVS
portgroups
Portgroup dict representations (see module sysdocs)
'''
datacenter = _get_datacenter_name()
log.info('Running state %s on DVS \'%s\', datacenter \'%s\'',
name, dvs, datacenter)
changes_required = False
ret = {'name': name,
'changes': {},
'result': None,
'comment': None}
comments = []
changes = {}
changes_required = False
try:
#TODO portroups validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_pgs = __salt__['vsphere.list_dvportgroups'](
dvs=dvs, service_instance=si)
expected_pg_names = []
for pg in portgroups:
pg_name = pg['name']
expected_pg_names.append(pg_name)
del pg['name']
log.info('Checking pg \'%s\'', pg_name)
filtered_current_pgs = \
[p for p in current_pgs if p.get('name') == pg_name]
if not filtered_current_pgs:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create a new portgroup '
'\'{1}\' in DVS \'{2}\', datacenter '
'\'{3}\''.format(name, pg_name, dvs,
datacenter))
else:
__salt__['vsphere.create_dvportgroup'](
portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs,
service_instance=si)
comments.append('Created a new portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update({pg_name: {'new': pg}})
else:
# Porgroup already exists. Checking the config
log.trace('Portgroup \'%s\' found in DVS \'%s\', datacenter '
'\'%s\'. Checking for any updates.',
pg_name, dvs, datacenter)
current_pg = filtered_current_pgs[0]
diff_dict = _get_diff_dict(current_pg, pg)
if diff_dict:
changes_required = True
if __opts__['test']:
changes_strings = \
_get_changes_from_diff_dict(diff_dict)
log.trace('changes_strings = %s', changes_strings)
comments.append(
'State {0} will update portgroup \'{1}\' in '
'DVS \'{2}\', datacenter \'{3}\':\n{4}'
''.format(name, pg_name, dvs, datacenter,
'\n'.join(['\t{0}'.format(c) for c in
changes_strings])))
else:
__salt__['vsphere.update_dvportgroup'](
portgroup_dict=pg, portgroup=pg_name, dvs=dvs,
service_instance=si)
comments.append('Updated portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update(
{pg_name: {'new':
_get_val2_dict_from_diff_dict(diff_dict),
'old':
_get_val1_dict_from_diff_dict(diff_dict)}})
# Add the uplink portgroup to the expected pg names
uplink_pg = __salt__['vsphere.list_uplink_dvportgroup'](
dvs=dvs, service_instance=si)
expected_pg_names.append(uplink_pg['name'])
# Remove any extra portgroups
for current_pg in current_pgs:
if current_pg['name'] not in expected_pg_names:
changes_required = True
if __opts__['test']:
comments.append('State {0} will remove '
'the portgroup \'{1}\' from DVS \'{2}\', '
'datacenter \'{3}\''
''.format(name, current_pg['name'], dvs,
datacenter))
else:
__salt__['vsphere.remove_dvportgroup'](
portgroup=current_pg['name'], dvs=dvs,
service_instance=si)
comments.append('Removed the portgroup \'{0}\' from DVS '
'\'{1}\', datacenter \'{2}\''
''.format(current_pg['name'], dvs,
datacenter))
log.info(comments[-1])
changes.update({current_pg['name']:
{'old': current_pg}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.exception('Encountered error')
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter '
'\'{1}\' exist and are correctly configured. '
'Nothing to be done.'.format(dvs, datacenter)),
'result': True})
else:
ret.update({
'comment': '\n'.join(comments),
'changes': changes,
'result': None if __opts__['test'] else True,
})
return ret | [
"def",
"portgroups_configured",
"(",
"name",
",",
"dvs",
",",
"portgroups",
")",
":",
"datacenter",
"=",
"_get_datacenter_name",
"(",
")",
"log",
".",
"info",
"(",
"'Running state %s on DVS \\'%s\\', datacenter \\'%s\\''",
",",
"name",
",",
"dvs",
",",
"datacenter",
")",
"changes_required",
"=",
"False",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"None",
"}",
"comments",
"=",
"[",
"]",
"changes",
"=",
"{",
"}",
"changes_required",
"=",
"False",
"try",
":",
"#TODO portroups validation",
"si",
"=",
"__salt__",
"[",
"'vsphere.get_service_instance_via_proxy'",
"]",
"(",
")",
"current_pgs",
"=",
"__salt__",
"[",
"'vsphere.list_dvportgroups'",
"]",
"(",
"dvs",
"=",
"dvs",
",",
"service_instance",
"=",
"si",
")",
"expected_pg_names",
"=",
"[",
"]",
"for",
"pg",
"in",
"portgroups",
":",
"pg_name",
"=",
"pg",
"[",
"'name'",
"]",
"expected_pg_names",
".",
"append",
"(",
"pg_name",
")",
"del",
"pg",
"[",
"'name'",
"]",
"log",
".",
"info",
"(",
"'Checking pg \\'%s\\''",
",",
"pg_name",
")",
"filtered_current_pgs",
"=",
"[",
"p",
"for",
"p",
"in",
"current_pgs",
"if",
"p",
".",
"get",
"(",
"'name'",
")",
"==",
"pg_name",
"]",
"if",
"not",
"filtered_current_pgs",
":",
"changes_required",
"=",
"True",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"comments",
".",
"append",
"(",
"'State {0} will create a new portgroup '",
"'\\'{1}\\' in DVS \\'{2}\\', datacenter '",
"'\\'{3}\\''",
".",
"format",
"(",
"name",
",",
"pg_name",
",",
"dvs",
",",
"datacenter",
")",
")",
"else",
":",
"__salt__",
"[",
"'vsphere.create_dvportgroup'",
"]",
"(",
"portgroup_dict",
"=",
"pg",
",",
"portgroup_name",
"=",
"pg_name",
",",
"dvs",
"=",
"dvs",
",",
"service_instance",
"=",
"si",
")",
"comments",
".",
"append",
"(",
"'Created a new portgroup \\'{0}\\' in DVS '",
"'\\'{1}\\', datacenter \\'{2}\\''",
"''",
".",
"format",
"(",
"pg_name",
",",
"dvs",
",",
"datacenter",
")",
")",
"log",
".",
"info",
"(",
"comments",
"[",
"-",
"1",
"]",
")",
"changes",
".",
"update",
"(",
"{",
"pg_name",
":",
"{",
"'new'",
":",
"pg",
"}",
"}",
")",
"else",
":",
"# Porgroup already exists. Checking the config",
"log",
".",
"trace",
"(",
"'Portgroup \\'%s\\' found in DVS \\'%s\\', datacenter '",
"'\\'%s\\'. Checking for any updates.'",
",",
"pg_name",
",",
"dvs",
",",
"datacenter",
")",
"current_pg",
"=",
"filtered_current_pgs",
"[",
"0",
"]",
"diff_dict",
"=",
"_get_diff_dict",
"(",
"current_pg",
",",
"pg",
")",
"if",
"diff_dict",
":",
"changes_required",
"=",
"True",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"changes_strings",
"=",
"_get_changes_from_diff_dict",
"(",
"diff_dict",
")",
"log",
".",
"trace",
"(",
"'changes_strings = %s'",
",",
"changes_strings",
")",
"comments",
".",
"append",
"(",
"'State {0} will update portgroup \\'{1}\\' in '",
"'DVS \\'{2}\\', datacenter \\'{3}\\':\\n{4}'",
"''",
".",
"format",
"(",
"name",
",",
"pg_name",
",",
"dvs",
",",
"datacenter",
",",
"'\\n'",
".",
"join",
"(",
"[",
"'\\t{0}'",
".",
"format",
"(",
"c",
")",
"for",
"c",
"in",
"changes_strings",
"]",
")",
")",
")",
"else",
":",
"__salt__",
"[",
"'vsphere.update_dvportgroup'",
"]",
"(",
"portgroup_dict",
"=",
"pg",
",",
"portgroup",
"=",
"pg_name",
",",
"dvs",
"=",
"dvs",
",",
"service_instance",
"=",
"si",
")",
"comments",
".",
"append",
"(",
"'Updated portgroup \\'{0}\\' in DVS '",
"'\\'{1}\\', datacenter \\'{2}\\''",
"''",
".",
"format",
"(",
"pg_name",
",",
"dvs",
",",
"datacenter",
")",
")",
"log",
".",
"info",
"(",
"comments",
"[",
"-",
"1",
"]",
")",
"changes",
".",
"update",
"(",
"{",
"pg_name",
":",
"{",
"'new'",
":",
"_get_val2_dict_from_diff_dict",
"(",
"diff_dict",
")",
",",
"'old'",
":",
"_get_val1_dict_from_diff_dict",
"(",
"diff_dict",
")",
"}",
"}",
")",
"# Add the uplink portgroup to the expected pg names",
"uplink_pg",
"=",
"__salt__",
"[",
"'vsphere.list_uplink_dvportgroup'",
"]",
"(",
"dvs",
"=",
"dvs",
",",
"service_instance",
"=",
"si",
")",
"expected_pg_names",
".",
"append",
"(",
"uplink_pg",
"[",
"'name'",
"]",
")",
"# Remove any extra portgroups",
"for",
"current_pg",
"in",
"current_pgs",
":",
"if",
"current_pg",
"[",
"'name'",
"]",
"not",
"in",
"expected_pg_names",
":",
"changes_required",
"=",
"True",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"comments",
".",
"append",
"(",
"'State {0} will remove '",
"'the portgroup \\'{1}\\' from DVS \\'{2}\\', '",
"'datacenter \\'{3}\\''",
"''",
".",
"format",
"(",
"name",
",",
"current_pg",
"[",
"'name'",
"]",
",",
"dvs",
",",
"datacenter",
")",
")",
"else",
":",
"__salt__",
"[",
"'vsphere.remove_dvportgroup'",
"]",
"(",
"portgroup",
"=",
"current_pg",
"[",
"'name'",
"]",
",",
"dvs",
"=",
"dvs",
",",
"service_instance",
"=",
"si",
")",
"comments",
".",
"append",
"(",
"'Removed the portgroup \\'{0}\\' from DVS '",
"'\\'{1}\\', datacenter \\'{2}\\''",
"''",
".",
"format",
"(",
"current_pg",
"[",
"'name'",
"]",
",",
"dvs",
",",
"datacenter",
")",
")",
"log",
".",
"info",
"(",
"comments",
"[",
"-",
"1",
"]",
")",
"changes",
".",
"update",
"(",
"{",
"current_pg",
"[",
"'name'",
"]",
":",
"{",
"'old'",
":",
"current_pg",
"}",
"}",
")",
"__salt__",
"[",
"'vsphere.disconnect'",
"]",
"(",
"si",
")",
"except",
"salt",
".",
"exceptions",
".",
"CommandExecutionError",
"as",
"exc",
":",
"log",
".",
"exception",
"(",
"'Encountered error'",
")",
"if",
"si",
":",
"__salt__",
"[",
"'vsphere.disconnect'",
"]",
"(",
"si",
")",
"if",
"not",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"exc",
".",
"strerror",
",",
"'result'",
":",
"False",
"if",
"not",
"__opts__",
"[",
"'test'",
"]",
"else",
"None",
"}",
")",
"return",
"ret",
"if",
"not",
"changes_required",
":",
"# We have no changes",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"(",
"'All portgroups in DVS \\'{0}\\', datacenter '",
"'\\'{1}\\' exist and are correctly configured. '",
"'Nothing to be done.'",
".",
"format",
"(",
"dvs",
",",
"datacenter",
")",
")",
",",
"'result'",
":",
"True",
"}",
")",
"else",
":",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"'\\n'",
".",
"join",
"(",
"comments",
")",
",",
"'changes'",
":",
"changes",
",",
"'result'",
":",
"None",
"if",
"__opts__",
"[",
"'test'",
"]",
"else",
"True",
",",
"}",
")",
"return",
"ret"
] | 45.462687 | 18.671642 |
def Q_srk(self, k):
"""
function (k). Returns the square root of noise matrix of dynamic model
on iteration k.
k (iteration number). starts at 0
This function is implemented to use SVD prediction step.
"""
ind = self.index[self.Q_time_var_index, k]
Q = self.Q[:, :, ind]
if (Q.shape[0] == 1): # 1-D case handle simplier. No storage
# of the result, just compute it each time.
square_root = np.sqrt(Q)
else:
if self.svd_each_time:
(U, S, Vh) = sp.linalg.svd(Q, full_matrices=False,
compute_uv=True,
overwrite_a=False,
check_finite=True)
square_root = U * np.sqrt(S)
else:
if ind in self.Q_square_root:
square_root = self.Q_square_root[ind]
else:
(U, S, Vh) = sp.linalg.svd(Q, full_matrices=False,
compute_uv=True,
overwrite_a=False,
check_finite=True)
square_root = U * np.sqrt(S)
self.Q_square_root[ind] = square_root
return square_root | [
"def",
"Q_srk",
"(",
"self",
",",
"k",
")",
":",
"ind",
"=",
"self",
".",
"index",
"[",
"self",
".",
"Q_time_var_index",
",",
"k",
"]",
"Q",
"=",
"self",
".",
"Q",
"[",
":",
",",
":",
",",
"ind",
"]",
"if",
"(",
"Q",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
")",
":",
"# 1-D case handle simplier. No storage",
"# of the result, just compute it each time.",
"square_root",
"=",
"np",
".",
"sqrt",
"(",
"Q",
")",
"else",
":",
"if",
"self",
".",
"svd_each_time",
":",
"(",
"U",
",",
"S",
",",
"Vh",
")",
"=",
"sp",
".",
"linalg",
".",
"svd",
"(",
"Q",
",",
"full_matrices",
"=",
"False",
",",
"compute_uv",
"=",
"True",
",",
"overwrite_a",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
"square_root",
"=",
"U",
"*",
"np",
".",
"sqrt",
"(",
"S",
")",
"else",
":",
"if",
"ind",
"in",
"self",
".",
"Q_square_root",
":",
"square_root",
"=",
"self",
".",
"Q_square_root",
"[",
"ind",
"]",
"else",
":",
"(",
"U",
",",
"S",
",",
"Vh",
")",
"=",
"sp",
".",
"linalg",
".",
"svd",
"(",
"Q",
",",
"full_matrices",
"=",
"False",
",",
"compute_uv",
"=",
"True",
",",
"overwrite_a",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
"square_root",
"=",
"U",
"*",
"np",
".",
"sqrt",
"(",
"S",
")",
"self",
".",
"Q_square_root",
"[",
"ind",
"]",
"=",
"square_root",
"return",
"square_root"
] | 36.210526 | 21.315789 |
def check_event(self, sim_time):
"""
Check for event occurrance for``Event`` group models at ``sim_time``
Parameters
----------
sim_time : float
The current simulation time
Returns
-------
list
A list of model names who report (an) event(s) at ``sim_time``
"""
ret = []
for model in self.__dict__['Event'].all_models:
if self.__dict__[model].is_time(sim_time):
ret.append(model)
if self.Breaker.is_time(sim_time):
ret.append('Breaker')
return ret | [
"def",
"check_event",
"(",
"self",
",",
"sim_time",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"model",
"in",
"self",
".",
"__dict__",
"[",
"'Event'",
"]",
".",
"all_models",
":",
"if",
"self",
".",
"__dict__",
"[",
"model",
"]",
".",
"is_time",
"(",
"sim_time",
")",
":",
"ret",
".",
"append",
"(",
"model",
")",
"if",
"self",
".",
"Breaker",
".",
"is_time",
"(",
"sim_time",
")",
":",
"ret",
".",
"append",
"(",
"'Breaker'",
")",
"return",
"ret"
] | 25.913043 | 20.347826 |
def import_vboxapi():
"""This import is designed to help when loading vboxapi inside of
alternative Python environments (virtualenvs etc).
:rtype: vboxapi module
"""
try:
import vboxapi
except ImportError:
system = platform.system()
py_mm_ver = sys.version_info[:2]
py_major = sys.version_info[0]
packages = ['vboxapi']
if system == 'Windows':
packages.extend(['win32com', 'win32', 'win32api', 'pywintypes', 'win32comext'])
search = ['C:\\Python%s%s\\Lib\\site-packages' % py_mm_ver,
'C:\\Python%s%s\\Lib\\site-packages\\win32' % py_mm_ver,
'C:\\Python%s%s\\Lib\\site-packages\\win32\\lib' % py_mm_ver,
'C:\\Program Files\\Oracle\\VirtualBox\\sdk\\install',
'C:\\Program Files (x86)\\Oracle\\VirtualBox\\sdk\\install']
for x in ['', py_major]:
search.extend(['C:\\Anaconda%s\\Lib\\site-packages' % x,
'C:\\Anaconda%s\\Lib\\site-packages\\win32' % x,
'C:\\Anaconda%s\\Lib\\site-packages\\win32\\lib' % x])
elif system == 'Linux':
search = ['/usr/lib/python%s.%s/dist-packages' % py_mm_ver,
'/usr/lib/python%s.%s/site-packages' % py_mm_ver,
'/usr/share/pyshared']
elif system == 'Darwin':
search = ['/Library/Python/%s.%s/site-packages' % py_mm_ver]
else:
# No idea where to look...
search = []
# Generates a common prefix from sys.executable in the
# case that vboxapi is installed in a virtualenv.
# This will also help with when we don't know where
# to search because of an unknown platform.
# These paths also help if the system Python is installed
# in a non-standard location.
#
# NOTE: We don't have to worry if these directories don't
# exist as they're checked below.
prefix = os.path.dirname(os.path.dirname(sys.executable))
search.extend([os.path.join(prefix, 'Lib', 'site-packages'),
os.path.join(prefix, 'Lib', 'site-packages', 'win32'),
os.path.join(prefix, 'Lib', 'site-packages', 'win32', 'lib'),
os.path.join(prefix, 'lib', 'site-packages'),
os.path.join(prefix, 'lib', 'dist-packages')])
packages = set(packages)
original_path = copy.copy(sys.path)
for path in search:
if not os.path.isdir(path):
continue
listing = set([os.path.splitext(f)[0] for f in os.listdir(path)])
if packages.intersection(listing):
sys.path.append(path)
packages -= listing
if not packages:
break
else:
# After search each path we still failed to find
# the required set of packages.
raise
import vboxapi
try:
yield vboxapi
finally:
sys.path = original_path
else:
yield vboxapi | [
"def",
"import_vboxapi",
"(",
")",
":",
"try",
":",
"import",
"vboxapi",
"except",
"ImportError",
":",
"system",
"=",
"platform",
".",
"system",
"(",
")",
"py_mm_ver",
"=",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
"py_major",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"packages",
"=",
"[",
"'vboxapi'",
"]",
"if",
"system",
"==",
"'Windows'",
":",
"packages",
".",
"extend",
"(",
"[",
"'win32com'",
",",
"'win32'",
",",
"'win32api'",
",",
"'pywintypes'",
",",
"'win32comext'",
"]",
")",
"search",
"=",
"[",
"'C:\\\\Python%s%s\\\\Lib\\\\site-packages'",
"%",
"py_mm_ver",
",",
"'C:\\\\Python%s%s\\\\Lib\\\\site-packages\\\\win32'",
"%",
"py_mm_ver",
",",
"'C:\\\\Python%s%s\\\\Lib\\\\site-packages\\\\win32\\\\lib'",
"%",
"py_mm_ver",
",",
"'C:\\\\Program Files\\\\Oracle\\\\VirtualBox\\\\sdk\\\\install'",
",",
"'C:\\\\Program Files (x86)\\\\Oracle\\\\VirtualBox\\\\sdk\\\\install'",
"]",
"for",
"x",
"in",
"[",
"''",
",",
"py_major",
"]",
":",
"search",
".",
"extend",
"(",
"[",
"'C:\\\\Anaconda%s\\\\Lib\\\\site-packages'",
"%",
"x",
",",
"'C:\\\\Anaconda%s\\\\Lib\\\\site-packages\\\\win32'",
"%",
"x",
",",
"'C:\\\\Anaconda%s\\\\Lib\\\\site-packages\\\\win32\\\\lib'",
"%",
"x",
"]",
")",
"elif",
"system",
"==",
"'Linux'",
":",
"search",
"=",
"[",
"'/usr/lib/python%s.%s/dist-packages'",
"%",
"py_mm_ver",
",",
"'/usr/lib/python%s.%s/site-packages'",
"%",
"py_mm_ver",
",",
"'/usr/share/pyshared'",
"]",
"elif",
"system",
"==",
"'Darwin'",
":",
"search",
"=",
"[",
"'/Library/Python/%s.%s/site-packages'",
"%",
"py_mm_ver",
"]",
"else",
":",
"# No idea where to look...",
"search",
"=",
"[",
"]",
"# Generates a common prefix from sys.executable in the",
"# case that vboxapi is installed in a virtualenv.",
"# This will also help with when we don't know where",
"# to search because of an unknown platform.",
"# These paths also help if the system Python is installed",
"# in a non-standard location.",
"#",
"# NOTE: We don't have to worry if these directories don't",
"# exist as they're checked below.",
"prefix",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"executable",
")",
")",
"search",
".",
"extend",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"'Lib'",
",",
"'site-packages'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"'Lib'",
",",
"'site-packages'",
",",
"'win32'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"'Lib'",
",",
"'site-packages'",
",",
"'win32'",
",",
"'lib'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"'lib'",
",",
"'site-packages'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"'lib'",
",",
"'dist-packages'",
")",
"]",
")",
"packages",
"=",
"set",
"(",
"packages",
")",
"original_path",
"=",
"copy",
".",
"copy",
"(",
"sys",
".",
"path",
")",
"for",
"path",
"in",
"search",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"continue",
"listing",
"=",
"set",
"(",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"[",
"0",
"]",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
"]",
")",
"if",
"packages",
".",
"intersection",
"(",
"listing",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"path",
")",
"packages",
"-=",
"listing",
"if",
"not",
"packages",
":",
"break",
"else",
":",
"# After search each path we still failed to find",
"# the required set of packages.",
"raise",
"import",
"vboxapi",
"try",
":",
"yield",
"vboxapi",
"finally",
":",
"sys",
".",
"path",
"=",
"original_path",
"else",
":",
"yield",
"vboxapi"
] | 40.881579 | 21.407895 |
def extract(self, msg):
"""Yield an ordered dictionary if msg['type'] is in keys_by_type."""
def normal(key):
v = msg.get(key)
if v is None:
return v
normalizer = self.normalizers.get(key, lambda x: x)
return normalizer(v)
def odict(keys):
return collections.OrderedDict((k, normal(k)) for k in keys)
def match(m):
return (msg.get(k) in v for k, v in m.items()) if m else ()
accept = all(match(self.accept))
reject = any(match(self.reject))
if reject or not accept:
keys = ()
elif self.keys_by_type is None:
keys = [k for k in msg.keys() if k not in self.omit]
else:
keys = self.keys_by_type.get(msg.get('type'))
return odict(keys) | [
"def",
"extract",
"(",
"self",
",",
"msg",
")",
":",
"def",
"normal",
"(",
"key",
")",
":",
"v",
"=",
"msg",
".",
"get",
"(",
"key",
")",
"if",
"v",
"is",
"None",
":",
"return",
"v",
"normalizer",
"=",
"self",
".",
"normalizers",
".",
"get",
"(",
"key",
",",
"lambda",
"x",
":",
"x",
")",
"return",
"normalizer",
"(",
"v",
")",
"def",
"odict",
"(",
"keys",
")",
":",
"return",
"collections",
".",
"OrderedDict",
"(",
"(",
"k",
",",
"normal",
"(",
"k",
")",
")",
"for",
"k",
"in",
"keys",
")",
"def",
"match",
"(",
"m",
")",
":",
"return",
"(",
"msg",
".",
"get",
"(",
"k",
")",
"in",
"v",
"for",
"k",
",",
"v",
"in",
"m",
".",
"items",
"(",
")",
")",
"if",
"m",
"else",
"(",
")",
"accept",
"=",
"all",
"(",
"match",
"(",
"self",
".",
"accept",
")",
")",
"reject",
"=",
"any",
"(",
"match",
"(",
"self",
".",
"reject",
")",
")",
"if",
"reject",
"or",
"not",
"accept",
":",
"keys",
"=",
"(",
")",
"elif",
"self",
".",
"keys_by_type",
"is",
"None",
":",
"keys",
"=",
"[",
"k",
"for",
"k",
"in",
"msg",
".",
"keys",
"(",
")",
"if",
"k",
"not",
"in",
"self",
".",
"omit",
"]",
"else",
":",
"keys",
"=",
"self",
".",
"keys_by_type",
".",
"get",
"(",
"msg",
".",
"get",
"(",
"'type'",
")",
")",
"return",
"odict",
"(",
"keys",
")"
] | 32.6 | 19 |
def updater(f):
"Decorate a function with named arguments into updater for transact"
@functools.wraps(f)
def wrapped_updater(keys, values):
result = f(*values)
return (keys[:len(result)], result)
return wrapped_updater | [
"def",
"updater",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped_updater",
"(",
"keys",
",",
"values",
")",
":",
"result",
"=",
"f",
"(",
"*",
"values",
")",
"return",
"(",
"keys",
"[",
":",
"len",
"(",
"result",
")",
"]",
",",
"result",
")",
"return",
"wrapped_updater"
] | 34.857143 | 15.142857 |
def centroid_refine_triangulation_by_triangles(self, triangles):
"""
return points defining a refined triangulation obtained by bisection of all edges
in the triangulation that are associated with the triangles in the list provided.
Notes
-----
The triangles are here represented as a single index.
The vertices of triangle i are given by self.simplices[i].
"""
# Remove duplicates from the list of triangles
triangles = np.unique(np.array(triangles))
mlons, mlats = self.face_midpoints(simplices=self.simplices[triangles])
lonv1 = np.concatenate((self.lons, mlons), axis=0)
latv1 = np.concatenate((self.lats, mlats), axis=0)
return lonv1, latv1 | [
"def",
"centroid_refine_triangulation_by_triangles",
"(",
"self",
",",
"triangles",
")",
":",
"# Remove duplicates from the list of triangles",
"triangles",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"array",
"(",
"triangles",
")",
")",
"mlons",
",",
"mlats",
"=",
"self",
".",
"face_midpoints",
"(",
"simplices",
"=",
"self",
".",
"simplices",
"[",
"triangles",
"]",
")",
"lonv1",
"=",
"np",
".",
"concatenate",
"(",
"(",
"self",
".",
"lons",
",",
"mlons",
")",
",",
"axis",
"=",
"0",
")",
"latv1",
"=",
"np",
".",
"concatenate",
"(",
"(",
"self",
".",
"lats",
",",
"mlats",
")",
",",
"axis",
"=",
"0",
")",
"return",
"lonv1",
",",
"latv1"
] | 35.47619 | 27.47619 |
def index_search(right_eigenvectors):
"""Find simplex structure in eigenvectors to begin PCCA+.
Parameters
----------
right_eigenvectors : ndarray
Right eigenvectors of transition matrix
Returns
-------
index : ndarray
Indices of simplex
"""
num_micro, num_eigen = right_eigenvectors.shape
index = np.zeros(num_eigen, 'int')
# first vertex: row with largest norm
index[0] = np.argmax(
[norm(right_eigenvectors[i]) for i in range(num_micro)])
ortho_sys = right_eigenvectors - np.outer(np.ones(num_micro),
right_eigenvectors[index[0]])
for j in range(1, num_eigen):
temp = ortho_sys[index[j - 1]].copy()
for l in range(num_micro):
ortho_sys[l] -= temp * dot(ortho_sys[l], temp)
dist_list = np.array([norm(ortho_sys[l]) for l in range(num_micro)])
index[j] = np.argmax(dist_list)
ortho_sys /= dist_list.max()
return index | [
"def",
"index_search",
"(",
"right_eigenvectors",
")",
":",
"num_micro",
",",
"num_eigen",
"=",
"right_eigenvectors",
".",
"shape",
"index",
"=",
"np",
".",
"zeros",
"(",
"num_eigen",
",",
"'int'",
")",
"# first vertex: row with largest norm",
"index",
"[",
"0",
"]",
"=",
"np",
".",
"argmax",
"(",
"[",
"norm",
"(",
"right_eigenvectors",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"num_micro",
")",
"]",
")",
"ortho_sys",
"=",
"right_eigenvectors",
"-",
"np",
".",
"outer",
"(",
"np",
".",
"ones",
"(",
"num_micro",
")",
",",
"right_eigenvectors",
"[",
"index",
"[",
"0",
"]",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"num_eigen",
")",
":",
"temp",
"=",
"ortho_sys",
"[",
"index",
"[",
"j",
"-",
"1",
"]",
"]",
".",
"copy",
"(",
")",
"for",
"l",
"in",
"range",
"(",
"num_micro",
")",
":",
"ortho_sys",
"[",
"l",
"]",
"-=",
"temp",
"*",
"dot",
"(",
"ortho_sys",
"[",
"l",
"]",
",",
"temp",
")",
"dist_list",
"=",
"np",
".",
"array",
"(",
"[",
"norm",
"(",
"ortho_sys",
"[",
"l",
"]",
")",
"for",
"l",
"in",
"range",
"(",
"num_micro",
")",
"]",
")",
"index",
"[",
"j",
"]",
"=",
"np",
".",
"argmax",
"(",
"dist_list",
")",
"ortho_sys",
"/=",
"dist_list",
".",
"max",
"(",
")",
"return",
"index"
] | 25.789474 | 22.631579 |
def new(
name,
bucket,
timeout,
memory,
description,
subnet_ids,
security_group_ids
):
""" Create a new lambda project """
config = {}
if timeout:
config['timeout'] = timeout
if memory:
config['memory'] = memory
if description:
config['description'] = description
if subnet_ids:
config['subnet_ids'] = subnet_ids
if security_group_ids:
config['security_group_ids'] = security_group_ids
lambder.create_project(name, bucket, config) | [
"def",
"new",
"(",
"name",
",",
"bucket",
",",
"timeout",
",",
"memory",
",",
"description",
",",
"subnet_ids",
",",
"security_group_ids",
")",
":",
"config",
"=",
"{",
"}",
"if",
"timeout",
":",
"config",
"[",
"'timeout'",
"]",
"=",
"timeout",
"if",
"memory",
":",
"config",
"[",
"'memory'",
"]",
"=",
"memory",
"if",
"description",
":",
"config",
"[",
"'description'",
"]",
"=",
"description",
"if",
"subnet_ids",
":",
"config",
"[",
"'subnet_ids'",
"]",
"=",
"subnet_ids",
"if",
"security_group_ids",
":",
"config",
"[",
"'security_group_ids'",
"]",
"=",
"security_group_ids",
"lambder",
".",
"create_project",
"(",
"name",
",",
"bucket",
",",
"config",
")"
] | 22.130435 | 20.347826 |
def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1,
disable_actions=False, inspect=True, chunk_size=None):
"""
Returns the address of bytes equal to 'what', starting from 'start'. Note that, if you don't specify a default
value, this search could cause the state to go unsat if no possible matching byte exists.
:param addr: The start address.
:param what: What to search for;
:param max_search: Search at most this many bytes.
:param max_symbolic_bytes: Search through at most this many symbolic bytes.
:param default: The default value, if what you're looking for wasn't found.
:param step: The stride that the search should use while scanning memory
:param disable_actions: Whether to inhibit the creation of SimActions for memory access
:param inspect: Whether to trigger SimInspect breakpoints
:returns: An expression representing the address of the matching byte.
"""
addr = _raw_ast(addr)
what = _raw_ast(what)
default = _raw_ast(default)
if isinstance(what, bytes):
# Convert it to a BVV
what = claripy.BVV(what, len(what) * self.state.arch.byte_width)
r,c,m = self._find(addr, what, max_search=max_search, max_symbolic_bytes=max_symbolic_bytes, default=default,
step=step, disable_actions=disable_actions, inspect=inspect, chunk_size=chunk_size)
if o.AST_DEPS in self.state.options and self.category == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
return r,c,m | [
"def",
"find",
"(",
"self",
",",
"addr",
",",
"what",
",",
"max_search",
"=",
"None",
",",
"max_symbolic_bytes",
"=",
"None",
",",
"default",
"=",
"None",
",",
"step",
"=",
"1",
",",
"disable_actions",
"=",
"False",
",",
"inspect",
"=",
"True",
",",
"chunk_size",
"=",
"None",
")",
":",
"addr",
"=",
"_raw_ast",
"(",
"addr",
")",
"what",
"=",
"_raw_ast",
"(",
"what",
")",
"default",
"=",
"_raw_ast",
"(",
"default",
")",
"if",
"isinstance",
"(",
"what",
",",
"bytes",
")",
":",
"# Convert it to a BVV",
"what",
"=",
"claripy",
".",
"BVV",
"(",
"what",
",",
"len",
"(",
"what",
")",
"*",
"self",
".",
"state",
".",
"arch",
".",
"byte_width",
")",
"r",
",",
"c",
",",
"m",
"=",
"self",
".",
"_find",
"(",
"addr",
",",
"what",
",",
"max_search",
"=",
"max_search",
",",
"max_symbolic_bytes",
"=",
"max_symbolic_bytes",
",",
"default",
"=",
"default",
",",
"step",
"=",
"step",
",",
"disable_actions",
"=",
"disable_actions",
",",
"inspect",
"=",
"inspect",
",",
"chunk_size",
"=",
"chunk_size",
")",
"if",
"o",
".",
"AST_DEPS",
"in",
"self",
".",
"state",
".",
"options",
"and",
"self",
".",
"category",
"==",
"'reg'",
":",
"r",
"=",
"SimActionObject",
"(",
"r",
",",
"reg_deps",
"=",
"frozenset",
"(",
"(",
"addr",
",",
")",
")",
")",
"return",
"r",
",",
"c",
",",
"m"
] | 55.935484 | 34.516129 |
def disasm_symbol_app(_parser, _, args): # pragma: no cover
"""
Disassemble a symbol from an ELF file.
"""
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument(
'--syntax', '-s',
choices=AsmSyntax.__members__.keys(),
default=None,
)
parser.add_argument('file', help='ELF file to extract a symbol from')
parser.add_argument('symbol', help='the symbol to disassemble')
args = parser.parse_args(args)
if args.syntax is not None:
syntax = AsmSyntax.__members__[args.syntax]
else:
syntax = None
elf = ELF(args.file)
symbol = elf.get_symbol(args.symbol)
print('\n'.join(disasm(symbol.content, symbol.value, syntax=syntax, target=elf))) | [
"def",
"disasm_symbol_app",
"(",
"_parser",
",",
"_",
",",
"args",
")",
":",
"# pragma: no cover",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"_parser",
".",
"prog",
",",
"description",
"=",
"_parser",
".",
"description",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--syntax'",
",",
"'-s'",
",",
"choices",
"=",
"AsmSyntax",
".",
"__members__",
".",
"keys",
"(",
")",
",",
"default",
"=",
"None",
",",
")",
"parser",
".",
"add_argument",
"(",
"'file'",
",",
"help",
"=",
"'ELF file to extract a symbol from'",
")",
"parser",
".",
"add_argument",
"(",
"'symbol'",
",",
"help",
"=",
"'the symbol to disassemble'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"if",
"args",
".",
"syntax",
"is",
"not",
"None",
":",
"syntax",
"=",
"AsmSyntax",
".",
"__members__",
"[",
"args",
".",
"syntax",
"]",
"else",
":",
"syntax",
"=",
"None",
"elf",
"=",
"ELF",
"(",
"args",
".",
"file",
")",
"symbol",
"=",
"elf",
".",
"get_symbol",
"(",
"args",
".",
"symbol",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"disasm",
"(",
"symbol",
".",
"content",
",",
"symbol",
".",
"value",
",",
"syntax",
"=",
"syntax",
",",
"target",
"=",
"elf",
")",
")",
")"
] | 31.16 | 17.64 |
def trainSuperimposedSequenceObjects(exp, numRepetitions,
sequences, objects):
"""
Train the network on the given object and sequence simultaneously for N
repetitions each. The total number of training inputs is N * seqLength = M
(Ideally numPoints = seqLength)
Create a list of M random training presentations of the object (To)
Create a list of M=N*seqLength training presentations of the sequence (Ts).
Create a list of M training inputs. The i'th training sensory input is created
by taking the OR of To and Ts. The location input comes from the object.
We only train L4 this way (?).
The other alternative is to train on one object for N repetitions. For each
repetition we choose a random sequence and create OR'ed sensory inputs. Insert
reset between each sequence presentation as before. We need to ensure that
each sequence is seen at least seqLength times.
"""
trainingSensations = {}
objectSDRs = objects.provideObjectsToLearn()
sequenceSDRs = sequences.provideObjectsToLearn()
# Create the order of sequences we will show. Each sequence will be shown
# exactly numRepetitions times.
sequenceOrder = range(len(sequences)) * numRepetitions
random.shuffle(sequenceOrder)
for objectId,sensations in objectSDRs.iteritems():
# Create sequence of random sensations for this object, repeated
# numRepetitions times. The total number of sensations is equal to the
# number of points on the object multiplied by numRepetitions. Each time
# an object is visited, we choose a random sequence to show.
trainingSensations[objectId] = []
for s in range(numRepetitions):
# Get sensations for this object and shuffle them
objectSensations = [sensation for sensation in sensations]
random.shuffle(objectSensations)
# Pick a random sequence and get its sensations.
sequenceId = sequenceOrder.pop()
sequenceSensations = sequenceSDRs[sequenceId]
# Create superimposed sensory sensations. We will only use the location
# SDRs from the object SDRs.
trainingSensations[objectId].extend(createSuperimposedSensorySDRs(
sequenceSensations, objectSensations))
# Train the network on all the SDRs for all the objects
exp.learnObjects(trainingSensations) | [
"def",
"trainSuperimposedSequenceObjects",
"(",
"exp",
",",
"numRepetitions",
",",
"sequences",
",",
"objects",
")",
":",
"trainingSensations",
"=",
"{",
"}",
"objectSDRs",
"=",
"objects",
".",
"provideObjectsToLearn",
"(",
")",
"sequenceSDRs",
"=",
"sequences",
".",
"provideObjectsToLearn",
"(",
")",
"# Create the order of sequences we will show. Each sequence will be shown",
"# exactly numRepetitions times.",
"sequenceOrder",
"=",
"range",
"(",
"len",
"(",
"sequences",
")",
")",
"*",
"numRepetitions",
"random",
".",
"shuffle",
"(",
"sequenceOrder",
")",
"for",
"objectId",
",",
"sensations",
"in",
"objectSDRs",
".",
"iteritems",
"(",
")",
":",
"# Create sequence of random sensations for this object, repeated",
"# numRepetitions times. The total number of sensations is equal to the",
"# number of points on the object multiplied by numRepetitions. Each time",
"# an object is visited, we choose a random sequence to show.",
"trainingSensations",
"[",
"objectId",
"]",
"=",
"[",
"]",
"for",
"s",
"in",
"range",
"(",
"numRepetitions",
")",
":",
"# Get sensations for this object and shuffle them",
"objectSensations",
"=",
"[",
"sensation",
"for",
"sensation",
"in",
"sensations",
"]",
"random",
".",
"shuffle",
"(",
"objectSensations",
")",
"# Pick a random sequence and get its sensations.",
"sequenceId",
"=",
"sequenceOrder",
".",
"pop",
"(",
")",
"sequenceSensations",
"=",
"sequenceSDRs",
"[",
"sequenceId",
"]",
"# Create superimposed sensory sensations. We will only use the location",
"# SDRs from the object SDRs.",
"trainingSensations",
"[",
"objectId",
"]",
".",
"extend",
"(",
"createSuperimposedSensorySDRs",
"(",
"sequenceSensations",
",",
"objectSensations",
")",
")",
"# Train the network on all the SDRs for all the objects",
"exp",
".",
"learnObjects",
"(",
"trainingSensations",
")"
] | 42.037037 | 23.555556 |
def _is_possible_loh(rec, vcf_rec, params, somatic_info, use_status=False, max_normal_depth=None):
"""Check if the VCF record is a het in the normal with sufficient support.
Only returns SNPs, since indels tend to have less precise frequency measurements.
"""
if _is_biallelic_snp(rec) and _passes_plus_germline(rec, use_status=use_status):
stats = _tumor_normal_stats(rec, somatic_info, vcf_rec)
depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]]
depths = [d for d in depths if d is not None]
normal_freq = tz.get_in(["normal", "freq"], stats)
tumor_freq = tz.get_in(["tumor", "freq"], stats)
if all([d > params["min_depth"] for d in depths]):
if max_normal_depth and tz.get_in(["normal", "depth"], stats, 0) > max_normal_depth:
return None
if normal_freq is not None:
if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]:
return stats
elif (tumor_freq >= params["tumor_only"]["min_freq"] and
tumor_freq <= params["tumor_only"]["max_freq"]):
if (vcf_rec and not _has_population_germline(vcf_rec)) or is_population_germline(rec):
return stats | [
"def",
"_is_possible_loh",
"(",
"rec",
",",
"vcf_rec",
",",
"params",
",",
"somatic_info",
",",
"use_status",
"=",
"False",
",",
"max_normal_depth",
"=",
"None",
")",
":",
"if",
"_is_biallelic_snp",
"(",
"rec",
")",
"and",
"_passes_plus_germline",
"(",
"rec",
",",
"use_status",
"=",
"use_status",
")",
":",
"stats",
"=",
"_tumor_normal_stats",
"(",
"rec",
",",
"somatic_info",
",",
"vcf_rec",
")",
"depths",
"=",
"[",
"tz",
".",
"get_in",
"(",
"[",
"x",
",",
"\"depth\"",
"]",
",",
"stats",
")",
"for",
"x",
"in",
"[",
"\"normal\"",
",",
"\"tumor\"",
"]",
"]",
"depths",
"=",
"[",
"d",
"for",
"d",
"in",
"depths",
"if",
"d",
"is",
"not",
"None",
"]",
"normal_freq",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"normal\"",
",",
"\"freq\"",
"]",
",",
"stats",
")",
"tumor_freq",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"tumor\"",
",",
"\"freq\"",
"]",
",",
"stats",
")",
"if",
"all",
"(",
"[",
"d",
">",
"params",
"[",
"\"min_depth\"",
"]",
"for",
"d",
"in",
"depths",
"]",
")",
":",
"if",
"max_normal_depth",
"and",
"tz",
".",
"get_in",
"(",
"[",
"\"normal\"",
",",
"\"depth\"",
"]",
",",
"stats",
",",
"0",
")",
">",
"max_normal_depth",
":",
"return",
"None",
"if",
"normal_freq",
"is",
"not",
"None",
":",
"if",
"normal_freq",
">=",
"params",
"[",
"\"min_freq\"",
"]",
"and",
"normal_freq",
"<=",
"params",
"[",
"\"max_freq\"",
"]",
":",
"return",
"stats",
"elif",
"(",
"tumor_freq",
">=",
"params",
"[",
"\"tumor_only\"",
"]",
"[",
"\"min_freq\"",
"]",
"and",
"tumor_freq",
"<=",
"params",
"[",
"\"tumor_only\"",
"]",
"[",
"\"max_freq\"",
"]",
")",
":",
"if",
"(",
"vcf_rec",
"and",
"not",
"_has_population_germline",
"(",
"vcf_rec",
")",
")",
"or",
"is_population_germline",
"(",
"rec",
")",
":",
"return",
"stats"
] | 60.619048 | 27.047619 |
def __get_distribution_tags(self, client, arn):
"""Returns a dict containing the tags for a CloudFront distribution
Args:
client (botocore.client.CloudFront): Boto3 CloudFront client object
arn (str): ARN of the distribution to get tags for
Returns:
`dict`
"""
return {
t['Key']: t['Value'] for t in client.list_tags_for_resource(
Resource=arn
)['Tags']['Items']
} | [
"def",
"__get_distribution_tags",
"(",
"self",
",",
"client",
",",
"arn",
")",
":",
"return",
"{",
"t",
"[",
"'Key'",
"]",
":",
"t",
"[",
"'Value'",
"]",
"for",
"t",
"in",
"client",
".",
"list_tags_for_resource",
"(",
"Resource",
"=",
"arn",
")",
"[",
"'Tags'",
"]",
"[",
"'Items'",
"]",
"}"
] | 31.2 | 22.533333 |
def evaluate_callables(data):
"""
Call any callable values in the input dictionary;
return a new dictionary containing the evaluated results.
Useful for lazily evaluating default values in ``build`` methods.
>>> data = {"spam": "ham", "eggs": (lambda: 123)}
>>> result = evaluate_callables(data)
>>> result == {'eggs': 123, 'spam': 'ham'}
True
"""
sequence = ((k, v() if callable(v) else v) for k, v in data.items())
return type(data)(sequence) | [
"def",
"evaluate_callables",
"(",
"data",
")",
":",
"sequence",
"=",
"(",
"(",
"k",
",",
"v",
"(",
")",
"if",
"callable",
"(",
"v",
")",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
")",
"return",
"type",
"(",
"data",
")",
"(",
"sequence",
")"
] | 36.692308 | 15.923077 |
def forceutc(t: Union[str, datetime.datetime, datetime.date, np.datetime64]) -> Union[datetime.datetime, datetime.date]:
"""
Add UTC to datetime-naive and convert to UTC for datetime aware
input: python datetime (naive, utc, non-utc) or Numpy datetime64 #FIXME add Pandas and AstroPy time classes
output: utc datetime
"""
# need to passthrough None for simpler external logic.
# %% polymorph to datetime
if isinstance(t, str):
t = parse(t)
elif isinstance(t, np.datetime64):
t = t.astype(datetime.datetime)
elif isinstance(t, datetime.datetime):
pass
elif isinstance(t, datetime.date):
return t
elif isinstance(t, (np.ndarray, list, tuple)):
return np.asarray([forceutc(T) for T in t])
else:
raise TypeError('datetime only input')
# %% enforce UTC on datetime
if t.tzinfo is None: # datetime-naive
t = t.replace(tzinfo=UTC)
else: # datetime-aware
t = t.astimezone(UTC) # changes timezone, preserving absolute time. E.g. noon EST = 5PM UTC
return t | [
"def",
"forceutc",
"(",
"t",
":",
"Union",
"[",
"str",
",",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"date",
",",
"np",
".",
"datetime64",
"]",
")",
"->",
"Union",
"[",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"date",
"]",
":",
"# need to passthrough None for simpler external logic.",
"# %% polymorph to datetime",
"if",
"isinstance",
"(",
"t",
",",
"str",
")",
":",
"t",
"=",
"parse",
"(",
"t",
")",
"elif",
"isinstance",
"(",
"t",
",",
"np",
".",
"datetime64",
")",
":",
"t",
"=",
"t",
".",
"astype",
"(",
"datetime",
".",
"datetime",
")",
"elif",
"isinstance",
"(",
"t",
",",
"datetime",
".",
"datetime",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"t",
",",
"datetime",
".",
"date",
")",
":",
"return",
"t",
"elif",
"isinstance",
"(",
"t",
",",
"(",
"np",
".",
"ndarray",
",",
"list",
",",
"tuple",
")",
")",
":",
"return",
"np",
".",
"asarray",
"(",
"[",
"forceutc",
"(",
"T",
")",
"for",
"T",
"in",
"t",
"]",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'datetime only input'",
")",
"# %% enforce UTC on datetime",
"if",
"t",
".",
"tzinfo",
"is",
"None",
":",
"# datetime-naive",
"t",
"=",
"t",
".",
"replace",
"(",
"tzinfo",
"=",
"UTC",
")",
"else",
":",
"# datetime-aware",
"t",
"=",
"t",
".",
"astimezone",
"(",
"UTC",
")",
"# changes timezone, preserving absolute time. E.g. noon EST = 5PM UTC",
"return",
"t"
] | 37.5 | 20.714286 |
def get_filter_form(self, **kwargs):
"""
If there is a filter_form, initializes that
form with the contents of request.GET and
returns it.
"""
form = None
if self.filter_form:
form = self.filter_form(self.request.GET)
elif self.model and hasattr(self.model._meta, '_is_view'):
form = VersionFilterForm(self.request.GET)
return form | [
"def",
"get_filter_form",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"form",
"=",
"None",
"if",
"self",
".",
"filter_form",
":",
"form",
"=",
"self",
".",
"filter_form",
"(",
"self",
".",
"request",
".",
"GET",
")",
"elif",
"self",
".",
"model",
"and",
"hasattr",
"(",
"self",
".",
"model",
".",
"_meta",
",",
"'_is_view'",
")",
":",
"form",
"=",
"VersionFilterForm",
"(",
"self",
".",
"request",
".",
"GET",
")",
"return",
"form"
] | 32 | 14.769231 |
def remove_state(self, state_id, recursive=True, force=False, destroy=True):
    """Remove a state from the container state.

    First detaches every transition and data flow touching the state, then
    optionally destroys the state recursively before popping it from the
    ``states`` mapping.

    :param state_id: the id of the state to remove
    :param recursive: a flag to indicate a recursive disassembling of all substates
    :param force: a flag to indicate forcefully deletion of all states (important for the decider state in the
        barrier concurrency state)
    :param destroy: a flag which indicates if the state should not only be disconnected from the state but also
        destroyed, including all its state elements
    :raises exceptions.AttributeError: if state.state_id does not exist
    """
    if state_id not in self.states:
        raise AttributeError("State_id %s does not exist" % state_id)

    if state_id == self.start_state_id:
        self.set_start_state(None)

    # Collect first, then remove, so we never mutate a dict while iterating it.
    doomed_transitions = [key for key, transition in self.transitions.items()
                          if state_id in (transition.from_state, transition.to_state)]
    for key in doomed_transitions:
        self.remove_transition(key, True)

    doomed_data_flows = [key for key, data_flow in self.data_flows.items()
                         if state_id in (data_flow.from_state, data_flow.to_state)]
    for key in doomed_data_flows:
        self.remove_data_flow(key)

    if recursive and not destroy:
        raise AttributeError("The recursive flag requires the destroy flag to be set, too.")
    if destroy:
        # Recursively delete all transitions, data flows and states within the state to be deleted
        self.states[state_id].destroy(recursive)

    # Finally detach and drop the state itself.
    self.states[state_id].parent = None
    return self.states.pop(state_id)
"def",
"remove_state",
"(",
"self",
",",
"state_id",
",",
"recursive",
"=",
"True",
",",
"force",
"=",
"False",
",",
"destroy",
"=",
"True",
")",
":",
"if",
"state_id",
"not",
"in",
"self",
".",
"states",
":",
"raise",
"AttributeError",
"(",
"\"State_id %s does not exist\"",
"%",
"state_id",
")",
"if",
"state_id",
"==",
"self",
".",
"start_state_id",
":",
"self",
".",
"set_start_state",
"(",
"None",
")",
"# first delete all transitions and data_flows, which are connected to the state to be deleted",
"keys_to_delete",
"=",
"[",
"]",
"for",
"key",
",",
"transition",
"in",
"self",
".",
"transitions",
".",
"items",
"(",
")",
":",
"if",
"transition",
".",
"from_state",
"==",
"state_id",
"or",
"transition",
".",
"to_state",
"==",
"state_id",
":",
"keys_to_delete",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"keys_to_delete",
":",
"self",
".",
"remove_transition",
"(",
"key",
",",
"True",
")",
"keys_to_delete",
"=",
"[",
"]",
"for",
"key",
",",
"data_flow",
"in",
"self",
".",
"data_flows",
".",
"items",
"(",
")",
":",
"if",
"data_flow",
".",
"from_state",
"==",
"state_id",
"or",
"data_flow",
".",
"to_state",
"==",
"state_id",
":",
"keys_to_delete",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"keys_to_delete",
":",
"self",
".",
"remove_data_flow",
"(",
"key",
")",
"if",
"recursive",
"and",
"not",
"destroy",
":",
"raise",
"AttributeError",
"(",
"\"The recursive flag requires the destroy flag to be set, too.\"",
")",
"if",
"destroy",
":",
"# Recursively delete all transitions, data flows and states within the state to be deleted",
"self",
".",
"states",
"[",
"state_id",
"]",
".",
"destroy",
"(",
"recursive",
")",
"# final delete the state it self",
"self",
".",
"states",
"[",
"state_id",
"]",
".",
"parent",
"=",
"None",
"return",
"self",
".",
"states",
".",
"pop",
"(",
"state_id",
")"
] | 46.809524 | 23.761905 |
def derivesha512address(self):
    """ Derive address using ``RIPEMD160(SHA512(x))`` """
    # Pipeline: raw pubkey bytes -> SHA512 digest -> hex -> RIPEMD160 -> hex -> Base58.
    raw_pubkey = unhexlify(repr(self._pubkey))
    sha_hex = hexlify(hashlib.sha512(raw_pubkey).digest())
    address_bytes = ripemd160(sha_hex)
    return Base58(hexlify(address_bytes).decode('ascii'))
"def",
"derivesha512address",
"(",
"self",
")",
":",
"pkbin",
"=",
"unhexlify",
"(",
"repr",
"(",
"self",
".",
"_pubkey",
")",
")",
"addressbin",
"=",
"ripemd160",
"(",
"hexlify",
"(",
"hashlib",
".",
"sha512",
"(",
"pkbin",
")",
".",
"digest",
"(",
")",
")",
")",
"return",
"Base58",
"(",
"hexlify",
"(",
"addressbin",
")",
".",
"decode",
"(",
"'ascii'",
")",
")"
] | 53 | 12.8 |
def new_floating_ip(self, **kwargs):
    """
    Creates a Floating IP and assigns it to a Droplet or reserves it to a region.

    Exactly one of ``droplet_id`` / ``region`` must be supplied via kwargs;
    requires API v2.
    """
    droplet_id = kwargs.get('droplet_id')
    region = kwargs.get('region')

    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    if droplet_id is not None and region is not None:
        raise DoError('Only one of droplet_id and region is required to create a Floating IP. ' \
                      'Set one of the variables and try again.')
    if droplet_id is None and region is None:
        raise DoError('droplet_id or region is required to create a Floating IP. ' \
                      'Set one of the variables and try again.')

    params = ({'droplet_id': droplet_id} if droplet_id is not None
              else {'region': region})
    json = self.request('/floating_ips', params=params, method='POST')
    return json['floating_ip']
"def",
"new_floating_ip",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"droplet_id",
"=",
"kwargs",
".",
"get",
"(",
"'droplet_id'",
")",
"region",
"=",
"kwargs",
".",
"get",
"(",
"'region'",
")",
"if",
"self",
".",
"api_version",
"==",
"2",
":",
"if",
"droplet_id",
"is",
"not",
"None",
"and",
"region",
"is",
"not",
"None",
":",
"raise",
"DoError",
"(",
"'Only one of droplet_id and region is required to create a Floating IP. '",
"'Set one of the variables and try again.'",
")",
"elif",
"droplet_id",
"is",
"None",
"and",
"region",
"is",
"None",
":",
"raise",
"DoError",
"(",
"'droplet_id or region is required to create a Floating IP. '",
"'Set one of the variables and try again.'",
")",
"else",
":",
"if",
"droplet_id",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"'droplet_id'",
":",
"droplet_id",
"}",
"else",
":",
"params",
"=",
"{",
"'region'",
":",
"region",
"}",
"json",
"=",
"self",
".",
"request",
"(",
"'/floating_ips'",
",",
"params",
"=",
"params",
",",
"method",
"=",
"'POST'",
")",
"return",
"json",
"[",
"'floating_ip'",
"]",
"else",
":",
"raise",
"DoError",
"(",
"v2_api_required_str",
")"
] | 44.166667 | 20.166667 |
def lrange(self, key, start, end):
    """Retrieve elements ``start`` through ``end`` (inclusive) of the list
    stored at ``key``.

    Indexes are zero-based; negative values count from the tail (``-1`` is
    the last element, ``-2`` the penultimate).  Out-of-range indexes do not
    raise: a ``start`` past the end of the list yields an empty list, and an
    oversized ``end`` is treated as the last element.  Note the range is
    inclusive at both ends, so ``lrange(key, 0, 10)`` returns 11 items.

    :param key: The list's key
    :type key: :class:`str`, :class:`bytes`
    :param int start: zero-based index to start retrieving elements from
    :param int end: zero-based index at which to stop retrieving elements
    :rtype: list
    :raises: :exc:`~tredis.exceptions.TRedisException`

    .. note::

       **Time complexity** ``O(S+N)`` where ``S`` is the distance of the
       start offset from the nearest end and ``N`` is the number of elements
       in the specified range.
    """
    command = [b'LRANGE', key, start, end]
    return self._execute(command)
"def",
"lrange",
"(",
"self",
",",
"key",
",",
"start",
",",
"end",
")",
":",
"return",
"self",
".",
"_execute",
"(",
"[",
"b'LRANGE'",
",",
"key",
",",
"start",
",",
"end",
"]",
")"
] | 43.365854 | 26.439024 |
def transform_ipy_prompt(line):
    """Handle inputs that start classic IPython prompt syntax."""
    # Empty / whitespace-only input passes through untouched.
    if not line or line.isspace():
        return line
    match = _ipy_prompt_re.match(line)
    if match is None:
        return line
    # Anchored match: strip the prompt prefix, keep the remainder.
    return line[match.end():]
"def",
"transform_ipy_prompt",
"(",
"line",
")",
":",
"if",
"not",
"line",
"or",
"line",
".",
"isspace",
"(",
")",
":",
"return",
"line",
"#print 'LINE: %r' % line # dbg",
"m",
"=",
"_ipy_prompt_re",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"#print 'MATCH! %r -> %r' % (line, line[len(m.group(0)):]) # dbg",
"return",
"line",
"[",
"len",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
":",
"]",
"else",
":",
"return",
"line"
] | 30.25 | 17 |
def participation_policy_changed(ob, event):
    """ Move all the existing users to a new group """
    workspace = IWorkspace(ob)
    # Resolve the group that corresponds to the policy being replaced.
    old_group = api.group.get(workspace.group_for_policy(event.old_policy))
    for member in old_group.getAllGroupMembers():
        member_groups = workspace.get(member.getId()).groups
        # Swap the old policy group for the new one, in place.
        member_groups -= {event.old_policy.title()}
        member_groups.add(event.new_policy.title())
"def",
"participation_policy_changed",
"(",
"ob",
",",
"event",
")",
":",
"workspace",
"=",
"IWorkspace",
"(",
"ob",
")",
"old_group_name",
"=",
"workspace",
".",
"group_for_policy",
"(",
"event",
".",
"old_policy",
")",
"old_group",
"=",
"api",
".",
"group",
".",
"get",
"(",
"old_group_name",
")",
"for",
"member",
"in",
"old_group",
".",
"getAllGroupMembers",
"(",
")",
":",
"groups",
"=",
"workspace",
".",
"get",
"(",
"member",
".",
"getId",
"(",
")",
")",
".",
"groups",
"groups",
"-=",
"set",
"(",
"[",
"event",
".",
"old_policy",
".",
"title",
"(",
")",
"]",
")",
"groups",
".",
"add",
"(",
"event",
".",
"new_policy",
".",
"title",
"(",
")",
")"
] | 48.111111 | 8.777778 |
def process_element(self, element, key, **params):
    """
    Operate on a single element with an externally supplied key.

    Per-call parameter overrides are recorded on ``self.p`` before
    delegating to ``_apply``.
    """
    overrides = param.ParamOverrides(self, params)
    self.p = overrides
    return self._apply(element, key)
"def",
"process_element",
"(",
"self",
",",
"element",
",",
"key",
",",
"*",
"*",
"params",
")",
":",
"self",
".",
"p",
"=",
"param",
".",
"ParamOverrides",
"(",
"self",
",",
"params",
")",
"return",
"self",
".",
"_apply",
"(",
"element",
",",
"key",
")"
] | 40 | 8.285714 |
def token(cls: Type[SIGType], pubkey: str) -> SIGType:
    """
    Build a SIG instance bound to the given public key.

    :param pubkey: Public key of the signature issuer
    :return: a new instance of ``cls`` with ``pubkey`` assigned
    """
    instance = cls()
    instance.pubkey = pubkey
    return instance
"def",
"token",
"(",
"cls",
":",
"Type",
"[",
"SIGType",
"]",
",",
"pubkey",
":",
"str",
")",
"->",
"SIGType",
":",
"sig",
"=",
"cls",
"(",
")",
"sig",
".",
"pubkey",
"=",
"pubkey",
"return",
"sig"
] | 25.2 | 15.2 |
def add_store(name, store, saltenv='base'):
    '''
    Store a certificate to the given store

    name
        The certificate to store, this can use local paths
        or salt:// paths

    store
        The store to add the certificate to

    saltenv
        The salt environment to use, this is ignored if a local
        path is specified
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    cert_file = __salt__['cp.cache_file'](name, saltenv)
    if cert_file is False:
        ret['result'] = False
        ret['comment'] += 'Certificate file not found.'
        return ret

    # Only add the certificate when its serial is not already in the store.
    cert_serial = __salt__['certutil.get_cert_serial'](cert_file)
    stored_serials = __salt__['certutil.get_stored_cert_serials'](store)
    if cert_serial in stored_serials:
        ret['comment'] += "{0} already stored.".format(name)
        return ret

    out = __salt__['certutil.add_store'](name, store)
    if "successfully" in out:
        ret['changes']['added'] = name
    else:
        ret['result'] = False
        ret['comment'] += "Failed to store certificate {0}".format(name)
    return ret
"def",
"add_store",
"(",
"name",
",",
"store",
",",
"saltenv",
"=",
"'base'",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"cert_file",
"=",
"__salt__",
"[",
"'cp.cache_file'",
"]",
"(",
"name",
",",
"saltenv",
")",
"if",
"cert_file",
"is",
"False",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"+=",
"'Certificate file not found.'",
"else",
":",
"cert_serial",
"=",
"__salt__",
"[",
"'certutil.get_cert_serial'",
"]",
"(",
"cert_file",
")",
"serials",
"=",
"__salt__",
"[",
"'certutil.get_stored_cert_serials'",
"]",
"(",
"store",
")",
"if",
"cert_serial",
"not",
"in",
"serials",
":",
"out",
"=",
"__salt__",
"[",
"'certutil.add_store'",
"]",
"(",
"name",
",",
"store",
")",
"if",
"\"successfully\"",
"in",
"out",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'added'",
"]",
"=",
"name",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"+=",
"\"Failed to store certificate {0}\"",
".",
"format",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"+=",
"\"{0} already stored.\"",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] | 29 | 22.45 |
def get_current_url(request, ignore_params=None):
    """
    Giving a django request, return the current http url, possibly ignoring some GET parameters

    :param django.http.HttpRequest request: The current request object.
    :param set ignore_params: An optional set of GET parameters to ignore
    :return: The URL of the current page, possibly omitting some parameters from
        ``ignore_params`` in the querystring.
    :rtype: unicode
    """
    ignored = set() if ignore_params is None else ignore_params
    scheme = u'https' if request.is_secure() else u"http"
    url = u"%s://%s%s" % (scheme, request.get_host(), request.path)
    if request.GET:
        # Rebuild the querystring without the ignored parameters.
        params = copy_params(request.GET, ignored)
        if params:
            url += u"?%s" % urlencode(params)
    return url
"def",
"get_current_url",
"(",
"request",
",",
"ignore_params",
"=",
"None",
")",
":",
"if",
"ignore_params",
"is",
"None",
":",
"ignore_params",
"=",
"set",
"(",
")",
"protocol",
"=",
"u'https'",
"if",
"request",
".",
"is_secure",
"(",
")",
"else",
"u\"http\"",
"service_url",
"=",
"u\"%s://%s%s\"",
"%",
"(",
"protocol",
",",
"request",
".",
"get_host",
"(",
")",
",",
"request",
".",
"path",
")",
"if",
"request",
".",
"GET",
":",
"params",
"=",
"copy_params",
"(",
"request",
".",
"GET",
",",
"ignore_params",
")",
"if",
"params",
":",
"service_url",
"+=",
"u\"?%s\"",
"%",
"urlencode",
"(",
"params",
")",
"return",
"service_url"
] | 43.789474 | 22 |
def get_gnmt_encoder_decoder(cell_type='lstm', attention_cell='scaled_luong', num_layers=2,
                             num_bi_layers=1, hidden_size=128, dropout=0.0, use_residual=False,
                             i2h_weight_initializer=None, h2h_weight_initializer=None,
                             i2h_bias_initializer=mx.init.LSTMBias(forget_bias=1.0),
                             h2h_bias_initializer='zeros',
                             prefix='gnmt_', params=None):
    """Build a pair of GNMT encoder/decoder

    Parameters
    ----------
    cell_type : str or type
    attention_cell : str or AttentionCell
    num_layers : int
    num_bi_layers : int
    hidden_size : int
    dropout : float
    use_residual : bool
    i2h_weight_initializer : mx.init.Initializer or None
    h2h_weight_initializer : mx.init.Initializer or None
    i2h_bias_initializer : mx.init.Initializer or None
    h2h_bias_initializer : mx.init.Initializer or None
    prefix : str, default 'gnmt_'
        Prefix for name of `Block`s.
    params : Parameter or None
        Container for weight sharing between cells.
        Created if `None`.

    Returns
    -------
    encoder : GNMTEncoder
    decoder : GNMTDecoder
    """
    # Arguments passed identically to both halves of the pair.
    shared_kwargs = dict(cell_type=cell_type,
                         num_layers=num_layers,
                         hidden_size=hidden_size,
                         dropout=dropout,
                         use_residual=use_residual,
                         i2h_weight_initializer=i2h_weight_initializer,
                         h2h_weight_initializer=h2h_weight_initializer,
                         i2h_bias_initializer=i2h_bias_initializer,
                         h2h_bias_initializer=h2h_bias_initializer,
                         params=params)
    encoder = GNMTEncoder(num_bi_layers=num_bi_layers, prefix=prefix + 'enc_',
                          **shared_kwargs)
    decoder = GNMTDecoder(attention_cell=attention_cell, prefix=prefix + 'dec_',
                          **shared_kwargs)
    return encoder, decoder
"def",
"get_gnmt_encoder_decoder",
"(",
"cell_type",
"=",
"'lstm'",
",",
"attention_cell",
"=",
"'scaled_luong'",
",",
"num_layers",
"=",
"2",
",",
"num_bi_layers",
"=",
"1",
",",
"hidden_size",
"=",
"128",
",",
"dropout",
"=",
"0.0",
",",
"use_residual",
"=",
"False",
",",
"i2h_weight_initializer",
"=",
"None",
",",
"h2h_weight_initializer",
"=",
"None",
",",
"i2h_bias_initializer",
"=",
"mx",
".",
"init",
".",
"LSTMBias",
"(",
"forget_bias",
"=",
"1.0",
")",
",",
"h2h_bias_initializer",
"=",
"'zeros'",
",",
"prefix",
"=",
"'gnmt_'",
",",
"params",
"=",
"None",
")",
":",
"encoder",
"=",
"GNMTEncoder",
"(",
"cell_type",
"=",
"cell_type",
",",
"num_layers",
"=",
"num_layers",
",",
"num_bi_layers",
"=",
"num_bi_layers",
",",
"hidden_size",
"=",
"hidden_size",
",",
"dropout",
"=",
"dropout",
",",
"use_residual",
"=",
"use_residual",
",",
"i2h_weight_initializer",
"=",
"i2h_weight_initializer",
",",
"h2h_weight_initializer",
"=",
"h2h_weight_initializer",
",",
"i2h_bias_initializer",
"=",
"i2h_bias_initializer",
",",
"h2h_bias_initializer",
"=",
"h2h_bias_initializer",
",",
"prefix",
"=",
"prefix",
"+",
"'enc_'",
",",
"params",
"=",
"params",
")",
"decoder",
"=",
"GNMTDecoder",
"(",
"cell_type",
"=",
"cell_type",
",",
"attention_cell",
"=",
"attention_cell",
",",
"num_layers",
"=",
"num_layers",
",",
"hidden_size",
"=",
"hidden_size",
",",
"dropout",
"=",
"dropout",
",",
"use_residual",
"=",
"use_residual",
",",
"i2h_weight_initializer",
"=",
"i2h_weight_initializer",
",",
"h2h_weight_initializer",
"=",
"h2h_weight_initializer",
",",
"i2h_bias_initializer",
"=",
"i2h_bias_initializer",
",",
"h2h_bias_initializer",
"=",
"h2h_bias_initializer",
",",
"prefix",
"=",
"prefix",
"+",
"'dec_'",
",",
"params",
"=",
"params",
")",
"return",
"encoder",
",",
"decoder"
] | 47.816327 | 23.734694 |
def _check_cell_methods_paren_info(self, paren_contents, var):
    """
    Checks that the spacing and/or comment info contained inside the
    parentheses in cell_methods is well-formed

    :param paren_contents: the text found between the parentheses of a
        cell_methods attribute (without the parentheses themselves)
    :param var: the netCDF variable being checked; only ``var.name`` is
        used here, for building result messages
    :return: a TestCtx carrying the score/out_of tally and any messages
    """
    valid_info = TestCtx(BaseCheck.MEDIUM,
                         self.section_titles['7.3'])
    # if there are no colons, this is a simple comment
    # TODO: are empty comments considered valid?
    if ':' not in paren_contents:
        valid_info.out_of += 1
        valid_info.score += 1
        return valid_info
    # otherwise, split into k/v pairs
    kv_pair_pat = r'(\S+:)\s+(.*(?=\s+\w+:)|[^:]+$)\s*'
    # otherwise, we must split further with intervals coming
    # first, followed by non-standard comments
    # we need the count of the matches, and re.findall() only returns
    # groups if they are present and we wish to see if the entire match
    # object concatenated together is the same as the original string
    pmatches = [m for m in regex.finditer(kv_pair_pat, paren_contents)]
    for i, pmatch in enumerate(pmatches):
        keyword, val = pmatch.groups()
        if keyword == 'interval:':
            # an interval is worth 2 points: one for the number, one for the units
            valid_info.out_of += 2
            interval_matches = regex.match(r'^\s*(?P<interval_number>\S+)\s+(?P<interval_units>\S+)\s*$', val)
            # attempt to get the number for the interval
            if not interval_matches:
                valid_info.messages.append('§7.3.3 {}:cell_methods contains an interval specification that does not parse: "{}". Should be in format "interval: <number> <units>"'.format(var.name, val))
            else:
                try:
                    float(interval_matches.group('interval_number'))
                except ValueError:
                    valid_info.messages.append('§7.3.3 {}:cell_methods contains an interval value that does not parse as a numeric value: "{}".'.format(var.name, interval_matches.group('interval_number')))
                else:
                    valid_info.score += 1
                # then the units
                try:
                    Unit(interval_matches.group('interval_units'))
                except ValueError:
                    valid_info.messages.append('§7.3.3 {}:cell_methods interval units "{}" is not parsable by UDUNITS.'.format(var.name, interval_matches.group('interval_units')))
                else:
                    valid_info.score += 1
        elif keyword == 'comment:':
            # comments can't really be invalid, except
            # if they come first or aren't last, and
            # maybe if they contain colons embedded in the
            # comment string
            valid_info.out_of += 1
            if len(pmatches) == 1:
                valid_info.messages.append('§7.3.3 If there is no standardized information, the keyword comment: should be omitted for variable {}'.format(var.name))
            # otherwise check that the comment is the last
            # item in the parentheses
            elif i != len(pmatches) - 1:
                valid_info.messages.append('§7.3.3 The non-standard "comment:" element must come after any standard elements in cell_methods for variable {}'.format(var.name))
            #
            else:
                valid_info.score += 1
        else:
            # any keyword other than interval:/comment: is invalid
            valid_info.out_of += 1
            valid_info.messages.append('§7.3.3 Invalid cell_methods keyword "{}" for variable {}. Must be one of [interval, comment]'.format(keyword, var.name))
    # Ensure concatenated reconstructed matches are the same as the
    # original string. If they're not, there's likely a formatting error
    valid_info.assert_true(''.join(m.group(0)
                                   for m in pmatches) == paren_contents,
                           "§7.3.3 Parenthetical content inside {}:cell_methods is not well formed: {}".format(var.name, paren_contents))
    return valid_info
"def",
"_check_cell_methods_paren_info",
"(",
"self",
",",
"paren_contents",
",",
"var",
")",
":",
"valid_info",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"MEDIUM",
",",
"self",
".",
"section_titles",
"[",
"'7.3'",
"]",
")",
"# if there are no colons, this is a simple comment",
"# TODO: are empty comments considered valid?",
"if",
"':'",
"not",
"in",
"paren_contents",
":",
"valid_info",
".",
"out_of",
"+=",
"1",
"valid_info",
".",
"score",
"+=",
"1",
"return",
"valid_info",
"# otherwise, split into k/v pairs",
"kv_pair_pat",
"=",
"r'(\\S+:)\\s+(.*(?=\\s+\\w+:)|[^:]+$)\\s*'",
"# otherwise, we must split further with intervals coming",
"# first, followed by non-standard comments",
"# we need the count of the matches, and re.findall() only returns",
"# groups if they are present and we wish to see if the entire match",
"# object concatenated together is the same as the original string",
"pmatches",
"=",
"[",
"m",
"for",
"m",
"in",
"regex",
".",
"finditer",
"(",
"kv_pair_pat",
",",
"paren_contents",
")",
"]",
"for",
"i",
",",
"pmatch",
"in",
"enumerate",
"(",
"pmatches",
")",
":",
"keyword",
",",
"val",
"=",
"pmatch",
".",
"groups",
"(",
")",
"if",
"keyword",
"==",
"'interval:'",
":",
"valid_info",
".",
"out_of",
"+=",
"2",
"interval_matches",
"=",
"regex",
".",
"match",
"(",
"r'^\\s*(?P<interval_number>\\S+)\\s+(?P<interval_units>\\S+)\\s*$'",
",",
"val",
")",
"# attempt to get the number for the interval",
"if",
"not",
"interval_matches",
":",
"valid_info",
".",
"messages",
".",
"append",
"(",
"'§7.3.3 {}:cell_methods contains an interval specification that does not parse: \"{}\". Should be in format \"interval: <number> <units>\"'.",
"f",
"ormat(",
"v",
"ar.",
"n",
"ame,",
" ",
"al)",
")",
"",
"else",
":",
"try",
":",
"float",
"(",
"interval_matches",
".",
"group",
"(",
"'interval_number'",
")",
")",
"except",
"ValueError",
":",
"valid_info",
".",
"messages",
".",
"append",
"(",
"'§7.3.3 {}:cell_methods contains an interval value that does not parse as a numeric value: \"{}\".'.",
"f",
"ormat(",
"v",
"ar.",
"n",
"ame,",
" ",
"nterval_matches.",
"g",
"roup(",
"'",
"interval_number')",
")",
")",
"",
"else",
":",
"valid_info",
".",
"score",
"+=",
"1",
"# then the units",
"try",
":",
"Unit",
"(",
"interval_matches",
".",
"group",
"(",
"'interval_units'",
")",
")",
"except",
"ValueError",
":",
"valid_info",
".",
"messages",
".",
"append",
"(",
"'§7.3.3 {}:cell_methods interval units \"{}\" is not parsable by UDUNITS.'.",
"f",
"ormat(",
"v",
"ar.",
"n",
"ame,",
" ",
"nterval_matches.",
"g",
"roup(",
"'",
"interval_units')",
")",
")",
"",
"else",
":",
"valid_info",
".",
"score",
"+=",
"1",
"elif",
"keyword",
"==",
"'comment:'",
":",
"# comments can't really be invalid, except",
"# if they come first or aren't last, and",
"# maybe if they contain colons embedded in the",
"# comment string",
"valid_info",
".",
"out_of",
"+=",
"1",
"if",
"len",
"(",
"pmatches",
")",
"==",
"1",
":",
"valid_info",
".",
"messages",
".",
"append",
"(",
"'§7.3.3 If there is no standardized information, the keyword comment: should be omitted for variable {}'.",
"f",
"ormat(",
"v",
"ar.",
"n",
"ame)",
")",
"",
"# otherwise check that the comment is the last",
"# item in the parentheses",
"elif",
"i",
"!=",
"len",
"(",
"pmatches",
")",
"-",
"1",
":",
"valid_info",
".",
"messages",
".",
"append",
"(",
"'§7.3.3 The non-standard \"comment:\" element must come after any standard elements in cell_methods for variable {}'.",
"f",
"ormat(",
"v",
"ar.",
"n",
"ame)",
")",
"",
"#",
"else",
":",
"valid_info",
".",
"score",
"+=",
"1",
"else",
":",
"valid_info",
".",
"out_of",
"+=",
"1",
"valid_info",
".",
"messages",
".",
"append",
"(",
"'§7.3.3 Invalid cell_methods keyword \"{}\" for variable {}. Must be one of [interval, comment]'.",
"f",
"ormat(",
"k",
"eyword,",
" ",
"ar.",
"n",
"ame)",
")",
"",
"# Ensure concatenated reconstructed matches are the same as the",
"# original string. If they're not, there's likely a formatting error",
"valid_info",
".",
"assert_true",
"(",
"''",
".",
"join",
"(",
"m",
".",
"group",
"(",
"0",
")",
"for",
"m",
"in",
"pmatches",
")",
"==",
"paren_contents",
",",
"\"§7.3.3 Parenthetical content inside {}:cell_methods is not well formed: {}\".",
"f",
"ormat(",
"v",
"ar.",
"n",
"ame,",
" ",
"aren_contents)",
")",
"",
"return",
"valid_info"
] | 56.915493 | 28.295775 |
def makeLabel(self, value):
    """Create a label for the specified value.

    Create a label string containing the value and its units (if any),
    based on the values of self.step, self.span, and self.unitSystem.
    """
    value, prefix = format_units(value, self.step,
                                 system=self.unitSystem)
    span, spanPrefix = format_units(self.span, self.step,
                                    system=self.unitSystem)
    if prefix:
        prefix += " "

    # Small absolute values get fixed precision regardless of span.
    if value < 0.1:
        return "%g %s" % (float(value), prefix)
    if value < 1.0:
        return "%.2f %s" % (float(value), prefix)

    if span > 10 or spanPrefix != prefix:
        # Wide span (or prefix mismatch): ints keep no fraction digits.
        if type(value) is float:
            return "%.1f %s" % (value, prefix)
        return "%d %s" % (int(value), prefix)
    if span > 3:
        return "%.1f %s" % (float(value), prefix)
    if span > 0.1:
        return "%.2f %s" % (float(value), prefix)
    return "%g %s" % (float(value), prefix)
"def",
"makeLabel",
"(",
"self",
",",
"value",
")",
":",
"value",
",",
"prefix",
"=",
"format_units",
"(",
"value",
",",
"self",
".",
"step",
",",
"system",
"=",
"self",
".",
"unitSystem",
")",
"span",
",",
"spanPrefix",
"=",
"format_units",
"(",
"self",
".",
"span",
",",
"self",
".",
"step",
",",
"system",
"=",
"self",
".",
"unitSystem",
")",
"if",
"prefix",
":",
"prefix",
"+=",
"\" \"",
"if",
"value",
"<",
"0.1",
":",
"return",
"\"%g %s\"",
"%",
"(",
"float",
"(",
"value",
")",
",",
"prefix",
")",
"elif",
"value",
"<",
"1.0",
":",
"return",
"\"%.2f %s\"",
"%",
"(",
"float",
"(",
"value",
")",
",",
"prefix",
")",
"if",
"span",
">",
"10",
"or",
"spanPrefix",
"!=",
"prefix",
":",
"if",
"type",
"(",
"value",
")",
"is",
"float",
":",
"return",
"\"%.1f %s\"",
"%",
"(",
"value",
",",
"prefix",
")",
"else",
":",
"return",
"\"%d %s\"",
"%",
"(",
"int",
"(",
"value",
")",
",",
"prefix",
")",
"elif",
"span",
">",
"3",
":",
"return",
"\"%.1f %s\"",
"%",
"(",
"float",
"(",
"value",
")",
",",
"prefix",
")",
"elif",
"span",
">",
"0.1",
":",
"return",
"\"%.2f %s\"",
"%",
"(",
"float",
"(",
"value",
")",
",",
"prefix",
")",
"else",
":",
"return",
"\"%g %s\"",
"%",
"(",
"float",
"(",
"value",
")",
",",
"prefix",
")"
] | 40.185185 | 16.444444 |
def hessian(self, x, y, theta_E, r_trunc, center_x=0, center_y=0):
    """
    returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy
    """
    dx = x - center_x
    dy = y - center_y
    first_deriv = self._dphi_dr(dx, dy, theta_E, r_trunc)
    second_deriv = self._d2phi_dr2(dx, dy, theta_E, r_trunc)
    # NOTE(review): _dr_dx is called with the *unshifted* coordinates here,
    # unlike the other radial helpers -- preserved as-is; confirm intended.
    dr_dx, dr_dy = self._dr_dx(x, y)
    d2r_dx2, d2r_dy2, d2r_dxy = self._d2r_dx2(dx, dy)
    # Chain rule: d2f/dxi dxj = d2r/dxi dxj * dphi/dr + dr/dxi * dr/dxj * d2phi/dr2
    f_xx = d2r_dx2 * first_deriv + dr_dx ** 2 * second_deriv
    f_yy = d2r_dy2 * first_deriv + dr_dy ** 2 * second_deriv
    f_xy = d2r_dxy * first_deriv + dr_dx * dr_dy * second_deriv
    return f_xx, f_yy, f_xy
"def",
"hessian",
"(",
"self",
",",
"x",
",",
"y",
",",
"theta_E",
",",
"r_trunc",
",",
"center_x",
"=",
"0",
",",
"center_y",
"=",
"0",
")",
":",
"x_shift",
"=",
"x",
"-",
"center_x",
"y_shift",
"=",
"y",
"-",
"center_y",
"dphi_dr",
"=",
"self",
".",
"_dphi_dr",
"(",
"x_shift",
",",
"y_shift",
",",
"theta_E",
",",
"r_trunc",
")",
"d2phi_dr2",
"=",
"self",
".",
"_d2phi_dr2",
"(",
"x_shift",
",",
"y_shift",
",",
"theta_E",
",",
"r_trunc",
")",
"dr_dx",
",",
"dr_dy",
"=",
"self",
".",
"_dr_dx",
"(",
"x",
",",
"y",
")",
"d2r_dx2",
",",
"d2r_dy2",
",",
"d2r_dxy",
"=",
"self",
".",
"_d2r_dx2",
"(",
"x_shift",
",",
"y_shift",
")",
"f_xx",
"=",
"d2r_dx2",
"*",
"dphi_dr",
"+",
"dr_dx",
"**",
"2",
"*",
"d2phi_dr2",
"f_yy",
"=",
"d2r_dy2",
"*",
"dphi_dr",
"+",
"dr_dy",
"**",
"2",
"*",
"d2phi_dr2",
"f_xy",
"=",
"d2r_dxy",
"*",
"dphi_dr",
"+",
"dr_dx",
"*",
"dr_dy",
"*",
"d2phi_dr2",
"return",
"f_xx",
",",
"f_yy",
",",
"f_xy"
] | 46.571429 | 14.857143 |
def start_host(session=None):
    """Promote the current process into python plugin host for Nvim.

    Start msgpack-rpc event loop for `session`, listening for Nvim requests
    and notifications. It registers Nvim commands for loading/unloading
    python plugins.

    The sys.stdout and sys.stderr streams are redirected to Nvim through
    `session`. That means print statements probably won't work as expected
    while this function doesn't return.

    This function is normally called at program startup and could have been
    defined as a separate executable. It is exposed as a library function for
    testing purposes only.
    """
    # Collect plugin candidates from argv: .py files and packages
    # (directories containing an __init__.py).
    plugins = []
    for arg in sys.argv:
        _, ext = os.path.splitext(arg)
        if ext == '.py':
            plugins.append(arg)
        elif (os.path.isdir(arg)
              and os.path.isfile(os.path.join(arg, '__init__.py'))):
            plugins.append(arg)

    # This is a special case to support the old workaround of
    # adding an empty .py file to make a package directory
    # visible, and it should be removed soon.
    for pkg_dir in [p for p in list(plugins) if os.path.isdir(p)]:
        legacy_stub = pkg_dir + ".py"
        if legacy_stub in plugins:
            plugins.remove(legacy_stub)

    # Special case: the legacy scripthost receives a single relative filename
    # while the rplugin host will receive absolute paths.
    name = "script" if plugins == ["script_host.py"] else "rplugin"
    setup_logging(name)

    if not session:
        session = stdio_session()
    nvim = Nvim.from_session(session)

    if nvim.version.api_level < 1:
        sys.stderr.write("This version of pynvim "
                         "requires nvim 0.1.6 or later")
        sys.exit(1)

    host = Host(nvim)
    host.start(plugins)
"def",
"start_host",
"(",
"session",
"=",
"None",
")",
":",
"plugins",
"=",
"[",
"]",
"for",
"arg",
"in",
"sys",
".",
"argv",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"arg",
")",
"if",
"ext",
"==",
"'.py'",
":",
"plugins",
".",
"append",
"(",
"arg",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"arg",
")",
":",
"init",
"=",
"os",
".",
"path",
".",
"join",
"(",
"arg",
",",
"'__init__.py'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"init",
")",
":",
"plugins",
".",
"append",
"(",
"arg",
")",
"# This is a special case to support the old workaround of",
"# adding an empty .py file to make a package directory",
"# visible, and it should be removed soon.",
"for",
"path",
"in",
"list",
"(",
"plugins",
")",
":",
"dup",
"=",
"path",
"+",
"\".py\"",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"dup",
"in",
"plugins",
":",
"plugins",
".",
"remove",
"(",
"dup",
")",
"# Special case: the legacy scripthost receives a single relative filename",
"# while the rplugin host will receive absolute paths.",
"if",
"plugins",
"==",
"[",
"\"script_host.py\"",
"]",
":",
"name",
"=",
"\"script\"",
"else",
":",
"name",
"=",
"\"rplugin\"",
"setup_logging",
"(",
"name",
")",
"if",
"not",
"session",
":",
"session",
"=",
"stdio_session",
"(",
")",
"nvim",
"=",
"Nvim",
".",
"from_session",
"(",
"session",
")",
"if",
"nvim",
".",
"version",
".",
"api_level",
"<",
"1",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"This version of pynvim \"",
"\"requires nvim 0.1.6 or later\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"host",
"=",
"Host",
"(",
"nvim",
")",
"host",
".",
"start",
"(",
"plugins",
")"
] | 33.245283 | 19.830189 |
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out)) | [
"def",
"transformString",
"(",
"self",
",",
"instring",
")",
":",
"out",
"=",
"[",
"]",
"lastE",
"=",
"0",
"# force preservation of <TAB>s, to minimize unwanted transformation of string, and to",
"# keep string locs straight between transformString and scanString",
"self",
".",
"keepTabs",
"=",
"True",
"for",
"t",
",",
"s",
",",
"e",
"in",
"self",
".",
"scanString",
"(",
"instring",
")",
":",
"out",
".",
"append",
"(",
"instring",
"[",
"lastE",
":",
"s",
"]",
")",
"if",
"t",
":",
"if",
"isinstance",
"(",
"t",
",",
"ParseResults",
")",
":",
"out",
"+=",
"t",
".",
"asList",
"(",
")",
"elif",
"isinstance",
"(",
"t",
",",
"list",
")",
":",
"out",
"+=",
"t",
"else",
":",
"out",
".",
"append",
"(",
"t",
")",
"lastE",
"=",
"e",
"out",
".",
"append",
"(",
"instring",
"[",
"lastE",
":",
"]",
")",
"return",
"\"\"",
".",
"join",
"(",
"map",
"(",
"_ustr",
",",
"out",
")",
")"
] | 49.166667 | 17.875 |
def rename_document(self, did, name):
'''
Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/documents/' + did, body=payload) | [
"def",
"rename_document",
"(",
"self",
",",
"did",
",",
"name",
")",
":",
"payload",
"=",
"{",
"'name'",
":",
"name",
"}",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/documents/'",
"+",
"did",
",",
"body",
"=",
"payload",
")"
] | 23 | 23.588235 |
def doit(self, classes=None, recursive=True, **kwargs):
"""Write out commutator
Write out the commutator according to its definition
$[\Op{A}, \Op{B}] = \Op{A}\Op{B} - \Op{A}\Op{B}$.
See :meth:`.Expression.doit`.
"""
return super().doit(classes, recursive, **kwargs) | [
"def",
"doit",
"(",
"self",
",",
"classes",
"=",
"None",
",",
"recursive",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
")",
".",
"doit",
"(",
"classes",
",",
"recursive",
",",
"*",
"*",
"kwargs",
")"
] | 34.222222 | 16.888889 |
def get(self, url=None, parse_data=True, key=None, parameters=None):
""" Issue a GET request.
Kwargs:
url (str): Destination URL
parse_data (bool): If true, parse response data
key (string): If parse_data==True, look for this key when parsing data
parameters (dict): Additional GET parameters to append to the URL
Returns:
dict. Response (a dict with keys: success, data, info, body)
Raises:
AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
"""
return self._fetch("GET", url, post_data=None, parse_data=parse_data, key=key, parameters=parameters) | [
"def",
"get",
"(",
"self",
",",
"url",
"=",
"None",
",",
"parse_data",
"=",
"True",
",",
"key",
"=",
"None",
",",
"parameters",
"=",
"None",
")",
":",
"return",
"self",
".",
"_fetch",
"(",
"\"GET\"",
",",
"url",
",",
"post_data",
"=",
"None",
",",
"parse_data",
"=",
"parse_data",
",",
"key",
"=",
"key",
",",
"parameters",
"=",
"parameters",
")"
] | 42.75 | 29.5625 |
def publish_scene_remove(self, scene_id):
"""publish the removal of a scene"""
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.scene_remove(self.sequence_number, scene_id))
return self.sequence_number | [
"def",
"publish_scene_remove",
"(",
"self",
",",
"scene_id",
")",
":",
"self",
".",
"sequence_number",
"+=",
"1",
"self",
".",
"publisher",
".",
"send_multipart",
"(",
"msgs",
".",
"MessageBuilder",
".",
"scene_remove",
"(",
"self",
".",
"sequence_number",
",",
"scene_id",
")",
")",
"return",
"self",
".",
"sequence_number"
] | 51.2 | 15.2 |
def set_dhw_on(self, until=None):
"""Set DHW to on, either indefinitely, or until a specified time.
When On, the DHW controller will work to keep its target temperature
at/above its target temperature. After the specified time, it will
revert to its scheduled behaviour.
"""
time_until = None if until is None else until.strftime(
'%Y-%m-%dT%H:%M:%SZ')
self._set_dhw(status="Hold", mode="DHWOn", next_time=time_until) | [
"def",
"set_dhw_on",
"(",
"self",
",",
"until",
"=",
"None",
")",
":",
"time_until",
"=",
"None",
"if",
"until",
"is",
"None",
"else",
"until",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%SZ'",
")",
"self",
".",
"_set_dhw",
"(",
"status",
"=",
"\"Hold\"",
",",
"mode",
"=",
"\"DHWOn\"",
",",
"next_time",
"=",
"time_until",
")"
] | 39.833333 | 21.833333 |
def _parse_number(klass, number):
"""Parse ``number`` into an ``int`` or return ``number`` if a valid
expression for a INFO/FORMAT "Number".
:param str number: ``str`` to parse and check
"""
try:
return int(number)
except ValueError as e:
if number in VALID_NUMBERS:
return number
else:
raise e | [
"def",
"_parse_number",
"(",
"klass",
",",
"number",
")",
":",
"try",
":",
"return",
"int",
"(",
"number",
")",
"except",
"ValueError",
"as",
"e",
":",
"if",
"number",
"in",
"VALID_NUMBERS",
":",
"return",
"number",
"else",
":",
"raise",
"e"
] | 30.692308 | 12.692308 |
def set_patient_medhx_flag(self, patient_id,
medhx_status):
"""
invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:param patient_id
:param medhx_status - Field in EEHR expects U, G, or D. SP defaults to Null and
errors out if included.
U=Unknown
G=Granted
D=Declined
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG,
patient_id=patient_id,
parameter1=medhx_status
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SET_PATIENT_MEDHX_FLAG)
return result | [
"def",
"set_patient_medhx_flag",
"(",
"self",
",",
"patient_id",
",",
"medhx_status",
")",
":",
"magic",
"=",
"self",
".",
"_magic_json",
"(",
"action",
"=",
"TouchWorksMagicConstants",
".",
"ACTION_SET_PATIENT_MEDHX_FLAG",
",",
"patient_id",
"=",
"patient_id",
",",
"parameter1",
"=",
"medhx_status",
")",
"response",
"=",
"self",
".",
"_http_request",
"(",
"TouchWorksEndPoints",
".",
"MAGIC_JSON",
",",
"data",
"=",
"magic",
")",
"result",
"=",
"self",
".",
"_get_results_or_raise_if_magic_invalid",
"(",
"magic",
",",
"response",
",",
"TouchWorksMagicConstants",
".",
"RESULT_SET_PATIENT_MEDHX_FLAG",
")",
"return",
"result"
] | 39.304348 | 17.73913 |
def move(self, destination):
"""Reconfigure and move the virtual environment to another path.
Args:
destination (str): The target path of the virtual environment.
Note:
Unlike `relocate`, this method *will* move the virtual to the
given path.
"""
self.relocate(destination)
shutil.move(self.path, destination)
self._path = destination | [
"def",
"move",
"(",
"self",
",",
"destination",
")",
":",
"self",
".",
"relocate",
"(",
"destination",
")",
"shutil",
".",
"move",
"(",
"self",
".",
"path",
",",
"destination",
")",
"self",
".",
"_path",
"=",
"destination"
] | 32 | 19 |
def get_stack_var(name, depth=0):
'''This function may fiddle with the locals of the calling function,
to make it the root function of the fiber. If called from a short-lived
function be sure to use a bigger frame depth.
Returns the fiber state or None.'''
base_frame = _get_base_frame(depth)
if not base_frame:
# Frame not found
raise RuntimeError("Base frame not found")
# Lookup up the frame stack starting at the base frame for the fiber state
level = 0
frame = base_frame
while frame:
locals = frame.f_locals
value = locals.get(name)
if value is not None:
if level > 0:
# Copy a reference of the fiber state in the base frame
base_frame.f_locals[name] = value
return value
if locals.get(SECTION_BOUNDARY_TAG):
return None
frame = frame.f_back
level += 1
return None | [
"def",
"get_stack_var",
"(",
"name",
",",
"depth",
"=",
"0",
")",
":",
"base_frame",
"=",
"_get_base_frame",
"(",
"depth",
")",
"if",
"not",
"base_frame",
":",
"# Frame not found",
"raise",
"RuntimeError",
"(",
"\"Base frame not found\"",
")",
"# Lookup up the frame stack starting at the base frame for the fiber state",
"level",
"=",
"0",
"frame",
"=",
"base_frame",
"while",
"frame",
":",
"locals",
"=",
"frame",
".",
"f_locals",
"value",
"=",
"locals",
".",
"get",
"(",
"name",
")",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"level",
">",
"0",
":",
"# Copy a reference of the fiber state in the base frame",
"base_frame",
".",
"f_locals",
"[",
"name",
"]",
"=",
"value",
"return",
"value",
"if",
"locals",
".",
"get",
"(",
"SECTION_BOUNDARY_TAG",
")",
":",
"return",
"None",
"frame",
"=",
"frame",
".",
"f_back",
"level",
"+=",
"1",
"return",
"None"
] | 34.148148 | 18.296296 |
def get_instance_for_uuid(self, uuid, project_id):
"""Return instance name for given uuid of an instance and project.
:uuid: Instance's UUID
:project_id: UUID of project (tenant)
"""
instance_name = self._inst_info_cache.get((uuid, project_id))
if instance_name:
return instance_name
instances = self._get_instances_for_project(project_id)
for inst in instances:
if inst.id.replace('-', '') == uuid:
LOG.debug('get_instance_for_uuid: name=%s', inst.name)
instance_name = inst.name
self._inst_info_cache[(uuid, project_id)] = instance_name
return instance_name
return instance_name | [
"def",
"get_instance_for_uuid",
"(",
"self",
",",
"uuid",
",",
"project_id",
")",
":",
"instance_name",
"=",
"self",
".",
"_inst_info_cache",
".",
"get",
"(",
"(",
"uuid",
",",
"project_id",
")",
")",
"if",
"instance_name",
":",
"return",
"instance_name",
"instances",
"=",
"self",
".",
"_get_instances_for_project",
"(",
"project_id",
")",
"for",
"inst",
"in",
"instances",
":",
"if",
"inst",
".",
"id",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"==",
"uuid",
":",
"LOG",
".",
"debug",
"(",
"'get_instance_for_uuid: name=%s'",
",",
"inst",
".",
"name",
")",
"instance_name",
"=",
"inst",
".",
"name",
"self",
".",
"_inst_info_cache",
"[",
"(",
"uuid",
",",
"project_id",
")",
"]",
"=",
"instance_name",
"return",
"instance_name",
"return",
"instance_name"
] | 42.647059 | 14 |
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in parse_http_list(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result | [
"def",
"parse_dict_header",
"(",
"value",
")",
":",
"result",
"=",
"{",
"}",
"for",
"item",
"in",
"parse_http_list",
"(",
"value",
")",
":",
"if",
"'='",
"not",
"in",
"item",
":",
"result",
"[",
"item",
"]",
"=",
"None",
"continue",
"name",
",",
"value",
"=",
"item",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"value",
"[",
":",
"1",
"]",
"==",
"value",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"value",
"=",
"unquote_header_value",
"(",
"value",
"[",
"1",
":",
"-",
"1",
"]",
")",
"result",
"[",
"name",
"]",
"=",
"value",
"return",
"result"
] | 29.709677 | 16.16129 |
def add_comment(self, issue, body, visibility=None, is_internal=False):
"""Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:type issue: str
:param body: Text of the comment to add
:type body: str
:param visibility: a dict containing two entries: "type" and "value".
"type" is 'role' (or 'group' if the JIRA server has configured
comment visibility for groups) and 'value' is the name of the role
(or group) to which viewing of this comment will be restricted.
:type visibility: Optional[Dict[str, str]]
:param is_internal: Defines whether a comment has to be marked as 'Internal' in Jira Service Desk (Default: False)
:type is_internal: bool
:rtype: Comment
"""
data = {
'body': body,
}
if is_internal:
data.update({
'properties': [
{'key': 'sd.public.comment',
'value': {'internal': is_internal}}
]
})
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data)
)
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment | [
"def",
"add_comment",
"(",
"self",
",",
"issue",
",",
"body",
",",
"visibility",
"=",
"None",
",",
"is_internal",
"=",
"False",
")",
":",
"data",
"=",
"{",
"'body'",
":",
"body",
",",
"}",
"if",
"is_internal",
":",
"data",
".",
"update",
"(",
"{",
"'properties'",
":",
"[",
"{",
"'key'",
":",
"'sd.public.comment'",
",",
"'value'",
":",
"{",
"'internal'",
":",
"is_internal",
"}",
"}",
"]",
"}",
")",
"if",
"visibility",
"is",
"not",
"None",
":",
"data",
"[",
"'visibility'",
"]",
"=",
"visibility",
"url",
"=",
"self",
".",
"_get_url",
"(",
"'issue/'",
"+",
"str",
"(",
"issue",
")",
"+",
"'/comment'",
")",
"r",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"comment",
"=",
"Comment",
"(",
"self",
".",
"_options",
",",
"self",
".",
"_session",
",",
"raw",
"=",
"json_loads",
"(",
"r",
")",
")",
"return",
"comment"
] | 37.121951 | 23.609756 |
def path(self):
"""Return the path to the file, if the ref is a file"""
if not isinstance(self.ref, str):
return None
u = parse_app_url(self.ref)
if u.inner.proto != 'file':
return None
return u.path | [
"def",
"path",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"ref",
",",
"str",
")",
":",
"return",
"None",
"u",
"=",
"parse_app_url",
"(",
"self",
".",
"ref",
")",
"if",
"u",
".",
"inner",
".",
"proto",
"!=",
"'file'",
":",
"return",
"None",
"return",
"u",
".",
"path"
] | 21.333333 | 20.75 |
def next(self):
"""
Returns the next sequence of results, given stride and n.
"""
try:
results = self._stride_buffer.pop()
except (IndexError, AttributeError):
self._rebuffer()
results = self._stride_buffer.pop()
if not results:
raise StopIteration
return results | [
"def",
"next",
"(",
"self",
")",
":",
"try",
":",
"results",
"=",
"self",
".",
"_stride_buffer",
".",
"pop",
"(",
")",
"except",
"(",
"IndexError",
",",
"AttributeError",
")",
":",
"self",
".",
"_rebuffer",
"(",
")",
"results",
"=",
"self",
".",
"_stride_buffer",
".",
"pop",
"(",
")",
"if",
"not",
"results",
":",
"raise",
"StopIteration",
"return",
"results"
] | 28 | 14.153846 |
def get_direct_band_gap_dict(self):
"""
Returns a dictionary of information about the direct
band gap
Returns:
a dictionary of the band gaps indexed by spin
along with their band indices and k-point index
"""
if self.is_metal():
raise ValueError("get_direct_band_gap_dict should"
"only be used with non-metals")
direct_gap_dict = {}
for spin, v in self.bands.items():
above = v[np.all(v > self.efermi, axis=1)]
min_above = np.min(above, axis=0)
below = v[np.all(v < self.efermi, axis=1)]
max_below = np.max(below, axis=0)
diff = min_above - max_below
kpoint_index = np.argmin(diff)
band_indices = [np.argmax(below[:, kpoint_index]),
np.argmin(above[:, kpoint_index]) + len(below)]
direct_gap_dict[spin] = {"value": diff[kpoint_index],
"kpoint_index": kpoint_index,
"band_indices": band_indices}
return direct_gap_dict | [
"def",
"get_direct_band_gap_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_metal",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"get_direct_band_gap_dict should\"",
"\"only be used with non-metals\"",
")",
"direct_gap_dict",
"=",
"{",
"}",
"for",
"spin",
",",
"v",
"in",
"self",
".",
"bands",
".",
"items",
"(",
")",
":",
"above",
"=",
"v",
"[",
"np",
".",
"all",
"(",
"v",
">",
"self",
".",
"efermi",
",",
"axis",
"=",
"1",
")",
"]",
"min_above",
"=",
"np",
".",
"min",
"(",
"above",
",",
"axis",
"=",
"0",
")",
"below",
"=",
"v",
"[",
"np",
".",
"all",
"(",
"v",
"<",
"self",
".",
"efermi",
",",
"axis",
"=",
"1",
")",
"]",
"max_below",
"=",
"np",
".",
"max",
"(",
"below",
",",
"axis",
"=",
"0",
")",
"diff",
"=",
"min_above",
"-",
"max_below",
"kpoint_index",
"=",
"np",
".",
"argmin",
"(",
"diff",
")",
"band_indices",
"=",
"[",
"np",
".",
"argmax",
"(",
"below",
"[",
":",
",",
"kpoint_index",
"]",
")",
",",
"np",
".",
"argmin",
"(",
"above",
"[",
":",
",",
"kpoint_index",
"]",
")",
"+",
"len",
"(",
"below",
")",
"]",
"direct_gap_dict",
"[",
"spin",
"]",
"=",
"{",
"\"value\"",
":",
"diff",
"[",
"kpoint_index",
"]",
",",
"\"kpoint_index\"",
":",
"kpoint_index",
",",
"\"band_indices\"",
":",
"band_indices",
"}",
"return",
"direct_gap_dict"
] | 43.384615 | 15.461538 |
def del_device_notification(self, notification_handle, user_handle):
# type: (int, int) -> None
"""Remove a device notification.
:param notification_handle: address of the variable that contains
the handle of the notification
:param user_handle: user handle
"""
if self._port is not None:
adsSyncDelDeviceNotificationReqEx(
self._port, self._adr, notification_handle, user_handle
) | [
"def",
"del_device_notification",
"(",
"self",
",",
"notification_handle",
",",
"user_handle",
")",
":",
"# type: (int, int) -> None\r",
"if",
"self",
".",
"_port",
"is",
"not",
"None",
":",
"adsSyncDelDeviceNotificationReqEx",
"(",
"self",
".",
"_port",
",",
"self",
".",
"_adr",
",",
"notification_handle",
",",
"user_handle",
")"
] | 37.153846 | 16.923077 |
def groups(self):
""" returns the group object """
return Groups(url="%s/groups" % self.root,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=False) | [
"def",
"groups",
"(",
"self",
")",
":",
"return",
"Groups",
"(",
"url",
"=",
"\"%s/groups\"",
"%",
"self",
".",
"root",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
",",
"initalize",
"=",
"False",
")"
] | 43.857143 | 10.714286 |
def set_value(self, column, row, value):
"""Set the value of the Matrix at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
:param numeric value: The new value at the given column/row
:raise: Raises an :py:exc:`IndexError` if the index is out of xrange.
"""
self.matrix[column][row] = value | [
"def",
"set_value",
"(",
"self",
",",
"column",
",",
"row",
",",
"value",
")",
":",
"self",
".",
"matrix",
"[",
"column",
"]",
"[",
"row",
"]",
"=",
"value"
] | 45.3 | 21.1 |
def cancel(self, nids=None):
"""
Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error
"""
if self.has_chrooted:
# TODO: Use paramiko to kill the job?
warnings.warn("Cannot cancel the flow via sshfs!")
return -1
# If we are running with the scheduler, we must send a SIGKILL signal.
if os.path.exists(self.pid_file):
cprint("Found scheduler attached to this flow.", "yellow")
cprint("Sending SIGKILL to the scheduler before cancelling the tasks!", "yellow")
with open(self.pid_file, "rt") as fh:
pid = int(fh.readline())
retcode = os.system("kill -9 %d" % pid)
self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode)
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled | [
"def",
"cancel",
"(",
"self",
",",
"nids",
"=",
"None",
")",
":",
"if",
"self",
".",
"has_chrooted",
":",
"# TODO: Use paramiko to kill the job?",
"warnings",
".",
"warn",
"(",
"\"Cannot cancel the flow via sshfs!\"",
")",
"return",
"-",
"1",
"# If we are running with the scheduler, we must send a SIGKILL signal.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"pid_file",
")",
":",
"cprint",
"(",
"\"Found scheduler attached to this flow.\"",
",",
"\"yellow\"",
")",
"cprint",
"(",
"\"Sending SIGKILL to the scheduler before cancelling the tasks!\"",
",",
"\"yellow\"",
")",
"with",
"open",
"(",
"self",
".",
"pid_file",
",",
"\"rt\"",
")",
"as",
"fh",
":",
"pid",
"=",
"int",
"(",
"fh",
".",
"readline",
"(",
")",
")",
"retcode",
"=",
"os",
".",
"system",
"(",
"\"kill -9 %d\"",
"%",
"pid",
")",
"self",
".",
"history",
".",
"info",
"(",
"\"Sent SIGKILL to the scheduler, retcode: %s\"",
"%",
"retcode",
")",
"try",
":",
"os",
".",
"remove",
"(",
"self",
".",
"pid_file",
")",
"except",
"IOError",
":",
"pass",
"num_cancelled",
"=",
"0",
"for",
"task",
"in",
"self",
".",
"iflat_tasks",
"(",
"nids",
"=",
"nids",
")",
":",
"num_cancelled",
"+=",
"task",
".",
"cancel",
"(",
")",
"return",
"num_cancelled"
] | 35.454545 | 20.848485 |
def begin(self):
""" connects and optionally authenticates a connection."""
self.connect(self.host, self.port)
if self.user:
self.starttls()
self.login(self.user, self.password) | [
"def",
"begin",
"(",
"self",
")",
":",
"self",
".",
"connect",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"if",
"self",
".",
"user",
":",
"self",
".",
"starttls",
"(",
")",
"self",
".",
"login",
"(",
"self",
".",
"user",
",",
"self",
".",
"password",
")"
] | 36.666667 | 11 |
def contract(self, x):
"""
Run .contract(x) on all segmentlists.
"""
for value in self.itervalues():
value.contract(x)
return self | [
"def",
"contract",
"(",
"self",
",",
"x",
")",
":",
"for",
"value",
"in",
"self",
".",
"itervalues",
"(",
")",
":",
"value",
".",
"contract",
"(",
"x",
")",
"return",
"self"
] | 19.571429 | 10.428571 |
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Sigma^2 irregular', fam.Flat(transform='exp'), fam.Normal(0,3))
for parm in range(self.z_no-1):
self.latent_variables.add_z('Sigma^2 ' + self.X_names[parm], fam.Flat(transform='exp'), fam.Normal(0,3)) | [
"def",
"_create_latent_variables",
"(",
"self",
")",
":",
"self",
".",
"latent_variables",
".",
"add_z",
"(",
"'Sigma^2 irregular'",
",",
"fam",
".",
"Flat",
"(",
"transform",
"=",
"'exp'",
")",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"3",
")",
")",
"for",
"parm",
"in",
"range",
"(",
"self",
".",
"z_no",
"-",
"1",
")",
":",
"self",
".",
"latent_variables",
".",
"add_z",
"(",
"'Sigma^2 '",
"+",
"self",
".",
"X_names",
"[",
"parm",
"]",
",",
"fam",
".",
"Flat",
"(",
"transform",
"=",
"'exp'",
")",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"3",
")",
")"
] | 34.583333 | 25.833333 |
def MetatagDistinctValuesGet(self, metatag_name, namespace = None):
"""
Find the distinct value of a metatag name in a certain namespace
@param metatag_name (string) - Name of the metatag for which to find the distinct values
@param namespace (stirng) - Namespace in which to find the distinct values
@return (bool) - Boolean indicating whether MetatagDistinctValuesGet was successful
"""
ns = "default" if namespace is None else namespace
if self.__SenseApiCall__("/metatag_name/{0}/distinct_values.json", "GET", parameters = {'namespace': ns}):
return True
else:
self.__error__ = "api call unsuccessful"
return False | [
"def",
"MetatagDistinctValuesGet",
"(",
"self",
",",
"metatag_name",
",",
"namespace",
"=",
"None",
")",
":",
"ns",
"=",
"\"default\"",
"if",
"namespace",
"is",
"None",
"else",
"namespace",
"if",
"self",
".",
"__SenseApiCall__",
"(",
"\"/metatag_name/{0}/distinct_values.json\"",
",",
"\"GET\"",
",",
"parameters",
"=",
"{",
"'namespace'",
":",
"ns",
"}",
")",
":",
"return",
"True",
"else",
":",
"self",
".",
"__error__",
"=",
"\"api call unsuccessful\"",
"return",
"False"
] | 51.2 | 29.866667 |
def logpdf(self, mu):
"""
Log PDF for t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.t.logpdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0) | [
"def",
"logpdf",
"(",
"self",
",",
"mu",
")",
":",
"if",
"self",
".",
"transform",
"is",
"not",
"None",
":",
"mu",
"=",
"self",
".",
"transform",
"(",
"mu",
")",
"return",
"ss",
".",
"t",
".",
"logpdf",
"(",
"mu",
",",
"df",
"=",
"self",
".",
"df0",
",",
"loc",
"=",
"self",
".",
"loc0",
",",
"scale",
"=",
"self",
".",
"scale0",
")"
] | 24.9375 | 19.5625 |
def _process_service_check(
self, data, url, tag_by_host=False, services_incl_filter=None, services_excl_filter=None, custom_tags=None
):
''' Report a service check, tagged by the service and the backend.
Statuses are defined in `STATUS_TO_SERVICE_CHECK` mapping.
'''
custom_tags = [] if custom_tags is None else custom_tags
service_name = data['pxname']
status = data['status']
haproxy_hostname = to_string(self.hostname)
check_hostname = haproxy_hostname if tag_by_host else ''
if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter):
return
if status in Services.STATUS_TO_SERVICE_CHECK:
service_check_tags = ["service:%s" % service_name]
service_check_tags.extend(custom_tags)
hostname = data['svname']
if data['back_or_front'] == Services.BACKEND:
service_check_tags.append('backend:%s' % hostname)
status = Services.STATUS_TO_SERVICE_CHECK[status]
message = "%s reported %s:%s %s" % (haproxy_hostname, service_name, hostname, status)
self.service_check(
self.SERVICE_CHECK_NAME, status, message=message, hostname=check_hostname, tags=service_check_tags
) | [
"def",
"_process_service_check",
"(",
"self",
",",
"data",
",",
"url",
",",
"tag_by_host",
"=",
"False",
",",
"services_incl_filter",
"=",
"None",
",",
"services_excl_filter",
"=",
"None",
",",
"custom_tags",
"=",
"None",
")",
":",
"custom_tags",
"=",
"[",
"]",
"if",
"custom_tags",
"is",
"None",
"else",
"custom_tags",
"service_name",
"=",
"data",
"[",
"'pxname'",
"]",
"status",
"=",
"data",
"[",
"'status'",
"]",
"haproxy_hostname",
"=",
"to_string",
"(",
"self",
".",
"hostname",
")",
"check_hostname",
"=",
"haproxy_hostname",
"if",
"tag_by_host",
"else",
"''",
"if",
"self",
".",
"_is_service_excl_filtered",
"(",
"service_name",
",",
"services_incl_filter",
",",
"services_excl_filter",
")",
":",
"return",
"if",
"status",
"in",
"Services",
".",
"STATUS_TO_SERVICE_CHECK",
":",
"service_check_tags",
"=",
"[",
"\"service:%s\"",
"%",
"service_name",
"]",
"service_check_tags",
".",
"extend",
"(",
"custom_tags",
")",
"hostname",
"=",
"data",
"[",
"'svname'",
"]",
"if",
"data",
"[",
"'back_or_front'",
"]",
"==",
"Services",
".",
"BACKEND",
":",
"service_check_tags",
".",
"append",
"(",
"'backend:%s'",
"%",
"hostname",
")",
"status",
"=",
"Services",
".",
"STATUS_TO_SERVICE_CHECK",
"[",
"status",
"]",
"message",
"=",
"\"%s reported %s:%s %s\"",
"%",
"(",
"haproxy_hostname",
",",
"service_name",
",",
"hostname",
",",
"status",
")",
"self",
".",
"service_check",
"(",
"self",
".",
"SERVICE_CHECK_NAME",
",",
"status",
",",
"message",
"=",
"message",
",",
"hostname",
"=",
"check_hostname",
",",
"tags",
"=",
"service_check_tags",
")"
] | 48.481481 | 28.407407 |
def sanitize(func):
""" NFC is the normalization form recommended by W3C. """
@functools.wraps(func)
def wrapper(*args, **kwargs):
return normalize('NFC', func(*args, **kwargs))
return wrapper | [
"def",
"sanitize",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"normalize",
"(",
"'NFC'",
",",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"wrapper"
] | 30.142857 | 16.857143 |
def nano_sub(bind, tables):
"""Nanomsg fanout sub. (Experimental)
This sub will use nanomsg to fanout the events.
:param bind: the zmq pub socket or zmq device socket.
:param tables: the events of tables to follow.
"""
logger = logging.getLogger("meepo.sub.nano_sub")
from nanomsg import Socket, PUB
pub_socket = Socket(PUB)
pub_socket.bind(bind)
def _sub(table):
for action in ("write", "update", "delete"):
def _sub(pk, action=action):
msg = bytes("%s_%s %s" % (table, action, pk), 'utf-8')
logger.debug("pub msg %s" % msg)
pub_socket.send(msg)
signal("%s_%s" % (table, action)).connect(_sub, weak=False)
for table in set(tables):
_sub(table) | [
"def",
"nano_sub",
"(",
"bind",
",",
"tables",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"meepo.sub.nano_sub\"",
")",
"from",
"nanomsg",
"import",
"Socket",
",",
"PUB",
"pub_socket",
"=",
"Socket",
"(",
"PUB",
")",
"pub_socket",
".",
"bind",
"(",
"bind",
")",
"def",
"_sub",
"(",
"table",
")",
":",
"for",
"action",
"in",
"(",
"\"write\"",
",",
"\"update\"",
",",
"\"delete\"",
")",
":",
"def",
"_sub",
"(",
"pk",
",",
"action",
"=",
"action",
")",
":",
"msg",
"=",
"bytes",
"(",
"\"%s_%s %s\"",
"%",
"(",
"table",
",",
"action",
",",
"pk",
")",
",",
"'utf-8'",
")",
"logger",
".",
"debug",
"(",
"\"pub msg %s\"",
"%",
"msg",
")",
"pub_socket",
".",
"send",
"(",
"msg",
")",
"signal",
"(",
"\"%s_%s\"",
"%",
"(",
"table",
",",
"action",
")",
")",
".",
"connect",
"(",
"_sub",
",",
"weak",
"=",
"False",
")",
"for",
"table",
"in",
"set",
"(",
"tables",
")",
":",
"_sub",
"(",
"table",
")"
] | 29.153846 | 19.692308 |
def get_privileges(self, application=None, name=None, params=None):
"""
`<TODO>`_
:arg application: Application name
:arg name: Privilege name
"""
return self.transport.perform_request(
"GET",
_make_path("_security", "privilege", application, name),
params=params,
) | [
"def",
"get_privileges",
"(",
"self",
",",
"application",
"=",
"None",
",",
"name",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"GET\"",
",",
"_make_path",
"(",
"\"_security\"",
",",
"\"privilege\"",
",",
"application",
",",
"name",
")",
",",
"params",
"=",
"params",
",",
")"
] | 29 | 16.666667 |
def get_broker_queue(self, code):
"""
获取股票的经纪队列
:param code: 股票代码
:return: (ret, bid_frame_table, ask_frame_table)或(ret, err_message)
ret == RET_OK 返回pd dataframe数据,数据列格式如下
ret != RET_OK 后面两项为错误字符串
bid_frame_table 经纪买盘数据
===================== =========== ==============================================================
参数 类型 说明
===================== =========== ==============================================================
code str 股票代码
bid_broker_id int 经纪买盘id
bid_broker_name str 经纪买盘名称
bid_broker_pos int 经纪档位
===================== =========== ==============================================================
ask_frame_table 经纪卖盘数据
===================== =========== ==============================================================
参数 类型 说明
===================== =========== ==============================================================
code str 股票代码
ask_broker_id int 经纪卖盘id
ask_broker_name str 经纪卖盘名称
ask_broker_pos int 经纪档位
===================== =========== ==============================================================
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
BrokerQueueQuery.pack_req, BrokerQueueQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, ret_msg, content = query_processor(**kargs)
if ret_code != RET_OK:
return ret_code, ret_msg, ret_msg
(_, bid_list, ask_list) = content
col_bid_list = [
'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
]
col_ask_list = [
'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
]
bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list)
ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list)
return RET_OK, bid_frame_table, ask_frame_table | [
"def",
"get_broker_queue",
"(",
"self",
",",
"code",
")",
":",
"if",
"code",
"is",
"None",
"or",
"is_str",
"(",
"code",
")",
"is",
"False",
":",
"error_str",
"=",
"ERROR_STR_PREFIX",
"+",
"\"the type of param in code is wrong\"",
"return",
"RET_ERROR",
",",
"error_str",
"query_processor",
"=",
"self",
".",
"_get_sync_query_processor",
"(",
"BrokerQueueQuery",
".",
"pack_req",
",",
"BrokerQueueQuery",
".",
"unpack_rsp",
")",
"kargs",
"=",
"{",
"\"code\"",
":",
"code",
",",
"\"conn_id\"",
":",
"self",
".",
"get_sync_conn_id",
"(",
")",
"}",
"ret_code",
",",
"ret_msg",
",",
"content",
"=",
"query_processor",
"(",
"*",
"*",
"kargs",
")",
"if",
"ret_code",
"!=",
"RET_OK",
":",
"return",
"ret_code",
",",
"ret_msg",
",",
"ret_msg",
"(",
"_",
",",
"bid_list",
",",
"ask_list",
")",
"=",
"content",
"col_bid_list",
"=",
"[",
"'code'",
",",
"'bid_broker_id'",
",",
"'bid_broker_name'",
",",
"'bid_broker_pos'",
"]",
"col_ask_list",
"=",
"[",
"'code'",
",",
"'ask_broker_id'",
",",
"'ask_broker_name'",
",",
"'ask_broker_pos'",
"]",
"bid_frame_table",
"=",
"pd",
".",
"DataFrame",
"(",
"bid_list",
",",
"columns",
"=",
"col_bid_list",
")",
"ask_frame_table",
"=",
"pd",
".",
"DataFrame",
"(",
"ask_list",
",",
"columns",
"=",
"col_ask_list",
")",
"return",
"RET_OK",
",",
"bid_frame_table",
",",
"ask_frame_table"
] | 44.610169 | 28.067797 |
def simple_beam_splitter(ax, p0, size=2.54, width=0.1, alpha=0,
format=None, **kwds):
r"""Draw a simple beam splitter."""
if format is None: format = 'k-'
a = size/2
b = width/2
x0 = [a, -a, -a, a, a]
y0 = [b, b, -b, -b, b]
cur_list = [(x0, y0)]
cur_list = rotate_and_traslate(cur_list, alpha, p0)
for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds) | [
"def",
"simple_beam_splitter",
"(",
"ax",
",",
"p0",
",",
"size",
"=",
"2.54",
",",
"width",
"=",
"0.1",
",",
"alpha",
"=",
"0",
",",
"format",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"format",
"is",
"None",
":",
"format",
"=",
"'k-'",
"a",
"=",
"size",
"/",
"2",
"b",
"=",
"width",
"/",
"2",
"x0",
"=",
"[",
"a",
",",
"-",
"a",
",",
"-",
"a",
",",
"a",
",",
"a",
"]",
"y0",
"=",
"[",
"b",
",",
"b",
",",
"-",
"b",
",",
"-",
"b",
",",
"b",
"]",
"cur_list",
"=",
"[",
"(",
"x0",
",",
"y0",
")",
"]",
"cur_list",
"=",
"rotate_and_traslate",
"(",
"cur_list",
",",
"alpha",
",",
"p0",
")",
"for",
"curi",
"in",
"cur_list",
":",
"ax",
".",
"plot",
"(",
"curi",
"[",
"0",
"]",
",",
"curi",
"[",
"1",
"]",
",",
"format",
",",
"*",
"*",
"kwds",
")"
] | 32.615385 | 18.769231 |
def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Check if we have the tped and the tfam files
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Check the plate bias file
if not os.path.isfile(args.loop_assoc):
msg = "%s: no such file" % args.loop_assoc
raise ProgramError(msg)
return True | [
"def",
"checkArgs",
"(",
"args",
")",
":",
"# Check if we have the tped and the tfam files",
"for",
"fileName",
"in",
"[",
"args",
".",
"bfile",
"+",
"i",
"for",
"i",
"in",
"[",
"\".bed\"",
",",
"\".bim\"",
",",
"\".fam\"",
"]",
"]",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fileName",
")",
":",
"msg",
"=",
"\"%(fileName)s: no such file\"",
"%",
"locals",
"(",
")",
"raise",
"ProgramError",
"(",
"msg",
")",
"# Check the plate bias file",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"args",
".",
"loop_assoc",
")",
":",
"msg",
"=",
"\"%s: no such file\"",
"%",
"args",
".",
"loop_assoc",
"raise",
"ProgramError",
"(",
"msg",
")",
"return",
"True"
] | 31.884615 | 20.384615 |
def return_secondary_learner(self):
"""Returns secondary learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
"""
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.secondary_learner_hyperparameters)
return estimator | [
"def",
"return_secondary_learner",
"(",
"self",
")",
":",
"estimator",
"=",
"self",
".",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"estimator",
"=",
"estimator",
".",
"set_params",
"(",
"*",
"*",
"self",
".",
"secondary_learner_hyperparameters",
")",
"return",
"estimator"
] | 39.888889 | 17.222222 |
def launch_notebook(argv=None):
"""
Launch a Jupyter Notebook, with custom Untitled filenames and
a prepopulated first cell with necessary boilerplate code.
Notes
-----
To populate the first cell, the function `new_notebook` imported
in notebook.services.contents needs to be monkey patched. Dirty
but functional. Same thing could be achieved with custom.js or
a ContentsManager subclass, but this is easier!
"""
try:
import nbformat.v4 as nbf
from notebook.notebookapp import NotebookApp
from notebook.services.contents import manager
from traitlets.config import Config
except ImportError:
sys.exit("ERROR: Jupyter Notebook not installed in this environment. "
"Try with `conda install ipython jupyter notebook`")
else:
nbf._original_new_notebook = nbf.new_notebook
def _prepopulate_nb_patch():
nb = nbf._original_new_notebook()
cell = nbf.new_code_cell("# Run this cell to complete Chimera initialization\n"
"from pychimera import enable_chimera, enable_chimera_inline, chimera_view\n"
"enable_chimera()\nenable_chimera_inline()\nimport chimera")
nb['cells'].append(cell)
return nb
manager.new_notebook = _prepopulate_nb_patch
app = NotebookApp()
c = Config()
c.FileContentsManager.untitled_notebook = "Untitled PyChimera Notebook"
app.update_config(c)
app.initialize(argv)
app.start() | [
"def",
"launch_notebook",
"(",
"argv",
"=",
"None",
")",
":",
"try",
":",
"import",
"nbformat",
".",
"v4",
"as",
"nbf",
"from",
"notebook",
".",
"notebookapp",
"import",
"NotebookApp",
"from",
"notebook",
".",
"services",
".",
"contents",
"import",
"manager",
"from",
"traitlets",
".",
"config",
"import",
"Config",
"except",
"ImportError",
":",
"sys",
".",
"exit",
"(",
"\"ERROR: Jupyter Notebook not installed in this environment. \"",
"\"Try with `conda install ipython jupyter notebook`\"",
")",
"else",
":",
"nbf",
".",
"_original_new_notebook",
"=",
"nbf",
".",
"new_notebook",
"def",
"_prepopulate_nb_patch",
"(",
")",
":",
"nb",
"=",
"nbf",
".",
"_original_new_notebook",
"(",
")",
"cell",
"=",
"nbf",
".",
"new_code_cell",
"(",
"\"# Run this cell to complete Chimera initialization\\n\"",
"\"from pychimera import enable_chimera, enable_chimera_inline, chimera_view\\n\"",
"\"enable_chimera()\\nenable_chimera_inline()\\nimport chimera\"",
")",
"nb",
"[",
"'cells'",
"]",
".",
"append",
"(",
"cell",
")",
"return",
"nb",
"manager",
".",
"new_notebook",
"=",
"_prepopulate_nb_patch",
"app",
"=",
"NotebookApp",
"(",
")",
"c",
"=",
"Config",
"(",
")",
"c",
".",
"FileContentsManager",
".",
"untitled_notebook",
"=",
"\"Untitled PyChimera Notebook\"",
"app",
".",
"update_config",
"(",
"c",
")",
"app",
".",
"initialize",
"(",
"argv",
")",
"app",
".",
"start",
"(",
")"
] | 43.25 | 21.916667 |
def Floor(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Applies the Floor operator to a vertex.
This maps a vertex to the biggest integer less than or equal to its value
:param input_vertex: the vertex to be floor'd
"""
return Double(context.jvm_view().FloorVertex, label, cast_to_double_vertex(input_vertex)) | [
"def",
"Floor",
"(",
"input_vertex",
":",
"vertex_constructor_param_types",
",",
"label",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Vertex",
":",
"return",
"Double",
"(",
"context",
".",
"jvm_view",
"(",
")",
".",
"FloorVertex",
",",
"label",
",",
"cast_to_double_vertex",
"(",
"input_vertex",
")",
")"
] | 46.625 | 23.875 |
def retinotopic_field_sign(m, element='vertices', retinotopy=Ellipsis, invert_field=False):
'''
retinotopic_field_sign(mesh) yields a property array of the field sign of every vertex in the
mesh m; this value may not be exactly 1 (same as VF) or -1 (mirror-image) but some value
in-between; this is because the field sign is calculated exactly (1, 0, or -1) for each triangle
in the mesh then is average onto the vertices. To get only the triangle field signs, use
retinotopic_field_sign(m, 'triangles').
The following options are accepted:
* element ('vertices') may be 'vertices' to specify that the vertex signs should be returned
or 'triangles' (or 'faces') to specify that the triangle field signs should be returned.
* retinotopy (Ellipsis) specifies the retinotopic dataset to be used. If se to 'empirical' or
'predicted', the retinotopy data is auto-detected from the given categories; if set to
Ellipsis, a property pair like 'polar_angle' and 'eccentricity' or 'lat' and 'lon' are
searched for using the as_retinotopy function; otherwise, this may be a retinotopy dataset
recognizable by as_retinotopy.
* invert_field (False) specifies that the inverse of the field sign should be returned.
'''
tsign = _retinotopic_field_sign_triangles(m, retinotopy)
t = m.tess if isinstance(m, geo.Mesh) or isinstance(m, geo.Topology) else m
if invert_field: tsign = -tsign
element = element.lower()
if element == 'triangles' or element == 'faces': return tsign
vfs = t.vertex_faces
vfs = np.asarray([np.mean(tsign[list(ii)]) if len(ii) > 0 else 0 for ii in vfs])
return vfs | [
"def",
"retinotopic_field_sign",
"(",
"m",
",",
"element",
"=",
"'vertices'",
",",
"retinotopy",
"=",
"Ellipsis",
",",
"invert_field",
"=",
"False",
")",
":",
"tsign",
"=",
"_retinotopic_field_sign_triangles",
"(",
"m",
",",
"retinotopy",
")",
"t",
"=",
"m",
".",
"tess",
"if",
"isinstance",
"(",
"m",
",",
"geo",
".",
"Mesh",
")",
"or",
"isinstance",
"(",
"m",
",",
"geo",
".",
"Topology",
")",
"else",
"m",
"if",
"invert_field",
":",
"tsign",
"=",
"-",
"tsign",
"element",
"=",
"element",
".",
"lower",
"(",
")",
"if",
"element",
"==",
"'triangles'",
"or",
"element",
"==",
"'faces'",
":",
"return",
"tsign",
"vfs",
"=",
"t",
".",
"vertex_faces",
"vfs",
"=",
"np",
".",
"asarray",
"(",
"[",
"np",
".",
"mean",
"(",
"tsign",
"[",
"list",
"(",
"ii",
")",
"]",
")",
"if",
"len",
"(",
"ii",
")",
">",
"0",
"else",
"0",
"for",
"ii",
"in",
"vfs",
"]",
")",
"return",
"vfs"
] | 64.192308 | 37.038462 |
def _add_global_counter(self):
"""Adds a global counter, called once for setup by @property global_step."""
assert self._global_step is None
# Force this into the top-level namescope. Instead of forcing top-level
# here, we could always call this in __init__() and then keep whatever
# namescopes are around then.
with self.g.as_default(), self.g.name_scope(None):
try:
self._global_step = self.g.get_tensor_by_name('global_step:0')
except KeyError:
self._global_step = tf.Variable(0, name='global_step', trainable=False) | [
"def",
"_add_global_counter",
"(",
"self",
")",
":",
"assert",
"self",
".",
"_global_step",
"is",
"None",
"# Force this into the top-level namescope. Instead of forcing top-level",
"# here, we could always call this in __init__() and then keep whatever",
"# namescopes are around then.",
"with",
"self",
".",
"g",
".",
"as_default",
"(",
")",
",",
"self",
".",
"g",
".",
"name_scope",
"(",
"None",
")",
":",
"try",
":",
"self",
".",
"_global_step",
"=",
"self",
".",
"g",
".",
"get_tensor_by_name",
"(",
"'global_step:0'",
")",
"except",
"KeyError",
":",
"self",
".",
"_global_step",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"name",
"=",
"'global_step'",
",",
"trainable",
"=",
"False",
")"
] | 46.916667 | 21.75 |
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if isinstance(caller, partial):
return partial(decorator, caller)
# otherwise assume caller is a function
first = inspect.getargspec(caller)[0][0] # first arg
evaldict = caller.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (caller.__name__, first),
'return decorator(_call_, %s)' % first,
evaldict, undecorated=caller, __wrapped__=caller,
doc=caller.__doc__, module=caller.__module__) | [
"def",
"decorator",
"(",
"caller",
",",
"func",
"=",
"None",
")",
":",
"if",
"func",
"is",
"not",
"None",
":",
"# returns a decorated function",
"evaldict",
"=",
"func",
".",
"func_globals",
".",
"copy",
"(",
")",
"evaldict",
"[",
"'_call_'",
"]",
"=",
"caller",
"evaldict",
"[",
"'_func_'",
"]",
"=",
"func",
"return",
"FunctionMaker",
".",
"create",
"(",
"func",
",",
"\"return _call_(_func_, %(shortsignature)s)\"",
",",
"evaldict",
",",
"undecorated",
"=",
"func",
",",
"__wrapped__",
"=",
"func",
")",
"else",
":",
"# returns a decorator",
"if",
"isinstance",
"(",
"caller",
",",
"partial",
")",
":",
"return",
"partial",
"(",
"decorator",
",",
"caller",
")",
"# otherwise assume caller is a function",
"first",
"=",
"inspect",
".",
"getargspec",
"(",
"caller",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# first arg",
"evaldict",
"=",
"caller",
".",
"func_globals",
".",
"copy",
"(",
")",
"evaldict",
"[",
"'_call_'",
"]",
"=",
"caller",
"evaldict",
"[",
"'decorator'",
"]",
"=",
"decorator",
"return",
"FunctionMaker",
".",
"create",
"(",
"'%s(%s)'",
"%",
"(",
"caller",
".",
"__name__",
",",
"first",
")",
",",
"'return decorator(_call_, %s)'",
"%",
"first",
",",
"evaldict",
",",
"undecorated",
"=",
"caller",
",",
"__wrapped__",
"=",
"caller",
",",
"doc",
"=",
"caller",
".",
"__doc__",
",",
"module",
"=",
"caller",
".",
"__module__",
")"
] | 43.76 | 9.76 |
def Parse(self, stat, file_object, knowledge_base):
"""Parse the History file."""
_, _ = stat, knowledge_base
# TODO(user): Convert this to use the far more intelligent plaso parser.
ie = IEParser(file_object)
for dat in ie.Parse():
yield rdf_webhistory.BrowserHistoryItem(
url=dat["url"],
domain=urlparse.urlparse(dat["url"]).netloc,
access_time=dat.get("mtime"),
program_name="Internet Explorer",
source_urn=file_object.urn) | [
"def",
"Parse",
"(",
"self",
",",
"stat",
",",
"file_object",
",",
"knowledge_base",
")",
":",
"_",
",",
"_",
"=",
"stat",
",",
"knowledge_base",
"# TODO(user): Convert this to use the far more intelligent plaso parser.",
"ie",
"=",
"IEParser",
"(",
"file_object",
")",
"for",
"dat",
"in",
"ie",
".",
"Parse",
"(",
")",
":",
"yield",
"rdf_webhistory",
".",
"BrowserHistoryItem",
"(",
"url",
"=",
"dat",
"[",
"\"url\"",
"]",
",",
"domain",
"=",
"urlparse",
".",
"urlparse",
"(",
"dat",
"[",
"\"url\"",
"]",
")",
".",
"netloc",
",",
"access_time",
"=",
"dat",
".",
"get",
"(",
"\"mtime\"",
")",
",",
"program_name",
"=",
"\"Internet Explorer\"",
",",
"source_urn",
"=",
"file_object",
".",
"urn",
")"
] | 40.916667 | 10.166667 |
def dhcp_request(iface=None, **kargs):
"""Send a DHCP discover request and return the answer"""
if conf.checkIPaddr != 0:
warning("conf.checkIPaddr is not 0, I may not be able to match the answer") # noqa: E501
if iface is None:
iface = conf.iface
fam, hw = get_if_raw_hwaddr(iface)
return srp1(Ether(dst="ff:ff:ff:ff:ff:ff") / IP(src="0.0.0.0", dst="255.255.255.255") / UDP(sport=68, dport=67) / # noqa: E501
BOOTP(chaddr=hw) / DHCP(options=[("message-type", "discover"), "end"]), iface=iface, **kargs) | [
"def",
"dhcp_request",
"(",
"iface",
"=",
"None",
",",
"*",
"*",
"kargs",
")",
":",
"if",
"conf",
".",
"checkIPaddr",
"!=",
"0",
":",
"warning",
"(",
"\"conf.checkIPaddr is not 0, I may not be able to match the answer\"",
")",
"# noqa: E501",
"if",
"iface",
"is",
"None",
":",
"iface",
"=",
"conf",
".",
"iface",
"fam",
",",
"hw",
"=",
"get_if_raw_hwaddr",
"(",
"iface",
")",
"return",
"srp1",
"(",
"Ether",
"(",
"dst",
"=",
"\"ff:ff:ff:ff:ff:ff\"",
")",
"/",
"IP",
"(",
"src",
"=",
"\"0.0.0.0\"",
",",
"dst",
"=",
"\"255.255.255.255\"",
")",
"/",
"UDP",
"(",
"sport",
"=",
"68",
",",
"dport",
"=",
"67",
")",
"/",
"# noqa: E501",
"BOOTP",
"(",
"chaddr",
"=",
"hw",
")",
"/",
"DHCP",
"(",
"options",
"=",
"[",
"(",
"\"message-type\"",
",",
"\"discover\"",
")",
",",
"\"end\"",
"]",
")",
",",
"iface",
"=",
"iface",
",",
"*",
"*",
"kargs",
")"
] | 61 | 29.444444 |
def filter(self, **kwargs):
"""
Returns a list of objects from the database.
The kwargs parameter can contain any number
of attributes. Only objects which contain all
listed attributes and in which all values match
for all listed attributes will be returned.
"""
from sqlalchemy import or_
Statement = self.get_model('statement')
Tag = self.get_model('tag')
session = self.Session()
page_size = kwargs.pop('page_size', 1000)
order_by = kwargs.pop('order_by', None)
tags = kwargs.pop('tags', [])
exclude_text = kwargs.pop('exclude_text', None)
exclude_text_words = kwargs.pop('exclude_text_words', [])
persona_not_startswith = kwargs.pop('persona_not_startswith', None)
search_text_contains = kwargs.pop('search_text_contains', None)
# Convert a single sting into a list if only one tag is provided
if type(tags) == str:
tags = [tags]
if len(kwargs) == 0:
statements = session.query(Statement).filter()
else:
statements = session.query(Statement).filter_by(**kwargs)
if tags:
statements = statements.join(Statement.tags).filter(
Tag.name.in_(tags)
)
if exclude_text:
statements = statements.filter(
~Statement.text.in_(exclude_text)
)
if exclude_text_words:
or_word_query = [
Statement.text.ilike('%' + word + '%') for word in exclude_text_words
]
statements = statements.filter(
~or_(*or_word_query)
)
if persona_not_startswith:
statements = statements.filter(
~Statement.persona.startswith('bot:')
)
if search_text_contains:
or_query = [
Statement.search_text.contains(word) for word in search_text_contains.split(' ')
]
statements = statements.filter(
or_(*or_query)
)
if order_by:
if 'created_at' in order_by:
index = order_by.index('created_at')
order_by[index] = Statement.created_at.asc()
statements = statements.order_by(*order_by)
total_statements = statements.count()
for start_index in range(0, total_statements, page_size):
for statement in statements.slice(start_index, start_index + page_size):
yield self.model_to_object(statement)
session.close() | [
"def",
"filter",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"sqlalchemy",
"import",
"or_",
"Statement",
"=",
"self",
".",
"get_model",
"(",
"'statement'",
")",
"Tag",
"=",
"self",
".",
"get_model",
"(",
"'tag'",
")",
"session",
"=",
"self",
".",
"Session",
"(",
")",
"page_size",
"=",
"kwargs",
".",
"pop",
"(",
"'page_size'",
",",
"1000",
")",
"order_by",
"=",
"kwargs",
".",
"pop",
"(",
"'order_by'",
",",
"None",
")",
"tags",
"=",
"kwargs",
".",
"pop",
"(",
"'tags'",
",",
"[",
"]",
")",
"exclude_text",
"=",
"kwargs",
".",
"pop",
"(",
"'exclude_text'",
",",
"None",
")",
"exclude_text_words",
"=",
"kwargs",
".",
"pop",
"(",
"'exclude_text_words'",
",",
"[",
"]",
")",
"persona_not_startswith",
"=",
"kwargs",
".",
"pop",
"(",
"'persona_not_startswith'",
",",
"None",
")",
"search_text_contains",
"=",
"kwargs",
".",
"pop",
"(",
"'search_text_contains'",
",",
"None",
")",
"# Convert a single sting into a list if only one tag is provided",
"if",
"type",
"(",
"tags",
")",
"==",
"str",
":",
"tags",
"=",
"[",
"tags",
"]",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"statements",
"=",
"session",
".",
"query",
"(",
"Statement",
")",
".",
"filter",
"(",
")",
"else",
":",
"statements",
"=",
"session",
".",
"query",
"(",
"Statement",
")",
".",
"filter_by",
"(",
"*",
"*",
"kwargs",
")",
"if",
"tags",
":",
"statements",
"=",
"statements",
".",
"join",
"(",
"Statement",
".",
"tags",
")",
".",
"filter",
"(",
"Tag",
".",
"name",
".",
"in_",
"(",
"tags",
")",
")",
"if",
"exclude_text",
":",
"statements",
"=",
"statements",
".",
"filter",
"(",
"~",
"Statement",
".",
"text",
".",
"in_",
"(",
"exclude_text",
")",
")",
"if",
"exclude_text_words",
":",
"or_word_query",
"=",
"[",
"Statement",
".",
"text",
".",
"ilike",
"(",
"'%'",
"+",
"word",
"+",
"'%'",
")",
"for",
"word",
"in",
"exclude_text_words",
"]",
"statements",
"=",
"statements",
".",
"filter",
"(",
"~",
"or_",
"(",
"*",
"or_word_query",
")",
")",
"if",
"persona_not_startswith",
":",
"statements",
"=",
"statements",
".",
"filter",
"(",
"~",
"Statement",
".",
"persona",
".",
"startswith",
"(",
"'bot:'",
")",
")",
"if",
"search_text_contains",
":",
"or_query",
"=",
"[",
"Statement",
".",
"search_text",
".",
"contains",
"(",
"word",
")",
"for",
"word",
"in",
"search_text_contains",
".",
"split",
"(",
"' '",
")",
"]",
"statements",
"=",
"statements",
".",
"filter",
"(",
"or_",
"(",
"*",
"or_query",
")",
")",
"if",
"order_by",
":",
"if",
"'created_at'",
"in",
"order_by",
":",
"index",
"=",
"order_by",
".",
"index",
"(",
"'created_at'",
")",
"order_by",
"[",
"index",
"]",
"=",
"Statement",
".",
"created_at",
".",
"asc",
"(",
")",
"statements",
"=",
"statements",
".",
"order_by",
"(",
"*",
"order_by",
")",
"total_statements",
"=",
"statements",
".",
"count",
"(",
")",
"for",
"start_index",
"in",
"range",
"(",
"0",
",",
"total_statements",
",",
"page_size",
")",
":",
"for",
"statement",
"in",
"statements",
".",
"slice",
"(",
"start_index",
",",
"start_index",
"+",
"page_size",
")",
":",
"yield",
"self",
".",
"model_to_object",
"(",
"statement",
")",
"session",
".",
"close",
"(",
")"
] | 32.666667 | 21.025641 |
def _compose(self, name, attributes):
"""Construct a style taking `attributes` from the column styles.
Parameters
----------
name : str
Name of main style (e.g., "header_").
attributes : set of str
Adopt these elements from the column styles.
Returns
-------
The composite style for `name`.
"""
name_style = _safe_get(self.init_style, name, elements.default(name))
if self.init_style is not None and name_style is not None:
result = {}
for col in self.columns:
cstyle = {k: v for k, v in self.style[col].items()
if k in attributes}
result[col] = dict(cstyle, **name_style)
return result | [
"def",
"_compose",
"(",
"self",
",",
"name",
",",
"attributes",
")",
":",
"name_style",
"=",
"_safe_get",
"(",
"self",
".",
"init_style",
",",
"name",
",",
"elements",
".",
"default",
"(",
"name",
")",
")",
"if",
"self",
".",
"init_style",
"is",
"not",
"None",
"and",
"name_style",
"is",
"not",
"None",
":",
"result",
"=",
"{",
"}",
"for",
"col",
"in",
"self",
".",
"columns",
":",
"cstyle",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"style",
"[",
"col",
"]",
".",
"items",
"(",
")",
"if",
"k",
"in",
"attributes",
"}",
"result",
"[",
"col",
"]",
"=",
"dict",
"(",
"cstyle",
",",
"*",
"*",
"name_style",
")",
"return",
"result"
] | 35.136364 | 17.272727 |
def process_rewards(self, rewards):
"""Clips, rounds, and changes to integer type.
Args:
rewards: numpy array of raw (float) rewards.
Returns:
processed_rewards: numpy array of np.int64
"""
min_reward, max_reward = self.reward_range
# Clips at min and max reward.
rewards = np.clip(rewards, min_reward, max_reward)
# Round to (nearest) int and convert to integral type.
rewards = np.around(rewards, decimals=0).astype(np.int64)
return rewards | [
"def",
"process_rewards",
"(",
"self",
",",
"rewards",
")",
":",
"min_reward",
",",
"max_reward",
"=",
"self",
".",
"reward_range",
"# Clips at min and max reward.",
"rewards",
"=",
"np",
".",
"clip",
"(",
"rewards",
",",
"min_reward",
",",
"max_reward",
")",
"# Round to (nearest) int and convert to integral type.",
"rewards",
"=",
"np",
".",
"around",
"(",
"rewards",
",",
"decimals",
"=",
"0",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"return",
"rewards"
] | 28.352941 | 19.352941 |
def value(self, datatype):
"""Return the :class:`SensorValue` for the given data type.
sensor.value(TELLSTICK_TEMPERATURE) is identical to calling
sensor.temperature().
"""
value = self.lib.tdSensorValue(
self.protocol, self.model, self.id, datatype)
return SensorValue(datatype, value['value'], value['timestamp']) | [
"def",
"value",
"(",
"self",
",",
"datatype",
")",
":",
"value",
"=",
"self",
".",
"lib",
".",
"tdSensorValue",
"(",
"self",
".",
"protocol",
",",
"self",
".",
"model",
",",
"self",
".",
"id",
",",
"datatype",
")",
"return",
"SensorValue",
"(",
"datatype",
",",
"value",
"[",
"'value'",
"]",
",",
"value",
"[",
"'timestamp'",
"]",
")"
] | 40.888889 | 15.777778 |
def _parse_text(self, page_text):
"""Extract the s2config and the content from the raw page text."""
# 1 sanitize: remove leading blank lines
# 2 separate "config text" from content, store content
# 3 convert config text + \n to obtain Meta, this is the config.
lines = page_text.split('\n')
i = 0
while lines[i].strip() == '':
i += 1
if i > 0: # i points to the first non-blank line. Else, i is 0, there are no leading blank lines
lines = lines[i:] #remove leading blank lines
i = 0
while lines[i].strip() != '':
i += 1
# i points to the first blank line
cfg_lines = '\n'.join(lines[0:i + 1]) #config lines, plus the empty line
md = markdown.Markdown(extensions=['meta','fenced_code', 'codehilite'],output_format="html5")
md.convert(cfg_lines) # need to trigger the conversion to obtain md.Meta
self._config = md.Meta
self._content = '\n'.join(lines[i+1:]) | [
"def",
"_parse_text",
"(",
"self",
",",
"page_text",
")",
":",
"# 1 sanitize: remove leading blank lines",
"# 2 separate \"config text\" from content, store content",
"# 3 convert config text + \\n to obtain Meta, this is the config.",
"lines",
"=",
"page_text",
".",
"split",
"(",
"'\\n'",
")",
"i",
"=",
"0",
"while",
"lines",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"==",
"''",
":",
"i",
"+=",
"1",
"if",
"i",
">",
"0",
":",
"# i points to the first non-blank line. Else, i is 0, there are no leading blank lines",
"lines",
"=",
"lines",
"[",
"i",
":",
"]",
"#remove leading blank lines",
"i",
"=",
"0",
"while",
"lines",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"i",
"+=",
"1",
"# i points to the first blank line",
"cfg_lines",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
"0",
":",
"i",
"+",
"1",
"]",
")",
"#config lines, plus the empty line",
"md",
"=",
"markdown",
".",
"Markdown",
"(",
"extensions",
"=",
"[",
"'meta'",
",",
"'fenced_code'",
",",
"'codehilite'",
"]",
",",
"output_format",
"=",
"\"html5\"",
")",
"md",
".",
"convert",
"(",
"cfg_lines",
")",
"# need to trigger the conversion to obtain md.Meta",
"self",
".",
"_config",
"=",
"md",
".",
"Meta",
"self",
".",
"_content",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
"i",
"+",
"1",
":",
"]",
")"
] | 40.48 | 25.04 |
def friendly_type_name(self) -> str:
"""
:return: friendly type name for the end-user
:rtype: str
"""
_constraints_set = []
if self._must_be_dir:
_constraints_set.append('must be a directory')
if self._must_be_file:
_constraints_set.append('must be a file')
if self._must_exist:
_constraints_set.append('must already exist')
_constraints_as_str = ' (' + ', '.join(_constraints_set) + ')' if _constraints_set else ''
return 'path' + _constraints_as_str | [
"def",
"friendly_type_name",
"(",
"self",
")",
"->",
"str",
":",
"_constraints_set",
"=",
"[",
"]",
"if",
"self",
".",
"_must_be_dir",
":",
"_constraints_set",
".",
"append",
"(",
"'must be a directory'",
")",
"if",
"self",
".",
"_must_be_file",
":",
"_constraints_set",
".",
"append",
"(",
"'must be a file'",
")",
"if",
"self",
".",
"_must_exist",
":",
"_constraints_set",
".",
"append",
"(",
"'must already exist'",
")",
"_constraints_as_str",
"=",
"' ('",
"+",
"', '",
".",
"join",
"(",
"_constraints_set",
")",
"+",
"')'",
"if",
"_constraints_set",
"else",
"''",
"return",
"'path'",
"+",
"_constraints_as_str"
] | 39.571429 | 13.571429 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.