text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def set_who(voevent, date=None, author_ivorn=None):
    """Populate the minimal 'Who' attributes: authoring date, AuthorIVORN.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        date(datetime.datetime): Date of authoring. NB Microseconds are
            ignored, as per the VOEvent spec.
        author_ivorn(str): Short author identifier, e.g.
            ``voevent.4pisky.org/ALARRM``. Note that the prefix
            ``ivo://`` will be prepended internally.
    """
    if author_ivorn is not None:
        voevent.Who.AuthorIVORN = 'ivo://' + author_ivorn
    if date is not None:
        # Drop sub-second precision before serializing, per the spec.
        voevent.Who.Date = date.replace(microsecond=0).isoformat()
[ "def", "set_who", "(", "voevent", ",", "date", "=", "None", ",", "author_ivorn", "=", "None", ")", ":", "if", "author_ivorn", "is", "not", "None", ":", "voevent", ".", "Who", ".", "AuthorIVORN", "=", "''", ".", "join", "(", "(", "'ivo://'", ",", "author_ivorn", ")", ")", "if", "date", "is", "not", "None", ":", "voevent", ".", "Who", ".", "Date", "=", "date", ".", "replace", "(", "microsecond", "=", "0", ")", ".", "isoformat", "(", ")" ]
42.5625
19.375
def with_metaclass(meta, *bases):
    """Create a base class with *meta* as its metaclass (py2/py3 compatible).

    copied from
    https://github.com/Byron/bcore/blob/master/src/python/butility/future.py#L15
    """
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, nbases, d):
            # The helper class itself is created below with nbases=None;
            # real subclasses are then rebuilt with the target metaclass.
            if nbases is None:
                return type.__new__(cls, name, (), d)
            # There may be clients who rely on this attribute to be set to a
            # reasonable value, which is why we set the __metaclass__
            # attribute explicitly.
            # BUG FIX: the guard previously checked '___metaclass__' (three
            # leading underscores) — a key nothing sets — so an explicitly
            # provided __metaclass__ entry was always overwritten.
            if not PY3 and '__metaclass__' not in d:
                d['__metaclass__'] = meta
            return meta(name, bases, d)
    return metaclass(meta.__name__ + 'Helper', None, {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "class", "metaclass", "(", "meta", ")", ":", "__call__", "=", "type", ".", "__call__", "__init__", "=", "type", ".", "__init__", "def", "__new__", "(", "cls", ",", "name", ",", "nbases", ",", "d", ")", ":", "if", "nbases", "is", "None", ":", "return", "type", ".", "__new__", "(", "cls", ",", "name", ",", "(", ")", ",", "d", ")", "# There may be clients who rely on this attribute to be set to a reasonable value, which is why", "# we set the __metaclass__ attribute explicitly", "if", "not", "PY3", "and", "'___metaclass__'", "not", "in", "d", ":", "d", "[", "'__metaclass__'", "]", "=", "meta", "return", "meta", "(", "name", ",", "bases", ",", "d", ")", "return", "metaclass", "(", "meta", ".", "__name__", "+", "'Helper'", ",", "None", ",", "{", "}", ")" ]
46.733333
14.6
def draw(self, data):
    """Display decoded characters at the current cursor position and
    advances the cursor if :data:`~pyte.modes.DECAWM` is set.

    :param str data: text to display.

    .. versionchanged:: 0.5.0

       Character width is taken into account. Specifically, zero-width
       and unprintable characters do not affect screen state. Full-width
       characters are rendered into two consecutive character containers.
    """
    # Map input through the active charset (G1 when shifted, else G0).
    data = data.translate(
        self.g1_charset if self.charset else self.g0_charset)

    for char in data:
        char_width = wcwidth(char)

        # If this was the last column in a line and auto wrap mode is
        # enabled, move the cursor to the beginning of the next line,
        # otherwise replace characters already displayed with newly
        # entered.
        if self.cursor.x == self.columns:
            if mo.DECAWM in self.mode:
                self.dirty.add(self.cursor.y)
                self.carriage_return()
                self.linefeed()
            elif char_width > 0:
                # No auto-wrap: back up so the new char overwrites the
                # last cell(s) of the line.
                self.cursor.x -= char_width

        # If Insert mode is set, new characters move old characters to
        # the right, otherwise terminal is in Replace mode and new
        # characters replace old characters at cursor position.
        if mo.IRM in self.mode and char_width > 0:
            self.insert_characters(char_width)

        line = self.buffer[self.cursor.y]
        if char_width == 1:
            line[self.cursor.x] = self.cursor.attrs._replace(data=char)
        elif char_width == 2:
            # A two-cell character has a stub slot after it.
            line[self.cursor.x] = self.cursor.attrs._replace(data=char)
            if self.cursor.x + 1 < self.columns:
                line[self.cursor.x + 1] = self.cursor.attrs \
                    ._replace(data="")
        elif char_width == 0 and unicodedata.combining(char):
            # A zero-cell character is combined with the previous
            # character either on this or preceding line.
            if self.cursor.x:
                last = line[self.cursor.x - 1]
                normalized = unicodedata.normalize("NFC", last.data + char)
                line[self.cursor.x - 1] = last._replace(data=normalized)
            elif self.cursor.y:
                last = self.buffer[self.cursor.y - 1][self.columns - 1]
                normalized = unicodedata.normalize("NFC", last.data + char)
                self.buffer[self.cursor.y - 1][self.columns - 1] = \
                    last._replace(data=normalized)
            # NOTE(review): at (0, 0) a combining char is silently
            # dropped — neither branch applies; confirm this is intended.
        else:
            # Unprintable character or doesn't advance the cursor; note
            # this abandons the REST of `data`, not just this char.
            break

        # .. note:: We can't use :meth:`cursor_forward()`, because that
        #           way, we'll never know when to linefeed.
        if char_width > 0:
            self.cursor.x = min(self.cursor.x + char_width, self.columns)

    self.dirty.add(self.cursor.y)
[ "def", "draw", "(", "self", ",", "data", ")", ":", "data", "=", "data", ".", "translate", "(", "self", ".", "g1_charset", "if", "self", ".", "charset", "else", "self", ".", "g0_charset", ")", "for", "char", "in", "data", ":", "char_width", "=", "wcwidth", "(", "char", ")", "# If this was the last column in a line and auto wrap mode is", "# enabled, move the cursor to the beginning of the next line,", "# otherwise replace characters already displayed with newly", "# entered.", "if", "self", ".", "cursor", ".", "x", "==", "self", ".", "columns", ":", "if", "mo", ".", "DECAWM", "in", "self", ".", "mode", ":", "self", ".", "dirty", ".", "add", "(", "self", ".", "cursor", ".", "y", ")", "self", ".", "carriage_return", "(", ")", "self", ".", "linefeed", "(", ")", "elif", "char_width", ">", "0", ":", "self", ".", "cursor", ".", "x", "-=", "char_width", "# If Insert mode is set, new characters move old characters to", "# the right, otherwise terminal is in Replace mode and new", "# characters replace old characters at cursor position.", "if", "mo", ".", "IRM", "in", "self", ".", "mode", "and", "char_width", ">", "0", ":", "self", ".", "insert_characters", "(", "char_width", ")", "line", "=", "self", ".", "buffer", "[", "self", ".", "cursor", ".", "y", "]", "if", "char_width", "==", "1", ":", "line", "[", "self", ".", "cursor", ".", "x", "]", "=", "self", ".", "cursor", ".", "attrs", ".", "_replace", "(", "data", "=", "char", ")", "elif", "char_width", "==", "2", ":", "# A two-cell character has a stub slot after it.", "line", "[", "self", ".", "cursor", ".", "x", "]", "=", "self", ".", "cursor", ".", "attrs", ".", "_replace", "(", "data", "=", "char", ")", "if", "self", ".", "cursor", ".", "x", "+", "1", "<", "self", ".", "columns", ":", "line", "[", "self", ".", "cursor", ".", "x", "+", "1", "]", "=", "self", ".", "cursor", ".", "attrs", ".", "_replace", "(", "data", "=", "\"\"", ")", "elif", "char_width", "==", "0", "and", "unicodedata", ".", "combining", "(", "char", 
")", ":", "# A zero-cell character is combined with the previous", "# character either on this or preceeding line.", "if", "self", ".", "cursor", ".", "x", ":", "last", "=", "line", "[", "self", ".", "cursor", ".", "x", "-", "1", "]", "normalized", "=", "unicodedata", ".", "normalize", "(", "\"NFC\"", ",", "last", ".", "data", "+", "char", ")", "line", "[", "self", ".", "cursor", ".", "x", "-", "1", "]", "=", "last", ".", "_replace", "(", "data", "=", "normalized", ")", "elif", "self", ".", "cursor", ".", "y", ":", "last", "=", "self", ".", "buffer", "[", "self", ".", "cursor", ".", "y", "-", "1", "]", "[", "self", ".", "columns", "-", "1", "]", "normalized", "=", "unicodedata", ".", "normalize", "(", "\"NFC\"", ",", "last", ".", "data", "+", "char", ")", "self", ".", "buffer", "[", "self", ".", "cursor", ".", "y", "-", "1", "]", "[", "self", ".", "columns", "-", "1", "]", "=", "last", ".", "_replace", "(", "data", "=", "normalized", ")", "else", ":", "break", "# Unprintable character or doesn't advance the cursor.", "# .. note:: We can't use :meth:`cursor_forward()`, because that", "# way, we'll never know when to linefeed.", "if", "char_width", ">", "0", ":", "self", ".", "cursor", ".", "x", "=", "min", "(", "self", ".", "cursor", ".", "x", "+", "char_width", ",", "self", ".", "columns", ")", "self", ".", "dirty", ".", "add", "(", "self", ".", "cursor", ".", "y", ")" ]
46.742424
21.969697
def vcenter_connect(self):
    """
    Attempt to connect to vCenter

    :return: a connected service instance on success; exits the process
        with status 5 when the vCenter login is rejected.
    """
    try:
        si = SmartConnect(
            host=self.host,
            user=self.user,
            pwd=self.password,
            port=self.port)
        # disconnect vc
        atexit.register(Disconnect, si)
    except vim.fault.InvalidLogin as e:
        # FIX: was a Python 2 `print` statement, which is a SyntaxError
        # under Python 3; use the print() function instead.
        print("Unable to connect to vcenter because of: {}".format(e.msg))
        sys.exit(5)
    return si
[ "def", "vcenter_connect", "(", "self", ")", ":", "try", ":", "si", "=", "SmartConnect", "(", "host", "=", "self", ".", "host", ",", "user", "=", "self", ".", "user", ",", "pwd", "=", "self", ".", "password", ",", "port", "=", "self", ".", "port", ")", "# disconnect vc", "atexit", ".", "register", "(", "Disconnect", ",", "si", ")", "except", "vim", ".", "fault", ".", "InvalidLogin", "as", "e", ":", "print", "\"Unable to connect to vcenter because of: {}\"", ".", "format", "(", "e", ".", "msg", ")", "sys", ".", "exit", "(", "5", ")", "return", "si" ]
29.411765
12.235294
def refresh_image(self):
    """Get the most recent camera image."""
    # Substitute this device's id into the timeline-image URL template.
    image_url = CONST.TIMELINE_IMAGES_ID_URL.replace('$DEVID$',
                                                     self.device_id)
    resp = self._abode.send_request("get", image_url)
    _LOGGER.debug("Get image response: %s", resp.text)
    return self.update_image_location(json.loads(resp.text))
[ "def", "refresh_image", "(", "self", ")", ":", "url", "=", "str", ".", "replace", "(", "CONST", ".", "TIMELINE_IMAGES_ID_URL", ",", "'$DEVID$'", ",", "self", ".", "device_id", ")", "response", "=", "self", ".", "_abode", ".", "send_request", "(", "\"get\"", ",", "url", ")", "_LOGGER", ".", "debug", "(", "\"Get image response: %s\"", ",", "response", ".", "text", ")", "return", "self", ".", "update_image_location", "(", "json", ".", "loads", "(", "response", ".", "text", ")", ")" ]
40.333333
20.888889
def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:
    """
    Converts an XML tree of a DOCX file to string contents.

    Args:
        xml: raw XML text
        config: :class:`TextProcessingConfig` control object

    Returns:
        contents as a string
    """
    # Parse once, then walk the tree starting at depth 0.
    document_root = ElementTree.fromstring(xml)
    return docx_text_from_xml_node(document_root, 0, config)
[ "def", "docx_text_from_xml", "(", "xml", ":", "str", ",", "config", ":", "TextProcessingConfig", ")", "->", "str", ":", "root", "=", "ElementTree", ".", "fromstring", "(", "xml", ")", "return", "docx_text_from_xml_node", "(", "root", ",", "0", ",", "config", ")" ]
28.153846
19.076923
def visit(self, node):
    """Visit *node*, drain the token stream, then call ``finalize``.

    Returns whatever ``finalize()`` produced, unless it returned the
    visitor itself (in which case ``None`` is returned implicitly).
    """
    # Exhaust the generator purely for its side effects.
    for _ in self.itervisit(node):
        pass
    outcome = self.finalize()
    if outcome is not self:
        return outcome
[ "def", "visit", "(", "self", ",", "node", ")", ":", "for", "token", "in", "self", ".", "itervisit", "(", "node", ")", ":", "pass", "result", "=", "self", ".", "finalize", "(", ")", "if", "result", "is", "not", "self", ":", "return", "result" ]
29.555556
17.777778
def _deep_tuple(self, x): """Converts nested `tuple`, `list`, or `dict` to nested `tuple`.""" if isinstance(x, dict): return self._deep_tuple(tuple(sorted(x.items()))) elif isinstance(x, (list, tuple)): return tuple(map(self._deep_tuple, x)) return x
[ "def", "_deep_tuple", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "self", ".", "_deep_tuple", "(", "tuple", "(", "sorted", "(", "x", ".", "items", "(", ")", ")", ")", ")", "elif", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "tuple", "(", "map", "(", "self", ".", "_deep_tuple", ",", "x", ")", ")", "return", "x" ]
34
14.625
def putkeyword(self, keyword, value, makesubrecord=False):
    """Put the value of a column keyword.
    (see :func:`table.putcolkeyword`)"""
    # Pure delegation: the owning table does the actual write.
    table = self._table
    return table.putcolkeyword(self._column, keyword, value, makesubrecord)
[ "def", "putkeyword", "(", "self", ",", "keyword", ",", "value", ",", "makesubrecord", "=", "False", ")", ":", "return", "self", ".", "_table", ".", "putcolkeyword", "(", "self", ".", "_column", ",", "keyword", ",", "value", ",", "makesubrecord", ")" ]
58
15.75
def get_config(self, key_name):
    """Return configuration value.

    Args:
        key_name (str): configuration key

    Returns:
        The value stored in ``self.config`` for the key (even if it is
        ``None``); when absent, the default supplied by this component's
        Configuration handler class.
    """
    try:
        return self.config[key_name]
    except KeyError:
        return self.Configuration.default(key_name, inst=self)
[ "def", "get_config", "(", "self", ",", "key_name", ")", ":", "if", "key_name", "in", "self", ".", "config", ":", "return", "self", ".", "config", ".", "get", "(", "key_name", ")", "return", "self", ".", "Configuration", ".", "default", "(", "key_name", ",", "inst", "=", "self", ")" ]
31.6875
19.1875
def find_gui_and_backend(gui=None):
    """Given a gui string return the gui and mpl backend.

    Parameters
    ----------
    gui : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').

    Returns
    -------
    A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
    'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
    """
    import matplotlib

    explicit_request = bool(gui) and gui != 'auto'
    if explicit_request:
        # A concrete toolkit was requested: map it straight to a backend.
        backend = backends[gui]
    else:
        # Fall back to matplotlib's configured backend, then derive the
        # gui selection IPython needs to activate its inputhook.
        backend = matplotlib.rcParams['backend']
        gui = backend2gui.get(backend, None)
    return gui, backend
[ "def", "find_gui_and_backend", "(", "gui", "=", "None", ")", ":", "import", "matplotlib", "if", "gui", "and", "gui", "!=", "'auto'", ":", "# select backend based on requested gui", "backend", "=", "backends", "[", "gui", "]", "else", ":", "backend", "=", "matplotlib", ".", "rcParams", "[", "'backend'", "]", "# In this case, we need to find what the appropriate gui selection call", "# should be for IPython, so we can activate inputhook accordingly", "gui", "=", "backend2gui", ".", "get", "(", "backend", ",", "None", ")", "return", "gui", ",", "backend" ]
30.56
22.32
def cancel(self, campaign_id):
    """
    Cancel a Regular or Plain-Text Campaign after you send, before all of
    your recipients receive it. This feature is included with MailChimp Pro.

    :param campaign_id: The unique id for the campaign.
    :type campaign_id: :py:class:`str`
    """
    self.campaign_id = campaign_id
    cancel_url = self._build_path(campaign_id, 'actions/cancel-send')
    return self._mc_client._post(url=cancel_url)
[ "def", "cancel", "(", "self", ",", "campaign_id", ")", ":", "self", ".", "campaign_id", "=", "campaign_id", "return", "self", ".", "_mc_client", ".", "_post", "(", "url", "=", "self", ".", "_build_path", "(", "campaign_id", ",", "'actions/cancel-send'", ")", ")" ]
40.818182
20.636364
def to_dict(self):
    '''Save this preceding condition into a dictionary.'''
    result = super(Preceding, self).to_dict()
    preceding = {}
    # Zero timeout is the default and is omitted from the payload.
    if self.timeout != 0:
        preceding['timeout'] = self.timeout
    if self.sending_timing:
        preceding['sendingTiming'] = self.sending_timing
    components = [pc.to_dict() for pc in self._preceding_components]
    if components:
        preceding['precedingComponents'] = components
    result['condition'] = {'preceding': preceding}
    return result
[ "def", "to_dict", "(", "self", ")", ":", "d", "=", "super", "(", "Preceding", ",", "self", ")", ".", "to_dict", "(", ")", "e", "=", "{", "}", "if", "self", ".", "timeout", "!=", "0", ":", "e", "[", "'timeout'", "]", "=", "self", ".", "timeout", "if", "self", ".", "sending_timing", ":", "e", "[", "'sendingTiming'", "]", "=", "self", ".", "sending_timing", "pcs", "=", "[", "]", "for", "pc", "in", "self", ".", "_preceding_components", ":", "pcs", ".", "append", "(", "pc", ".", "to_dict", "(", ")", ")", "if", "pcs", ":", "e", "[", "'precedingComponents'", "]", "=", "pcs", "d", "[", "'condition'", "]", "=", "{", "'preceding'", ":", "e", "}", "return", "d" ]
33.333333
12.8
def diet_expert(x, hidden_size, params):
    """A two-layer feed-forward network with relu activation on hidden layer.

    Uses diet variables. Recomputes hidden layer on backprop to save
    activation memory.

    Args:
        x: a Tensor with shape [batch, io_size]
        hidden_size: an integer
        params: a diet variable HParams object.

    Returns:
        a Tensor with shape [batch, io_size]
    """
    @fn_with_diet_vars(params)
    def _expert(inputs):
        io_size = inputs.get_shape().as_list()[-1]
        hidden = tf.layers.dense(
            inputs, hidden_size, activation=tf.nn.relu, use_bias=False)
        outputs = tf.layers.dense(hidden, io_size, use_bias=False)
        # Rescale the output by 1/sqrt(io_size * hidden_size).
        return outputs * tf.rsqrt(tf.to_float(io_size * hidden_size))

    return _expert(x)
[ "def", "diet_expert", "(", "x", ",", "hidden_size", ",", "params", ")", ":", "@", "fn_with_diet_vars", "(", "params", ")", "def", "diet_expert_internal", "(", "x", ")", ":", "dim", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", "h", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "hidden_size", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ",", "use_bias", "=", "False", ")", "y", "=", "tf", ".", "layers", ".", "dense", "(", "h", ",", "dim", ",", "use_bias", "=", "False", ")", "y", "*=", "tf", ".", "rsqrt", "(", "tf", ".", "to_float", "(", "dim", "*", "hidden_size", ")", ")", "return", "y", "return", "diet_expert_internal", "(", "x", ")" ]
28.708333
18.291667
def _handle_root():
    """Handles index.html requests.

    Returns a (page bytes, MIME type) tuple for the profiler page.
    """
    module_dir = os.path.dirname(__file__)
    with io.open(os.path.join(module_dir, _PROFILE_HTML), 'rb') as page:
        payload = page.read()
    return payload, 'text/html'
[ "def", "_handle_root", "(", ")", ":", "res_filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "_PROFILE_HTML", ")", "with", "io", ".", "open", "(", "res_filename", ",", "'rb'", ")", "as", "res_file", ":", "content", "=", "res_file", ".", "read", "(", ")", "return", "content", ",", "'text/html'" ]
39.285714
8.428571
def set_color_in_grid(self, color_in_grid):
    """Set the pixel at the position of the :paramref:`color_in_grid`
    to its color.

    :param color_in_grid: must have the following attributes:

      - ``color`` is the :ref:`color <png-color>` to set the pixel to
      - ``x`` is the x position of the pixel
      - ``y`` is the y position of the pixel

    .. seealso:: :meth:`set_pixel`, :meth:`set_colors_in_grid`
    """
    x, y, color = color_in_grid.x, color_in_grid.y, color_in_grid.color
    self._set_pixel_and_convert_color(x, y, color)
[ "def", "set_color_in_grid", "(", "self", ",", "color_in_grid", ")", ":", "self", ".", "_set_pixel_and_convert_color", "(", "color_in_grid", ".", "x", ",", "color_in_grid", ".", "y", ",", "color_in_grid", ".", "color", ")" ]
39.714286
19.285714
async def set_max_relative_mod(self, max_mod, timeout=OTGW_DEFAULT_TIMEOUT):
    """
    Override the maximum relative modulation from the thermostat.
    Valid values are 0 through 100. Clear the setting by specifying a
    non-numeric value.
    Return the newly accepted value, '-' if a previous value was cleared,
    or None on failure.

    This method is a coroutine
    """
    # Reject out-of-range integers up front; non-int values pass through
    # (the gateway treats them as a request to clear the override).
    if isinstance(max_mod, int) and not 0 <= max_mod <= 100:
        return None
    ret = await self._wait_for_cmd(OTGW_CMD_MAX_MOD, max_mod, timeout)
    if ret not in ['-', None]:
        ret = int(ret)
    # '-' means the previous override was cleared.
    new_value = None if ret == '-' else ret
    self._update_status({DATA_SLAVE_MAX_RELATIVE_MOD: new_value})
    return ret
[ "async", "def", "set_max_relative_mod", "(", "self", ",", "max_mod", ",", "timeout", "=", "OTGW_DEFAULT_TIMEOUT", ")", ":", "if", "isinstance", "(", "max_mod", ",", "int", ")", "and", "not", "0", "<=", "max_mod", "<=", "100", ":", "return", "None", "cmd", "=", "OTGW_CMD_MAX_MOD", "status", "=", "{", "}", "ret", "=", "await", "self", ".", "_wait_for_cmd", "(", "cmd", ",", "max_mod", ",", "timeout", ")", "if", "ret", "not", "in", "[", "'-'", ",", "None", "]", ":", "ret", "=", "int", "(", "ret", ")", "if", "ret", "==", "'-'", ":", "status", "[", "DATA_SLAVE_MAX_RELATIVE_MOD", "]", "=", "None", "else", ":", "status", "[", "DATA_SLAVE_MAX_RELATIVE_MOD", "]", "=", "ret", "self", ".", "_update_status", "(", "status", ")", "return", "ret" ]
37.083333
16.333333
def install_caller_instruction(self, token_type="Unrestricted",
                               transaction_id=None):
    """
    Set us up as a caller
    This will install a new caller_token into the FPS section.
    This should really only be called to regenerate the caller token.
    """
    response = self.install_payment_instruction(
        "MyRole=='Caller';", token_type=token_type,
        transaction_id=transaction_id)
    body = response.read()
    # Guard clause: anything but HTTP 200 is a hard failure.
    if response.status != 200:
        raise FPSResponseError(response.status, response.reason, body)
    rs = ResultSet()
    xml.sax.parseString(body, handler.XmlHandler(rs, self))
    caller_token = rs.TokenId
    try:
        # Prefer the system-wide config; fall back to the user config
        # when we lack write permission.
        boto.config.save_system_option("FPS", "caller_token",
                                       caller_token)
    except IOError:
        boto.config.save_user_option("FPS", "caller_token",
                                     caller_token)
    return caller_token
[ "def", "install_caller_instruction", "(", "self", ",", "token_type", "=", "\"Unrestricted\"", ",", "transaction_id", "=", "None", ")", ":", "response", "=", "self", ".", "install_payment_instruction", "(", "\"MyRole=='Caller';\"", ",", "token_type", "=", "token_type", ",", "transaction_id", "=", "transaction_id", ")", "body", "=", "response", ".", "read", "(", ")", "if", "(", "response", ".", "status", "==", "200", ")", ":", "rs", "=", "ResultSet", "(", ")", "h", "=", "handler", ".", "XmlHandler", "(", "rs", ",", "self", ")", "xml", ".", "sax", ".", "parseString", "(", "body", ",", "h", ")", "caller_token", "=", "rs", ".", "TokenId", "try", ":", "boto", ".", "config", ".", "save_system_option", "(", "\"FPS\"", ",", "\"caller_token\"", ",", "caller_token", ")", "except", "(", "IOError", ")", ":", "boto", ".", "config", ".", "save_user_option", "(", "\"FPS\"", ",", "\"caller_token\"", ",", "caller_token", ")", "return", "caller_token", "else", ":", "raise", "FPSResponseError", "(", "response", ".", "status", ",", "response", ".", "reason", ",", "body", ")" ]
46.68
18.04
def OnViewFrozen(self, event):
    """Toggle the frozen-status cell view and redraw the grid."""
    grid = self.grid
    grid._view_frozen = not grid._view_frozen
    # Cached cell renderings are stale once the view mode flips.
    grid.grid_renderer.cell_cache.clear()
    grid.ForceRefresh()
    event.Skip()
[ "def", "OnViewFrozen", "(", "self", ",", "event", ")", ":", "self", ".", "grid", ".", "_view_frozen", "=", "not", "self", ".", "grid", ".", "_view_frozen", "self", ".", "grid", ".", "grid_renderer", ".", "cell_cache", ".", "clear", "(", ")", "self", ".", "grid", ".", "ForceRefresh", "(", ")", "event", ".", "Skip", "(", ")" ]
25.777778
20.777778
def smiles_to_compound(smiles, assign_descriptors=True):
    """Convert SMILES text to compound object

    Raises:
        ValueError: SMILES with unsupported format
    """
    parser = molecule()
    try:
        for symbol in iter(smiles):
            parser(symbol)
        # Feeding None finalizes the parse; presumably the second tuple
        # element is auxiliary state — TODO confirm against `molecule()`.
        compound, _ = parser(None)
    except KeyError as err:
        raise ValueError("Unsupported Symbol: {}".format(err))
    # Node 0 looks like a parser-internal placeholder; it is removed
    # before returning — verify against the molecule() implementation.
    compound.graph.remove_node(0)
    logger.debug(compound)
    if assign_descriptors:
        molutil.assign_descriptors(compound)
    return compound
[ "def", "smiles_to_compound", "(", "smiles", ",", "assign_descriptors", "=", "True", ")", ":", "it", "=", "iter", "(", "smiles", ")", "mol", "=", "molecule", "(", ")", "try", ":", "for", "token", "in", "it", ":", "mol", "(", "token", ")", "result", ",", "_", "=", "mol", "(", "None", ")", "except", "KeyError", "as", "err", ":", "raise", "ValueError", "(", "\"Unsupported Symbol: {}\"", ".", "format", "(", "err", ")", ")", "result", ".", "graph", ".", "remove_node", "(", "0", ")", "logger", ".", "debug", "(", "result", ")", "if", "assign_descriptors", ":", "molutil", ".", "assign_descriptors", "(", "result", ")", "return", "result" ]
27.684211
16.105263
def write_switch(self, module_address, state, callback_fn):
    """Set relay state."""
    # CR-terminated setstate command for the module.
    command = "setstate,{},{}{}".format(module_address, str(state), chr(13))
    _LOGGER.info("write_switch: setstate,{},{}{}".format(
        module_address, str(state), chr(13)))
    # Listen for the module's state replies before issuing the command.
    self.subscribe("state," + module_address, callback_fn)
    self.send(command)
[ "def", "write_switch", "(", "self", ",", "module_address", ",", "state", ",", "callback_fn", ")", ":", "_LOGGER", ".", "info", "(", "\"write_switch: setstate,{},{}{}\"", ".", "format", "(", "module_address", ",", "str", "(", "state", ")", ",", "chr", "(", "13", ")", ")", ")", "self", ".", "subscribe", "(", "\"state,\"", "+", "module_address", ",", "callback_fn", ")", "self", ".", "send", "(", "\"setstate,{},{}{}\"", ".", "format", "(", "module_address", ",", "str", "(", "state", ")", ",", "chr", "(", "13", ")", ")", ")" ]
53.571429
15.714286
def xmlobject_to_dict(instance, fields=None, exclude=None, prefix=''):
    """
    Generate a dictionary based on the data in an XmlObject instance to pass as
    a Form's ``initial`` keyword argument.

    :param instance: instance of :class:`~eulxml.xmlmap.XmlObject`
    :param fields: optional list of fields - if specified, only the named fields
        will be included in the data returned
    :param exclude: optional list of fields to exclude from the data
    """
    data = {}

    # convert prefix to combining form for convenience
    if prefix:
        prefix = '%s-' % prefix
    else:
        prefix = ''

    for name, field in six.iteritems(instance._fields):
        # not editable?
        if fields and not name in fields:
            continue
        if exclude and name in exclude:
            continue
        if isinstance(field, xmlmap.fields.NodeField):
            # Recurse into the single child node, namespacing its keys
            # under "<prefix><name>-".
            nodefield = getattr(instance, name)
            if nodefield is not None:
                subprefix = '%s%s' % (prefix, name)
                node_data = xmlobject_to_dict(nodefield, prefix=subprefix)
                data.update(node_data)  # FIXME: fields/exclude
        # NOTE(review): this is a separate `if`, not `elif`, so a
        # NodeField also falls into the `else` below and the node object
        # itself is ALSO stored under the plain prefixed name — confirm
        # this double-entry is intended.
        if isinstance(field, xmlmap.fields.NodeListField):
            # Recurse into each list child, keyed "<prefix><name>-<i>".
            for i, child in enumerate(getattr(instance, name)):
                subprefix = '%s%s-%d' % (prefix, name, i)
                node_data = xmlobject_to_dict(child, prefix=subprefix)
                data.update(node_data)  # FIXME: fields/exclude
        else:
            # Scalar field: store the value directly.
            data[prefix + name] = getattr(instance, name)

    return data
[ "def", "xmlobject_to_dict", "(", "instance", ",", "fields", "=", "None", ",", "exclude", "=", "None", ",", "prefix", "=", "''", ")", ":", "data", "=", "{", "}", "# convert prefix to combining form for convenience", "if", "prefix", ":", "prefix", "=", "'%s-'", "%", "prefix", "else", ":", "prefix", "=", "''", "for", "name", ",", "field", "in", "six", ".", "iteritems", "(", "instance", ".", "_fields", ")", ":", "# not editable?", "if", "fields", "and", "not", "name", "in", "fields", ":", "continue", "if", "exclude", "and", "name", "in", "exclude", ":", "continue", "if", "isinstance", "(", "field", ",", "xmlmap", ".", "fields", ".", "NodeField", ")", ":", "nodefield", "=", "getattr", "(", "instance", ",", "name", ")", "if", "nodefield", "is", "not", "None", ":", "subprefix", "=", "'%s%s'", "%", "(", "prefix", ",", "name", ")", "node_data", "=", "xmlobject_to_dict", "(", "nodefield", ",", "prefix", "=", "subprefix", ")", "data", ".", "update", "(", "node_data", ")", "# FIXME: fields/exclude", "if", "isinstance", "(", "field", ",", "xmlmap", ".", "fields", ".", "NodeListField", ")", ":", "for", "i", ",", "child", "in", "enumerate", "(", "getattr", "(", "instance", ",", "name", ")", ")", ":", "subprefix", "=", "'%s%s-%d'", "%", "(", "prefix", ",", "name", ",", "i", ")", "node_data", "=", "xmlobject_to_dict", "(", "child", ",", "prefix", "=", "subprefix", ")", "data", ".", "update", "(", "node_data", ")", "# FIXME: fields/exclude", "else", ":", "data", "[", "prefix", "+", "name", "]", "=", "getattr", "(", "instance", ",", "name", ")", "return", "data" ]
40.263158
20.263158
def write_outro(self):
    """Write outro comments."""
    self.stoptime = time.time()
    elapsed = self.stoptime - self.starttime
    values = {
        "time": strformat.strtime(self.stoptime),
        "duration": strformat.strduration_long(elapsed),
    }
    self.comment(_("Stopped checking at %(time)s (%(duration)s)") % values)
[ "def", "write_outro", "(", "self", ")", ":", "self", ".", "stoptime", "=", "time", ".", "time", "(", ")", "duration", "=", "self", ".", "stoptime", "-", "self", ".", "starttime", "self", ".", "comment", "(", "_", "(", "\"Stopped checking at %(time)s (%(duration)s)\"", ")", "%", "{", "\"time\"", ":", "strformat", ".", "strtime", "(", "self", ".", "stoptime", ")", ",", "\"duration\"", ":", "strformat", ".", "strduration_long", "(", "duration", ")", "}", ")" ]
47.428571
14.428571
def create_volume(self, volume_name: str, driver_spec: str = None):
    """Create new docker volumes. Only the manager nodes can create a volume

    Args:
        volume_name (string): Name for the new docker volume
        driver_spec (string): Driver for the docker volume; defaults to
            the 'local' driver when not given.

    Raises:
        RuntimeError: if this node is not a swarm manager.
    """
    # Default values
    driver = driver_spec if driver_spec else 'local'

    # Raise an exception if we are not a manager
    if not self._manager:
        # FIX: message previously read "Services can only be deleted",
        # copy-pasted from the service-deletion method.
        raise RuntimeError('Volumes can only be created '
                           'on swarm manager nodes')

    self._client.volumes.create(name=volume_name, driver=driver)
[ "def", "create_volume", "(", "self", ",", "volume_name", ":", "str", ",", "driver_spec", ":", "str", "=", "None", ")", ":", "# Default values", "if", "driver_spec", ":", "driver", "=", "driver_spec", "else", ":", "driver", "=", "'local'", "# Raise an exception if we are not a manager", "if", "not", "self", ".", "_manager", ":", "raise", "RuntimeError", "(", "'Services can only be deleted '", "'on swarm manager nodes'", ")", "self", ".", "_client", ".", "volumes", ".", "create", "(", "name", "=", "volume_name", ",", "driver", "=", "driver", ")" ]
32.904762
20.904762
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None, feed=None, args=None): """ Compute the accuracy of a TF model on some data :param sess: TF session to use :param x: input placeholder :param y: output placeholder (for labels) :param predictions: model output predictions :param X_test: numpy array with training inputs :param Y_test: numpy array with training outputs :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Should contain `batch_size` :return: a float with the accuracy value """ global _model_eval_cache args = _ArgsWrapper(args or {}) assert args.batch_size, "Batch size was not given in args dict" if X_test is None or Y_test is None: raise ValueError("X_test argument and Y_test argument " "must be supplied.") # Define accuracy symbolically key = (y, predictions) if key in _model_eval_cache: correct_preds = _model_eval_cache[key] else: correct_preds = tf.equal(tf.argmax(y, axis=-1), tf.argmax(predictions, axis=-1)) _model_eval_cache[key] = correct_preds # Init result var accuracy = 0.0 with sess.as_default(): # Compute number of batches nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size)) assert nb_batches * args.batch_size >= len(X_test) X_cur = np.zeros((args.batch_size,) + X_test.shape[1:], dtype=X_test.dtype) Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:], dtype=Y_test.dtype) for batch in range(nb_batches): if batch % 100 == 0 and batch > 0: _logger.debug("Batch " + str(batch)) # Must not use the `batch_indices` function here, because it # repeats some examples. # It's acceptable to repeat during training, but not eval. start = batch * args.batch_size end = min(len(X_test), start + args.batch_size) # The last batch may be smaller than all others. This should not # affect the accuarcy disproportionately. 
cur_batch_size = end - start X_cur[:cur_batch_size] = X_test[start:end] Y_cur[:cur_batch_size] = Y_test[start:end] feed_dict = {x: X_cur, y: Y_cur} if feed is not None: feed_dict.update(feed) cur_corr_preds = correct_preds.eval(feed_dict=feed_dict) accuracy += cur_corr_preds[:cur_batch_size].sum() assert end >= len(X_test) # Divide by number of examples to get final value accuracy /= len(X_test) return accuracy
[ "def", "model_eval", "(", "sess", ",", "x", ",", "y", ",", "predictions", ",", "X_test", "=", "None", ",", "Y_test", "=", "None", ",", "feed", "=", "None", ",", "args", "=", "None", ")", ":", "global", "_model_eval_cache", "args", "=", "_ArgsWrapper", "(", "args", "or", "{", "}", ")", "assert", "args", ".", "batch_size", ",", "\"Batch size was not given in args dict\"", "if", "X_test", "is", "None", "or", "Y_test", "is", "None", ":", "raise", "ValueError", "(", "\"X_test argument and Y_test argument \"", "\"must be supplied.\"", ")", "# Define accuracy symbolically", "key", "=", "(", "y", ",", "predictions", ")", "if", "key", "in", "_model_eval_cache", ":", "correct_preds", "=", "_model_eval_cache", "[", "key", "]", "else", ":", "correct_preds", "=", "tf", ".", "equal", "(", "tf", ".", "argmax", "(", "y", ",", "axis", "=", "-", "1", ")", ",", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "_model_eval_cache", "[", "key", "]", "=", "correct_preds", "# Init result var", "accuracy", "=", "0.0", "with", "sess", ".", "as_default", "(", ")", ":", "# Compute number of batches", "nb_batches", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "len", "(", "X_test", ")", ")", "/", "args", ".", "batch_size", ")", ")", "assert", "nb_batches", "*", "args", ".", "batch_size", ">=", "len", "(", "X_test", ")", "X_cur", "=", "np", ".", "zeros", "(", "(", "args", ".", "batch_size", ",", ")", "+", "X_test", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "X_test", ".", "dtype", ")", "Y_cur", "=", "np", ".", "zeros", "(", "(", "args", ".", "batch_size", ",", ")", "+", "Y_test", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "Y_test", ".", "dtype", ")", "for", "batch", "in", "range", "(", "nb_batches", ")", ":", "if", "batch", "%", "100", "==", "0", "and", "batch", ">", "0", ":", "_logger", ".", "debug", "(", "\"Batch \"", "+", "str", "(", "batch", ")", ")", "# Must not use the `batch_indices` function here, because it", "# repeats 
some examples.", "# It's acceptable to repeat during training, but not eval.", "start", "=", "batch", "*", "args", ".", "batch_size", "end", "=", "min", "(", "len", "(", "X_test", ")", ",", "start", "+", "args", ".", "batch_size", ")", "# The last batch may be smaller than all others. This should not", "# affect the accuarcy disproportionately.", "cur_batch_size", "=", "end", "-", "start", "X_cur", "[", ":", "cur_batch_size", "]", "=", "X_test", "[", "start", ":", "end", "]", "Y_cur", "[", ":", "cur_batch_size", "]", "=", "Y_test", "[", "start", ":", "end", "]", "feed_dict", "=", "{", "x", ":", "X_cur", ",", "y", ":", "Y_cur", "}", "if", "feed", "is", "not", "None", ":", "feed_dict", ".", "update", "(", "feed", ")", "cur_corr_preds", "=", "correct_preds", ".", "eval", "(", "feed_dict", "=", "feed_dict", ")", "accuracy", "+=", "cur_corr_preds", "[", ":", "cur_batch_size", "]", ".", "sum", "(", ")", "assert", "end", ">=", "len", "(", "X_test", ")", "# Divide by number of examples to get final value", "accuracy", "/=", "len", "(", "X_test", ")", "return", "accuracy" ]
35.891892
16.243243
def initialize_worker(self, process_num=None): """ reinitialize consumer for process in multiprocesing """ self.initialize(self.grid, self.num_of_paths, self.seed)
[ "def", "initialize_worker", "(", "self", ",", "process_num", "=", "None", ")", ":", "self", ".", "initialize", "(", "self", ".", "grid", ",", "self", ".", "num_of_paths", ",", "self", ".", "seed", ")" ]
38.2
9.8
def add_alias(self, entry): """ Adds id to the current list 'aliased_by' """ assert isinstance(entry, SymbolVAR) self.aliased_by.append(entry)
[ "def", "add_alias", "(", "self", ",", "entry", ")", ":", "assert", "isinstance", "(", "entry", ",", "SymbolVAR", ")", "self", ".", "aliased_by", ".", "append", "(", "entry", ")" ]
34
3.8
def group_id(self): """ Returns the @GROUPID. If derived_from is set, returns that group_id. """ if self.derived_from is not None: return self.derived_from.group_id() if self.file_uuid is None: return None return utils.GROUP_ID_PREFIX + self.file_uuid
[ "def", "group_id", "(", "self", ")", ":", "if", "self", ".", "derived_from", "is", "not", "None", ":", "return", "self", ".", "derived_from", ".", "group_id", "(", ")", "if", "self", ".", "file_uuid", "is", "None", ":", "return", "None", "return", "utils", ".", "GROUP_ID_PREFIX", "+", "self", ".", "file_uuid" ]
29.272727
11.818182
def getLeastUsedCell(self, c): """For the least used cell in a column""" segmentsPerCell = numpy.zeros(self.cellsPerColumn, dtype='uint32') for i in range(self.cellsPerColumn): segmentsPerCell[i] = self.getNumSegmentsInCell(c,i) cellMinUsage = numpy.where(segmentsPerCell==segmentsPerCell.min())[0] # return cellMinUsage[0] # return the first cell with minimum usage # if multiple cells has minimum usage, randomly pick one self._random.getUInt32(len(cellMinUsage)) return cellMinUsage[self._random.getUInt32(len(cellMinUsage))]
[ "def", "getLeastUsedCell", "(", "self", ",", "c", ")", ":", "segmentsPerCell", "=", "numpy", ".", "zeros", "(", "self", ".", "cellsPerColumn", ",", "dtype", "=", "'uint32'", ")", "for", "i", "in", "range", "(", "self", ".", "cellsPerColumn", ")", ":", "segmentsPerCell", "[", "i", "]", "=", "self", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "cellMinUsage", "=", "numpy", ".", "where", "(", "segmentsPerCell", "==", "segmentsPerCell", ".", "min", "(", ")", ")", "[", "0", "]", "# return cellMinUsage[0] # return the first cell with minimum usage", "# if multiple cells has minimum usage, randomly pick one", "self", ".", "_random", ".", "getUInt32", "(", "len", "(", "cellMinUsage", ")", ")", "return", "cellMinUsage", "[", "self", ".", "_random", ".", "getUInt32", "(", "len", "(", "cellMinUsage", ")", ")", "]" ]
46.416667
21
def delete_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, **kwargs): """Delete FreeShippingPromotion Delete an instance of FreeShippingPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_promotion_by_id(free_shipping_promotion_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) else: (data) = cls._delete_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) return data
[ "def", "delete_free_shipping_promotion_by_id", "(", "cls", ",", "free_shipping_promotion_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_free_shipping_promotion_by_id_with_http_info", "(", "free_shipping_promotion_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_delete_free_shipping_promotion_by_id_with_http_info", "(", "free_shipping_promotion_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
49.809524
27.809524
def make_ring_dict(self, galkey): """ Make a dictionary mapping the merged component names to list of template files Parameters ---------- galkey : str Unique key for this ring dictionary Returns `model_component.GalpropMergedRingInfo` """ galprop_rings = self.read_galprop_rings_yaml(galkey) galprop_run = galprop_rings['galprop_run'] ring_limits = galprop_rings['ring_limits'] comp_dict = galprop_rings['diffuse_comp_dict'] remove_rings = galprop_rings.get('remove_rings', []) ring_dict = {} nring = len(ring_limits) - 1 for source_name, source_value in comp_dict.items(): base_dict = dict(source_name=source_name, galkey=galkey, galprop_run=galprop_run) for iring in range(nring): sourcekey = "%s_%i" % (source_name, iring) if sourcekey in remove_rings: continue full_key = "%s_%s" % (sourcekey, galkey) rings = range(ring_limits[iring], ring_limits[iring + 1]) base_dict.update(dict(ring=iring, sourcekey=sourcekey, files=self.make_ring_filelist(source_value, rings, galprop_run), merged_gasmap=self.make_merged_name(sourcekey, galkey, False))) ring_dict[full_key] = GalpropMergedRingInfo(**base_dict) self._ring_dicts[galkey] = ring_dict return ring_dict
[ "def", "make_ring_dict", "(", "self", ",", "galkey", ")", ":", "galprop_rings", "=", "self", ".", "read_galprop_rings_yaml", "(", "galkey", ")", "galprop_run", "=", "galprop_rings", "[", "'galprop_run'", "]", "ring_limits", "=", "galprop_rings", "[", "'ring_limits'", "]", "comp_dict", "=", "galprop_rings", "[", "'diffuse_comp_dict'", "]", "remove_rings", "=", "galprop_rings", ".", "get", "(", "'remove_rings'", ",", "[", "]", ")", "ring_dict", "=", "{", "}", "nring", "=", "len", "(", "ring_limits", ")", "-", "1", "for", "source_name", ",", "source_value", "in", "comp_dict", ".", "items", "(", ")", ":", "base_dict", "=", "dict", "(", "source_name", "=", "source_name", ",", "galkey", "=", "galkey", ",", "galprop_run", "=", "galprop_run", ")", "for", "iring", "in", "range", "(", "nring", ")", ":", "sourcekey", "=", "\"%s_%i\"", "%", "(", "source_name", ",", "iring", ")", "if", "sourcekey", "in", "remove_rings", ":", "continue", "full_key", "=", "\"%s_%s\"", "%", "(", "sourcekey", ",", "galkey", ")", "rings", "=", "range", "(", "ring_limits", "[", "iring", "]", ",", "ring_limits", "[", "iring", "+", "1", "]", ")", "base_dict", ".", "update", "(", "dict", "(", "ring", "=", "iring", ",", "sourcekey", "=", "sourcekey", ",", "files", "=", "self", ".", "make_ring_filelist", "(", "source_value", ",", "rings", ",", "galprop_run", ")", ",", "merged_gasmap", "=", "self", ".", "make_merged_name", "(", "sourcekey", ",", "galkey", ",", "False", ")", ")", ")", "ring_dict", "[", "full_key", "]", "=", "GalpropMergedRingInfo", "(", "*", "*", "base_dict", ")", "self", ".", "_ring_dicts", "[", "galkey", "]", "=", "ring_dict", "return", "ring_dict" ]
46.486486
19.054054
def compile(self): """ Build the abstract Parsley tree starting from the root node (recursive) """ if not isinstance(self.parselet, dict): raise ValueError("Parselet must be a dict of some sort. Or use .from_jsonstring(), " \ ".from_jsonfile(), .from_yamlstring(), or .from_yamlfile()") self.parselet_tree = self._compile(self.parselet)
[ "def", "compile", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "parselet", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Parselet must be a dict of some sort. Or use .from_jsonstring(), \"", "\".from_jsonfile(), .from_yamlstring(), or .from_yamlfile()\"", ")", "self", ".", "parselet_tree", "=", "self", ".", "_compile", "(", "self", ".", "parselet", ")" ]
44.888889
20.888889
def hume_process_jsonld(): """Process Hume JSON-LD and return INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) jsonld_str = body.get('jsonld') jsonld = json.loads(jsonld_str) hp = hume.process_jsonld(jsonld) return _stmts_from_proc(hp)
[ "def", "hume_process_jsonld", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "jsonld_str", "=", "body", ".", "get", "(", "'jsonld'", ")", "jsonld", "=", "json", ".", "loads", "(", "jsonld_str", ")", "hp", "=", "hume", ".", "process_jsonld", "(", "jsonld", ")", "return", "_stmts_from_proc", "(", "hp", ")" ]
35.5
8.4
def permissions_for(self, member): """Handles permission resolution for the current :class:`Member`. This function takes into consideration the following cases: - Guild owner - Guild roles - Channel overrides - Member overrides Parameters ---------- member: :class:`Member` The member to resolve permissions for. Returns ------- :class:`Permissions` The resolved permissions for the member. """ # The current cases can be explained as: # Guild owner get all permissions -- no questions asked. Otherwise... # The @everyone role gets the first application. # After that, the applied roles that the user has in the channel # (or otherwise) are then OR'd together. # After the role permissions are resolved, the member permissions # have to take into effect. # After all that is done.. you have to do the following: # If manage permissions is True, then all permissions are set to True. # The operation first takes into consideration the denied # and then the allowed. o = self.guild.owner if o is not None and member.id == o.id: return Permissions.all() default = self.guild.default_role base = Permissions(default.permissions.value) roles = member.roles # Apply guild roles that the member has. for role in roles: base.value |= role.permissions.value # Guild-wide Administrator -> True for everything # Bypass all channel-specific overrides if base.administrator: return Permissions.all() # Apply @everyone allow/deny first since it's special try: maybe_everyone = self._overwrites[0] if maybe_everyone.id == self.guild.id: base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny) remaining_overwrites = self._overwrites[1:] else: remaining_overwrites = self._overwrites except IndexError: remaining_overwrites = self._overwrites # not sure if doing member._roles.get(...) is better than the # set approach. While this is O(N) to re-create into a set for O(1) # the direct approach would just be O(log n) for searching with no # extra memory overhead. 
For now, I'll keep the set cast # Note that the member.roles accessor up top also creates a # temporary list member_role_ids = {r.id for r in roles} denies = 0 allows = 0 # Apply channel specific role permission overwrites for overwrite in remaining_overwrites: if overwrite.type == 'role' and overwrite.id in member_role_ids: denies |= overwrite.deny allows |= overwrite.allow base.handle_overwrite(allow=allows, deny=denies) # Apply member specific permission overwrites for overwrite in remaining_overwrites: if overwrite.type == 'member' and overwrite.id == member.id: base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny) break # if you can't send a message in a channel then you can't have certain # permissions as well if not base.send_messages: base.send_tts_messages = False base.mention_everyone = False base.embed_links = False base.attach_files = False # if you can't read a channel then you have no permissions there if not base.read_messages: denied = Permissions.all_channel() base.value &= ~denied.value return base
[ "def", "permissions_for", "(", "self", ",", "member", ")", ":", "# The current cases can be explained as:", "# Guild owner get all permissions -- no questions asked. Otherwise...", "# The @everyone role gets the first application.", "# After that, the applied roles that the user has in the channel", "# (or otherwise) are then OR'd together.", "# After the role permissions are resolved, the member permissions", "# have to take into effect.", "# After all that is done.. you have to do the following:", "# If manage permissions is True, then all permissions are set to True.", "# The operation first takes into consideration the denied", "# and then the allowed.", "o", "=", "self", ".", "guild", ".", "owner", "if", "o", "is", "not", "None", "and", "member", ".", "id", "==", "o", ".", "id", ":", "return", "Permissions", ".", "all", "(", ")", "default", "=", "self", ".", "guild", ".", "default_role", "base", "=", "Permissions", "(", "default", ".", "permissions", ".", "value", ")", "roles", "=", "member", ".", "roles", "# Apply guild roles that the member has.", "for", "role", "in", "roles", ":", "base", ".", "value", "|=", "role", ".", "permissions", ".", "value", "# Guild-wide Administrator -> True for everything", "# Bypass all channel-specific overrides", "if", "base", ".", "administrator", ":", "return", "Permissions", ".", "all", "(", ")", "# Apply @everyone allow/deny first since it's special", "try", ":", "maybe_everyone", "=", "self", ".", "_overwrites", "[", "0", "]", "if", "maybe_everyone", ".", "id", "==", "self", ".", "guild", ".", "id", ":", "base", ".", "handle_overwrite", "(", "allow", "=", "maybe_everyone", ".", "allow", ",", "deny", "=", "maybe_everyone", ".", "deny", ")", "remaining_overwrites", "=", "self", ".", "_overwrites", "[", "1", ":", "]", "else", ":", "remaining_overwrites", "=", "self", ".", "_overwrites", "except", "IndexError", ":", "remaining_overwrites", "=", "self", ".", "_overwrites", "# not sure if doing member._roles.get(...) 
is better than the", "# set approach. While this is O(N) to re-create into a set for O(1)", "# the direct approach would just be O(log n) for searching with no", "# extra memory overhead. For now, I'll keep the set cast", "# Note that the member.roles accessor up top also creates a", "# temporary list", "member_role_ids", "=", "{", "r", ".", "id", "for", "r", "in", "roles", "}", "denies", "=", "0", "allows", "=", "0", "# Apply channel specific role permission overwrites", "for", "overwrite", "in", "remaining_overwrites", ":", "if", "overwrite", ".", "type", "==", "'role'", "and", "overwrite", ".", "id", "in", "member_role_ids", ":", "denies", "|=", "overwrite", ".", "deny", "allows", "|=", "overwrite", ".", "allow", "base", ".", "handle_overwrite", "(", "allow", "=", "allows", ",", "deny", "=", "denies", ")", "# Apply member specific permission overwrites", "for", "overwrite", "in", "remaining_overwrites", ":", "if", "overwrite", ".", "type", "==", "'member'", "and", "overwrite", ".", "id", "==", "member", ".", "id", ":", "base", ".", "handle_overwrite", "(", "allow", "=", "overwrite", ".", "allow", ",", "deny", "=", "overwrite", ".", "deny", ")", "break", "# if you can't send a message in a channel then you can't have certain", "# permissions as well", "if", "not", "base", ".", "send_messages", ":", "base", ".", "send_tts_messages", "=", "False", "base", ".", "mention_everyone", "=", "False", "base", ".", "embed_links", "=", "False", "base", ".", "attach_files", "=", "False", "# if you can't read a channel then you have no permissions there", "if", "not", "base", ".", "read_messages", ":", "denied", "=", "Permissions", ".", "all_channel", "(", ")", "base", ".", "value", "&=", "~", "denied", ".", "value", "return", "base" ]
36.663366
20.623762
def rmse(self): """Get RMSE for regression model evaluation results. Returns: the RMSE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. """ if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') df = df[['target', 'predicted']].apply(pd.to_numeric) # if df is empty or contains non-numeric, scikit learn will raise error. mse = mean_squared_error(df['target'], df['predicted']) return math.sqrt(mse) elif self._bigquery: query = bq.Query(""" SELECT SQRT(SUM(ABS(predicted-target) * ABS(predicted-target)) / COUNT(*)) as rmse FROM %s""" % self._bigquery) df = self._get_data_from_bigquery([query]) if df.empty: return None return df['rmse'][0]
[ "def", "rmse", "(", "self", ")", ":", "if", "self", ".", "_input_csv_files", ":", "df", "=", "self", ".", "_get_data_from_csv_files", "(", ")", "if", "'target'", "not", "in", "df", "or", "'predicted'", "not", "in", "df", ":", "raise", "ValueError", "(", "'Cannot find \"target\" or \"predicted\" column'", ")", "df", "=", "df", "[", "[", "'target'", ",", "'predicted'", "]", "]", ".", "apply", "(", "pd", ".", "to_numeric", ")", "# if df is empty or contains non-numeric, scikit learn will raise error.", "mse", "=", "mean_squared_error", "(", "df", "[", "'target'", "]", ",", "df", "[", "'predicted'", "]", ")", "return", "math", ".", "sqrt", "(", "mse", ")", "elif", "self", ".", "_bigquery", ":", "query", "=", "bq", ".", "Query", "(", "\"\"\"\n SELECT\n SQRT(SUM(ABS(predicted-target) * ABS(predicted-target)) / COUNT(*)) as rmse\n FROM\n %s\"\"\"", "%", "self", ".", "_bigquery", ")", "df", "=", "self", ".", "_get_data_from_bigquery", "(", "[", "query", "]", ")", "if", "df", ".", "empty", ":", "return", "None", "return", "df", "[", "'rmse'", "]", "[", "0", "]" ]
33.483871
22.580645
def user_with_name(self, given_name=None, sn=None): """Get a unique user object by given name (first/nickname and last).""" results = [] if sn and not given_name: results = User.objects.filter(last_name=sn) elif given_name: query = {'first_name': given_name} if sn: query['last_name'] = sn results = User.objects.filter(**query) if len(results) == 0: # Try their first name as a nickname del query['first_name'] query['nickname'] = given_name results = User.objects.filter(**query) if len(results) == 1: return results.first() return None
[ "def", "user_with_name", "(", "self", ",", "given_name", "=", "None", ",", "sn", "=", "None", ")", ":", "results", "=", "[", "]", "if", "sn", "and", "not", "given_name", ":", "results", "=", "User", ".", "objects", ".", "filter", "(", "last_name", "=", "sn", ")", "elif", "given_name", ":", "query", "=", "{", "'first_name'", ":", "given_name", "}", "if", "sn", ":", "query", "[", "'last_name'", "]", "=", "sn", "results", "=", "User", ".", "objects", ".", "filter", "(", "*", "*", "query", ")", "if", "len", "(", "results", ")", "==", "0", ":", "# Try their first name as a nickname", "del", "query", "[", "'first_name'", "]", "query", "[", "'nickname'", "]", "=", "given_name", "results", "=", "User", ".", "objects", ".", "filter", "(", "*", "*", "query", ")", "if", "len", "(", "results", ")", "==", "1", ":", "return", "results", ".", "first", "(", ")", "return", "None" ]
32.772727
15.727273
def register_alias(self, alias, key): """Aliases provide another accessor for the same key. This enables one to change a name without breaking the application. """ alias = alias.lower() key = key.lower() if alias != key and alias != self._real_key(key): exists = self._aliases.get(alias) if exists is None: # if we alias something that exists in one of the dicts to # another name, we'll never be able to get that value using the # original name, so move the config value to the new _real_key. val = self._config.get(alias) if val: self._config.pop(alias) self._config[key] = val val = self._kvstore.get(alias) if val: self._kvstore.pop(alias) self._kvstore[key] = val val = self._defaults.get(alias) if val: self._defaults.pop(alias) self._defaults[key] = val val = self._override.get(alias) if val: self._override.pop(alias) self._override[key] = val self._aliases[alias] = key else: log.warning("Creating circular reference alias {0} {1} {2}".format( alias, key, self._real_key(key)))
[ "def", "register_alias", "(", "self", ",", "alias", ",", "key", ")", ":", "alias", "=", "alias", ".", "lower", "(", ")", "key", "=", "key", ".", "lower", "(", ")", "if", "alias", "!=", "key", "and", "alias", "!=", "self", ".", "_real_key", "(", "key", ")", ":", "exists", "=", "self", ".", "_aliases", ".", "get", "(", "alias", ")", "if", "exists", "is", "None", ":", "# if we alias something that exists in one of the dicts to", "# another name, we'll never be able to get that value using the", "# original name, so move the config value to the new _real_key.", "val", "=", "self", ".", "_config", ".", "get", "(", "alias", ")", "if", "val", ":", "self", ".", "_config", ".", "pop", "(", "alias", ")", "self", ".", "_config", "[", "key", "]", "=", "val", "val", "=", "self", ".", "_kvstore", ".", "get", "(", "alias", ")", "if", "val", ":", "self", ".", "_kvstore", ".", "pop", "(", "alias", ")", "self", ".", "_kvstore", "[", "key", "]", "=", "val", "val", "=", "self", ".", "_defaults", ".", "get", "(", "alias", ")", "if", "val", ":", "self", ".", "_defaults", ".", "pop", "(", "alias", ")", "self", ".", "_defaults", "[", "key", "]", "=", "val", "val", "=", "self", ".", "_override", ".", "get", "(", "alias", ")", "if", "val", ":", "self", ".", "_override", ".", "pop", "(", "alias", ")", "self", ".", "_override", "[", "key", "]", "=", "val", "self", ".", "_aliases", "[", "alias", "]", "=", "key", "else", ":", "log", ".", "warning", "(", "\"Creating circular reference alias {0} {1} {2}\"", ".", "format", "(", "alias", ",", "key", ",", "self", ".", "_real_key", "(", "key", ")", ")", ")" ]
41.647059
14.470588
def as_dict(self): """ Serializes the object necessary data in a dictionary. :returns: Serialized data in a dictionary. :rtype: dict """ result_dict = super(Profile, self).as_dict() statuses = list() version = None titles = list() descriptions = list() platforms = list() selects = list() for child in self.children: if isinstance(child, Version): version = child.as_dict() elif isinstance(child, Status): statuses.append(child.as_dict()) elif isinstance(child, Title): titles.append(child.as_dict()) elif isinstance(child, Description): descriptions.append(child.as_dict()) elif isinstance(child, Platform): platforms.append(child.as_dict()) elif isinstance(child, Select): selects.append(child.as_dict()) if version is not None: result_dict['version'] = version if len(statuses) > 0: result_dict['statuses'] = statuses if len(titles) > 0: result_dict['titles'] = titles if len(descriptions) > 0: result_dict['descriptions'] = descriptions if len(platforms) > 0: result_dict['platforms'] = platforms if len(selects) > 0: result_dict['selects'] = selects return result_dict
[ "def", "as_dict", "(", "self", ")", ":", "result_dict", "=", "super", "(", "Profile", ",", "self", ")", ".", "as_dict", "(", ")", "statuses", "=", "list", "(", ")", "version", "=", "None", "titles", "=", "list", "(", ")", "descriptions", "=", "list", "(", ")", "platforms", "=", "list", "(", ")", "selects", "=", "list", "(", ")", "for", "child", "in", "self", ".", "children", ":", "if", "isinstance", "(", "child", ",", "Version", ")", ":", "version", "=", "child", ".", "as_dict", "(", ")", "elif", "isinstance", "(", "child", ",", "Status", ")", ":", "statuses", ".", "append", "(", "child", ".", "as_dict", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "Title", ")", ":", "titles", ".", "append", "(", "child", ".", "as_dict", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "Description", ")", ":", "descriptions", ".", "append", "(", "child", ".", "as_dict", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "Platform", ")", ":", "platforms", ".", "append", "(", "child", ".", "as_dict", "(", ")", ")", "elif", "isinstance", "(", "child", ",", "Select", ")", ":", "selects", ".", "append", "(", "child", ".", "as_dict", "(", ")", ")", "if", "version", "is", "not", "None", ":", "result_dict", "[", "'version'", "]", "=", "version", "if", "len", "(", "statuses", ")", ">", "0", ":", "result_dict", "[", "'statuses'", "]", "=", "statuses", "if", "len", "(", "titles", ")", ">", "0", ":", "result_dict", "[", "'titles'", "]", "=", "titles", "if", "len", "(", "descriptions", ")", ">", "0", ":", "result_dict", "[", "'descriptions'", "]", "=", "descriptions", "if", "len", "(", "platforms", ")", ">", "0", ":", "result_dict", "[", "'platforms'", "]", "=", "platforms", "if", "len", "(", "selects", ")", ">", "0", ":", "result_dict", "[", "'selects'", "]", "=", "selects", "return", "result_dict" ]
31.888889
13.355556
def _set_field(self, fieldname, bytestring, transfunc=None): """convienience function to set fields of the tinytag by name. the payload (bytestring) can be changed using the transfunc""" if getattr(self, fieldname): # do not overwrite existing data return value = bytestring if transfunc is None else transfunc(bytestring) if DEBUG: stderr('Setting field "%s" to "%s"' % (fieldname, value)) if fieldname == 'genre' and value.isdigit() and int(value) < len(ID3.ID3V1_GENRES): # funky: id3v1 genre hidden in a id3v2 field value = ID3.ID3V1_GENRES[int(value)] if fieldname in ("track", "disc"): if type(value).__name__ in ('str', 'unicode') and '/' in value: current, total = value.split('/')[:2] setattr(self, "%s_total" % fieldname, total) else: current = value setattr(self, fieldname, current) else: setattr(self, fieldname, value)
[ "def", "_set_field", "(", "self", ",", "fieldname", ",", "bytestring", ",", "transfunc", "=", "None", ")", ":", "if", "getattr", "(", "self", ",", "fieldname", ")", ":", "# do not overwrite existing data", "return", "value", "=", "bytestring", "if", "transfunc", "is", "None", "else", "transfunc", "(", "bytestring", ")", "if", "DEBUG", ":", "stderr", "(", "'Setting field \"%s\" to \"%s\"'", "%", "(", "fieldname", ",", "value", ")", ")", "if", "fieldname", "==", "'genre'", "and", "value", ".", "isdigit", "(", ")", "and", "int", "(", "value", ")", "<", "len", "(", "ID3", ".", "ID3V1_GENRES", ")", ":", "# funky: id3v1 genre hidden in a id3v2 field", "value", "=", "ID3", ".", "ID3V1_GENRES", "[", "int", "(", "value", ")", "]", "if", "fieldname", "in", "(", "\"track\"", ",", "\"disc\"", ")", ":", "if", "type", "(", "value", ")", ".", "__name__", "in", "(", "'str'", ",", "'unicode'", ")", "and", "'/'", "in", "value", ":", "current", ",", "total", "=", "value", ".", "split", "(", "'/'", ")", "[", ":", "2", "]", "setattr", "(", "self", ",", "\"%s_total\"", "%", "fieldname", ",", "total", ")", "else", ":", "current", "=", "value", "setattr", "(", "self", ",", "fieldname", ",", "current", ")", "else", ":", "setattr", "(", "self", ",", "fieldname", ",", "value", ")" ]
51.1
18.5
def update_parent_sequence_map(child_part, delete=False): """Updates the child map of a simple sequence assessment assessment part""" if child_part.has_parent_part(): object_map = child_part.get_assessment_part()._my_map database = 'assessment_authoring' collection_type = 'AssessmentPart' else: object_map = child_part.get_assessment()._my_map database = 'assessment' collection_type = 'Assessment' collection = JSONClientValidated(database, collection=collection_type, runtime=child_part._runtime) if delete and 'childIds' in object_map: object_map['childIds'].remove(str(child_part.get_id())) elif not delete: if 'childIds' not in object_map: object_map['childIds'] = [] object_map['childIds'].append(str(child_part.get_id())) collection.save(object_map)
[ "def", "update_parent_sequence_map", "(", "child_part", ",", "delete", "=", "False", ")", ":", "if", "child_part", ".", "has_parent_part", "(", ")", ":", "object_map", "=", "child_part", ".", "get_assessment_part", "(", ")", ".", "_my_map", "database", "=", "'assessment_authoring'", "collection_type", "=", "'AssessmentPart'", "else", ":", "object_map", "=", "child_part", ".", "get_assessment", "(", ")", ".", "_my_map", "database", "=", "'assessment'", "collection_type", "=", "'Assessment'", "collection", "=", "JSONClientValidated", "(", "database", ",", "collection", "=", "collection_type", ",", "runtime", "=", "child_part", ".", "_runtime", ")", "if", "delete", "and", "'childIds'", "in", "object_map", ":", "object_map", "[", "'childIds'", "]", ".", "remove", "(", "str", "(", "child_part", ".", "get_id", "(", ")", ")", ")", "elif", "not", "delete", ":", "if", "'childIds'", "not", "in", "object_map", ":", "object_map", "[", "'childIds'", "]", "=", "[", "]", "object_map", "[", "'childIds'", "]", ".", "append", "(", "str", "(", "child_part", ".", "get_id", "(", ")", ")", ")", "collection", ".", "save", "(", "object_map", ")" ]
46.2
11.85
def file_handler(self, handler_type, path, prefixed_path, source_storage): """ Create a dict with all kwargs of the `copy_file` or `link_file` method of the super class and add it to the queue for later processing. """ if self.faster: if prefixed_path not in self.found_files: self.found_files[prefixed_path] = (source_storage, path) self.task_queue.put({ 'handler_type': handler_type, 'path': path, 'prefixed_path': prefixed_path, 'source_storage': source_storage }) self.counter += 1 else: if handler_type == 'link': super(Command, self).link_file(path, prefixed_path, source_storage) else: super(Command, self).copy_file(path, prefixed_path, source_storage)
[ "def", "file_handler", "(", "self", ",", "handler_type", ",", "path", ",", "prefixed_path", ",", "source_storage", ")", ":", "if", "self", ".", "faster", ":", "if", "prefixed_path", "not", "in", "self", ".", "found_files", ":", "self", ".", "found_files", "[", "prefixed_path", "]", "=", "(", "source_storage", ",", "path", ")", "self", ".", "task_queue", ".", "put", "(", "{", "'handler_type'", ":", "handler_type", ",", "'path'", ":", "path", ",", "'prefixed_path'", ":", "prefixed_path", ",", "'source_storage'", ":", "source_storage", "}", ")", "self", ".", "counter", "+=", "1", "else", ":", "if", "handler_type", "==", "'link'", ":", "super", "(", "Command", ",", "self", ")", ".", "link_file", "(", "path", ",", "prefixed_path", ",", "source_storage", ")", "else", ":", "super", "(", "Command", ",", "self", ")", ".", "copy_file", "(", "path", ",", "prefixed_path", ",", "source_storage", ")" ]
41.571429
20.047619
def count_het(self, allele=None, axis=None): """Count heterozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_het(allele=allele) return np.sum(b, axis=axis)
[ "def", "count_het", "(", "self", ",", "allele", "=", "None", ",", "axis", "=", "None", ")", ":", "b", "=", "self", ".", "is_het", "(", "allele", "=", "allele", ")", "return", "np", ".", "sum", "(", "b", ",", "axis", "=", "axis", ")" ]
27.538462
15.615385
def get_all(self, collection_name, since=None): """ method returns all job records from a particular collection that are older than <since> """ if since is None: query = {} else: query = {job.TIMEPERIOD: {'$gte': since}} collection = self.ds.connection(collection_name) cursor = collection.find(query) if cursor.count() == 0: raise LookupError('MongoDB has no job records in collection {0} since {1}'.format(collection_name, since)) return [Job.from_json(document) for document in cursor]
[ "def", "get_all", "(", "self", ",", "collection_name", ",", "since", "=", "None", ")", ":", "if", "since", "is", "None", ":", "query", "=", "{", "}", "else", ":", "query", "=", "{", "job", ".", "TIMEPERIOD", ":", "{", "'$gte'", ":", "since", "}", "}", "collection", "=", "self", ".", "ds", ".", "connection", "(", "collection_name", ")", "cursor", "=", "collection", ".", "find", "(", "query", ")", "if", "cursor", ".", "count", "(", ")", "==", "0", ":", "raise", "LookupError", "(", "'MongoDB has no job records in collection {0} since {1}'", ".", "format", "(", "collection_name", ",", "since", ")", ")", "return", "[", "Job", ".", "from_json", "(", "document", ")", "for", "document", "in", "cursor", "]" ]
47.5
20.583333
def create_choice(choice_list, help_string=NO_HELP, default=NO_DEFAULT): # type: (List[str], str, Union[Any, NO_DEFAULT_TYPE]) -> str """ Create a choice config :param choice_list: :param help_string: :param default: :return: """ # noinspection PyTypeChecker return ParamChoice( help_string=help_string, default=default, choice_list=choice_list, )
[ "def", "create_choice", "(", "choice_list", ",", "help_string", "=", "NO_HELP", ",", "default", "=", "NO_DEFAULT", ")", ":", "# type: (List[str], str, Union[Any, NO_DEFAULT_TYPE]) -> str", "# noinspection PyTypeChecker", "return", "ParamChoice", "(", "help_string", "=", "help_string", ",", "default", "=", "default", ",", "choice_list", "=", "choice_list", ",", ")" ]
30.466667
13.666667
def create_parser(subparsers): ''' :param subparsers: :return: ''' parser = subparsers.add_parser( 'restart', help='Restart a topology', usage="%(prog)s [options] cluster/[role]/[env] <topology-name> [container-id]", add_help=True) args.add_titles(parser) args.add_cluster_role_env(parser) args.add_topology(parser) parser.add_argument( 'container-id', nargs='?', type=int, default=-1, help='Identifier of the container to be restarted') args.add_config(parser) args.add_service_url(parser) args.add_verbose(parser) parser.set_defaults(subcommand='restart') return parser
[ "def", "create_parser", "(", "subparsers", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "'restart'", ",", "help", "=", "'Restart a topology'", ",", "usage", "=", "\"%(prog)s [options] cluster/[role]/[env] <topology-name> [container-id]\"", ",", "add_help", "=", "True", ")", "args", ".", "add_titles", "(", "parser", ")", "args", ".", "add_cluster_role_env", "(", "parser", ")", "args", ".", "add_topology", "(", "parser", ")", "parser", ".", "add_argument", "(", "'container-id'", ",", "nargs", "=", "'?'", ",", "type", "=", "int", ",", "default", "=", "-", "1", ",", "help", "=", "'Identifier of the container to be restarted'", ")", "args", ".", "add_config", "(", "parser", ")", "args", ".", "add_service_url", "(", "parser", ")", "args", ".", "add_verbose", "(", "parser", ")", "parser", ".", "set_defaults", "(", "subcommand", "=", "'restart'", ")", "return", "parser" ]
22.5
22.142857
def compile_patterns_in_dictionary(dictionary): """ Replace all strings in dictionary with compiled version of themselves and return dictionary. """ for key, value in dictionary.items(): if isinstance(value, str): dictionary[key] = re.compile(value) elif isinstance(value, dict): compile_patterns_in_dictionary(value) return dictionary
[ "def", "compile_patterns_in_dictionary", "(", "dictionary", ")", ":", "for", "key", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "dictionary", "[", "key", "]", "=", "re", ".", "compile", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "compile_patterns_in_dictionary", "(", "value", ")", "return", "dictionary" ]
35.363636
6.454545
def add_grid(self): """Add axis and ticks to figure. Notes ----- I know that visvis and pyqtgraphs can do this in much simpler way, but those packages create too large a padding around the figure and this is pretty fast. """ value = self.config.value # X-AXIS # x-bottom self.scene.addLine(value['x_min'], value['y_min'], value['x_min'], value['y_max'], QPen(QColor(LINE_COLOR), LINE_WIDTH)) # at y = 0, dashed self.scene.addLine(value['x_min'], 0, value['x_max'], 0, QPen(QColor(LINE_COLOR), LINE_WIDTH, Qt.DashLine)) # ticks on y-axis y_high = int(floor(value['y_max'])) y_low = int(ceil(value['y_min'])) x_length = (value['x_max'] - value['x_min']) / value['x_tick'] for y in range(y_low, y_high): self.scene.addLine(value['x_min'], y, value['x_min'] + x_length, y, QPen(QColor(LINE_COLOR), LINE_WIDTH)) # Y-AXIS # left axis self.scene.addLine(value['x_min'], value['y_min'], value['x_max'], value['y_min'], QPen(QColor(LINE_COLOR), LINE_WIDTH)) # larger ticks on x-axis every 10 Hz x_high = int(floor(value['x_max'])) x_low = int(ceil(value['x_min'])) y_length = (value['y_max'] - value['y_min']) / value['y_tick'] for x in range(x_low, x_high, 10): self.scene.addLine(x, value['y_min'], x, value['y_min'] + y_length, QPen(QColor(LINE_COLOR), LINE_WIDTH)) # smaller ticks on x-axis every 10 Hz y_length = (value['y_max'] - value['y_min']) / value['y_tick'] / 2 for x in range(x_low, x_high, 5): self.scene.addLine(x, value['y_min'], x, value['y_min'] + y_length, QPen(QColor(LINE_COLOR), LINE_WIDTH))
[ "def", "add_grid", "(", "self", ")", ":", "value", "=", "self", ".", "config", ".", "value", "# X-AXIS", "# x-bottom", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "value", "[", "'y_min'", "]", ",", "value", "[", "'x_min'", "]", ",", "value", "[", "'y_max'", "]", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# at y = 0, dashed", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "0", ",", "value", "[", "'x_max'", "]", ",", "0", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ",", "Qt", ".", "DashLine", ")", ")", "# ticks on y-axis", "y_high", "=", "int", "(", "floor", "(", "value", "[", "'y_max'", "]", ")", ")", "y_low", "=", "int", "(", "ceil", "(", "value", "[", "'y_min'", "]", ")", ")", "x_length", "=", "(", "value", "[", "'x_max'", "]", "-", "value", "[", "'x_min'", "]", ")", "/", "value", "[", "'x_tick'", "]", "for", "y", "in", "range", "(", "y_low", ",", "y_high", ")", ":", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "y", ",", "value", "[", "'x_min'", "]", "+", "x_length", ",", "y", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# Y-AXIS", "# left axis", "self", ".", "scene", ".", "addLine", "(", "value", "[", "'x_min'", "]", ",", "value", "[", "'y_min'", "]", ",", "value", "[", "'x_max'", "]", ",", "value", "[", "'y_min'", "]", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# larger ticks on x-axis every 10 Hz", "x_high", "=", "int", "(", "floor", "(", "value", "[", "'x_max'", "]", ")", ")", "x_low", "=", "int", "(", "ceil", "(", "value", "[", "'x_min'", "]", ")", ")", "y_length", "=", "(", "value", "[", "'y_max'", "]", "-", "value", "[", "'y_min'", "]", ")", "/", "value", "[", "'y_tick'", "]", "for", "x", "in", "range", "(", "x_low", ",", "x_high", ",", "10", ")", ":", "self", ".", "scene", ".", "addLine", "(", "x", ",", "value", "[", 
"'y_min'", "]", ",", "x", ",", "value", "[", "'y_min'", "]", "+", "y_length", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")", "# smaller ticks on x-axis every 10 Hz", "y_length", "=", "(", "value", "[", "'y_max'", "]", "-", "value", "[", "'y_min'", "]", ")", "/", "value", "[", "'y_tick'", "]", "/", "2", "for", "x", "in", "range", "(", "x_low", ",", "x_high", ",", "5", ")", ":", "self", ".", "scene", ".", "addLine", "(", "x", ",", "value", "[", "'y_min'", "]", ",", "x", ",", "value", "[", "'y_min'", "]", "+", "y_length", ",", "QPen", "(", "QColor", "(", "LINE_COLOR", ")", ",", "LINE_WIDTH", ")", ")" ]
43.25
18.1875
def terminate(self, sigkill=False): """ Terminate (and then kill) the process launched to process the file. :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work. :type sigkill: bool """ if self._process is None: raise AirflowException("Tried to call stop before starting!") # The queue will likely get corrupted, so remove the reference self._result_queue = None self._process.terminate() # Arbitrarily wait 5s for the process to die self._process.join(5) if sigkill and self._process.is_alive(): self.log.warning("Killing PID %s", self._process.pid) os.kill(self._process.pid, signal.SIGKILL)
[ "def", "terminate", "(", "self", ",", "sigkill", "=", "False", ")", ":", "if", "self", ".", "_process", "is", "None", ":", "raise", "AirflowException", "(", "\"Tried to call stop before starting!\"", ")", "# The queue will likely get corrupted, so remove the reference", "self", ".", "_result_queue", "=", "None", "self", ".", "_process", ".", "terminate", "(", ")", "# Arbitrarily wait 5s for the process to die", "self", ".", "_process", ".", "join", "(", "5", ")", "if", "sigkill", "and", "self", ".", "_process", ".", "is_alive", "(", ")", ":", "self", ".", "log", ".", "warning", "(", "\"Killing PID %s\"", ",", "self", ".", "_process", ".", "pid", ")", "os", ".", "kill", "(", "self", ".", "_process", ".", "pid", ",", "signal", ".", "SIGKILL", ")" ]
42.588235
16.588235
def time_boxed(func, iterable, time_budget, *args): """ Apply a function to the items of an iterable within a given time budget. Loop the given iterable, calling the given function on each item. The expended time is compared to the given time budget after each iteration. """ time_budget = time_budget / 1000 # budget in milliseconds start = time.time() for thing in iterable: yield func(thing, *args) end = time.time() - start if end > time_budget: # Putting the condition at the end of the loop ensures that we # always run it once, which is useful for testing return
[ "def", "time_boxed", "(", "func", ",", "iterable", ",", "time_budget", ",", "*", "args", ")", ":", "time_budget", "=", "time_budget", "/", "1000", "# budget in milliseconds", "start", "=", "time", ".", "time", "(", ")", "for", "thing", "in", "iterable", ":", "yield", "func", "(", "thing", ",", "*", "args", ")", "end", "=", "time", ".", "time", "(", ")", "-", "start", "if", "end", ">", "time_budget", ":", "# Putting the condition at the end of the loop ensures that we", "# always run it once, which is useful for testing", "return" ]
36
21.777778
def detect_and_decorate(decorator, args, kwargs): """ Helper for applying a decorator when it is applied directly, and also applying it when it is given arguments and then applied to a function. """ # special behavior when invoked with only one non-keyword argument: act as # a normal decorator, decorating and returning that argument with # click.option if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return decorator(args[0]) # if we're not doing that, we should see no positional args # the alternative behavior is to fall through and discard *args, but this # will probably confuse someone in the future when their arguments are # silently discarded elif len(args) != 0: raise ValueError("this decorator cannot take positional args") # final case: got 0 or more kwargs, no positionals # do the function-which-returns-a-decorator dance to produce a # new decorator based on the arguments given else: def inner_decorator(f): return decorator(f, **kwargs) return inner_decorator
[ "def", "detect_and_decorate", "(", "decorator", ",", "args", ",", "kwargs", ")", ":", "# special behavior when invoked with only one non-keyword argument: act as", "# a normal decorator, decorating and returning that argument with", "# click.option", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "0", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "return", "decorator", "(", "args", "[", "0", "]", ")", "# if we're not doing that, we should see no positional args", "# the alternative behavior is to fall through and discard *args, but this", "# will probably confuse someone in the future when their arguments are", "# silently discarded", "elif", "len", "(", "args", ")", "!=", "0", ":", "raise", "ValueError", "(", "\"this decorator cannot take positional args\"", ")", "# final case: got 0 or more kwargs, no positionals", "# do the function-which-returns-a-decorator dance to produce a", "# new decorator based on the arguments given", "else", ":", "def", "inner_decorator", "(", "f", ")", ":", "return", "decorator", "(", "f", ",", "*", "*", "kwargs", ")", "return", "inner_decorator" ]
40.148148
22.666667
def try_except_handler(self, node): """Handler for try except statement to ignore excepted exceptions.""" # List all excepted exception's names excepted_types = [] for handler in node.handlers: if handler.type is None: excepted_types = None break if isinstance(handler.type, ast.Tuple): excepted_types.extend([exception_type for exception_type in handler.type.elts]) else: excepted_types.append(handler.type) new_exception_list = self.ignore_exceptions if self.ignore_exceptions is not None: if excepted_types is None: new_exception_list = None else: new_exception_list = list(set(excepted_types + self.ignore_exceptions)) # Set the new ignore list, and save the old one old_exception_handlers, self.ignore_exceptions = \ self.ignore_exceptions, new_exception_list # Run recursively on all sub nodes with the new ignore list node.body = [self.visit(node_item) for node_item in node.body] # Revert changes from ignore list self.ignore_exceptions = old_exception_handlers
[ "def", "try_except_handler", "(", "self", ",", "node", ")", ":", "# List all excepted exception's names", "excepted_types", "=", "[", "]", "for", "handler", "in", "node", ".", "handlers", ":", "if", "handler", ".", "type", "is", "None", ":", "excepted_types", "=", "None", "break", "if", "isinstance", "(", "handler", ".", "type", ",", "ast", ".", "Tuple", ")", ":", "excepted_types", ".", "extend", "(", "[", "exception_type", "for", "exception_type", "in", "handler", ".", "type", ".", "elts", "]", ")", "else", ":", "excepted_types", ".", "append", "(", "handler", ".", "type", ")", "new_exception_list", "=", "self", ".", "ignore_exceptions", "if", "self", ".", "ignore_exceptions", "is", "not", "None", ":", "if", "excepted_types", "is", "None", ":", "new_exception_list", "=", "None", "else", ":", "new_exception_list", "=", "list", "(", "set", "(", "excepted_types", "+", "self", ".", "ignore_exceptions", ")", ")", "# Set the new ignore list, and save the old one", "old_exception_handlers", ",", "self", ".", "ignore_exceptions", "=", "self", ".", "ignore_exceptions", ",", "new_exception_list", "# Run recursively on all sub nodes with the new ignore list", "node", ".", "body", "=", "[", "self", ".", "visit", "(", "node_item", ")", "for", "node_item", "in", "node", ".", "body", "]", "# Revert changes from ignore list", "self", ".", "ignore_exceptions", "=", "old_exception_handlers" ]
38
18.970588
def seed_response(self, command, response): # type: (Text, dict) -> MockAdapter """ Sets the response that the adapter will return for the specified command. You can seed multiple responses per command; the adapter will put them into a FIFO queue. When a request comes in, the adapter will pop the corresponding response off of the queue. Example: .. code-block:: python adapter.seed_response('sayHello', {'message': 'Hi!'}) adapter.seed_response('sayHello', {'message': 'Hello!'}) adapter.send_request({'command': 'sayHello'}) # {'message': 'Hi!'} adapter.send_request({'command': 'sayHello'}) # {'message': 'Hello!'} """ if command not in self.responses: self.responses[command] = deque() self.responses[command].append(response) return self
[ "def", "seed_response", "(", "self", ",", "command", ",", "response", ")", ":", "# type: (Text, dict) -> MockAdapter", "if", "command", "not", "in", "self", ".", "responses", ":", "self", ".", "responses", "[", "command", "]", "=", "deque", "(", ")", "self", ".", "responses", "[", "command", "]", ".", "append", "(", "response", ")", "return", "self" ]
32.571429
21.214286
def set_stack(self, stack_dump, stack_top): """ Stack dump is a dump of the stack from gdb, i.e. the result of the following gdb command : ``dump binary memory [stack_dump] [begin_addr] [end_addr]`` We set the stack to the same addresses as the gdb session to avoid pointers corruption. :param stack_dump: The dump file. :param stack_top: The address of the top of the stack in the gdb session. """ data = self._read_data(stack_dump) self.real_stack_top = stack_top addr = stack_top - len(data) # Address of the bottom of the stack l.info("Setting stack from 0x%x up to %#x", addr, stack_top) #FIXME: we should probably make we don't overwrite other stuff loaded there self._write(addr, data)
[ "def", "set_stack", "(", "self", ",", "stack_dump", ",", "stack_top", ")", ":", "data", "=", "self", ".", "_read_data", "(", "stack_dump", ")", "self", ".", "real_stack_top", "=", "stack_top", "addr", "=", "stack_top", "-", "len", "(", "data", ")", "# Address of the bottom of the stack", "l", ".", "info", "(", "\"Setting stack from 0x%x up to %#x\"", ",", "addr", ",", "stack_top", ")", "#FIXME: we should probably make we don't overwrite other stuff loaded there", "self", ".", "_write", "(", "addr", ",", "data", ")" ]
46.235294
24.941176
def _authenticate(secrets_file): """Runs the OAuth 2.0 installed application flow. Returns: An authorized httplib2.Http instance. """ flow = oauthclient.flow_from_clientsecrets( secrets_file, scope=OAUTH_SCOPE, message=('Failed to initialized OAuth 2.0 flow with secrets ' 'file: %s' % secrets_file)) storage = oauthfile.Storage(OAUTH_CREDENTIALS_FILE) credentials = storage.get() if credentials is None or credentials.invalid: credentials = oauthtools.run_flow(flow, storage, oauthtools.argparser.parse_args(args=[])) http = httplib2.Http() return credentials.authorize(http)
[ "def", "_authenticate", "(", "secrets_file", ")", ":", "flow", "=", "oauthclient", ".", "flow_from_clientsecrets", "(", "secrets_file", ",", "scope", "=", "OAUTH_SCOPE", ",", "message", "=", "(", "'Failed to initialized OAuth 2.0 flow with secrets '", "'file: %s'", "%", "secrets_file", ")", ")", "storage", "=", "oauthfile", ".", "Storage", "(", "OAUTH_CREDENTIALS_FILE", ")", "credentials", "=", "storage", ".", "get", "(", ")", "if", "credentials", "is", "None", "or", "credentials", ".", "invalid", ":", "credentials", "=", "oauthtools", ".", "run_flow", "(", "flow", ",", "storage", ",", "oauthtools", ".", "argparser", ".", "parse_args", "(", "args", "=", "[", "]", ")", ")", "http", "=", "httplib2", ".", "Http", "(", ")", "return", "credentials", ".", "authorize", "(", "http", ")" ]
38.352941
15.294118
def patch_data(data, L=100, try_diag=True, verbose=False): '''Patch ``data`` (for example Markov chain output) into parts of length ``L``. Return a Gaussian mixture where each component gets the empirical mean and covariance of one patch. :param data: Matrix-like array; the points to be patched. Expect ``data[i]`` as the d-dimensional i-th point. :param L: Integer; the length of one patch. The last patch will be shorter if ``L`` is not a divisor of ``len(data)``. :param try_diag: Bool; If some patch does not define a proper covariance matrix, it cannot define a Gaussian component. ``try_diag`` defines how to handle that case: If ``True`` (default), the off-diagonal elements are set to zero and it is tried to form a Gaussian with that matrix again. If that fails as well, the patch is skipped. If ``False`` the patch is skipped directly. :param verbose: Bool; If ``True`` print all status information. ''' # patch data into length L patches patches = _np.array([data[patch_start:patch_start + L] for patch_start in range(0, len(data), L)]) # calculate means and covs means = _np.array([_np.mean(patch, axis=0) for patch in patches]) covs = _np.array([_np.cov (patch, rowvar=0) for patch in patches]) # form gaussian components components = [] skipped = [] for i, (mean, cov) in enumerate(zip(means, covs)): try: this_comp = Gauss(mean, cov) components.append(this_comp) except _np.linalg.LinAlgError as error1: if verbose: print("Could not form Gauss from patch %i. Reason: %s" % (i, repr(error1))) if try_diag: cov = _np.diag(_np.diag(cov)) try: this_comp = Gauss(mean, cov) components.append(this_comp) if verbose: print('Diagonal covariance attempt succeeded.') except _np.linalg.LinAlgError as error2: skipped.append(i) if verbose: print("Diagonal covariance attempt failed. 
Reason: %s" % repr(error2)) else: # if not try_diag skipped.append(i) # print skipped components if any if skipped: print("WARNING: Could not form Gaussians from: %s" % skipped) # create and return mixture return MixtureDensity(components)
[ "def", "patch_data", "(", "data", ",", "L", "=", "100", ",", "try_diag", "=", "True", ",", "verbose", "=", "False", ")", ":", "# patch data into length L patches", "patches", "=", "_np", ".", "array", "(", "[", "data", "[", "patch_start", ":", "patch_start", "+", "L", "]", "for", "patch_start", "in", "range", "(", "0", ",", "len", "(", "data", ")", ",", "L", ")", "]", ")", "# calculate means and covs", "means", "=", "_np", ".", "array", "(", "[", "_np", ".", "mean", "(", "patch", ",", "axis", "=", "0", ")", "for", "patch", "in", "patches", "]", ")", "covs", "=", "_np", ".", "array", "(", "[", "_np", ".", "cov", "(", "patch", ",", "rowvar", "=", "0", ")", "for", "patch", "in", "patches", "]", ")", "# form gaussian components", "components", "=", "[", "]", "skipped", "=", "[", "]", "for", "i", ",", "(", "mean", ",", "cov", ")", "in", "enumerate", "(", "zip", "(", "means", ",", "covs", ")", ")", ":", "try", ":", "this_comp", "=", "Gauss", "(", "mean", ",", "cov", ")", "components", ".", "append", "(", "this_comp", ")", "except", "_np", ".", "linalg", ".", "LinAlgError", "as", "error1", ":", "if", "verbose", ":", "print", "(", "\"Could not form Gauss from patch %i. Reason: %s\"", "%", "(", "i", ",", "repr", "(", "error1", ")", ")", ")", "if", "try_diag", ":", "cov", "=", "_np", ".", "diag", "(", "_np", ".", "diag", "(", "cov", ")", ")", "try", ":", "this_comp", "=", "Gauss", "(", "mean", ",", "cov", ")", "components", ".", "append", "(", "this_comp", ")", "if", "verbose", ":", "print", "(", "'Diagonal covariance attempt succeeded.'", ")", "except", "_np", ".", "linalg", ".", "LinAlgError", "as", "error2", ":", "skipped", ".", "append", "(", "i", ")", "if", "verbose", ":", "print", "(", "\"Diagonal covariance attempt failed. 
Reason: %s\"", "%", "repr", "(", "error2", ")", ")", "else", ":", "# if not try_diag", "skipped", ".", "append", "(", "i", ")", "# print skipped components if any", "if", "skipped", ":", "print", "(", "\"WARNING: Could not form Gaussians from: %s\"", "%", "skipped", ")", "# create and return mixture", "return", "MixtureDensity", "(", "components", ")" ]
36.910448
23.119403
def build_trading_timeline(start, end): ''' Build the daily-based index we will trade on ''' EMPTY_DATES = pd.date_range('2000/01/01', periods=0, tz=pytz.utc) now = dt.datetime.now(tz=pytz.utc) if not start: if not end: # Live trading until the end of the day bt_dates = EMPTY_DATES live_dates = pd.date_range( start=now, end=normalize_date_format('23h59')) else: end = normalize_date_format(end) if end < now: # Backtesting since a year before end bt_dates = pd.date_range( start=end - 360 * pd.datetools.day, end=end) live_dates = EMPTY_DATES elif end > now: # Live trading from now to end bt_dates = EMPTY_DATES live_dates = pd.date_range(start=now, end=end) else: start = normalize_date_format(start) if start < now: if not end: # Backtest for a year or until now end = start + 360 * pd.datetools.day if end > now: end = now - pd.datetools.day live_dates = EMPTY_DATES bt_dates = pd.date_range( start=start, end=end) else: end = normalize_date_format(end) if end < now: # Nothing to do, backtest from start to end live_dates = EMPTY_DATES bt_dates = pd.date_range(start=start, end=end) elif end > now: # Hybrid timeline, backtest from start to end, live # trade from now to end bt_dates = pd.date_range( start=start, end=now - pd.datetools.day) live_dates = pd.date_range(start=now, end=end) elif start > now: if not end: # Live trading from start to the end of the day bt_dates = EMPTY_DATES live_dates = pd.date_range( start=start, end=normalize_date_format('23h59')) else: # Live trading from start to end end = normalize_date_format(end) bt_dates = EMPTY_DATES live_dates = pd.date_range(start=start, end=end) return bt_dates + live_dates
[ "def", "build_trading_timeline", "(", "start", ",", "end", ")", ":", "EMPTY_DATES", "=", "pd", ".", "date_range", "(", "'2000/01/01'", ",", "periods", "=", "0", ",", "tz", "=", "pytz", ".", "utc", ")", "now", "=", "dt", ".", "datetime", ".", "now", "(", "tz", "=", "pytz", ".", "utc", ")", "if", "not", "start", ":", "if", "not", "end", ":", "# Live trading until the end of the day", "bt_dates", "=", "EMPTY_DATES", "live_dates", "=", "pd", ".", "date_range", "(", "start", "=", "now", ",", "end", "=", "normalize_date_format", "(", "'23h59'", ")", ")", "else", ":", "end", "=", "normalize_date_format", "(", "end", ")", "if", "end", "<", "now", ":", "# Backtesting since a year before end", "bt_dates", "=", "pd", ".", "date_range", "(", "start", "=", "end", "-", "360", "*", "pd", ".", "datetools", ".", "day", ",", "end", "=", "end", ")", "live_dates", "=", "EMPTY_DATES", "elif", "end", ">", "now", ":", "# Live trading from now to end", "bt_dates", "=", "EMPTY_DATES", "live_dates", "=", "pd", ".", "date_range", "(", "start", "=", "now", ",", "end", "=", "end", ")", "else", ":", "start", "=", "normalize_date_format", "(", "start", ")", "if", "start", "<", "now", ":", "if", "not", "end", ":", "# Backtest for a year or until now", "end", "=", "start", "+", "360", "*", "pd", ".", "datetools", ".", "day", "if", "end", ">", "now", ":", "end", "=", "now", "-", "pd", ".", "datetools", ".", "day", "live_dates", "=", "EMPTY_DATES", "bt_dates", "=", "pd", ".", "date_range", "(", "start", "=", "start", ",", "end", "=", "end", ")", "else", ":", "end", "=", "normalize_date_format", "(", "end", ")", "if", "end", "<", "now", ":", "# Nothing to do, backtest from start to end", "live_dates", "=", "EMPTY_DATES", "bt_dates", "=", "pd", ".", "date_range", "(", "start", "=", "start", ",", "end", "=", "end", ")", "elif", "end", ">", "now", ":", "# Hybrid timeline, backtest from start to end, live", "# trade from now to end", "bt_dates", "=", "pd", ".", "date_range", "(", "start", "=", "start", 
",", "end", "=", "now", "-", "pd", ".", "datetools", ".", "day", ")", "live_dates", "=", "pd", ".", "date_range", "(", "start", "=", "now", ",", "end", "=", "end", ")", "elif", "start", ">", "now", ":", "if", "not", "end", ":", "# Live trading from start to the end of the day", "bt_dates", "=", "EMPTY_DATES", "live_dates", "=", "pd", ".", "date_range", "(", "start", "=", "start", ",", "end", "=", "normalize_date_format", "(", "'23h59'", ")", ")", "else", ":", "# Live trading from start to end", "end", "=", "normalize_date_format", "(", "end", ")", "bt_dates", "=", "EMPTY_DATES", "live_dates", "=", "pd", ".", "date_range", "(", "start", "=", "start", ",", "end", "=", "end", ")", "return", "bt_dates", "+", "live_dates" ]
39.096774
13.645161
def find_urls(thing, base_url=None, mimetype=None, log=False): """ This function uses several methods to extract URLs from 'thing', which can be a string or raw bytes. If you supply the base URL, it will attempt to use it with urljoin to create full URLs from relative paths. """ if log: logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s') logger = logging.getLogger(__name__) # Convert "thing" to bytes if it is a string. try: if isinstance(thing, str): thing = thing.encode(encoding='ascii', errors='ignore') except: if log: logger.exception('Unable to convert thing to bytes.') return [] # Store any URLs we find in the bytes. all_urls = [] # Continue if we have bytes. if isinstance(thing, bytes): # Return an empty list if we failed to get the mimetype. try: if not mimetype: mimetype = magic.from_buffer(thing) except: if log: logger.exception('Unable to get mimetype from the bytes buffer.') return [] mimetype = mimetype.lower() # If the bytes are HTML... if 'html' in mimetype: try: all_urls += _html_find_urls(thing, mimetype, base_url) except: if log: logger.exception('Error when finding HTML URLs.') # If the bytes are a PDF... elif 'pdf' in mimetype: try: all_urls += _pdf_find_urls(thing, mimetype) except: if log: logger.exception('Error when finding PDF URLs.') # If the bytes are an RFC 822 e-mail... elif 'rfc 822' in mimetype: return [] # If the bytes are ASCII or Unicode text... elif 'ascii' in mimetype or 'text' in mimetype: try: all_urls += _html_find_urls(thing, mimetype, base_url) except: if log: logger.exception('Error when finding ASCII/HTML URLs.') try: all_urls += _ascii_find_urls(thing, mimetype) except: if log: logger.exception('Error when finding ASCII URLs.') try: all_urls += _pdf_find_urls(thing, mimetype) except: if log: logger.exception('Error when finding ASCII/PDF URLs.') # If the bytes are anything else... else: # Try to treat the bytes as a PDF and find URLs. 
try: all_urls += _pdf_find_urls(thing, mimetype) except: if log: logger.exception('Error when finding unknown/PDF URLs.') # If we don't know how to handle this mimetype, the bytes are likely just "data". # In that case, we don't want to find all possible ASCII URLs, as it will result # in a lot of bad URLs. Try to treat the bytes as ASCII and find URLs. try: all_urls += _ascii_find_urls(thing, mimetype, extra_tokens=False) except: if log: logger.exception('Error when finding unknown/ASCII URLs.') # Make sure we only have valid URLs. valid_urls = [] for url in list(set(all_urls)): try: # If the URL is valid as-is, just add it to the list. if is_valid(url): valid_urls.append(url) # The URL is not valid. If we were given a base URL, try joining them and checking if the result is valid. elif base_url: joined_url = urljoin(base_url, url) if is_valid(joined_url): valid_urls.append(joined_url) except: pass # For the edge cases of HTML files where we didn't find any URLs, treat it as an ASCII file # and re-find any URLs that way. if not valid_urls and 'html' in mimetype: try: for url in _ascii_find_urls(thing, mimetype): # If the URL is valid as-is, just add it to the list. if is_valid(url): valid_urls.append(url) # The URL is not valid. If we were given a base URL, try joining them and checking if the result is valid. elif base_url: joined_url = urljoin(base_url, url) if is_valid(joined_url): valid_urls.append(joined_url) except: pass # Return the valid URLs in ASCII form. ascii_urls = [] for url in valid_urls: try: if isinstance(url, str): ascii_urls.append(url) if isinstance(url, bytes): ascii_urls.append(url.decode('ascii', errors='ignore')) except: pass # Check if any of the URLs are Proofpoint URLs and try to decode them. 
for url in ascii_urls[:]: if 'urldefense.proofpoint.com/v2/url' in url: try: query_u=parse_qs(urlparse(url).query)['u'][0] decoded_url = query_u.replace('-3A', ':').replace('_', '/').replace('-2D', '-') if is_valid(decoded_url): ascii_urls.append(decoded_url) except: if log: logger.exception('Error decoding Proofpoint URL: {}'.format(url)) # Check if any of the URLs are Outlook safelinks and try to decode them. for url in ascii_urls[:]: if 'safelinks.protection.outlook.com' in url: try: query_url=parse_qs(urlparse(url).query)['url'][0] decoded_url = urllib.parse.unquote(query_url) if is_valid(decoded_url): ascii_urls.append(decoded_url) except: if log: logger.exception('Error decoding Outlook safelinks URL: {}'.format(url)) # Check if any of the URLs are Google redirection URLs and try to decode them. for url in ascii_urls[:]: if 'www.google.com/url?' in url: try: query_url=parse_qs(urlparse(url).query)['q'][0] decoded_url = urllib.parse.unquote(query_url) if is_valid(decoded_url): ascii_urls.append(decoded_url) except: if log: logger.exception('Error decoding Google redirection URL: {}'.format(url)) # Add an unquoted version of each URL to the list. for url in ascii_urls[:]: ascii_urls.append(urllib.parse.unquote(url)) # Add http:// to the beginning of each URL if it isn't there already. This lets us properly # catch URLs that may not have the scheme on the front of them. ascii_urls = ['http://' + u if not u.lower().startswith('http') and not u.lower().startswith('ftp') else u for u in ascii_urls] # Remove any trailing "/" from the URLs so that they are consistent with how they go in CRITS. ascii_urls = [u[:-1] if u.endswith('/') else u for u in ascii_urls] return sorted(list(set(ascii_urls)))
[ "def", "find_urls", "(", "thing", ",", "base_url", "=", "None", ",", "mimetype", "=", "None", ",", "log", "=", "False", ")", ":", "if", "log", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(asctime)s %(name)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s'", ")", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# Convert \"thing\" to bytes if it is a string.", "try", ":", "if", "isinstance", "(", "thing", ",", "str", ")", ":", "thing", "=", "thing", ".", "encode", "(", "encoding", "=", "'ascii'", ",", "errors", "=", "'ignore'", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Unable to convert thing to bytes.'", ")", "return", "[", "]", "# Store any URLs we find in the bytes.", "all_urls", "=", "[", "]", "# Continue if we have bytes.", "if", "isinstance", "(", "thing", ",", "bytes", ")", ":", "# Return an empty list if we failed to get the mimetype.", "try", ":", "if", "not", "mimetype", ":", "mimetype", "=", "magic", ".", "from_buffer", "(", "thing", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Unable to get mimetype from the bytes buffer.'", ")", "return", "[", "]", "mimetype", "=", "mimetype", ".", "lower", "(", ")", "# If the bytes are HTML...", "if", "'html'", "in", "mimetype", ":", "try", ":", "all_urls", "+=", "_html_find_urls", "(", "thing", ",", "mimetype", ",", "base_url", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding HTML URLs.'", ")", "# If the bytes are a PDF...", "elif", "'pdf'", "in", "mimetype", ":", "try", ":", "all_urls", "+=", "_pdf_find_urls", "(", "thing", ",", "mimetype", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding PDF URLs.'", ")", "# If the bytes are an RFC 822 e-mail...", "elif", "'rfc 822'", "in", "mimetype", ":", "return", "[", "]", "# If the bytes are ASCII or Unicode text...", "elif", "'ascii'", "in", 
"mimetype", "or", "'text'", "in", "mimetype", ":", "try", ":", "all_urls", "+=", "_html_find_urls", "(", "thing", ",", "mimetype", ",", "base_url", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding ASCII/HTML URLs.'", ")", "try", ":", "all_urls", "+=", "_ascii_find_urls", "(", "thing", ",", "mimetype", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding ASCII URLs.'", ")", "try", ":", "all_urls", "+=", "_pdf_find_urls", "(", "thing", ",", "mimetype", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding ASCII/PDF URLs.'", ")", "# If the bytes are anything else...", "else", ":", "# Try to treat the bytes as a PDF and find URLs.", "try", ":", "all_urls", "+=", "_pdf_find_urls", "(", "thing", ",", "mimetype", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding unknown/PDF URLs.'", ")", "# If we don't know how to handle this mimetype, the bytes are likely just \"data\".", "# In that case, we don't want to find all possible ASCII URLs, as it will result", "# in a lot of bad URLs. Try to treat the bytes as ASCII and find URLs.", "try", ":", "all_urls", "+=", "_ascii_find_urls", "(", "thing", ",", "mimetype", ",", "extra_tokens", "=", "False", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error when finding unknown/ASCII URLs.'", ")", "# Make sure we only have valid URLs.", "valid_urls", "=", "[", "]", "for", "url", "in", "list", "(", "set", "(", "all_urls", ")", ")", ":", "try", ":", "# If the URL is valid as-is, just add it to the list.", "if", "is_valid", "(", "url", ")", ":", "valid_urls", ".", "append", "(", "url", ")", "# The URL is not valid. 
If we were given a base URL, try joining them and checking if the result is valid.", "elif", "base_url", ":", "joined_url", "=", "urljoin", "(", "base_url", ",", "url", ")", "if", "is_valid", "(", "joined_url", ")", ":", "valid_urls", ".", "append", "(", "joined_url", ")", "except", ":", "pass", "# For the edge cases of HTML files where we didn't find any URLs, treat it as an ASCII file", "# and re-find any URLs that way.", "if", "not", "valid_urls", "and", "'html'", "in", "mimetype", ":", "try", ":", "for", "url", "in", "_ascii_find_urls", "(", "thing", ",", "mimetype", ")", ":", "# If the URL is valid as-is, just add it to the list.", "if", "is_valid", "(", "url", ")", ":", "valid_urls", ".", "append", "(", "url", ")", "# The URL is not valid. If we were given a base URL, try joining them and checking if the result is valid.", "elif", "base_url", ":", "joined_url", "=", "urljoin", "(", "base_url", ",", "url", ")", "if", "is_valid", "(", "joined_url", ")", ":", "valid_urls", ".", "append", "(", "joined_url", ")", "except", ":", "pass", "# Return the valid URLs in ASCII form.", "ascii_urls", "=", "[", "]", "for", "url", "in", "valid_urls", ":", "try", ":", "if", "isinstance", "(", "url", ",", "str", ")", ":", "ascii_urls", ".", "append", "(", "url", ")", "if", "isinstance", "(", "url", ",", "bytes", ")", ":", "ascii_urls", ".", "append", "(", "url", ".", "decode", "(", "'ascii'", ",", "errors", "=", "'ignore'", ")", ")", "except", ":", "pass", "# Check if any of the URLs are Proofpoint URLs and try to decode them.", "for", "url", "in", "ascii_urls", "[", ":", "]", ":", "if", "'urldefense.proofpoint.com/v2/url'", "in", "url", ":", "try", ":", "query_u", "=", "parse_qs", "(", "urlparse", "(", "url", ")", ".", "query", ")", "[", "'u'", "]", "[", "0", "]", "decoded_url", "=", "query_u", ".", "replace", "(", "'-3A'", ",", "':'", ")", ".", "replace", "(", "'_'", ",", "'/'", ")", ".", "replace", "(", "'-2D'", ",", "'-'", ")", "if", "is_valid", "(", "decoded_url", ")", 
":", "ascii_urls", ".", "append", "(", "decoded_url", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error decoding Proofpoint URL: {}'", ".", "format", "(", "url", ")", ")", "# Check if any of the URLs are Outlook safelinks and try to decode them.", "for", "url", "in", "ascii_urls", "[", ":", "]", ":", "if", "'safelinks.protection.outlook.com'", "in", "url", ":", "try", ":", "query_url", "=", "parse_qs", "(", "urlparse", "(", "url", ")", ".", "query", ")", "[", "'url'", "]", "[", "0", "]", "decoded_url", "=", "urllib", ".", "parse", ".", "unquote", "(", "query_url", ")", "if", "is_valid", "(", "decoded_url", ")", ":", "ascii_urls", ".", "append", "(", "decoded_url", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error decoding Outlook safelinks URL: {}'", ".", "format", "(", "url", ")", ")", "# Check if any of the URLs are Google redirection URLs and try to decode them.", "for", "url", "in", "ascii_urls", "[", ":", "]", ":", "if", "'www.google.com/url?'", "in", "url", ":", "try", ":", "query_url", "=", "parse_qs", "(", "urlparse", "(", "url", ")", ".", "query", ")", "[", "'q'", "]", "[", "0", "]", "decoded_url", "=", "urllib", ".", "parse", ".", "unquote", "(", "query_url", ")", "if", "is_valid", "(", "decoded_url", ")", ":", "ascii_urls", ".", "append", "(", "decoded_url", ")", "except", ":", "if", "log", ":", "logger", ".", "exception", "(", "'Error decoding Google redirection URL: {}'", ".", "format", "(", "url", ")", ")", "# Add an unquoted version of each URL to the list.", "for", "url", "in", "ascii_urls", "[", ":", "]", ":", "ascii_urls", ".", "append", "(", "urllib", ".", "parse", ".", "unquote", "(", "url", ")", ")", "# Add http:// to the beginning of each URL if it isn't there already. 
This lets us properly", "# catch URLs that may not have the scheme on the front of them.", "ascii_urls", "=", "[", "'http://'", "+", "u", "if", "not", "u", ".", "lower", "(", ")", ".", "startswith", "(", "'http'", ")", "and", "not", "u", ".", "lower", "(", ")", ".", "startswith", "(", "'ftp'", ")", "else", "u", "for", "u", "in", "ascii_urls", "]", "# Remove any trailing \"/\" from the URLs so that they are consistent with how they go in CRITS.", "ascii_urls", "=", "[", "u", "[", ":", "-", "1", "]", "if", "u", ".", "endswith", "(", "'/'", ")", "else", "u", "for", "u", "in", "ascii_urls", "]", "return", "sorted", "(", "list", "(", "set", "(", "ascii_urls", ")", ")", ")" ]
38.128342
24.144385
def change_keyboard_control(self, onerror = None, **keys): """Change the parameters provided as keyword arguments: key_click_percent The volume of key clicks between 0 (off) and 100 (load). -1 will restore default setting. bell_percent The base volume of the bell, coded as above. bell_pitch The pitch of the bell in Hz, -1 restores the default. bell_duration The duration of the bell in milliseconds, -1 restores the default. led led_mode led_mode should be X.LedModeOff or X.LedModeOn. If led is provided, it should be a 32-bit mask listing the LEDs that should change. If led is not provided, all LEDs are changed. key auto_repeat_mode auto_repeat_mode should be one of X.AutoRepeatModeOff, X.AutoRepeatModeOn, or X.AutoRepeatModeDefault. If key is provided, that key will be modified, otherwise the global state for the entire keyboard will be modified.""" request.ChangeKeyboardControl(display = self.display, onerror = onerror, attrs = keys)
[ "def", "change_keyboard_control", "(", "self", ",", "onerror", "=", "None", ",", "*", "*", "keys", ")", ":", "request", ".", "ChangeKeyboardControl", "(", "display", "=", "self", ".", "display", ",", "onerror", "=", "onerror", ",", "attrs", "=", "keys", ")" ]
42.551724
22.37931
def cli(env): """List object storage accounts.""" mgr = SoftLayer.ObjectStorageManager(env.client) accounts = mgr.list_accounts() table = formatting.Table(['id', 'name', 'apiType']) table.sortby = 'id' api_type = None for account in accounts: if 'vendorName' in account and account['vendorName'] == 'Swift': api_type = 'Swift' elif 'Cleversafe' in account['serviceResource']['name']: api_type = 'S3' table.add_row([ account['id'], account['username'], api_type, ]) env.fout(table)
[ "def", "cli", "(", "env", ")", ":", "mgr", "=", "SoftLayer", ".", "ObjectStorageManager", "(", "env", ".", "client", ")", "accounts", "=", "mgr", ".", "list_accounts", "(", ")", "table", "=", "formatting", ".", "Table", "(", "[", "'id'", ",", "'name'", ",", "'apiType'", "]", ")", "table", ".", "sortby", "=", "'id'", "api_type", "=", "None", "for", "account", "in", "accounts", ":", "if", "'vendorName'", "in", "account", "and", "account", "[", "'vendorName'", "]", "==", "'Swift'", ":", "api_type", "=", "'Swift'", "elif", "'Cleversafe'", "in", "account", "[", "'serviceResource'", "]", "[", "'name'", "]", ":", "api_type", "=", "'S3'", "table", ".", "add_row", "(", "[", "account", "[", "'id'", "]", ",", "account", "[", "'username'", "]", ",", "api_type", ",", "]", ")", "env", ".", "fout", "(", "table", ")" ]
27.952381
19.904762
def app_trim_memory(self, pid: int or str, level: str = 'RUNNING_LOW') -> None: '''Trim memory. Args: level: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | \ MODERATE | RUNNING_CRITICAL | COMPLETE ''' _, error = self._execute('-s', self.device_sn, 'shell', 'am', 'send-trim-memory', str(pid), level) if error and error.startswith('Error'): raise ApplicationsException(error.split(':', 1)[-1].strip())
[ "def", "app_trim_memory", "(", "self", ",", "pid", ":", "int", "or", "str", ",", "level", ":", "str", "=", "'RUNNING_LOW'", ")", "->", "None", ":", "_", ",", "error", "=", "self", ".", "_execute", "(", "'-s'", ",", "self", ".", "device_sn", ",", "'shell'", ",", "'am'", ",", "'send-trim-memory'", ",", "str", "(", "pid", ")", ",", "level", ")", "if", "error", "and", "error", ".", "startswith", "(", "'Error'", ")", ":", "raise", "ApplicationsException", "(", "error", ".", "split", "(", "':'", ",", "1", ")", "[", "-", "1", "]", ".", "strip", "(", ")", ")" ]
46.727273
27.272727
def pad(self, minibatch): """Pad a batch of examples using this field. Pads to self.fix_length if provided, otherwise pads to the length of the longest example in the batch. Prepends self.init_token and appends self.eos_token if those attributes are not None. Returns a tuple of the padded list and a list containing lengths of each example if `self.include_lengths` is `True` and `self.sequential` is `True`, else just returns the padded list. If `self.sequential` is `False`, no padding is applied. """ minibatch = list(minibatch) if not self.sequential: return minibatch if self.fix_length is None: max_len = max(len(x) for x in minibatch) else: max_len = self.fix_length + ( self.init_token, self.eos_token).count(None) - 2 padded, lengths = [], [] for x in minibatch: if self.pad_first: padded.append( [self.pad_token] * max(0, max_len - len(x)) + ([] if self.init_token is None else [self.init_token]) + list(x[-max_len:] if self.truncate_first else x[:max_len]) + ([] if self.eos_token is None else [self.eos_token])) else: padded.append( ([] if self.init_token is None else [self.init_token]) + list(x[-max_len:] if self.truncate_first else x[:max_len]) + ([] if self.eos_token is None else [self.eos_token]) + [self.pad_token] * max(0, max_len - len(x))) lengths.append(len(padded[-1]) - max(0, max_len - len(x))) if self.include_lengths: return (padded, lengths) return padded
[ "def", "pad", "(", "self", ",", "minibatch", ")", ":", "minibatch", "=", "list", "(", "minibatch", ")", "if", "not", "self", ".", "sequential", ":", "return", "minibatch", "if", "self", ".", "fix_length", "is", "None", ":", "max_len", "=", "max", "(", "len", "(", "x", ")", "for", "x", "in", "minibatch", ")", "else", ":", "max_len", "=", "self", ".", "fix_length", "+", "(", "self", ".", "init_token", ",", "self", ".", "eos_token", ")", ".", "count", "(", "None", ")", "-", "2", "padded", ",", "lengths", "=", "[", "]", ",", "[", "]", "for", "x", "in", "minibatch", ":", "if", "self", ".", "pad_first", ":", "padded", ".", "append", "(", "[", "self", ".", "pad_token", "]", "*", "max", "(", "0", ",", "max_len", "-", "len", "(", "x", ")", ")", "+", "(", "[", "]", "if", "self", ".", "init_token", "is", "None", "else", "[", "self", ".", "init_token", "]", ")", "+", "list", "(", "x", "[", "-", "max_len", ":", "]", "if", "self", ".", "truncate_first", "else", "x", "[", ":", "max_len", "]", ")", "+", "(", "[", "]", "if", "self", ".", "eos_token", "is", "None", "else", "[", "self", ".", "eos_token", "]", ")", ")", "else", ":", "padded", ".", "append", "(", "(", "[", "]", "if", "self", ".", "init_token", "is", "None", "else", "[", "self", ".", "init_token", "]", ")", "+", "list", "(", "x", "[", "-", "max_len", ":", "]", "if", "self", ".", "truncate_first", "else", "x", "[", ":", "max_len", "]", ")", "+", "(", "[", "]", "if", "self", ".", "eos_token", "is", "None", "else", "[", "self", ".", "eos_token", "]", ")", "+", "[", "self", ".", "pad_token", "]", "*", "max", "(", "0", ",", "max_len", "-", "len", "(", "x", ")", ")", ")", "lengths", ".", "append", "(", "len", "(", "padded", "[", "-", "1", "]", ")", "-", "max", "(", "0", ",", "max_len", "-", "len", "(", "x", ")", ")", ")", "if", "self", ".", "include_lengths", ":", "return", "(", "padded", ",", "lengths", ")", "return", "padded" ]
49.222222
21.805556
def update(self, id_equipment, id_environment, is_router): """Remove Related Equipment with Environment from by the identifier. :param id_equipment: Identifier of the Equipment. Integer value and greater than zero. :param id_environment: Identifier of the Environment. Integer value and greater than zero. :param is_router: Identifier of the Environment. Boolean value. :return: None :raise InvalidParameterError: The identifier of Environment, Equipament is null and invalid. :raise EquipamentoNotFoundError: Equipment not registered. :raise EquipamentoAmbienteNaoExisteError: Environment not registered. :raise VipIpError: IP-related equipment is being used for a request VIP. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database. """ if not is_valid_int_param(id_equipment): raise InvalidParameterError( u'The identifier of Equipment is invalid or was not informed.') if not is_valid_int_param(id_environment): raise InvalidParameterError( u'The identifier of Environment is invalid or was not informed.') equipment_environment_map = dict() equipment_environment_map['id_equipamento'] = id_equipment equipment_environment_map['id_ambiente'] = id_environment equipment_environment_map['is_router'] = is_router code, xml = self.submit( {'equipamento_ambiente': equipment_environment_map}, 'PUT', 'equipamentoambiente/update/') return self.response(code, xml)
[ "def", "update", "(", "self", ",", "id_equipment", ",", "id_environment", ",", "is_router", ")", ":", "if", "not", "is_valid_int_param", "(", "id_equipment", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Equipment is invalid or was not informed.'", ")", "if", "not", "is_valid_int_param", "(", "id_environment", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Environment is invalid or was not informed.'", ")", "equipment_environment_map", "=", "dict", "(", ")", "equipment_environment_map", "[", "'id_equipamento'", "]", "=", "id_equipment", "equipment_environment_map", "[", "'id_ambiente'", "]", "=", "id_environment", "equipment_environment_map", "[", "'is_router'", "]", "=", "is_router", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'equipamento_ambiente'", ":", "equipment_environment_map", "}", ",", "'PUT'", ",", "'equipamentoambiente/update/'", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
48.176471
28.441176
def __authorize(self, client_id, client_secret, credit_card_id, **kwargs): """Call documentation: `/credit_card/authorize <https://www.wepay.com/developer/reference/credit_card#authorize>`_, plus extra keyword parameter: :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` """ params = { 'client_id': client_id, 'client_secret': client_secret, 'credit_card_id': credit_card_id } return self.make_call(self.__authorize, params, kwargs)
[ "def", "__authorize", "(", "self", ",", "client_id", ",", "client_secret", ",", "credit_card_id", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'client_id'", ":", "client_id", ",", "'client_secret'", ":", "client_secret", ",", "'credit_card_id'", ":", "credit_card_id", "}", "return", "self", ".", "make_call", "(", "self", ".", "__authorize", ",", "params", ",", "kwargs", ")" ]
37.142857
19.190476
def get_single(self, key, lang=None): """ Returns a single triple related to this node. :param key: Predicate of the triple :param lang: Language of the triple if applicable :rtype: Literal or BNode or URIRef """ if not isinstance(key, URIRef): key = URIRef(key) if lang is not None: default = None for o in self.graph.objects(self.asNode(), key): default = o if o.language == lang: return o return default else: for o in self.graph.objects(self.asNode(), key): return o
[ "def", "get_single", "(", "self", ",", "key", ",", "lang", "=", "None", ")", ":", "if", "not", "isinstance", "(", "key", ",", "URIRef", ")", ":", "key", "=", "URIRef", "(", "key", ")", "if", "lang", "is", "not", "None", ":", "default", "=", "None", "for", "o", "in", "self", ".", "graph", ".", "objects", "(", "self", ".", "asNode", "(", ")", ",", "key", ")", ":", "default", "=", "o", "if", "o", ".", "language", "==", "lang", ":", "return", "o", "return", "default", "else", ":", "for", "o", "in", "self", ".", "graph", ".", "objects", "(", "self", ".", "asNode", "(", ")", ",", "key", ")", ":", "return", "o" ]
32.25
13.35
def merge(self, samples_uuid): """ The method to merge the datamodels belonging to different references :param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references :return: Returns the merged dataframe """ all_meta_data = pd.DataFrame() for dm in self.data_model: all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0) group = all_meta_data.groupby([samples_uuid])['sample'] sample_sets = group.apply(list).values merged_df = pd.DataFrame() multi_index = list(map(list, zip(*sample_sets))) multi_index_names = list(range(0, len(sample_sets[0]))) i = 1 for pair in sample_sets: i += 1 numbers = list(range(0, len(pair))) df_temp = pd.DataFrame() for n in numbers: try: # data.loc[pair[n]] may not be found due to the fast loading (full_load = False) df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1) except: pass merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0) multi_index = np.asarray(multi_index) multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names) merged_df.index = multi_index return merged_df
[ "def", "merge", "(", "self", ",", "samples_uuid", ")", ":", "all_meta_data", "=", "pd", ".", "DataFrame", "(", ")", "for", "dm", "in", "self", ".", "data_model", ":", "all_meta_data", "=", "pd", ".", "concat", "(", "[", "all_meta_data", ",", "dm", ".", "meta", "]", ",", "axis", "=", "0", ")", "group", "=", "all_meta_data", ".", "groupby", "(", "[", "samples_uuid", "]", ")", "[", "'sample'", "]", "sample_sets", "=", "group", ".", "apply", "(", "list", ")", ".", "values", "merged_df", "=", "pd", ".", "DataFrame", "(", ")", "multi_index", "=", "list", "(", "map", "(", "list", ",", "zip", "(", "*", "sample_sets", ")", ")", ")", "multi_index_names", "=", "list", "(", "range", "(", "0", ",", "len", "(", "sample_sets", "[", "0", "]", ")", ")", ")", "i", "=", "1", "for", "pair", "in", "sample_sets", ":", "i", "+=", "1", "numbers", "=", "list", "(", "range", "(", "0", ",", "len", "(", "pair", ")", ")", ")", "df_temp", "=", "pd", ".", "DataFrame", "(", ")", "for", "n", "in", "numbers", ":", "try", ":", "# data.loc[pair[n]] may not be found due to the fast loading (full_load = False)", "df_temp", "=", "pd", ".", "concat", "(", "[", "df_temp", ",", "self", ".", "data_model", "[", "n", "]", ".", "data", ".", "loc", "[", "pair", "[", "n", "]", "]", "]", ",", "axis", "=", "1", ")", "except", ":", "pass", "merged_df", "=", "pd", ".", "concat", "(", "[", "merged_df", ",", "df_temp", ".", "T", ".", "bfill", "(", ")", ".", "iloc", "[", "[", "0", "]", "]", "]", ",", "axis", "=", "0", ")", "multi_index", "=", "np", ".", "asarray", "(", "multi_index", ")", "multi_index", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "multi_index", ",", "names", "=", "multi_index_names", ")", "merged_df", ".", "index", "=", "multi_index", "return", "merged_df" ]
40.142857
23.971429
def setup(self, filters=()): """Sets up a cache of python interpreters. :param filters: A sequence of strings that constrain the interpreter compatibility for this cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']`` for requirements agnostic to interpreter class. :returns: A list of cached interpreters :rtype: list of :class:`pex.interpreter.PythonInterpreter` """ # We filter the interpreter cache itself (and not just the interpreters we pull from it) # because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us # an escape hatch. filters = filters if any(filters) else self.python_setup.interpreter_constraints setup_paths = self.python_setup.interpreter_search_paths logger.debug( 'Initializing Python interpreter cache matching filters `{}` from paths `{}`'.format( ':'.join(filters), ':'.join(setup_paths))) interpreters = [] def unsatisfied_filters(): return [f for f in filters if len(list(self._matching(interpreters, [f]))) == 0] with OwnerPrintingInterProcessFileLock(path=os.path.join(self._cache_dir, '.file_lock')): interpreters.extend(self._setup_cached(filters=filters)) if not interpreters or unsatisfied_filters(): interpreters.extend(self._setup_paths(setup_paths, filters=filters)) for filt in unsatisfied_filters(): logger.debug('No valid interpreters found for {}!'.format(filt)) matches = list(self._matching(interpreters, filters=filters)) if len(matches) == 0: logger.debug('Found no valid interpreters!') logger.debug( 'Initialized Python interpreter cache with {}'.format(', '.join([x.binary for x in matches]))) return matches
[ "def", "setup", "(", "self", ",", "filters", "=", "(", ")", ")", ":", "# We filter the interpreter cache itself (and not just the interpreters we pull from it)", "# because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us", "# an escape hatch.", "filters", "=", "filters", "if", "any", "(", "filters", ")", "else", "self", ".", "python_setup", ".", "interpreter_constraints", "setup_paths", "=", "self", ".", "python_setup", ".", "interpreter_search_paths", "logger", ".", "debug", "(", "'Initializing Python interpreter cache matching filters `{}` from paths `{}`'", ".", "format", "(", "':'", ".", "join", "(", "filters", ")", ",", "':'", ".", "join", "(", "setup_paths", ")", ")", ")", "interpreters", "=", "[", "]", "def", "unsatisfied_filters", "(", ")", ":", "return", "[", "f", "for", "f", "in", "filters", "if", "len", "(", "list", "(", "self", ".", "_matching", "(", "interpreters", ",", "[", "f", "]", ")", ")", ")", "==", "0", "]", "with", "OwnerPrintingInterProcessFileLock", "(", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_cache_dir", ",", "'.file_lock'", ")", ")", ":", "interpreters", ".", "extend", "(", "self", ".", "_setup_cached", "(", "filters", "=", "filters", ")", ")", "if", "not", "interpreters", "or", "unsatisfied_filters", "(", ")", ":", "interpreters", ".", "extend", "(", "self", ".", "_setup_paths", "(", "setup_paths", ",", "filters", "=", "filters", ")", ")", "for", "filt", "in", "unsatisfied_filters", "(", ")", ":", "logger", ".", "debug", "(", "'No valid interpreters found for {}!'", ".", "format", "(", "filt", ")", ")", "matches", "=", "list", "(", "self", ".", "_matching", "(", "interpreters", ",", "filters", "=", "filters", ")", ")", "if", "len", "(", "matches", ")", "==", "0", ":", "logger", ".", "debug", "(", "'Found no valid interpreters!'", ")", "logger", ".", "debug", "(", "'Initialized Python interpreter cache with {}'", ".", "format", "(", "', '", ".", "join", "(", "[", "x", ".", 
"binary", "for", "x", "in", "matches", "]", ")", ")", ")", "return", "matches" ]
47.027027
28.513514
def J(self, log_sigma): """Return the sensitivity matrix Parameters ---------- log_sigma : numpy.ndarray log_e conductivities """ m = 1.0 / np.exp(log_sigma) tdm = self._get_tdm(m) tdm.model( sensitivities=True, # output_directory=stage_dir + 'modeling', ) measurements = tdm.measurements() # build up the sensitivity matrix sens_list = [] for config_nr, cids in sorted( tdm.assignments['sensitivities'].items()): sens_list.append(tdm.parman.parsets[cids[0]]) sensitivities_lin = np.array(sens_list) # now convert to the log-sensitivities relevant for CRTomo and the # resolution matrix sensitivities_log = sensitivities_lin # multiply measurements on first dimension measurements_rep = np.repeat( measurements[:, 0, np.newaxis], sensitivities_lin.shape[1], axis=1) # sensitivities_log = sensitivities_log * mfit # multiply resistivities on second dimension m_rep = np.repeat( m[np.newaxis, :], sensitivities_lin.shape[0], axis=0 ) # eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma factor = - 1 / (m_rep * measurements_rep) sensitivities_log = factor * sensitivities_lin # import IPython # IPython.embed() return sensitivities_log
[ "def", "J", "(", "self", ",", "log_sigma", ")", ":", "m", "=", "1.0", "/", "np", ".", "exp", "(", "log_sigma", ")", "tdm", "=", "self", ".", "_get_tdm", "(", "m", ")", "tdm", ".", "model", "(", "sensitivities", "=", "True", ",", "# output_directory=stage_dir + 'modeling',", ")", "measurements", "=", "tdm", ".", "measurements", "(", ")", "# build up the sensitivity matrix", "sens_list", "=", "[", "]", "for", "config_nr", ",", "cids", "in", "sorted", "(", "tdm", ".", "assignments", "[", "'sensitivities'", "]", ".", "items", "(", ")", ")", ":", "sens_list", ".", "append", "(", "tdm", ".", "parman", ".", "parsets", "[", "cids", "[", "0", "]", "]", ")", "sensitivities_lin", "=", "np", ".", "array", "(", "sens_list", ")", "# now convert to the log-sensitivities relevant for CRTomo and the", "# resolution matrix", "sensitivities_log", "=", "sensitivities_lin", "# multiply measurements on first dimension", "measurements_rep", "=", "np", ".", "repeat", "(", "measurements", "[", ":", ",", "0", ",", "np", ".", "newaxis", "]", ",", "sensitivities_lin", ".", "shape", "[", "1", "]", ",", "axis", "=", "1", ")", "# sensitivities_log = sensitivities_log * mfit", "# multiply resistivities on second dimension", "m_rep", "=", "np", ".", "repeat", "(", "m", "[", "np", ".", "newaxis", ",", ":", "]", ",", "sensitivities_lin", ".", "shape", "[", "0", "]", ",", "axis", "=", "0", ")", "# eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma", "factor", "=", "-", "1", "/", "(", "m_rep", "*", "measurements_rep", ")", "sensitivities_log", "=", "factor", "*", "sensitivities_lin", "# import IPython", "# IPython.embed()", "return", "sensitivities_log" ]
29.734694
18.693878
def seek_write(path, data, offset): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and write to it path path to file data data to write to file offset position in file to start writing CLI Example: .. code-block:: bash salt '*' file.seek_write /path/to/file 'some data' 4096 ''' path = os.path.expanduser(path) seek_fh = os.open(path, os.O_WRONLY) try: os.lseek(seek_fh, int(offset), 0) ret = os.write(seek_fh, data) os.fsync(seek_fh) finally: os.close(seek_fh) return ret
[ "def", "seek_write", "(", "path", ",", "data", ",", "offset", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "seek_fh", "=", "os", ".", "open", "(", "path", ",", "os", ".", "O_WRONLY", ")", "try", ":", "os", ".", "lseek", "(", "seek_fh", ",", "int", "(", "offset", ")", ",", "0", ")", "ret", "=", "os", ".", "write", "(", "seek_fh", ",", "data", ")", "os", ".", "fsync", "(", "seek_fh", ")", "finally", ":", "os", ".", "close", "(", "seek_fh", ")", "return", "ret" ]
19.433333
22.766667
def component_mget(self, zip_data, components): """Call the zip component_mget endpoint Args: - zip_data - As described in the class docstring. - components - A list of strings for each component to include in the request. Example: ["zip/details", "zip/volatility"] """ if not isinstance(components, list): print("Components param must be a list") return query_params = {"components": ",".join(components)} return self.fetch_identifier_component( "zip/component_mget", zip_data, query_params)
[ "def", "component_mget", "(", "self", ",", "zip_data", ",", "components", ")", ":", "if", "not", "isinstance", "(", "components", ",", "list", ")", ":", "print", "(", "\"Components param must be a list\"", ")", "return", "query_params", "=", "{", "\"components\"", ":", "\",\"", ".", "join", "(", "components", ")", "}", "return", "self", ".", "fetch_identifier_component", "(", "\"zip/component_mget\"", ",", "zip_data", ",", "query_params", ")" ]
37.75
20.25
def MoveToAttributeNo(self, no): """Moves the position of the current instance to the attribute with the specified index relative to the containing element. """ ret = libxml2mod.xmlTextReaderMoveToAttributeNo(self._o, no) return ret
[ "def", "MoveToAttributeNo", "(", "self", ",", "no", ")", ":", "ret", "=", "libxml2mod", ".", "xmlTextReaderMoveToAttributeNo", "(", "self", ".", "_o", ",", "no", ")", "return", "ret" ]
52.4
11.6
def create(self, title, body, labels): """Create an issue in Github. For JSON data returned by Github refer: https://developer.github.com/v3/issues/#create-an-issue :param title: title of the issue :param body: body of the issue :param labels: list of labels for the issue :returns: dict of JSON data returned by Github of newly created issue :rtype: `dict` """ url = "https://api.github.com/repos/{}/{}/issues".format( self.user, self.repo) data = { 'title': title, 'body': body, } if labels: data.update({'labels': labels}) response = self.session.post(url, json.dumps(data)) assert response.status_code == 201 return json.loads(response.content)
[ "def", "create", "(", "self", ",", "title", ",", "body", ",", "labels", ")", ":", "url", "=", "\"https://api.github.com/repos/{}/{}/issues\"", ".", "format", "(", "self", ".", "user", ",", "self", ".", "repo", ")", "data", "=", "{", "'title'", ":", "title", ",", "'body'", ":", "body", ",", "}", "if", "labels", ":", "data", ".", "update", "(", "{", "'labels'", ":", "labels", "}", ")", "response", "=", "self", ".", "session", ".", "post", "(", "url", ",", "json", ".", "dumps", "(", "data", ")", ")", "assert", "response", ".", "status_code", "==", "201", "return", "json", ".", "loads", "(", "response", ".", "content", ")" ]
29.777778
18.666667
def _format_numer(number_format, prefix='', suffix=''): """Format a number to a string.""" @_surpress_formatting_errors def inner(v): if isinstance(v, Number): return ("{{}}{{:{}}}{{}}" .format(number_format) .format(prefix, v, suffix)) else: raise TypeError("Numberic type required.") return inner
[ "def", "_format_numer", "(", "number_format", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ")", ":", "@", "_surpress_formatting_errors", "def", "inner", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "Number", ")", ":", "return", "(", "\"{{}}{{:{}}}{{}}\"", ".", "format", "(", "number_format", ")", ".", "format", "(", "prefix", ",", "v", ",", "suffix", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Numberic type required.\"", ")", "return", "inner" ]
34.909091
11.818182
def get_names_in_namespace_page(namespace_id, offset, count, proxy=None, hostport=None): """ Get a page of names in a namespace Returns the list of names on success Returns {'error': ...} on error """ assert proxy or hostport, 'Need proxy or hostport' if proxy is None: proxy = connect_hostport(hostport) assert count <= 100, 'Page too big: {}'.format(count) names_schema = { 'type': 'object', 'properties': { 'names': { 'type': 'array', 'items': { 'type': 'string', 'uniqueItems': True }, }, }, 'required': [ 'names', ], } schema = json_response_schema( names_schema ) resp = {} try: resp = proxy.get_names_in_namespace(namespace_id, offset, count) resp = json_validate(schema, resp) if json_is_error(resp): return resp # must be valid names valid_names = [] for n in resp['names']: if not is_name_valid(str(n)): log.error('Invalid name "{}"'.format(str(n))) else: valid_names.append(n) return valid_names except ValidationError as e: if BLOCKSTACK_DEBUG: log.exception(e) resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502} return resp except socket.timeout: log.error("Connection timed out") resp = {'error': 'Connection to remote host timed out.', 'http_status': 503} return resp except socket.error as se: log.error("Connection error {}".format(se.errno)) resp = {'error': 'Connection to remote host failed.', 'http_status': 502} return resp except Exception as ee: if BLOCKSTACK_DEBUG: log.exception(ee) log.error("Caught exception while connecting to Blockstack node: {}".format(ee)) resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500} return resp
[ "def", "get_names_in_namespace_page", "(", "namespace_id", ",", "offset", ",", "count", ",", "proxy", "=", "None", ",", "hostport", "=", "None", ")", ":", "assert", "proxy", "or", "hostport", ",", "'Need proxy or hostport'", "if", "proxy", "is", "None", ":", "proxy", "=", "connect_hostport", "(", "hostport", ")", "assert", "count", "<=", "100", ",", "'Page too big: {}'", ".", "format", "(", "count", ")", "names_schema", "=", "{", "'type'", ":", "'object'", ",", "'properties'", ":", "{", "'names'", ":", "{", "'type'", ":", "'array'", ",", "'items'", ":", "{", "'type'", ":", "'string'", ",", "'uniqueItems'", ":", "True", "}", ",", "}", ",", "}", ",", "'required'", ":", "[", "'names'", ",", "]", ",", "}", "schema", "=", "json_response_schema", "(", "names_schema", ")", "resp", "=", "{", "}", "try", ":", "resp", "=", "proxy", ".", "get_names_in_namespace", "(", "namespace_id", ",", "offset", ",", "count", ")", "resp", "=", "json_validate", "(", "schema", ",", "resp", ")", "if", "json_is_error", "(", "resp", ")", ":", "return", "resp", "# must be valid names", "valid_names", "=", "[", "]", "for", "n", "in", "resp", "[", "'names'", "]", ":", "if", "not", "is_name_valid", "(", "str", "(", "n", ")", ")", ":", "log", ".", "error", "(", "'Invalid name \"{}\"'", ".", "format", "(", "str", "(", "n", ")", ")", ")", "else", ":", "valid_names", ".", "append", "(", "n", ")", "return", "valid_names", "except", "ValidationError", "as", "e", ":", "if", "BLOCKSTACK_DEBUG", ":", "log", ".", "exception", "(", "e", ")", "resp", "=", "{", "'error'", ":", "'Server response did not match expected schema. 
You are likely communicating with an out-of-date Blockstack node.'", ",", "'http_status'", ":", "502", "}", "return", "resp", "except", "socket", ".", "timeout", ":", "log", ".", "error", "(", "\"Connection timed out\"", ")", "resp", "=", "{", "'error'", ":", "'Connection to remote host timed out.'", ",", "'http_status'", ":", "503", "}", "return", "resp", "except", "socket", ".", "error", "as", "se", ":", "log", ".", "error", "(", "\"Connection error {}\"", ".", "format", "(", "se", ".", "errno", ")", ")", "resp", "=", "{", "'error'", ":", "'Connection to remote host failed.'", ",", "'http_status'", ":", "502", "}", "return", "resp", "except", "Exception", "as", "ee", ":", "if", "BLOCKSTACK_DEBUG", ":", "log", ".", "exception", "(", "ee", ")", "log", ".", "error", "(", "\"Caught exception while connecting to Blockstack node: {}\"", ".", "format", "(", "ee", ")", ")", "resp", "=", "{", "'error'", ":", "'Failed to contact Blockstack node. Try again with `--debug`.'", ",", "'http_status'", ":", "500", "}", "return", "resp" ]
30.571429
22.4
def rewind(self): """ Put us back at the beginning of the file again. """ # Superclass rewind super(FileRecordStream, self).rewind() self.close() self._file = open(self._filename, self._mode) self._reader = csv.reader(self._file, dialect="excel") # Skip header rows self._reader.next() self._reader.next() self._reader.next() # Reset record count, etc. self._recordCount = 0
[ "def", "rewind", "(", "self", ")", ":", "# Superclass rewind", "super", "(", "FileRecordStream", ",", "self", ")", ".", "rewind", "(", ")", "self", ".", "close", "(", ")", "self", ".", "_file", "=", "open", "(", "self", ".", "_filename", ",", "self", ".", "_mode", ")", "self", ".", "_reader", "=", "csv", ".", "reader", "(", "self", ".", "_file", ",", "dialect", "=", "\"excel\"", ")", "# Skip header rows", "self", ".", "_reader", ".", "next", "(", ")", "self", ".", "_reader", ".", "next", "(", ")", "self", ".", "_reader", ".", "next", "(", ")", "# Reset record count, etc.", "self", ".", "_recordCount", "=", "0" ]
21.894737
18.842105
def spawn_daemon(fork=None, pgrpfile=None, outfile='out.txt'): 'causes run to be executed in a newly spawned daemon process' global LAST_PGRP_PATH fork = fork or os.fork open(outfile, 'a').close() # TODO: configurable output file if pgrpfile and os.path.exists(pgrpfile): try: cur_pid = int(open(pgrpfile).read().rstrip("\n")) os.killpg(cur_pid, 0) raise Exception("arbiter still running with pid:" + str(cur_pid)) except (OSError, ValueError): pass if fork(): # return True means we are in parent return True else: os.setsid() # break association with terminal via new session id if fork(): # fork one more layer to ensure child will not re-acquire terminal os._exit(0) LAST_PGRP_PATH = pgrpfile if pgrpfile: with open(pgrpfile, 'w') as f: f.write(str(os.getpgrp()) + "\n") logging.root.addHandler(SysLogHandler()) rotating_out = RotatingStdoutFile(outfile) rotating_out.start() return False
[ "def", "spawn_daemon", "(", "fork", "=", "None", ",", "pgrpfile", "=", "None", ",", "outfile", "=", "'out.txt'", ")", ":", "global", "LAST_PGRP_PATH", "fork", "=", "fork", "or", "os", ".", "fork", "open", "(", "outfile", ",", "'a'", ")", ".", "close", "(", ")", "# TODO: configurable output file", "if", "pgrpfile", "and", "os", ".", "path", ".", "exists", "(", "pgrpfile", ")", ":", "try", ":", "cur_pid", "=", "int", "(", "open", "(", "pgrpfile", ")", ".", "read", "(", ")", ".", "rstrip", "(", "\"\\n\"", ")", ")", "os", ".", "killpg", "(", "cur_pid", ",", "0", ")", "raise", "Exception", "(", "\"arbiter still running with pid:\"", "+", "str", "(", "cur_pid", ")", ")", "except", "(", "OSError", ",", "ValueError", ")", ":", "pass", "if", "fork", "(", ")", ":", "# return True means we are in parent", "return", "True", "else", ":", "os", ".", "setsid", "(", ")", "# break association with terminal via new session id", "if", "fork", "(", ")", ":", "# fork one more layer to ensure child will not re-acquire terminal", "os", ".", "_exit", "(", "0", ")", "LAST_PGRP_PATH", "=", "pgrpfile", "if", "pgrpfile", ":", "with", "open", "(", "pgrpfile", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "os", ".", "getpgrp", "(", ")", ")", "+", "\"\\n\"", ")", "logging", ".", "root", ".", "addHandler", "(", "SysLogHandler", "(", ")", ")", "rotating_out", "=", "RotatingStdoutFile", "(", "outfile", ")", "rotating_out", ".", "start", "(", ")", "return", "False" ]
41.346154
18.192308
def global_workflow_add_authorized_users(name_or_id, alias=None, input_params={}, always_retry=True, **kwargs): """ Invokes the /globalworkflow-xxxx/addAuthorizedUsers API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Global-Workflows#API-method:-/globalworkflow-xxxx%5B/yyyy%5D/addAuthorizedUsers """ fully_qualified_version = name_or_id + (('/' + alias) if alias else '') return DXHTTPRequest('/%s/addAuthorizedUsers' % fully_qualified_version, input_params, always_retry=always_retry, **kwargs)
[ "def", "global_workflow_add_authorized_users", "(", "name_or_id", ",", "alias", "=", "None", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "fully_qualified_version", "=", "name_or_id", "+", "(", "(", "'/'", "+", "alias", ")", "if", "alias", "else", "''", ")", "return", "DXHTTPRequest", "(", "'/%s/addAuthorizedUsers'", "%", "fully_qualified_version", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
68.5
46.75
def _getusers(self, ids=None, names=None, match=None): """ Return a list of users that match criteria. :kwarg ids: list of user ids to return data on :kwarg names: list of user names to return data on :kwarg match: list of patterns. Returns users whose real name or login name match the pattern. :raises XMLRPC Fault: Code 51: if a Bad Login Name was sent to the names array. Code 304: if the user was not authorized to see user they requested. Code 505: user is logged out and can't use the match or ids parameter. Available in Bugzilla-3.4+ """ params = {} if ids: params['ids'] = self._listify(ids) if names: params['names'] = self._listify(names) if match: params['match'] = self._listify(match) if not params: raise BugzillaError('_get() needs one of ids, ' ' names, or match kwarg.') return self._proxy.User.get(params)
[ "def", "_getusers", "(", "self", ",", "ids", "=", "None", ",", "names", "=", "None", ",", "match", "=", "None", ")", ":", "params", "=", "{", "}", "if", "ids", ":", "params", "[", "'ids'", "]", "=", "self", ".", "_listify", "(", "ids", ")", "if", "names", ":", "params", "[", "'names'", "]", "=", "self", ".", "_listify", "(", "names", ")", "if", "match", ":", "params", "[", "'match'", "]", "=", "self", ".", "_listify", "(", "match", ")", "if", "not", "params", ":", "raise", "BugzillaError", "(", "'_get() needs one of ids, '", "' names, or match kwarg.'", ")", "return", "self", ".", "_proxy", ".", "User", ".", "get", "(", "params", ")" ]
37.137931
18.172414
def get_centers(self, estimation): """Get estimation on centers Parameters ---------- estimation : 1D arrary Either prior of posterior estimation Returns ------- centers : 2D array, in shape [K, n_dim] Estimation on centers """ centers = estimation[0:self.map_offset[1]]\ .reshape(self.K, self.n_dim) return centers
[ "def", "get_centers", "(", "self", ",", "estimation", ")", ":", "centers", "=", "estimation", "[", "0", ":", "self", ".", "map_offset", "[", "1", "]", "]", ".", "reshape", "(", "self", ".", "K", ",", "self", ".", "n_dim", ")", "return", "centers" ]
23.222222
17.833333
def _dtype(cls, tensor: tf.Tensor) -> tf.Tensor: '''Converts `tensor` to tf.float32 datatype if needed.''' if tensor.dtype != tf.float32: tensor = tf.cast(tensor, tf.float32) return tensor
[ "def", "_dtype", "(", "cls", ",", "tensor", ":", "tf", ".", "Tensor", ")", "->", "tf", ".", "Tensor", ":", "if", "tensor", ".", "dtype", "!=", "tf", ".", "float32", ":", "tensor", "=", "tf", ".", "cast", "(", "tensor", ",", "tf", ".", "float32", ")", "return", "tensor" ]
44
12.4
def H7(self): "Sum variance (error in Haralick's original paper here)." h6 = np.tile(self.H6(), (self.rlevels2.shape[1], 1)).transpose() return (((self.rlevels2 + 2) - h6) ** 2 * self.p_xplusy).sum(1)
[ "def", "H7", "(", "self", ")", ":", "h6", "=", "np", ".", "tile", "(", "self", ".", "H6", "(", ")", ",", "(", "self", ".", "rlevels2", ".", "shape", "[", "1", "]", ",", "1", ")", ")", ".", "transpose", "(", ")", "return", "(", "(", "(", "self", ".", "rlevels2", "+", "2", ")", "-", "h6", ")", "**", "2", "*", "self", ".", "p_xplusy", ")", ".", "sum", "(", "1", ")" ]
55.25
28.75
def exclude_candidates(self, candidates, reason): """ mark one or more candidates as excluded from the count candidates: list of candidate_ids to exclude reason: the reason for the exclusion """ # put some paranoia around exclusion: we want to make sure that # `candidates` is unique, and that none of these candidates have # been previously excluded for candidate_id in candidates: assert(candidate_id not in self.candidates_excluded) assert(len(set(candidates)) == len(candidates)) # determine the paper transfers to be run, and the candidates # holding papers which are distributed in each transfer transfers_applicable = defaultdict(set) for candidate_id in candidates: self.candidates_excluded[candidate_id] = True for bundle_transaction in self.candidate_bundle_transactions.get(candidate_id): value = bundle_transaction.transfer_value transfers_applicable[value].add(candidate_id) transfer_values = list(reversed(sorted(transfers_applicable))) self.results.candidates_excluded( CandidatesExcluded( candidates=candidates, transfer_values=transfer_values, reason=reason)) for transfer_value in transfer_values: self.exclusion_distributions_pending.append((list(transfers_applicable[transfer_value]), transfer_value))
[ "def", "exclude_candidates", "(", "self", ",", "candidates", ",", "reason", ")", ":", "# put some paranoia around exclusion: we want to make sure that", "# `candidates` is unique, and that none of these candidates have", "# been previously excluded", "for", "candidate_id", "in", "candidates", ":", "assert", "(", "candidate_id", "not", "in", "self", ".", "candidates_excluded", ")", "assert", "(", "len", "(", "set", "(", "candidates", ")", ")", "==", "len", "(", "candidates", ")", ")", "# determine the paper transfers to be run, and the candidates", "# holding papers which are distributed in each transfer", "transfers_applicable", "=", "defaultdict", "(", "set", ")", "for", "candidate_id", "in", "candidates", ":", "self", ".", "candidates_excluded", "[", "candidate_id", "]", "=", "True", "for", "bundle_transaction", "in", "self", ".", "candidate_bundle_transactions", ".", "get", "(", "candidate_id", ")", ":", "value", "=", "bundle_transaction", ".", "transfer_value", "transfers_applicable", "[", "value", "]", ".", "add", "(", "candidate_id", ")", "transfer_values", "=", "list", "(", "reversed", "(", "sorted", "(", "transfers_applicable", ")", ")", ")", "self", ".", "results", ".", "candidates_excluded", "(", "CandidatesExcluded", "(", "candidates", "=", "candidates", ",", "transfer_values", "=", "transfer_values", ",", "reason", "=", "reason", ")", ")", "for", "transfer_value", "in", "transfer_values", ":", "self", ".", "exclusion_distributions_pending", ".", "append", "(", "(", "list", "(", "transfers_applicable", "[", "transfer_value", "]", ")", ",", "transfer_value", ")", ")" ]
45.9375
19.5
def remote_sys_name_uneq_store(self, remote_system_name): """This function saves the system name, if different from stored. """ if remote_system_name != self.remote_system_name: self.remote_system_name = remote_system_name return True return False
[ "def", "remote_sys_name_uneq_store", "(", "self", ",", "remote_system_name", ")", ":", "if", "remote_system_name", "!=", "self", ".", "remote_system_name", ":", "self", ".", "remote_system_name", "=", "remote_system_name", "return", "True", "return", "False" ]
48.333333
14.5
def unpublish(self): """ Unpublish this item. This will set and currently published versions to the archived state and delete all currently scheduled versions. """ assert self.state == self.DRAFT with xact(): self._publish(published=False) # Delete all scheduled items klass = self.get_version_class() for obj in klass.normal.filter(object_id=self.object_id, state=self.SCHEDULED): obj.delete()
[ "def", "unpublish", "(", "self", ")", ":", "assert", "self", ".", "state", "==", "self", ".", "DRAFT", "with", "xact", "(", ")", ":", "self", ".", "_publish", "(", "published", "=", "False", ")", "# Delete all scheduled items", "klass", "=", "self", ".", "get_version_class", "(", ")", "for", "obj", "in", "klass", ".", "normal", ".", "filter", "(", "object_id", "=", "self", ".", "object_id", ",", "state", "=", "self", ".", "SCHEDULED", ")", ":", "obj", ".", "delete", "(", ")" ]
29.941176
17.823529
def _construct_role(self, managed_policy_map): """Constructs a Lambda execution role based on this SAM function's Policies property. :returns: the generated IAM Role :rtype: model.iam.IAMRole """ execution_role = IAMRole(self.logical_id + 'Role', attributes=self.get_passthrough_resource_attributes()) execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy() managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')] if self.Tracing: managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess')) function_policies = FunctionPolicies({"Policies": self.Policies}, # No support for policy templates in the "core" policy_template_processor=None) policy_documents = [] if self.DeadLetterQueue: policy_documents.append(IAMRolePolicies.dead_letter_queue_policy( self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']], self.DeadLetterQueue['TargetArn'])) for index, policy_entry in enumerate(function_policies.get()): if policy_entry.type is PolicyTypes.POLICY_STATEMENT: policy_documents.append({ 'PolicyName': execution_role.logical_id + 'Policy' + str(index), 'PolicyDocument': policy_entry.data }) elif policy_entry.type is PolicyTypes.MANAGED_POLICY: # There are three options: # Managed Policy Name (string): Try to convert to Managed Policy ARN # Managed Policy Arn (string): Insert it directly into the list # Intrinsic Function (dict): Insert it directly into the list # # When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice # policy_arn = policy_entry.data if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map: policy_arn = managed_policy_map[policy_entry.data] # De-Duplicate managed policy arns before inserting. 
Mainly useful # when customer specifies a managed policy which is already inserted # by SAM, such as AWSLambdaBasicExecutionRole if policy_arn not in managed_policy_arns: managed_policy_arns.append(policy_arn) else: # Policy Templates are not supported here in the "core" raise InvalidResourceException( self.logical_id, "Policy at index {} in the 'Policies' property is not valid".format(index)) execution_role.ManagedPolicyArns = list(managed_policy_arns) execution_role.Policies = policy_documents or None execution_role.PermissionsBoundary = self.PermissionsBoundary return execution_role
[ "def", "_construct_role", "(", "self", ",", "managed_policy_map", ")", ":", "execution_role", "=", "IAMRole", "(", "self", ".", "logical_id", "+", "'Role'", ",", "attributes", "=", "self", ".", "get_passthrough_resource_attributes", "(", ")", ")", "execution_role", ".", "AssumeRolePolicyDocument", "=", "IAMRolePolicies", ".", "lambda_assume_role_policy", "(", ")", "managed_policy_arns", "=", "[", "ArnGenerator", ".", "generate_aws_managed_policy_arn", "(", "'service-role/AWSLambdaBasicExecutionRole'", ")", "]", "if", "self", ".", "Tracing", ":", "managed_policy_arns", ".", "append", "(", "ArnGenerator", ".", "generate_aws_managed_policy_arn", "(", "'AWSXrayWriteOnlyAccess'", ")", ")", "function_policies", "=", "FunctionPolicies", "(", "{", "\"Policies\"", ":", "self", ".", "Policies", "}", ",", "# No support for policy templates in the \"core\"", "policy_template_processor", "=", "None", ")", "policy_documents", "=", "[", "]", "if", "self", ".", "DeadLetterQueue", ":", "policy_documents", ".", "append", "(", "IAMRolePolicies", ".", "dead_letter_queue_policy", "(", "self", ".", "dead_letter_queue_policy_actions", "[", "self", ".", "DeadLetterQueue", "[", "'Type'", "]", "]", ",", "self", ".", "DeadLetterQueue", "[", "'TargetArn'", "]", ")", ")", "for", "index", ",", "policy_entry", "in", "enumerate", "(", "function_policies", ".", "get", "(", ")", ")", ":", "if", "policy_entry", ".", "type", "is", "PolicyTypes", ".", "POLICY_STATEMENT", ":", "policy_documents", ".", "append", "(", "{", "'PolicyName'", ":", "execution_role", ".", "logical_id", "+", "'Policy'", "+", "str", "(", "index", ")", ",", "'PolicyDocument'", ":", "policy_entry", ".", "data", "}", ")", "elif", "policy_entry", ".", "type", "is", "PolicyTypes", ".", "MANAGED_POLICY", ":", "# There are three options:", "# Managed Policy Name (string): Try to convert to Managed Policy ARN", "# Managed Policy Arn (string): Insert it directly into the list", "# Intrinsic Function (dict): Insert it directly into the 
list", "#", "# When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice", "#", "policy_arn", "=", "policy_entry", ".", "data", "if", "isinstance", "(", "policy_entry", ".", "data", ",", "string_types", ")", "and", "policy_entry", ".", "data", "in", "managed_policy_map", ":", "policy_arn", "=", "managed_policy_map", "[", "policy_entry", ".", "data", "]", "# De-Duplicate managed policy arns before inserting. Mainly useful", "# when customer specifies a managed policy which is already inserted", "# by SAM, such as AWSLambdaBasicExecutionRole", "if", "policy_arn", "not", "in", "managed_policy_arns", ":", "managed_policy_arns", ".", "append", "(", "policy_arn", ")", "else", ":", "# Policy Templates are not supported here in the \"core\"", "raise", "InvalidResourceException", "(", "self", ".", "logical_id", ",", "\"Policy at index {} in the 'Policies' property is not valid\"", ".", "format", "(", "index", ")", ")", "execution_role", ".", "ManagedPolicyArns", "=", "list", "(", "managed_policy_arns", ")", "execution_role", ".", "Policies", "=", "policy_documents", "or", "None", "execution_role", ".", "PermissionsBoundary", "=", "self", ".", "PermissionsBoundary", "return", "execution_role" ]
50.590164
30.786885
def check_for_duplicate_assignments(participant): """Check that the assignment_id of the participant is unique. If it isnt the older participants will be failed. """ participants = models.Participant.query.filter_by( assignment_id=participant.assignment_id ).all() duplicates = [ p for p in participants if (p.id != participant.id and p.status == "working") ] for d in duplicates: q.enqueue(worker_function, "AssignmentAbandoned", None, d.id)
[ "def", "check_for_duplicate_assignments", "(", "participant", ")", ":", "participants", "=", "models", ".", "Participant", ".", "query", ".", "filter_by", "(", "assignment_id", "=", "participant", ".", "assignment_id", ")", ".", "all", "(", ")", "duplicates", "=", "[", "p", "for", "p", "in", "participants", "if", "(", "p", ".", "id", "!=", "participant", ".", "id", "and", "p", ".", "status", "==", "\"working\"", ")", "]", "for", "d", "in", "duplicates", ":", "q", ".", "enqueue", "(", "worker_function", ",", "\"AssignmentAbandoned\"", ",", "None", ",", "d", ".", "id", ")" ]
37.461538
19.923077
def ess(weights): r"""Calculate the normalized effective sample size :math:`ESS` [LC95]_ of samples with ``weights`` :math:`\omega_i`. :math:`ESS=0` is terrible and :math:`ESS=1` is perfect. .. math:: ESS = \frac{1}{1+C^2} where .. math:: C^2 = \frac{1}{N} \sum_{i=1}^N (N \bar{\omega}_i - 1)^2 .. math:: \bar{\omega}_i = \frac{\omega_i}{\sum_i \omega_i} :param weights: Vector-like array; the samples' weights """ # normalize weights w = _np.asarray(weights) / _np.sum(weights) # ess coeff_var = _np.sum((len(w) * w - 1)**2) / len(w) return 1.0 / (1.0 + coeff_var)
[ "def", "ess", "(", "weights", ")", ":", "# normalize weights", "w", "=", "_np", ".", "asarray", "(", "weights", ")", "/", "_np", ".", "sum", "(", "weights", ")", "# ess", "coeff_var", "=", "_np", ".", "sum", "(", "(", "len", "(", "w", ")", "*", "w", "-", "1", ")", "**", "2", ")", "/", "len", "(", "w", ")", "return", "1.0", "/", "(", "1.0", "+", "coeff_var", ")" ]
20.548387
25.677419
def format_files(src: str, dest: str, **fmt_vars: str) -> None: """Copies all files inside src into dest while formatting the contents of the files into the output. For example, a file with the following contents: {foo} bar {baz} and the vars {'foo': 'herp', 'baz': 'derp'} will end up in the output as herp bar derp :param text src: Source directory. :param text dest: Destination directory. :param dict fmt_vars: Vars to format into the files. """ assert os.path.exists(src) assert os.path.exists(dest) # Only at the root. Could be made more complicated and recursive later for filename in os.listdir(src): if filename.endswith(EXCLUDED_EXTENSIONS): continue # Flat directory structure elif not os.path.isfile(os.path.join(src, filename)): continue with open(os.path.join(src, filename)) as f: output_contents = f.read().format(**fmt_vars) with open(os.path.join(dest, filename), 'w') as file_obj: file_obj.write(output_contents)
[ "def", "format_files", "(", "src", ":", "str", ",", "dest", ":", "str", ",", "*", "*", "fmt_vars", ":", "str", ")", "->", "None", ":", "assert", "os", ".", "path", ".", "exists", "(", "src", ")", "assert", "os", ".", "path", ".", "exists", "(", "dest", ")", "# Only at the root. Could be made more complicated and recursive later", "for", "filename", "in", "os", ".", "listdir", "(", "src", ")", ":", "if", "filename", ".", "endswith", "(", "EXCLUDED_EXTENSIONS", ")", ":", "continue", "# Flat directory structure", "elif", "not", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "src", ",", "filename", ")", ")", ":", "continue", "with", "open", "(", "os", ".", "path", ".", "join", "(", "src", ",", "filename", ")", ")", "as", "f", ":", "output_contents", "=", "f", ".", "read", "(", ")", ".", "format", "(", "*", "*", "fmt_vars", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dest", ",", "filename", ")", ",", "'w'", ")", "as", "file_obj", ":", "file_obj", ".", "write", "(", "output_contents", ")" ]
35.2
17.166667
def xml_path_completion(xml_path): """ Takes in a local xml path and returns a full path. if @xml_path is absolute, do nothing if @xml_path is not absolute, load xml that is shipped by the package """ if xml_path.startswith("/"): full_path = xml_path else: full_path = os.path.join(robosuite.models.assets_root, xml_path) return full_path
[ "def", "xml_path_completion", "(", "xml_path", ")", ":", "if", "xml_path", ".", "startswith", "(", "\"/\"", ")", ":", "full_path", "=", "xml_path", "else", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "robosuite", ".", "models", ".", "assets_root", ",", "xml_path", ")", "return", "full_path" ]
34.909091
14.909091
def remove_run_script(self, script, target_name=None): """ Removes the given script string from the given target :param script: The script string to be removed from the target :param target_name: Target name or list of target names to remove the run script from or None for every target :return: """ for target in self.objects.get_targets(target_name): for build_phase_id in target.buildPhases: build_phase = self.objects[build_phase_id] if not isinstance(build_phase, PBXShellScriptBuildPhase): continue if build_phase.shellScript == script: del self.objects[build_phase_id] target.remove_build_phase(build_phase)
[ "def", "remove_run_script", "(", "self", ",", "script", ",", "target_name", "=", "None", ")", ":", "for", "target", "in", "self", ".", "objects", ".", "get_targets", "(", "target_name", ")", ":", "for", "build_phase_id", "in", "target", ".", "buildPhases", ":", "build_phase", "=", "self", ".", "objects", "[", "build_phase_id", "]", "if", "not", "isinstance", "(", "build_phase", ",", "PBXShellScriptBuildPhase", ")", ":", "continue", "if", "build_phase", ".", "shellScript", "==", "script", ":", "del", "self", ".", "objects", "[", "build_phase_id", "]", "target", ".", "remove_build_phase", "(", "build_phase", ")" ]
48.5
21.625
def set_state(self, updater=None, **kwargs): """Update the datastore. :param func|dict updater: (state) => state_change or dict state_change :rtype: Iterable[tornado.concurrent.Future] """ if callable(updater): state_change = updater(self) elif updater is not None: state_change = updater else: state_change = kwargs return [callback_result for k, v in state_change.items() for callback_result in self.set(k, v)]
[ "def", "set_state", "(", "self", ",", "updater", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "callable", "(", "updater", ")", ":", "state_change", "=", "updater", "(", "self", ")", "elif", "updater", "is", "not", "None", ":", "state_change", "=", "updater", "else", ":", "state_change", "=", "kwargs", "return", "[", "callback_result", "for", "k", ",", "v", "in", "state_change", ".", "items", "(", ")", "for", "callback_result", "in", "self", ".", "set", "(", "k", ",", "v", ")", "]" ]
33.1875
13.875
def get_shell(pid=None, max_depth=6): """Get the shell that the supplied pid or os.getpid() is running in. """ pid = str(pid or os.getpid()) mapping = _get_process_mapping() for proc_cmd in _iter_process_command(mapping, pid, max_depth): if proc_cmd.startswith('-'): # Login shell! Let's use this. return _get_login_shell(proc_cmd) name = os.path.basename(proc_cmd).lower() if name in SHELL_NAMES: # The inner-most (non-login) shell. return (name, proc_cmd) return None
[ "def", "get_shell", "(", "pid", "=", "None", ",", "max_depth", "=", "6", ")", ":", "pid", "=", "str", "(", "pid", "or", "os", ".", "getpid", "(", ")", ")", "mapping", "=", "_get_process_mapping", "(", ")", "for", "proc_cmd", "in", "_iter_process_command", "(", "mapping", ",", "pid", ",", "max_depth", ")", ":", "if", "proc_cmd", ".", "startswith", "(", "'-'", ")", ":", "# Login shell! Let's use this.", "return", "_get_login_shell", "(", "proc_cmd", ")", "name", "=", "os", ".", "path", ".", "basename", "(", "proc_cmd", ")", ".", "lower", "(", ")", "if", "name", "in", "SHELL_NAMES", ":", "# The inner-most (non-login) shell.", "return", "(", "name", ",", "proc_cmd", ")", "return", "None" ]
44.75
12.166667
def strip_wsgi(request): """Strip WSGI data out of the request META data.""" meta = copy(request.META) for key in meta: if key[:4] == 'wsgi': meta[key] = None return meta
[ "def", "strip_wsgi", "(", "request", ")", ":", "meta", "=", "copy", "(", "request", ".", "META", ")", "for", "key", "in", "meta", ":", "if", "key", "[", ":", "4", "]", "==", "'wsgi'", ":", "meta", "[", "key", "]", "=", "None", "return", "meta" ]
25
16.875
def itemData(self, item, column, role=Qt.DisplayRole): """ Returns the data stored under the given role for the item. O The column parameter may be used to differentiate behavior per column. The default implementation does nothing. Descendants should typically override this function instead of data() Note: If you do not have a value to return, return an invalid QVariant instead of returning 0. (This means returning None in Python) """ if role == Qt.DecorationRole: if column == self.COL_DECORATION: return item.decoration elif role == Qt.FontRole: return item.font elif role == Qt.ForegroundRole: return item.foregroundBrush elif role == Qt.BackgroundRole: return item.backgroundBrush elif role == Qt.SizeHintRole: return self.cellSizeHint if item.sizeHint is None else item.sizeHint return None
[ "def", "itemData", "(", "self", ",", "item", ",", "column", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "role", "==", "Qt", ".", "DecorationRole", ":", "if", "column", "==", "self", ".", "COL_DECORATION", ":", "return", "item", ".", "decoration", "elif", "role", "==", "Qt", ".", "FontRole", ":", "return", "item", ".", "font", "elif", "role", "==", "Qt", ".", "ForegroundRole", ":", "return", "item", ".", "foregroundBrush", "elif", "role", "==", "Qt", ".", "BackgroundRole", ":", "return", "item", ".", "backgroundBrush", "elif", "role", "==", "Qt", ".", "SizeHintRole", ":", "return", "self", ".", "cellSizeHint", "if", "item", ".", "sizeHint", "is", "None", "else", "item", ".", "sizeHint", "return", "None" ]
36.296296
20.925926
def tear_down(self): """Tear down the instance """ import boto.ec2 if not self.browser_config.get('terminate'): self.warning_log("Skipping terminate") return self.info_log("Tearing down...") ec2 = boto.ec2.connect_to_region(self.browser_config.get("region")) ec2.terminate_instances(instance_ids=[self.instance_id])
[ "def", "tear_down", "(", "self", ")", ":", "import", "boto", ".", "ec2", "if", "not", "self", ".", "browser_config", ".", "get", "(", "'terminate'", ")", ":", "self", ".", "warning_log", "(", "\"Skipping terminate\"", ")", "return", "self", ".", "info_log", "(", "\"Tearing down...\"", ")", "ec2", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "self", ".", "browser_config", ".", "get", "(", "\"region\"", ")", ")", "ec2", ".", "terminate_instances", "(", "instance_ids", "=", "[", "self", ".", "instance_id", "]", ")" ]
29.692308
20
def create(self, dcid, vpsplanid, osid, params=None): ''' /v1/server/create POST - account Create a new virtual machine. You will start being billed for this immediately. The response only contains the SUBID for the new machine. You should use v1/server/list to poll and wait for the machine to be created (as this does not happen instantly). Link: https://www.vultr.com/api/#server_create ''' params = update_params(params, { 'DCID': dcid, 'VPSPLANID': vpsplanid, 'OSID': osid }) return self.request('/v1/server/create', params, 'POST')
[ "def", "create", "(", "self", ",", "dcid", ",", "vpsplanid", ",", "osid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'DCID'", ":", "dcid", ",", "'VPSPLANID'", ":", "vpsplanid", ",", "'OSID'", ":", "osid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/create'", ",", "params", ",", "'POST'", ")" ]
40.4375
20.9375
def delete_enrollment_claim(self, id, **kwargs): """Delete""" api = self._get_api(enrollment.PublicAPIApi) return api.delete_device_enrollment(id=id)
[ "def", "delete_enrollment_claim", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api", "=", "self", ".", "_get_api", "(", "enrollment", ".", "PublicAPIApi", ")", "return", "api", ".", "delete_device_enrollment", "(", "id", "=", "id", ")" ]
42.5
7.5
def random_box(molecules, total=None, proportions=None, size=[1.,1.,1.], maxtries=100): '''Create a System made of a series of random molecules. Parameters: total: molecules: proportions: ''' # Setup proportions to be right if proportions is None: proportions = np.ones(len(molecules)) / len(molecules) else: proportions = np.array(proportions) size = np.array(size) tree = CoverTree(metric="periodic", metric_args={'cell_lengths': size}) type_array = [] result = [] vdw_radii = [] max_vdw = max(vdw_radius(np.concatenate([m.type_array for m in molecules]))) first = True for l, n in enumerate((proportions * total).astype(int)): # We try to insert each molecule for i in range(n): # Attempts for k in range(maxtries): template = molecules[l].copy() reference = np.random.uniform(0, 1, 3) * size r_array = template.r_array + reference # Find all collision candidates pts_list, distances_list = tree.query_ball_many(r_array, vdw_radius(template.type_array) + max_vdw) # print pts_list, distances_list # Check if there is any collision ok = True for i, (dist, pts) in enumerate(zip(distances_list, pts_list)): if len(dist) == 0: break found_vdw = np.array([vdw_radii[p] for p in pts]) ok &= all(dist > found_vdw + vdw_radius(template.type_array[i])) if ok: tree.insert_many(r_array) template.r_array = r_array result.append(template) vdw_radii.extend(vdw_radius(template.type_array)) break if not ok: raise Exception("Trials exceeded") system = System(result) system.box_vectors[0, 0] = size[0] system.box_vectors[1, 1] = size[1] system.box_vectors[2, 2] = size[2] return system
[ "def", "random_box", "(", "molecules", ",", "total", "=", "None", ",", "proportions", "=", "None", ",", "size", "=", "[", "1.", ",", "1.", ",", "1.", "]", ",", "maxtries", "=", "100", ")", ":", "# Setup proportions to be right", "if", "proportions", "is", "None", ":", "proportions", "=", "np", ".", "ones", "(", "len", "(", "molecules", ")", ")", "/", "len", "(", "molecules", ")", "else", ":", "proportions", "=", "np", ".", "array", "(", "proportions", ")", "size", "=", "np", ".", "array", "(", "size", ")", "tree", "=", "CoverTree", "(", "metric", "=", "\"periodic\"", ",", "metric_args", "=", "{", "'cell_lengths'", ":", "size", "}", ")", "type_array", "=", "[", "]", "result", "=", "[", "]", "vdw_radii", "=", "[", "]", "max_vdw", "=", "max", "(", "vdw_radius", "(", "np", ".", "concatenate", "(", "[", "m", ".", "type_array", "for", "m", "in", "molecules", "]", ")", ")", ")", "first", "=", "True", "for", "l", ",", "n", "in", "enumerate", "(", "(", "proportions", "*", "total", ")", ".", "astype", "(", "int", ")", ")", ":", "# We try to insert each molecule ", "for", "i", "in", "range", "(", "n", ")", ":", "# Attempts", "for", "k", "in", "range", "(", "maxtries", ")", ":", "template", "=", "molecules", "[", "l", "]", ".", "copy", "(", ")", "reference", "=", "np", ".", "random", ".", "uniform", "(", "0", ",", "1", ",", "3", ")", "*", "size", "r_array", "=", "template", ".", "r_array", "+", "reference", "# Find all collision candidates", "pts_list", ",", "distances_list", "=", "tree", ".", "query_ball_many", "(", "r_array", ",", "vdw_radius", "(", "template", ".", "type_array", ")", "+", "max_vdw", ")", "# print pts_list, distances_list", "# Check if there is any collision", "ok", "=", "True", "for", "i", ",", "(", "dist", ",", "pts", ")", "in", "enumerate", "(", "zip", "(", "distances_list", ",", "pts_list", ")", ")", ":", "if", "len", "(", "dist", ")", "==", "0", ":", "break", "found_vdw", "=", "np", ".", "array", "(", "[", "vdw_radii", "[", "p", "]", "for", "p", "in", 
"pts", "]", ")", "ok", "&=", "all", "(", "dist", ">", "found_vdw", "+", "vdw_radius", "(", "template", ".", "type_array", "[", "i", "]", ")", ")", "if", "ok", ":", "tree", ".", "insert_many", "(", "r_array", ")", "template", ".", "r_array", "=", "r_array", "result", ".", "append", "(", "template", ")", "vdw_radii", ".", "extend", "(", "vdw_radius", "(", "template", ".", "type_array", ")", ")", "break", "if", "not", "ok", ":", "raise", "Exception", "(", "\"Trials exceeded\"", ")", "system", "=", "System", "(", "result", ")", "system", ".", "box_vectors", "[", "0", ",", "0", "]", "=", "size", "[", "0", "]", "system", ".", "box_vectors", "[", "1", ",", "1", "]", "=", "size", "[", "1", "]", "system", ".", "box_vectors", "[", "2", ",", "2", "]", "=", "size", "[", "2", "]", "return", "system" ]
33.31746
22.460317
def _hasattr(self, fieldname): """Returns True if this packet contains fieldname, False otherwise.""" special = 'history', 'raw' return (fieldname in special or fieldname in self._defn.fieldmap or fieldname in self._defn.derivationmap)
[ "def", "_hasattr", "(", "self", ",", "fieldname", ")", ":", "special", "=", "'history'", ",", "'raw'", "return", "(", "fieldname", "in", "special", "or", "fieldname", "in", "self", ".", "_defn", ".", "fieldmap", "or", "fieldname", "in", "self", ".", "_defn", ".", "derivationmap", ")" ]
47.833333
6.833333
def _list_objects(self, client_kwargs, max_request_entries): """ Lists objects. args: client_kwargs (dict): Client arguments. max_request_entries (int): If specified, maximum entries returned by request. Returns: generator of tuple: object name str, object header dict, directory bool """ client_kwargs = self._update_listing_client_kwargs( client_kwargs, max_request_entries) with _handle_azure_exception(): for obj in self.client.list_directories_and_files(**client_kwargs): yield (obj.name, self._model_to_dict(obj), isinstance(obj, _Directory))
[ "def", "_list_objects", "(", "self", ",", "client_kwargs", ",", "max_request_entries", ")", ":", "client_kwargs", "=", "self", ".", "_update_listing_client_kwargs", "(", "client_kwargs", ",", "max_request_entries", ")", "with", "_handle_azure_exception", "(", ")", ":", "for", "obj", "in", "self", ".", "client", ".", "list_directories_and_files", "(", "*", "*", "client_kwargs", ")", ":", "yield", "(", "obj", ".", "name", ",", "self", ".", "_model_to_dict", "(", "obj", ")", ",", "isinstance", "(", "obj", ",", "_Directory", ")", ")" ]
35.75
20.35
def deserialize(raw): """Instantiate :py:class:`ofxclient.Institution` from dictionary :param raw: serialized ``Institution`` :param type: dict per :py:method:`~Institution.serialize` :rtype: subclass of :py:class:`ofxclient.Institution` """ return Institution( id=raw['id'], org=raw['org'], url=raw['url'], broker_id=raw.get('broker_id', ''), username=raw['username'], password=raw['password'], description=raw.get('description', None), client_args=raw.get('client_args', {}) )
[ "def", "deserialize", "(", "raw", ")", ":", "return", "Institution", "(", "id", "=", "raw", "[", "'id'", "]", ",", "org", "=", "raw", "[", "'org'", "]", ",", "url", "=", "raw", "[", "'url'", "]", ",", "broker_id", "=", "raw", ".", "get", "(", "'broker_id'", ",", "''", ")", ",", "username", "=", "raw", "[", "'username'", "]", ",", "password", "=", "raw", "[", "'password'", "]", ",", "description", "=", "raw", ".", "get", "(", "'description'", ",", "None", ")", ",", "client_args", "=", "raw", ".", "get", "(", "'client_args'", ",", "{", "}", ")", ")" ]
36.176471
13.647059