Dataset columns:
  text          string (lengths 89 to 104k)
  code_tokens   list of token strings
  avg_line_len  float64 (7.91 to 980)
  score         float64 (0 to 630)

Each record below gives the text field (a Python function) followed by its avg_line_len and score values.
def start(self, app):
    """ Start application. """
    app.middlewares.insert(0, debugtoolbar_middleware_factory)
    self.global_panels = [Panel(self.app) for Panel in self.cfg.global_panels]
[ "def", "start", "(", "self", ",", "app", ")", ":", "app", ".", "middlewares", ".", "insert", "(", "0", ",", "debugtoolbar_middleware_factory", ")", "self", ".", "global_panels", "=", "[", "Panel", "(", "self", ".", "app", ")", "for", "Panel", "in", "se...
avg_line_len: 50.75, score: 21.75
def string(prompt=None, empty=False):
    """Prompt a string.

    Parameters
    ----------
    prompt : str, optional
        Use an alternative prompt.
    empty : bool, optional
        Allow an empty response.

    Returns
    -------
    str or None
        A str if the user entered a non-empty string.
        None if the user pressed only Enter and ``empty`` was True.
    """
    s = _prompt_input(prompt)
    if empty and not s:
        return None
    else:
        if s:
            return s
        else:
            return string(prompt=prompt, empty=empty)
[ "def", "string", "(", "prompt", "=", "None", ",", "empty", "=", "False", ")", ":", "s", "=", "_prompt_input", "(", "prompt", ")", "if", "empty", "and", "not", "s", ":", "return", "None", "else", ":", "if", "s", ":", "return", "s", "else", ":", "r...
avg_line_len: 21.96, score: 20.28
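A minimal usage sketch for the prompt helper above; the module name `prompts` and the interactive values are assumptions:

    from prompts import string

    name = string(prompt='Name: ')                       # re-prompts until non-empty
    nickname = string(prompt='Nickname: ', empty=True)   # returns None on bare Enter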
def update(self, role_sid=values.unset, last_consumed_message_index=values.unset):
    """
    Update the MemberInstance

    :param unicode role_sid: The Role assigned to this member.
    :param unicode last_consumed_message_index: An Integer representing index of the last Message this Member has read within this Channel

    :returns: Updated MemberInstance
    :rtype: twilio.rest.chat.v1.service.channel.member.MemberInstance
    """
    return self._proxy.update(
        role_sid=role_sid,
        last_consumed_message_index=last_consumed_message_index,
    )
[ "def", "update", "(", "self", ",", "role_sid", "=", "values", ".", "unset", ",", "last_consumed_message_index", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "update", "(", "role_sid", "=", "role_sid", ",", "last_consumed_messa...
avg_line_len: 40.866667, score: 22.733333
def manage(self):
    """
    Manage the task to handle restarts, reconfiguration, etc.
    Returns True to request a shorter period before the next call,
    False if nothing special is needed.
    """
    log = self._params.get('log', self._discard)
    if self._stopping:
        log.debug("Task '%s', stopping, retrying stop()", self._name)
        return self.stop()
    now = time.time()
    if self._started and self._limit:
        if now > self._limit:
            log.debug("Task '%s', time limit exceeded by %s, stopping",
                      self._name, deltafmt(now - self._limit))
            return self.stop()
        else:
            log.debug("Task '%s', time limit remaining %s",
                      self._name, deltafmt(self._limit - now))
    if self._legion.is_exiting():
        log.debug("Not managing '%s', legion is exiting", self._name)
        return False
    log.debug("managing '%s'", self._name)
    return self._start()
[ "def", "manage", "(", "self", ")", ":", "log", "=", "self", ".", "_params", ".", "get", "(", "'log'", ",", "self", ".", "_discard", ")", "if", "self", ".", "_stopping", ":", "log", ".", "debug", "(", "\"Task '%s', stopping, retrying stop()\"", ",", "self...
avg_line_len: 42.26087, score: 19.652174
def upload(self, file, name=None, prefix=None, extensions=None,
           overwrite=False, public=False, random_name=False, **kwargs):
    """
    To upload file

    :param file: FileStorage object or string location
    :param name: The name of the object.
    :param prefix: A prefix for the object. Can be in the form of directory tree
    :param extensions: list of extensions to allow. If empty, it will use all extension.
    :param overwrite: bool - To overwrite if file exists
    :param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
    :param random_name: If True and name is None it will create a random name.
                        Otherwise it will use the file name. `name` will always take precedence
    :param kwargs: extra params: ie: acl, meta_data etc.
    :return: Object
    """
    tmp_file = None
    try:
        if "acl" not in kwargs:
            kwargs["acl"] = "public-read" if public else "private"
        extra = kwargs

        # It seems like this is a url, we'll try to download it first
        if isinstance(file, string_types) and re.match(URL_REGEXP, file):
            tmp_file = self._download_from_url(file)
            file = tmp_file

        # Create a random name
        if not name and random_name:
            name = uuid.uuid4().hex

        # coming from a flask, or upload object
        if isinstance(file, FileStorage):
            extension = get_file_extension(file.filename)
            if not name:
                fname = get_file_name(file.filename).split("." + extension)[0]
                name = slugify.slugify(fname)
        else:
            extension = get_file_extension(file)
            if not name:
                name = get_file_name(file)

        if len(get_file_extension(name).strip()) == 0:
            name += "." + extension

        name = name.strip("/").strip()

        if isinstance(self.driver, local.LocalStorageDriver):
            name = secure_filename(name)

        if prefix:
            name = prefix.lstrip("/") + name

        if not overwrite:
            name = self._safe_object_name(name)

        # For backwards compatibility, kwargs now holds `allowed_extensions`
        allowed_extensions = extensions or kwargs.get("allowed_extensions")
        if not allowed_extensions:
            allowed_extensions = self.allowed_extensions
        if extension.lower() not in allowed_extensions:
            raise InvalidExtensionError("Invalid file extension: '.%s' " % extension)

        if isinstance(file, FileStorage):
            obj = self.container.upload_object_via_stream(iterator=file.stream,
                                                          object_name=name,
                                                          extra=extra)
        else:
            obj = self.container.upload_object(file_path=file,
                                               object_name=name,
                                               extra=extra)
        return Object(obj=obj)
    except Exception as e:
        raise e
    finally:
        if tmp_file and os.path.isfile(tmp_file):
            os.remove(tmp_file)
[ "def", "upload", "(", "self", ",", "file", ",", "name", "=", "None", ",", "prefix", "=", "None", ",", "extensions", "=", "None", ",", "overwrite", "=", "False", ",", "public", "=", "False", ",", "random_name", "=", "False", ",", "*", "*", "kwargs", ...
avg_line_len: 41.39759, score: 21.542169
def endpoint(self, endpoint):
    """A decorator to register a function as an endpoint.
    Example::

        @app.endpoint('example.endpoint')
        def example():
            return "example"

    :param endpoint: the name of the endpoint
    """
    def decorator(f):
        self.view_functions[endpoint] = f
        return f
    return decorator
[ "def", "endpoint", "(", "self", ",", "endpoint", ")", ":", "def", "decorator", "(", "f", ")", ":", "self", ".", "view_functions", "[", "endpoint", "]", "=", "f", "return", "f", "return", "decorator" ]
avg_line_len: 27.428571, score: 14.714286
def update(self, gradient, step):
    """Update the search direction given the latest gradient and step"""
    self.old_gradient = self.gradient
    self.gradient = gradient
    N = len(self.gradient)
    if self.inv_hessian is None:
        # update the direction
        self.direction = -self.gradient
        self.status = "SD"
        # new guess of the inverse hessian
        self.inv_hessian = np.identity(N, float)
    else:
        # update the direction
        self.direction = -np.dot(self.inv_hessian, self.gradient)
        self.status = "QN"
        # new guess of the inverse hessian (BFGS)
        y = self.gradient - self.old_gradient
        s = step
        sy = abs(np.dot(s, y)) + 1e-5
        A = np.outer(-y / sy, s)
        A.ravel()[::N + 1] += 1
        self.inv_hessian = (
            np.dot(np.dot(A.transpose(), self.inv_hessian), A)
            + np.outer(s / sy, s)
        )
[ "def", "update", "(", "self", ",", "gradient", ",", "step", ")", ":", "self", ".", "old_gradient", "=", "self", ".", "gradient", "self", ".", "gradient", "=", "gradient", "N", "=", "len", "(", "self", ".", "gradient", ")", "if", "self", ".", "inv_hes...
avg_line_len: 38.92, score: 10.6
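For reference, the quasi-Newton branch above is the standard BFGS inverse-Hessian update. With s the step, y the gradient difference, and the code's safeguard sy = |s . y| + 1e-5 standing in for s^T y, it computes

    H_{k+1} = \left(I - \frac{s y^\top}{s^\top y}\right)^\top H_k \left(I - \frac{y s^\top}{s^\top y}\right) + \frac{s s^\top}{s^\top y}

which is exactly A^T H A + outer(s/sy, s) for the matrix A = I - y s^T / sy built in the code.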
def translate_func(name, block, args):
    """Translates functions and all nested functions to Python code.

    name  - name of that function (global functions will be available under
            var while inline will be available directly under this name)
    block - code of the function (*with* brackets {})
    args  - arguments that this function takes"""
    inline = name.startswith('PyJsLvalInline')
    real_name = ''
    if inline:
        name, real_name = name.split('@')
    arglist = ', '.join(args) + ', ' if args else ''
    code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist)
    # register local variables
    scope = "'this':this, 'arguments':arguments"  # it will be a simple dictionary
    for arg in args:
        scope += ', %s:%s' % (repr(arg), arg)
    if real_name:
        scope += ', %s:%s' % (repr(real_name), name)
    code += indent('var = Scope({%s}, var)\n' % scope)
    block, nested_hoisted, nested_inline = remove_functions(block)
    py_code, to_register = translate_flow(block)
    # register variables declared with var and names of hoisted functions.
    to_register += nested_hoisted.keys()
    if to_register:
        code += indent('var.registers(%s)\n' % str(to_register))
    for nested_name, info in nested_hoisted.iteritems():
        nested_block, nested_args = info
        new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args)
        # Now put definition of hoisted function on the top
        code += indent(new_code)
        code += indent('PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name))
        code += indent('var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name))
    for nested_name, info in nested_inline.iteritems():
        nested_block, nested_args = info
        new_code = translate_func(nested_name, nested_block, nested_args)
        # Inject definitions of inline functions just before usage
        # nested inline names have this format : LVAL_NAME@REAL_NAME
        py_code = inject_before_lval(py_code, nested_name.split('@')[0], new_code)
    if py_code.strip():
        code += indent(py_code)
    return code
[ "def", "translate_func", "(", "name", ",", "block", ",", "args", ")", ":", "inline", "=", "name", ".", "startswith", "(", "'PyJsLvalInline'", ")", "real_name", "=", "''", "if", "inline", ":", "name", ",", "real_name", "=", "name", ".", "split", "(", "'...
avg_line_len: 48.8, score: 17.577778
def from_segment_xml(cls, xml_file, **kwargs):
    """
    Read a ligo.segments.segmentlist from a file containing an xml segment
    table.

    Parameters
    ----------
    xml_file : str
        Path to the segment xml file; the file is opened by this method.
    """
    # load xmldocument and SegmentDefTable and SegmentTables
    fp = open(xml_file, 'r')
    xmldoc, _ = ligolw_utils.load_fileobj(fp,
                                          gz=xml_file.endswith(".gz"),
                                          contenthandler=ContentHandler)

    seg_def_table = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)
    seg_table = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
    seg_sum_table = table.get_table(xmldoc, lsctables.SegmentSumTable.tableName)

    segs = segments.segmentlistdict()
    seg_summ = segments.segmentlistdict()

    seg_id = {}
    for seg_def in seg_def_table:
        # Here we want to encode ifo and segment name
        full_channel_name = ':'.join([str(seg_def.ifos), str(seg_def.name)])
        seg_id[int(seg_def.segment_def_id)] = full_channel_name
        segs[full_channel_name] = segments.segmentlist()
        seg_summ[full_channel_name] = segments.segmentlist()

    for seg in seg_table:
        seg_obj = segments.segment(
            lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
            lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
        segs[seg_id[int(seg.segment_def_id)]].append(seg_obj)

    for seg in seg_sum_table:
        seg_obj = segments.segment(
            lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
            lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
        seg_summ[seg_id[int(seg.segment_def_id)]].append(seg_obj)

    for seg_name in seg_id.values():
        segs[seg_name] = segs[seg_name].coalesce()

    xmldoc.unlink()
    fp.close()
    curr_url = urlparse.urlunparse(['file', 'localhost', xml_file,
                                    None, None, None])

    return cls.from_segment_list_dict('SEGMENTS', segs, file_url=curr_url,
                                      file_exists=True,
                                      seg_summ_dict=seg_summ, **kwargs)
[ "def", "from_segment_xml", "(", "cls", ",", "xml_file", ",", "*", "*", "kwargs", ")", ":", "# load xmldocument and SegmentDefTable and SegmentTables", "fp", "=", "open", "(", "xml_file", ",", "'r'", ")", "xmldoc", ",", "_", "=", "ligolw_utils", ".", "load_fileob...
avg_line_len: 42.789474, score: 21.982456
def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True, **kwargs):
    """Return processor with Statements extracted by reading an NXML file.

    Parameters
    ----------
    fname : str
        The path to the NXML file to be read.
    output_fmt : Optional[str]
        The output format to obtain from Sparser, with the two options being
        'json' and 'xml'. Default: 'json'
    outbuf : Optional[file]
        A file like object that the Sparser output is written to.
    cleanup : Optional[bool]
        If True, the output file created by Sparser is removed.
        Default: True

    Returns
    -------
    sp : SparserXMLProcessor or SparserJSONProcessor
        depending on what output format was chosen.
    """
    sp = None
    out_fname = None
    try:
        out_fname = run_sparser(fname, output_fmt, outbuf, **kwargs)
        sp = process_sparser_output(out_fname, output_fmt)
    except Exception as e:
        logger.error("Sparser failed to run on %s." % fname)
        logger.exception(e)
    finally:
        if out_fname is not None and os.path.exists(out_fname) and cleanup:
            os.remove(out_fname)
    return sp
[ "def", "process_nxml_file", "(", "fname", ",", "output_fmt", "=", "'json'", ",", "outbuf", "=", "None", ",", "cleanup", "=", "True", ",", "*", "*", "kwargs", ")", ":", "sp", "=", "None", "out_fname", "=", "None", "try", ":", "out_fname", "=", "run_spar...
avg_line_len: 33.085714, score: 21.914286
def execute(self, query, *multiparams, **params):
    """Executes a SQL query with optional parameters.

    query - a SQL query string or any sqlalchemy expression.

    *multiparams/**params - represent bound parameter values to be
    used in the execution. Typically, the format is a dictionary
    passed to *multiparams::

        await conn.execute(
            table.insert(),
            {"id": 1, "value": "v1"},
        )

    ...or individual key/values interpreted by **params::

        await conn.execute(
            table.insert(), id=1, value="v1"
        )

    In the case that a plain SQL string is passed, a tuple or
    individual values in \*multiparams may be passed::

        await conn.execute(
            "INSERT INTO table (id, value) VALUES (%d, %s)",
            (1, "v1")
        )

        await conn.execute(
            "INSERT INTO table (id, value) VALUES (%s, %s)",
            1, "v1"
        )

    Returns ResultProxy instance with results of SQL query execution.
    """
    coro = self._execute(query, *multiparams, **params)
    return _SAConnectionContextManager(coro)
[ "def", "execute", "(", "self", ",", "query", ",", "*", "multiparams", ",", "*", "*", "params", ")", ":", "coro", "=", "self", ".", "_execute", "(", "query", ",", "*", "multiparams", ",", "*", "*", "params", ")", "return", "_SAConnectionContextManager", ...
avg_line_len: 30.564103, score: 22.512821
def load_handlers(handler_mapping):
    """ Given a dictionary mapping which looks like the following, import
    the objects based on the dotted path and yield the packet type and
    handler as pairs.

    If the special string '*' is passed, don't process that, pass it on as
    it is a wildcard. If a non-string object is given for either packet or
    handler (key or value) assume these are the objects to use and yield
    them.

    ::

        {
            'rfxcom.protocol.Status': 'home.collect.logging_handler',
            'rfxcom.protocol.Elec': 'home.collect.elec_handler',
            'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
            '*': 'home.collect.logging_handler'
        }
    """
    handlers = {}

    for packet_type, handler in handler_mapping.items():
        if packet_type == '*':
            Packet = packet_type
        elif isinstance(packet_type, str):
            Packet = importer(packet_type)
        else:
            Packet = packet_type

        if isinstance(handler, str):
            Handler = importer(handler)
        else:
            Handler = handler

        if Packet in handlers:
            raise HandlerConfigError(
                "Handler already provided for packet %s" % Packet)

        handlers[Packet] = Handler

    return handlers
[ "def", "load_handlers", "(", "handler_mapping", ")", ":", "handlers", "=", "{", "}", "for", "packet_type", ",", "handler", "in", "handler_mapping", ".", "items", "(", ")", ":", "if", "packet_type", "==", "'*'", ":", "Packet", "=", "packet_type", "elif", "i...
avg_line_len: 28.977273, score: 22.840909
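A hedged example of the mapping shapes the loader accepts, mixing dotted paths, a wildcard, and already-imported objects; `ElecPacket` and `log_packet` are hypothetical names:

    handlers = load_handlers({
        'rfxcom.protocol.Status': 'home.collect.logging_handler',  # both sides resolved by dotted path
        ElecPacket: log_packet,                                    # non-string objects pass through unchanged
        '*': 'home.collect.logging_handler',                       # wildcard key is kept as-is
    })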
def get_authorisation_url(self, reset=False):
    """ Initialises the OAuth2 Process by asking the auth server for a login URL.
    Once called, the user can login by being redirected to the url returned by
    this function. If there is an error during authorisation, None is returned."""
    if reset:
        self.auth_url = None
    if not self.auth_url:
        try:
            oauth = OAuth2Session(self.client_id, redirect_uri=self.redirect_url)
            self.auth_url, self.state = oauth.authorization_url(self.auth_base_url)
        except Exception:
            # print("Unexpected error:", sys.exc_info()[0])
            # print("Could not get Authorisation Url!")
            return None
    return self.auth_url
[ "def", "get_authorisation_url", "(", "self", ",", "reset", "=", "False", ")", ":", "if", "reset", ":", "self", ".", "auth_url", "=", "None", "if", "not", "self", ".", "auth_url", ":", "try", ":", "oauth", "=", "OAuth2Session", "(", "self", ".", "client...
avg_line_len: 43.611111, score: 20.944444
def on_left_click(self, event, grid, choices):
    """
    creates popup menu when user clicks on the column
    if that column is in the list of choices that get a drop-down menu.
    allows user to edit the column, but only from available values
    """
    row, col = event.GetRow(), event.GetCol()
    if col == 0 and self.grid.name != 'ages':
        default_val = self.grid.GetCellValue(row, col)
        msg = "Choose a new name for {}.\nThe new value will propagate throughout the contribution.".format(default_val)
        dia = wx.TextEntryDialog(self.grid, msg,
                                 "Rename {}".format(self.grid.name, default_val),
                                 default_val)
        res = dia.ShowModal()
        if res == wx.ID_OK:
            new_val = dia.GetValue()
            # update the contribution with new name
            self.contribution.rename_item(self.grid.name, default_val, new_val)
            # don't propagate changes if we are just assigning a new name
            # and not really renaming
            # (i.e., if a blank row was added then named)
            if default_val == '':
                self.grid.SetCellValue(row, 0, new_val)
                return
            # update the current grid with new name
            for row in range(self.grid.GetNumberRows()):
                cell_value = self.grid.GetCellValue(row, 0)
                if cell_value == default_val:
                    self.grid.SetCellValue(row, 0, new_val)
                else:
                    continue
        return
    color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
    # allow user to cherry-pick cells for editing.
    # gets selection of meta key for mac, ctrl key for pc
    if event.ControlDown() or event.MetaDown():
        row, col = event.GetRow(), event.GetCol()
        if (row, col) not in self.dispersed_selection:
            self.dispersed_selection.append((row, col))
            self.grid.SetCellBackgroundColour(row, col, 'light blue')
        else:
            self.dispersed_selection.remove((row, col))
            self.grid.SetCellBackgroundColour(row, col, color)  # 'white'
        self.grid.ForceRefresh()
        return
    if event.ShiftDown():
        # allow user to highlight multiple consecutive cells in a column
        previous_col = self.grid.GetGridCursorCol()
        previous_row = self.grid.GetGridCursorRow()
        col = event.GetCol()
        row = event.GetRow()
        if col != previous_col:
            return
        else:
            if row > previous_row:
                row_range = list(range(previous_row, row + 1))
            else:
                row_range = list(range(row, previous_row + 1))
        for r in row_range:
            self.grid.SetCellBackgroundColour(r, col, 'light blue')
            self.selection.append((r, col))
        self.grid.ForceRefresh()
        return
    selection = False
    if self.dispersed_selection:
        is_dispersed = True
        selection = self.dispersed_selection
    if self.selection:
        is_dispersed = False
        selection = self.selection
    try:
        col = event.GetCol()
        row = event.GetRow()
    except AttributeError:
        row, col = selection[0][0], selection[0][1]
    self.grid.SetGridCursor(row, col)
    if col in list(choices.keys()):  # column should have a pop-up menu
        menu = wx.Menu()
        two_tiered = choices[col][1]
        choices = choices[col][0]
        if not two_tiered:  # menu is one tiered
            if 'CLEAR cell of all values' not in choices:
                choices.insert(0, 'CLEAR cell of all values')
            for choice in choices:
                if not choice:
                    choice = " "  # prevents error if choice is an empty string
                menuitem = menu.Append(wx.ID_ANY, str(choice))
                self.window.Bind(wx.EVT_MENU,
                                 lambda event: self.on_select_menuitem(event, grid, row, col, selection),
                                 menuitem)
            self.show_menu(event, menu)
        else:  # menu is two_tiered
            clear = menu.Append(-1, 'CLEAR cell of all values')
            self.window.Bind(wx.EVT_MENU,
                             lambda event: self.on_select_menuitem(event, grid, row, col, selection),
                             clear)
            for choice in sorted(choices.items()):
                submenu = wx.Menu()
                for item in choice[1]:
                    menuitem = submenu.Append(-1, str(item))
                    self.window.Bind(wx.EVT_MENU,
                                     lambda event: self.on_select_menuitem(event, grid, row, col, selection),
                                     menuitem)
                menu.Append(-1, choice[0], submenu)
            self.show_menu(event, menu)
    if selection:
        # re-whiten the cells that were previously highlighted
        for row, col in selection:
            self.grid.SetCellBackgroundColour(row, col, self.col_color)
        self.dispersed_selection = []
        self.selection = []
    self.grid.ForceRefresh()
[ "def", "on_left_click", "(", "self", ",", "event", ",", "grid", ",", "choices", ")", ":", "row", ",", "col", "=", "event", ".", "GetRow", "(", ")", ",", "event", ".", "GetCol", "(", ")", "if", "col", "==", "0", "and", "self", ".", "grid", ".", ...
avg_line_len: 46.280702, score: 19.614035
def clear(self):
    '''
    Clear plugin manager state.

    Registered mimetype functions will be disposed after calling this method.
    '''
    self._mimetype_functions = list(self._default_mimetype_functions)
    super(MimetypePluginManager, self).clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "_mimetype_functions", "=", "list", "(", "self", ".", "_default_mimetype_functions", ")", "super", "(", "MimetypePluginManager", ",", "self", ")", ".", "clear", "(", ")" ]
avg_line_len: 31.555556, score: 25.333333
def lengths(self):
    """
    The cell lengths.

    Args:
        None

    Returns:
        (np.array(a,b,c)): The cell lengths.
    """
    return np.array([math.sqrt(sum(row**2)) for row in self.matrix])
[ "def", "lengths", "(", "self", ")", ":", "return", "(", "np", ".", "array", "(", "[", "math", ".", "sqrt", "(", "sum", "(", "row", "**", "2", ")", ")", "for", "row", "in", "self", ".", "matrix", "]", ")", ")" ]
avg_line_len: 22.090909, score: 21.909091
def ignore_warning(warning):
    """
    Ignore any emitted warnings from a function.

    :param warning: The category of warning to ignore.
    """
    def decorator(func):
        """
        Return a decorated function whose emitted warnings are ignored.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            """ Wrap the function. """
            warnings.simplefilter('ignore', warning)
            return func(*args, **kwargs)
        return wrapper
    return decorator
[ "def", "ignore_warning", "(", "warning", ")", ":", "def", "decorator", "(", "func", ")", ":", "\"\"\"\n Return a decorated function whose emitted warnings are ignored.\n \"\"\"", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*...
avg_line_len: 26.947368, score: 10.736842
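A small usage sketch for the decorator above:

    import warnings

    @ignore_warning(DeprecationWarning)
    def noisy():
        warnings.warn('old API', DeprecationWarning)
        return 42

    noisy()  # returns 42 without emitting the warning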
def beacon(config):
    '''
    Scan for the configured services and fire events

    Example Config

    .. code-block:: yaml

        beacons:
          service:
            - services:
                salt-master: {}
                mysql: {}

    The config above sets up beacons to check for
    the salt-master and mysql services.

    The config also supports two other parameters for each service:

    `onchangeonly`: when `onchangeonly` is True the beacon will fire
    events only when the service status changes. Otherwise, it will fire an
    event at each beacon interval. The default is False.

    `delay`: when `delay` is greater than 0 the beacon will fire events only
    after the service status changes, and the delay (in seconds) has passed.
    Applicable only when `onchangeonly` is True. The default is 0.

    `emitatstartup`: when `emitatstartup` is False the beacon will not fire
    an event when the minion is reloaded. Applicable only when
    `onchangeonly` is True. The default is True.

    `uncleanshutdown`: If `uncleanshutdown` is present it should point to the
    location of a pid file for the service. Most services will not clean up
    this pid file if they are shutdown uncleanly (e.g. via `kill -9`) or if
    they are terminated through a crash such as a segmentation fault. If the
    file is present, then the beacon will add `uncleanshutdown: True` to the
    event. If not present, the field will be False. The field is only added
    when the service is NOT running. Omitting the configuration variable
    altogether will turn this feature off.

    Please note that some init systems can remove the pid file if the service
    registers as crashed. One such example is nginx on CentOS 7, where the
    service unit removes the pid file when the service shuts down (IE: the pid
    file is observed as removed when kill -9 is sent to the nginx master
    process). The 'uncleanshutdown' option might not be of much use there,
    unless the unit file is modified.

    Here is an example that will fire an event 30 seconds after the state of
    nginx changes and report an uncleanshutdown. This example is for Arch,
    which places nginx's pid file in `/run`.

    .. code-block:: yaml

        beacons:
          service:
            - services:
                nginx:
                  onchangeonly: True
                  delay: 30
                  uncleanshutdown: /run/nginx.pid
    '''
    ret = []
    _config = {}
    list(map(_config.update, config))

    for service in _config.get('services', {}):
        ret_dict = {}
        service_config = _config['services'][service]

        ret_dict[service] = {'running': __salt__['service.status'](service)}
        ret_dict['service_name'] = service
        ret_dict['tag'] = service
        currtime = time.time()

        # If no options is given to the service, we fall back to the defaults
        # assign a False value to oncleanshutdown and onchangeonly. Those
        # key:values are then added to the service dictionary.
        if not service_config:
            service_config = {}
        if 'oncleanshutdown' not in service_config:
            service_config['oncleanshutdown'] = False
        if 'emitatstartup' not in service_config:
            service_config['emitatstartup'] = True
        if 'onchangeonly' not in service_config:
            service_config['onchangeonly'] = False
        if 'delay' not in service_config:
            service_config['delay'] = 0

        # We only want to report the nature of the shutdown
        # if the current running status is False
        # as well as if the config for the beacon asks for it
        if 'uncleanshutdown' in service_config and not ret_dict[service]['running']:
            filename = service_config['uncleanshutdown']
            ret_dict[service]['uncleanshutdown'] = True if os.path.exists(filename) else False
        if 'onchangeonly' in service_config and service_config['onchangeonly'] is True:
            if service not in LAST_STATUS:
                LAST_STATUS[service] = ret_dict[service]
                if service_config['delay'] > 0:
                    LAST_STATUS[service]['time'] = currtime
                elif not service_config['emitatstartup']:
                    continue
                else:
                    ret.append(ret_dict)

            if LAST_STATUS[service]['running'] != ret_dict[service]['running']:
                LAST_STATUS[service] = ret_dict[service]
                if service_config['delay'] > 0:
                    LAST_STATUS[service]['time'] = currtime
                else:
                    ret.append(ret_dict)

            if 'time' in LAST_STATUS[service]:
                elapsedtime = int(round(currtime - LAST_STATUS[service]['time']))
                if elapsedtime > service_config['delay']:
                    del LAST_STATUS[service]['time']
                    ret.append(ret_dict)
        else:
            ret.append(ret_dict)

    return ret
[ "def", "beacon", "(", "config", ")", ":", "ret", "=", "[", "]", "_config", "=", "{", "}", "list", "(", "map", "(", "_config", ".", "update", ",", "config", ")", ")", "for", "service", "in", "_config", ".", "get", "(", "'services'", ",", "{", "}",...
avg_line_len: 40.380165, score: 23.917355
def to_qasm(self,
            header: Optional[str] = None,
            precision: int = 10,
            qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
            ) -> str:
    """Returns QASM equivalent to the circuit.

    Args:
        header: A multi-line string that is placed in a comment at the top
            of the QASM. Defaults to a cirq version specifier.
        precision: Number of digits to use when representing numbers.
        qubit_order: Determines how qubits are ordered in the QASM register.
    """
    return str(self._to_qasm_output(header, precision, qubit_order))
[ "def", "to_qasm", "(", "self", ",", "header", ":", "Optional", "[", "str", "]", "=", "None", ",", "precision", ":", "int", "=", "10", ",", "qubit_order", ":", "ops", ".", "QubitOrderOrList", "=", "ops", ".", "QubitOrder", ".", "DEFAULT", ",", ")", "-...
avg_line_len: 43.733333, score: 21.533333
def put(self, key, value, minutes):
    """
    Store an item in the cache for a given number of minutes.

    :param key: The cache key
    :type key: str

    :param value: The cache value
    :type value: mixed

    :param minutes: The lifetime in minutes of the cached value
    :type minutes: int
    """
    value = self.serialize(value)
    minutes = max(1, minutes)
    self._redis.setex(self._prefix + key, minutes * 60, value)
[ "def", "put", "(", "self", ",", "key", ",", "value", ",", "minutes", ")", ":", "value", "=", "self", ".", "serialize", "(", "value", ")", "minutes", "=", "max", "(", "1", ",", "minutes", ")", "self", ".", "_redis", ".", "setex", "(", "self", ".",...
avg_line_len: 26.055556, score: 19.388889
def t_heredocvar_ENCAPSED_AND_WHITESPACE(t):
    r'( [^\n\\${] | \\. | \$(?![A-Za-z_{]) | \{(?!\$) )+\n? | \\?\n'
    t.lexer.lineno += t.value.count("\n")
    t.lexer.pop_state()
    return t
[ "def", "t_heredocvar_ENCAPSED_AND_WHITESPACE", "(", "t", ")", ":", "t", ".", "lexer", ".", "lineno", "+=", "t", ".", "value", ".", "count", "(", "\"\\n\"", ")", "t", ".", "lexer", ".", "pop_state", "(", ")", "return", "t" ]
avg_line_len: 37.6, score: 15.6
def reactions_to_files(model, dest, writer, split_subsystem):
    """Turn the reaction subsystems into their own files.

    If a subsystem has a number of reactions over the threshold, it gets its
    own YAML file. All other reactions, those that don't have a subsystem or
    are in a subsystem that falls below the threshold, get added to a common
    reaction file.

    Args:
        model: :class:`psamm_import.model.MetabolicModel`.
        dest: output path for model files.
        writer: :class:`psamm.datasource.native.ModelWriter`.
        split_subsystem: Divide reactions into multiple files by subsystem.
    """
    def safe_file_name(origin_name):
        safe_name = re.sub(
            r'\W+', '_', origin_name, flags=re.UNICODE)
        safe_name = re.sub(
            r'_+', '_', safe_name.lower(), flags=re.UNICODE)
        safe_name = safe_name.strip('_')
        return safe_name

    common_reactions = []
    reaction_files = []
    if not split_subsystem:
        common_reactions = sorted(model.reactions, key=lambda r: r.id)
        if len(common_reactions) > 0:
            reaction_file = 'reactions.yaml'
            with open(os.path.join(dest, reaction_file), 'w') as f:
                writer.write_reactions(f, common_reactions)
            reaction_files.append(reaction_file)
    else:
        subsystems = {}
        for reaction in sorted(model.reactions, key=lambda r: r.id):
            if 'subsystem' in reaction.properties:
                subsystem_file = safe_file_name(
                    reaction.properties['subsystem'])
                subsystems.setdefault(subsystem_file, []).append(reaction)
            else:
                common_reactions.append(reaction)

        subsystem_folder = 'reactions'
        sub_existance = False
        for subsystem_file, reactions in iteritems(subsystems):
            if len(reactions) < _MAX_REACTION_COUNT:
                for reaction in reactions:
                    common_reactions.append(reaction)
            else:
                if len(reactions) > 0:
                    mkdir_p(os.path.join(dest, subsystem_folder))
                    subsystem_file = os.path.join(
                        subsystem_folder, '{}.yaml'.format(subsystem_file))

                    with open(os.path.join(dest, subsystem_file), 'w') as f:
                        writer.write_reactions(f, reactions)
                    reaction_files.append(subsystem_file)
                    sub_existance = True

        reaction_files.sort()
        if sub_existance:
            reaction_file = os.path.join(
                subsystem_folder, 'other_reactions.yaml')
        else:
            reaction_file = 'reactions.yaml'
        if len(common_reactions) > 0:
            with open(os.path.join(dest, reaction_file), 'w') as f:
                writer.write_reactions(f, common_reactions)
            reaction_files.append(reaction_file)
    return reaction_files
[ "def", "reactions_to_files", "(", "model", ",", "dest", ",", "writer", ",", "split_subsystem", ")", ":", "def", "safe_file_name", "(", "origin_name", ")", ":", "safe_name", "=", "re", ".", "sub", "(", "r'\\W+'", ",", "'_'", ",", "origin_name", ",", "flags"...
avg_line_len: 41.085714, score: 18.657143
def sh(cmd):
    """
    Run the given command in a shell.

    The command should be a single string containing a shell command. If the
    command contains the names of any local variables enclosed in braces, the
    actual values of the named variables will be filled in. (Note that this
    works on variables defined in the calling scope, which is a little bit
    magical.) Regular braces must be escaped as you would with str.format().
    Also be aware that this approach is vulnerable to shell injection attacks.
    """

    # Figure out what local variables are defined in the calling scope.
    import inspect
    frame = inspect.currentframe()
    try:
        locals = frame.f_back.f_locals
    finally:
        del frame

    # Run the given command in a shell. Return everything written to stdout
    # if the command returns an error code of 0, otherwise raise an exception.
    from subprocess import Popen, PIPE, CalledProcessError
    process = Popen(cmd.format(**locals), shell=True, stdout=PIPE)
    stdout, unused_stderr = process.communicate()
    retcode = process.poll()
    if retcode:
        # Use the imported name; the bare `subprocess` module is not in scope here.
        error = CalledProcessError(retcode, cmd)
        error.output = stdout
        raise error
    return stdout.strip()
[ "def", "sh", "(", "cmd", ")", ":", "# Figure out what local variables are defined in the calling scope.", "import", "inspect", "frame", "=", "inspect", ".", "currentframe", "(", ")", "try", ":", "locals", "=", "frame", ".", "f_back", ".", "f_locals", "finally", ":...
avg_line_len: 38.967742, score: 24.451613
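A usage sketch showing the caller-local interpolation that sh() documents; the command and variable are illustrative:

    branch = 'main'
    out = sh('git log --oneline -1 {branch}')  # '{branch}' is filled from the caller's locals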
def list_ptr_records(self, device):
    """
    Returns a list of all PTR records configured for this device.
    """
    device_type = self._resolve_device_type(device)
    href, svc_name = self._get_ptr_details(device, device_type)
    uri = "/rdns/%s?href=%s" % (svc_name, href)
    try:
        resp, resp_body = self._retry_get(uri)
    except exc.NotFound:
        return []
    records = [CloudDNSPTRRecord(rec, device)
               for rec in resp_body.get("records", [])]
    return records
[ "def", "list_ptr_records", "(", "self", ",", "device", ")", ":", "device_type", "=", "self", ".", "_resolve_device_type", "(", "device", ")", "href", ",", "svc_name", "=", "self", ".", "_get_ptr_details", "(", "device", ",", "device_type", ")", "uri", "=", ...
avg_line_len: 38.357143, score: 14.214286
def sum_by_n(d, w, n):
    """A utility function to summarize a data array into n values after
    weighting the array with another weight array w

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups
        t = c*n (c is a constant)

    Returns
    -------
      : array
        (n, 1), an array with summarized values

    Examples
    --------
    Creating an array including four integers.
    We will compute weighted means for every two elements.

    >>> d = np.array([10, 9, 20, 30])

    Here is another array with the weight values for d's elements.

    >>> w = np.array([0.5, 0.1, 0.3, 0.8])

    We specify the number of groups for which the weighted mean is computed.

    >>> n = 2

    Applying sum_by_n function

    >>> sum_by_n(d, w, n)
    array([ 5.9, 30. ])

    """
    t = len(d)
    h = t // n  # must be floor!
    d = d * w
    return np.array([sum(d[i: i + h]) for i in range(0, t, h)])
[ "def", "sum_by_n", "(", "d", ",", "w", ",", "n", ")", ":", "t", "=", "len", "(", "d", ")", "h", "=", "t", "//", "n", "#must be floor!", "d", "=", "d", "*", "w", "return", "np", ".", "array", "(", "[", "sum", "(", "d", "[", "i", ":", "i", ...
avg_line_len: 24.111111, score: 23.044444
def send(self, packet_buffer):
    """
    send a buffer as a packet to the network interface

    :param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
    """
    if self._handle is None:
        raise self.DeviceIsNotOpen()
    buffer_length = len(packet_buffer)
    buf_send = ctypes.cast(
        ctypes.create_string_buffer(packet_buffer, buffer_length),
        ctypes.POINTER(ctypes.c_ubyte))
    wtypes.pcap_sendpacket(self._handle, buf_send, buffer_length)
[ "def", "send", "(", "self", ",", "packet_buffer", ")", ":", "if", "self", ".", "_handle", "is", "None", ":", "raise", "self", ".", "DeviceIsNotOpen", "(", ")", "buffer_length", "=", "len", "(", "packet_buffer", ")", "buf_send", "=", "ctypes", ".", "cast"...
avg_line_len: 47.454545, score: 16
def calcFstats(predTst, yTest, p, axis=0):
    """calculate coefficient of determination. Assumes that axis=0 is time

    Parameters
    ----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
    p : float, number of predictors

    Returns
    -------
    aryFunc : np.array
        R2
    """
    rss = np.sum((yTest - predTst) ** 2, axis=axis)
    tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
    # derive number of measurements
    n = yTest.shape[0]
    # calculate F values
    vecFvals = ((tss - rss) / p) / (rss / (n - p - 1))
    # calculate corresponding p values
    df1 = p - 1
    df2 = n - 1
    vecPvals = stats.f.cdf(vecFvals, df1, df2)
    return vecFvals, vecPvals
[ "def", "calcFstats", "(", "predTst", ",", "yTest", ",", "p", ",", "axis", "=", "0", ")", ":", "rss", "=", "np", ".", "sum", "(", "(", "yTest", "-", "predTst", ")", "**", "2", ",", "axis", "=", "axis", ")", "tss", "=", "np", ".", "sum", "(", ...
avg_line_len: 30.32, score: 15.88
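In equation form, the statistic computed above is

    F = \frac{(TSS - RSS)/p}{RSS/(n - p - 1)}

evaluated against an F CDF with df1 = p - 1 and df2 = n - 1, as written in the code.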
def _format_num(self, value):
    """Return the number value for value, given this field's `num_type`."""
    # (value is True or value is False) is ~5x faster than isinstance(value, bool)
    if value is True or value is False:
        raise TypeError('value must be a Number, not a boolean.')
    return self.num_type(value)
[ "def", "_format_num", "(", "self", ",", "value", ")", ":", "# (value is True or value is False) is ~5x faster than isinstance(value, bool)", "if", "value", "is", "True", "or", "value", "is", "False", ":", "raise", "TypeError", "(", "'value must be a Number, not a boolean.'"...
avg_line_len: 56.833333, score: 15.666667
def add_cache(self, namespace, key, query_hash, length, cache):
    """Add cached values for the specified date range and query"""
    start = 0
    bulk_insert = self.bulk_insert
    cache_len = len(cache)
    row = '(%s,%s,%s,%s,%s,%s)'
    query = 'INSERT INTO gauged_cache ' \
            '(namespace, key, "hash", length, start, value) VALUES '
    execute = self.cursor.execute
    query_hash = self.psycopg2.Binary(query_hash)
    while start < cache_len:
        rows = cache[start:start + bulk_insert]
        params = []
        for timestamp, value in rows:
            params.extend((namespace, key, query_hash, length,
                           timestamp, value))
        insert = (row + ',') * (len(rows) - 1) + row
        execute(query + insert, params)
        start += bulk_insert
    self.db.commit()
[ "def", "add_cache", "(", "self", ",", "namespace", ",", "key", ",", "query_hash", ",", "length", ",", "cache", ")", ":", "start", "=", "0", "bulk_insert", "=", "self", ".", "bulk_insert", "cache_len", "=", "len", "(", "cache", ")", "row", "=", "'(%s,%s...
avg_line_len: 43.55, score: 11.25
def set_background(self, image=None, path=None, resize=True):
    """
    Set the background image of the Canvas.

    :param image: background image
    :type image: PhotoImage
    :param path: background image path
    :type path: str
    :param resize: whether to resize the image to the Canvas size
    :type resize: bool
    """
    if not image and not path:
        raise ValueError("You must either pass a PhotoImage object or a path object")
    if image and path:
        raise ValueError("You must pass either a PhotoImage or str path, not both")
    if image is not None and not isinstance(image, tk.PhotoImage) and \
            not isinstance(image, ImageTk.PhotoImage):
        raise ValueError("The image passed is not a PhotoImage object")
    if path is not None and not isinstance(path, str):
        raise ValueError("The image path passed is not of str type: {0}".format(path))
    if path and not os.path.exists(path):
        raise ValueError("The image path passed is not valid: {0}".format(path))
    if image is not None:
        self._image = image
    elif path is not None:
        img = Image.open(path)
        if resize:
            img = img.resize((self._canvaswidth, self._canvasheight), Image.ANTIALIAS)
        self._image = ImageTk.PhotoImage(img)
    self._background = self.canvas.create_image(0, 0, image=self._image,
                                                anchor=tk.NW, tag="background")
    self.canvas.tag_lower("background")
[ "def", "set_background", "(", "self", ",", "image", "=", "None", ",", "path", "=", "None", ",", "resize", "=", "True", ")", ":", "if", "not", "image", "and", "not", "path", ":", "raise", "ValueError", "(", "\"You must either pass a PhotoImage object or a path ...
avg_line_len: 50.233333, score: 22.033333
def init_heat_consumer(self, mq):
    """
    Init openstack heat mq

    1. Check if enable listening heat notification
    2. Create consumer

    :param mq: class ternya.mq.MQ
    """
    if not self.enable_component_notification(Openstack.Heat):
        log.debug("disable listening heat notification")
        return
    for i in range(self.config.heat_mq_consumer_count):
        mq.create_consumer(self.config.heat_mq_exchange,
                           self.config.heat_mq_queue,
                           ProcessFactory.process(Openstack.Heat))
    log.debug("enable listening openstack heat notification.")
[ "def", "init_heat_consumer", "(", "self", ",", "mq", ")", ":", "if", "not", "self", ".", "enable_component_notification", "(", "Openstack", ".", "Heat", ")", ":", "log", ".", "debug", "(", "\"disable listening heat notification\"", ")", "return", "for", "i", "...
avg_line_len: 34.631579, score: 20.421053
def scale_and_crop(im, crop_spec):
    """
    Scale and Crop.
    """
    im = im.crop((crop_spec.x, crop_spec.y, crop_spec.x2, crop_spec.y2))
    if crop_spec.width and crop_spec.height:
        im = im.resize((crop_spec.width, crop_spec.height),
                       resample=Image.ANTIALIAS)
    return im
[ "def", "scale_and_crop", "(", "im", ",", "crop_spec", ")", ":", "im", "=", "im", ".", "crop", "(", "(", "crop_spec", ".", "x", ",", "crop_spec", ".", "y", ",", "crop_spec", ".", "x2", ",", "crop_spec", ".", "y2", ")", ")", "if", "crop_spec", ".", ...
avg_line_len: 27.181818, score: 17.545455
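A minimal sketch of a crop_spec carrier that satisfies the attribute access above; the namedtuple is a hypothetical stand-in, and any object with these six fields works:

    from collections import namedtuple

    CropSpec = namedtuple('CropSpec', 'x y x2 y2 width height')
    thumb = scale_and_crop(im, CropSpec(x=10, y=10, x2=210, y2=110, width=100, height=50))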
def _create_from_owl(self):
    """ create a standard data object based on an OWL file """
    self.content['data'] = 'TODO - read OWL from ' + self.input_data
    lg.record_process('_create_from_owl',
                      'read ' + self._calc_size_stats() + ' from ' + self.input_data)
[ "def", "_create_from_owl", "(", "self", ")", ":", "self", ".", "content", "[", "'data'", "]", "=", "'TODO - read OWL from '", "+", "self", ".", "input_data", "lg", ".", "record_process", "(", "'_create_from_owl'", ",", "'read '", "+", "self", ".", "_calc_size_...
avg_line_len: 41.857143, score: 23
def should_cache(self, request, response):
    """ Given the request and response should it be cached """
    if not getattr(request, '_cache_update_cache', False):
        return False
    if response.status_code not in getattr(settings, 'BETTERCACHE_CACHEABLE_STATUS',
                                           CACHEABLE_STATUS):
        return False
    if getattr(settings, 'BETTERCACHE_ANONYMOUS_ONLY', False) and \
            self.session_accessed and request.user.is_authenticated:
        return False
    if self.has_uncacheable_headers(response):
        return False
    return True
[ "def", "should_cache", "(", "self", ",", "request", ",", "response", ")", ":", "if", "not", "getattr", "(", "request", ",", "'_cache_update_cache'", ",", "False", ")", ":", "return", "False", "if", "not", "response", ".", "status_code", "in", "getattr", "(...
avg_line_len: 47.333333, score: 26
def _walk(self):
    """Loop through all the instructions that are `_todo`."""
    while self._todo:
        args = self._todo.pop(0)
        self._step(*args)
[ "def", "_walk", "(", "self", ")", ":", "while", "self", ".", "_todo", ":", "args", "=", "self", ".", "_todo", ".", "pop", "(", "0", ")", "self", ".", "_step", "(", "*", "args", ")" ]
avg_line_len: 34.2, score: 10.8
def random_board(max_x, max_y, load_factor):
    """Return a random board with given max x and y coords."""
    return dict(((randint(0, max_x), randint(0, max_y)), 0)
                for _ in xrange(int(max_x * max_y / load_factor)))
[ "def", "random_board", "(", "max_x", ",", "max_y", ",", "load_factor", ")", ":", "return", "dict", "(", "(", "(", "randint", "(", "0", ",", "max_x", ")", ",", "randint", "(", "0", ",", "max_y", ")", ")", ",", "0", ")", "for", "_", "in", "xrange",...
avg_line_len: 57.75, score: 12.25
def _load_yaml_config(path=None):
    """Open and return the yaml contents."""
    furious_yaml_path = path or find_furious_yaml()
    if furious_yaml_path is None:
        logging.debug("furious.yaml not found.")
        return None

    with open(furious_yaml_path) as yaml_file:
        return yaml_file.read()
[ "def", "_load_yaml_config", "(", "path", "=", "None", ")", ":", "furious_yaml_path", "=", "path", "or", "find_furious_yaml", "(", ")", "if", "furious_yaml_path", "is", "None", ":", "logging", ".", "debug", "(", "\"furious.yaml not found.\"", ")", "return", "None...
avg_line_len: 33.888889, score: 12.111111
def modifier_list_id(self, modifier_list_id):
    """
    Sets the modifier_list_id of this CatalogItemModifierListInfo.

    The ID of the [CatalogModifierList](#type-catalogmodifierlist) controlled
    by this [CatalogModifierListInfo](#type-catalogmodifierlistinfo).

    :param modifier_list_id: The modifier_list_id of this CatalogItemModifierListInfo.
    :type: str
    """
    if modifier_list_id is None:
        raise ValueError("Invalid value for `modifier_list_id`, must not be `None`")
    if len(modifier_list_id) < 1:
        raise ValueError("Invalid value for `modifier_list_id`, "
                         "length must be greater than or equal to `1`")

    self._modifier_list_id = modifier_list_id
[ "def", "modifier_list_id", "(", "self", ",", "modifier_list_id", ")", ":", "if", "modifier_list_id", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `modifier_list_id`, must not be `None`\"", ")", "if", "len", "(", "modifier_list_id", ")", "<", "1"...
avg_line_len: 47.666667, score: 31.4
def create_variant(cls, variant, **kwargs):
    """Create Variant

    Create a new Variant
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_variant(variant, async=True)
    >>> result = thread.get()

    :param async bool
    :param Variant variant: Attributes of variant to create (required)
    :return: Variant
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_variant_with_http_info(variant, **kwargs)
    else:
        (data) = cls._create_variant_with_http_info(variant, **kwargs)
        return data
[ "def", "create_variant", "(", "cls", ",", "variant", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_create_variant_with_http_info",...
avg_line_len: 38.809524, score: 18.333333
def call(self, args=None, kwargs=None, node=None,
         send_timeout=1000, recv_timeout=5000, zmq_ctx=None):
    """
    Calls a service on a node with req as arguments.
    If node is None, a node is chosen by zmq.
    If zmq_ctx is passed, it will use the existing context.
    Uses a REQ socket. Ref : http://api.zeromq.org/2-1:zmq-socket
    :param node : the node name
    """
    context = zmq_ctx or zmq.Context()
    assert isinstance(context, zmq.Context)

    args = args or ()
    assert isinstance(args, tuple)
    kwargs = kwargs or {}
    assert isinstance(kwargs, dict)

    socket = context.socket(zmq.REQ)

    # connect to all addresses ( optionally matching node name )
    for n, a in [(n, a) for (n, a) in self.providers if (not node or n == node)]:
        socket.connect(a)

    # build message
    fullreq = ServiceRequest(service=self.name,
                             args=pickle.dumps(args),
                             kwargs=pickle.dumps(kwargs))

    poller = zmq.Poller()
    poller.register(socket)  # POLLIN for recv, POLLOUT for send

    evts = dict(poller.poll(send_timeout))
    if socket in evts and evts[socket] == zmq.POLLOUT:
        socket.send(fullreq.serialize())

        # TODO : find a way to get rid of these timeouts when debugging
        # TODO : when timeout, an Exception should occur ( not returning None )
        evts = dict(poller.poll(recv_timeout))  # blocking until answer
        if socket in evts and evts[socket] == zmq.POLLIN:
            resp = socket.recv()
            fullresp = ServiceResponse_dictparse(resp)

            if fullresp.has_field('response'):
                return pickle.loads(fullresp.response)
            elif fullresp.has_field('exception'):
                svcexc = fullresp.exception
                # This has already been parsed by ServiceResponse_dictparse
                tb = pickle.loads(svcexc.traceback)
                if Traceback and isinstance(tb, Traceback):
                    reraise(pickle.loads(svcexc.exc_type),
                            pickle.loads(svcexc.exc_value),
                            tb.as_traceback())
                else:  # traceback not usable
                    reraise(pickle.loads(svcexc.exc_type),
                            pickle.loads(svcexc.exc_value),
                            None)
            else:
                raise UnknownResponseTypeException(
                    "Unknown Response Type {0}".format(type(fullresp)))
        else:
            raise ServiceCallTimeout("Did not receive response through ZMQ socket.")
    else:
        raise ServiceCallTimeout("Can not send request through ZMQ socket.")
[ "def", "call", "(", "self", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "node", "=", "None", ",", "send_timeout", "=", "1000", ",", "recv_timeout", "=", "5000", ",", "zmq_ctx", "=", "None", ")", ":", "context", "=", "zmq_ctx", "or", ...
avg_line_len: 47.425926, score: 27.240741
def getpexts(lsp):
    '''
    Get information from pext planes. This might or might not work,
    use with caution!

    Parameters:
    -----------
    lsp : .lsp string

    Returns a list of dicts with information for all pext planes
    '''
    lines = lsp.split('\n')
    # unfortunately regex doesn't work here
    lns, planens = zip(*[
        (i, int(re.search('^ *extract *([0-9]+)', line).group(1)))
        for i, line in enumerate(lines)
        if re.search('^ *extract *[0-9]+', line)])
    if len(lns) == 0:
        return []
    end = lns[-1]
    for i, line in enumerate(lines[end + 1:]):
        if re.match(' *\[', line):
            break
        end += i
    lineranges = zip(lns, (lns + (end,))[1:])
    planes = dict()
    for (i, end), plane in zip(lineranges, planens):
        d = dict()
        labels = ['species', 'direction', 'position', ]
        datarx = ['^ *species *([0-9]+)',
                  '^ *direction *([xXyYzZ])',
                  '^ *at *(.*)', ]
        convs = [lambda s: int(s),
                 lambda i: i,
                 lambda s: np.array(map(float, s.split(' '))), ]
        for line in lines[i:end]:
            for label, rx, conv in zip(labels, datarx, convs):
                if re.match(rx, line):
                    d[label] = conv(re.match(rx, line).group(1))
        planes[plane] = d
    return planes
[ "def", "getpexts", "(", "lsp", ")", ":", "lines", "=", "lsp", ".", "split", "(", "'\\n'", ")", "#unfortunately regex doesn't work here", "lns", ",", "planens", "=", "zip", "(", "*", "[", "(", "i", ",", "int", "(", "re", ".", "search", "(", "'^ *extract...
avg_line_len: 28.06, score: 18.34
def truncate_html(html, *args, **kwargs):
    """Truncates HTML string.

    :param html: The HTML string or parsed element tree
                 (with :func:`html5lib.parse`).
    :param kwargs: Similar with :class:`.filters.TruncationFilter`.
    :return: The truncated HTML string.
    """
    if hasattr(html, 'getchildren'):
        etree = html
    else:
        etree = html5lib.parse(html)

    walker = html5lib.getTreeWalker('etree')
    stream = walker(etree)
    stream = TruncationFilter(stream, *args, **kwargs)

    serializer = html5lib.serializer.HTMLSerializer()
    serialized = serializer.serialize(stream)

    return u''.join(serialized).strip()
[ "def", "truncate_html", "(", "html", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "html", ",", "'getchildren'", ")", ":", "etree", "=", "html", "else", ":", "etree", "=", "html5lib", ".", "parse", "(", "html", ")", "wa...
avg_line_len: 28.130435, score: 17.434783
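A usage sketch; positional and keyword arguments are forwarded to TruncationFilter, so the available options are whatever that filter accepts:

    short = truncate_html(u'<p>a long paragraph of markup</p>')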
def update(self, webhook_url=values.unset, friendly_name=values.unset,
           reachability_webhooks_enabled=values.unset, acl_enabled=values.unset):
    """
    Update the ServiceInstance

    :param unicode webhook_url: A URL that will receive event updates when objects are manipulated.
    :param unicode friendly_name: Human-readable name for this service instance
    :param bool reachability_webhooks_enabled: True or false - controls whether this instance fires webhooks when client endpoints connect to Sync
    :param bool acl_enabled: true or false - determines whether token identities must be granted access to Sync objects via the Permissions API in this Service.

    :returns: Updated ServiceInstance
    :rtype: twilio.rest.sync.v1.service.ServiceInstance
    """
    return self._proxy.update(
        webhook_url=webhook_url,
        friendly_name=friendly_name,
        reachability_webhooks_enabled=reachability_webhooks_enabled,
        acl_enabled=acl_enabled,
    )
[ "def", "update", "(", "self", ",", "webhook_url", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "reachability_webhooks_enabled", "=", "values", ".", "unset", ",", "acl_enabled", "=", "values", ".", "unset", ")", ":", ...
avg_line_len: 52.6, score: 28.6
def _get_prereq_datasets(self, comp_id, prereq_nodes, keepables, skip=False):
    """Get a composite's prerequisites, generating them if needed.

    Args:
        comp_id (DatasetID): DatasetID for the composite whose prerequisites
                             are being collected.
        prereq_nodes (sequence of Nodes): Prerequisites to collect
        keepables (set): `set` to update if any prerequisites can't be
                         loaded at this time (see `_generate_composite`).
        skip (bool): If True, consider prerequisites as optional and only
                     log when they are missing. If False, prerequisites are
                     considered required and will raise an exception and log
                     a warning if they can't be collected. Defaults to False.

    Raises:
        KeyError: If required (skip=False) prerequisite can't be collected.

    """
    prereq_datasets = []
    delayed_gen = False
    for prereq_node in prereq_nodes:
        prereq_id = prereq_node.name
        if prereq_id not in self.datasets and prereq_id not in keepables \
                and not prereq_node.is_leaf:
            self._generate_composite(prereq_node, keepables)

        if prereq_id in self.datasets:
            prereq_datasets.append(self.datasets[prereq_id])
        elif not prereq_node.is_leaf and prereq_id in keepables:
            delayed_gen = True
            continue
        elif not skip:
            LOG.debug("Missing prerequisite for '{}': '{}'".format(comp_id, prereq_id))
            raise KeyError("Missing composite prerequisite")
        else:
            LOG.debug("Missing optional prerequisite for {}: {}".format(comp_id, prereq_id))

    if delayed_gen:
        keepables.add(comp_id)
        keepables.update([x.name for x in prereq_nodes])
        LOG.debug("Delaying generation of %s because of dependency's delayed generation: %s",
                  comp_id, prereq_id)
        if not skip:
            LOG.debug("Missing prerequisite for '{}': '{}'".format(comp_id, prereq_id))
            raise KeyError("Missing composite prerequisite")
        else:
            LOG.debug("Missing optional prerequisite for {}: {}".format(comp_id, prereq_id))

    return prereq_datasets
[ "def", "_get_prereq_datasets", "(", "self", ",", "comp_id", ",", "prereq_nodes", ",", "keepables", ",", "skip", "=", "False", ")", ":", "prereq_datasets", "=", "[", "]", "delayed_gen", "=", "False", "for", "prereq_node", "in", "prereq_nodes", ":", "prereq_id",...
avg_line_len: 47.9, score: 25.8
def _parse_plt_segment(self, fptr):
    """Parse the PLT segment.

    The packet headers are not parsed, i.e. they remain uninterpreted raw
    data buffers.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    PLTSegment
        The current PLT segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(3)
    length, zplt = struct.unpack('>HB', read_buffer)

    numbytes = length - 3
    read_buffer = fptr.read(numbytes)
    iplt = np.frombuffer(read_buffer, dtype=np.uint8)

    packet_len = []
    plen = 0
    for byte in iplt:
        plen |= (byte & 0x7f)
        if byte & 0x80:
            # Continue by or-ing in the next byte.
            plen <<= 7
        else:
            packet_len.append(plen)
            plen = 0

    iplt = packet_len
    return PLTsegment(zplt, iplt, length, offset)
[ "def", "_parse_plt_segment", "(", "self", ",", "fptr", ")", ":", "offset", "=", "fptr", ".", "tell", "(", ")", "-", "2", "read_buffer", "=", "fptr", ".", "read", "(", "3", ")", "length", ",", "zplt", "=", "struct", ".", "unpack", "(", "'>HB'", ",",...
avg_line_len: 24.512821, score: 19.589744
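The Iplt loop above decodes big-endian base-128 packet lengths (7 payload bits per byte, MSB set means "continue"): for example, the byte pair 0x87 0x04 decodes as (0x07 << 7) | 0x04 = 900.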
def tags(self):
    # type: () -> Set[str]
    """ Tags applied to operation. """
    tags = set()
    if self._tags:
        tags.update(self._tags)
    if self.binding:
        binding_tags = getattr(self.binding, 'tags', None)
        if binding_tags:
            tags.update(binding_tags)
    return tags
[ "def", "tags", "(", "self", ")", ":", "# type: () -> Set[str]", "tags", "=", "set", "(", ")", "if", "self", ".", "_tags", ":", "tags", ".", "update", "(", "self", ".", "_tags", ")", "if", "self", ".", "binding", ":", "binding_tags", "=", "getattr", "...
avg_line_len: 27.076923, score: 12
def iterpackages(self):
    """
    Return an iterator over all the packages in the PackageStore.
    """
    pkgdir = os.path.join(self._path, self.PKG_DIR)
    if not os.path.isdir(pkgdir):
        return
    for team in sub_dirs(pkgdir):
        for user in sub_dirs(self.team_path(team)):
            for pkg in sub_dirs(self.user_path(team, user)):
                pkgpath = self.package_path(team, user, pkg)
                for hsh in sub_files(os.path.join(pkgpath, PackageStore.CONTENTS_DIR)):
                    yield self.get_package(team, user, pkg, pkghash=hsh)
[ "def", "iterpackages", "(", "self", ")", ":", "pkgdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "self", ".", "PKG_DIR", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "pkgdir", ")", ":", "return", "for", "tea...
avg_line_len: 47, score: 18.384615
def values(self):
    """Gets the parameter values

    :returns: dict of inputs:

        | *'nfft'*: int -- length, in samples, of FFT chunks
        | *'window'*: str -- name of window to apply to FFT chunks
        | *'overlap'*: float -- percent overlap of windows
    """
    self.vals['nfft'] = self.ui.nfftSpnbx.value()
    self.vals['window'] = str(self.ui.windowCmbx.currentText()).lower()
    self.vals['overlap'] = self.ui.overlapSpnbx.value()
    return self.vals
[ "def", "values", "(", "self", ")", ":", "self", ".", "vals", "[", "'nfft'", "]", "=", "self", ".", "ui", ".", "nfftSpnbx", ".", "value", "(", ")", "self", ".", "vals", "[", "'window'", "]", "=", "str", "(", "self", ".", "ui", ".", "windowCmbx", ...
avg_line_len: 42.833333, score: 19.916667
def update(self, x_list=list(), y_list=list()):
    """
    update interpolation data
    :param list(float) x_list: x values
    :param list(float) y_list: y values
    """
    if not y_list:
        for x in x_list:
            if x in self.x_list:
                i = self.x_list.index(float(x))
                self.x_list.pop(i)
                self.y_list.pop(i)
    else:
        x_list = map(float, x_list)
        y_list = map(float, y_list)
        data = [(x, y) for x, y in zip(self.x_list, self.y_list)
                if x not in x_list]
        data.extend(zip(x_list, y_list))
        data = sorted(data)
        self.x_list = [float(x) for (x, y) in data]
        self.y_list = [float(y) for (x, y) in data]
[ "def", "update", "(", "self", ",", "x_list", "=", "list", "(", ")", ",", "y_list", "=", "list", "(", ")", ")", ":", "if", "not", "y_list", ":", "for", "x", "in", "x_list", ":", "if", "x", "in", "self", ".", "x_list", ":", "i", "=", "self", "....
avg_line_len: 38.25, score: 9.45
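A short sketch of the two call patterns the method supports; the object and values are illustrative:

    curve.update([1.0, 2.0], [10.0, 20.0])  # insert or overwrite the points x=1.0 and x=2.0
    curve.update([1.0])                     # y_list omitted: remove the point at x == 1.0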
def fpsInformation(self, args):
    '''fps command'''
    invalidStr = 'Invalid number of arguments. Usage horizon-fps set <fps> or horizon-fps get. Set fps to zero to get unrestricted framerate.'
    if len(args) > 0:
        if args[0] == "get":
            '''Get the current framerate.'''
            if (self.fps == 0.0):
                print('Horizon Framerate: Unrestricted')
            else:
                print("Horizon Framerate: " + str(self.fps))
        elif args[0] == "set":
            if len(args) == 2:
                self.fps = float(args[1])
                if (self.fps != 0):
                    self.sendDelay = 1.0 / self.fps
                else:
                    self.sendDelay = 0.0
                self.msgList.append(FPS(self.fps))
                if (self.fps == 0.0):
                    print('Horizon Framerate: Unrestricted')
                else:
                    print("Horizon Framerate: " + str(self.fps))
            else:
                print(invalidStr)
        else:
            print(invalidStr)
    else:
        print(invalidStr)
[ "def", "fpsInformation", "(", "self", ",", "args", ")", ":", "invalidStr", "=", "'Invalid number of arguments. Usage horizon-fps set <fps> or horizon-fps get. Set fps to zero to get unrestricted framerate.'", "if", "len", "(", "args", ")", ">", "0", ":", "if", "args", "[", ...
41.428571
16.214286
def __response(self, stanza):
        """Handle successful disco response.

        :Parameters:
            - `stanza`: the stanza received.
        :Types:
            - `stanza`: `pyxmpp.stanza.Stanza`"""
        try:
            d = self.disco_class(stanza.get_query())
            self.got_it(d)
        except ValueError as e:  # "except ValueError, e" is Python 2-only syntax
            self.error(e)
[ "def", "__response", "(", "self", ",", "stanza", ")", ":", "try", ":", "d", "=", "self", ".", "disco_class", "(", "stanza", ".", "get_query", "(", ")", ")", "self", ".", "got_it", "(", "d", ")", "except", "ValueError", ",", "e", ":", "self", ".", ...
28.416667
15
def _reap_payloads(self): """Clean up all finished payloads""" for thread in self._threads.copy(): # CapturingThread.join will throw if thread.join(timeout=0): self._threads.remove(thread) self._logger.debug('reaped thread %s', thread)
[ "def", "_reap_payloads", "(", "self", ")", ":", "for", "thread", "in", "self", ".", "_threads", ".", "copy", "(", ")", ":", "# CapturingThread.join will throw", "if", "thread", ".", "join", "(", "timeout", "=", "0", ")", ":", "self", ".", "_threads", "."...
43
7.285714
def _ingest_source(self, source, ps, force=None):
        """Ingest a single source"""
        from ambry.bundle.process import call_interval
        try:
            from ambry.orm.exc import NotFoundError
            if not source.is_partition and source.datafile.exists:
                if not source.datafile.is_finalized:
                    source.datafile.remove()
                elif force:
                    source.datafile.remove()
                else:
                    ps.update(
                        message='Source {} already ingested, skipping'.format(source.name),
                        state='skipped')
                    return True
            if source.is_partition:
                # Check if the partition exists
                try:
                    self.library.partition(source.ref)
                except NotFoundError:
                    # Maybe it is an internal reference, in which case we can just delay
                    # until the partition is built
                    ps.update(message="Not Ingesting {}: referenced partition '{}' does not exist"
                              .format(source.name, source.ref), state='skipped')
                    return True
            source.state = source.STATES.INGESTING
            iterable_source, source_pipe = self.source_pipe(source, ps)
            if not source.is_ingestible:
                ps.update(message='Not an ingestible source: {}'.format(source.name),
                          state='skipped', source=source)
                source.state = source.STATES.NOTINGESTABLE
                return True
            ps.update('Ingesting {} from {}'.format(source.spec.name, source.url or source.generator),
                      item_type='rows', item_count=0)

            @call_interval(5)
            def ingest_progress_f(i):
                (desc, n_records, total, rate) = source.datafile.report_progress()
                ps.update(
                    message='Ingesting {}: rate: {}'.format(source.spec.name, rate),
                    item_count=n_records)

            source.datafile.load_rows(iterable_source,
                                      callback=ingest_progress_f,
                                      limit=500 if self.limited_run else None,
                                      intuit_type=True, run_stats=False)
            if source.datafile.meta['warnings']:
                for w in source.datafile.meta['warnings']:
                    self.error("Ingestion error: {}".format(w))
            ps.update(message='Ingested to {}'.format(source.datafile.syspath))
            ps.update(message='Updating tables and specs for {}'.format(source.name))
            # source.update_table()  # Generate the source tables.
            source.update_spec()  # Update header_lines, start_line, etc.
            if self.limited_run:
                source.end_line = None  # Otherwise, it will be 500
            self.build_source_files.sources.objects_to_record()
            ps.update(message='Ingested {}'.format(source.datafile.path), state='done')
            source.state = source.STATES.INGESTED
            self.commit()
            return True
        except Exception as e:
            import traceback
            from ambry.util import qualified_class_name
            ps.update(
                message='Source {} failed with exception: {}'.format(source.name, e),
                exception_class=qualified_class_name(e),
                exception_trace=str(traceback.format_exc()),
                state='error'
            )
            source.state = source.STATES.INGESTING + '_error'
            self.commit()
            return False
[ "def", "_ingest_source", "(", "self", ",", "source", ",", "ps", ",", "force", "=", "None", ")", ":", "from", "ambry", ".", "bundle", ".", "process", "import", "call_interval", "try", ":", "from", "ambry", ".", "orm", ".", "exc", "import", "NotFoundError"...
38.569892
25.83871
def encode(self, x): """ Encode given input. """ if not self.encoding_network: self.encoding_network = NeuralNetwork(self.input_dim, self.input_tensor) self.encoding_network.input_variables = self.input_variables for layer in self.encoding_layes: self.encoding_network.stack_layer(layer, no_setup=True) return self.encoding_network.compute(*x)
[ "def", "encode", "(", "self", ",", "x", ")", ":", "if", "not", "self", ".", "encoding_network", ":", "self", ".", "encoding_network", "=", "NeuralNetwork", "(", "self", ".", "input_dim", ",", "self", ".", "input_tensor", ")", "self", ".", "encoding_network...
42.6
15.6
def checktype(self, elt, ps): '''See if the type of the "elt" element is what we're looking for. Return the element's type. Parameters: elt -- the DOM element being parsed ps -- the ParsedSoap object. ''' typeName = _find_type(elt) if typeName is None or typeName == "": return (None,None) # Parse the QNAME. prefix,typeName = SplitQName(typeName) uri = ps.GetElementNSdict(elt).get(prefix) if uri is None: raise EvaluateException('Malformed type attribute (bad NS)', ps.Backtrace(elt)) #typeName = list[1] parselist,errorlist = self.get_parse_and_errorlist() if not parselist or \ (uri,typeName) in parselist or \ (_is_xsd_or_soap_ns(uri) and (None,typeName) in parselist): return (uri,typeName) raise EvaluateException( 'Type mismatch (%s namespace) (got %s wanted %s)' % \ (uri, typeName, errorlist), ps.Backtrace(elt))
[ "def", "checktype", "(", "self", ",", "elt", ",", "ps", ")", ":", "typeName", "=", "_find_type", "(", "elt", ")", "if", "typeName", "is", "None", "or", "typeName", "==", "\"\"", ":", "return", "(", "None", ",", "None", ")", "# Parse the QNAME.", "prefi...
38.444444
15.851852
def account_lists(self, id): """ Get all of the logged-in users lists which the specified user is a member of. Returns a list of `list dicts`_. """ id = self.__unpack_id(id) params = self.__generate_params(locals(), ['id']) url = '/api/v1/accounts/{0}/lists'.format(str(id)) return self.__api_request('GET', url, params)
[ "def", "account_lists", "(", "self", ",", "id", ")", ":", "id", "=", "self", ".", "__unpack_id", "(", "id", ")", "params", "=", "self", ".", "__generate_params", "(", "locals", "(", ")", ",", "[", "'id'", "]", ")", "url", "=", "'/api/v1/accounts/{0}/li...
35.545455
13.727273
def iter_referents_tuples(self):
        """ Generates target sets (as tuples of indices) that are compatible
        with the current beliefstate."""
        tlow, thigh = self['targetset_arity'].get_tuple()
        clow, chigh = self['contrast_arity'].get_tuple()
        singletons = list([int(i) for i,_ in self.iter_singleton_referents()])
        t = len(singletons)
        low = int(max(1, tlow))
        high = int(min([t, thigh]))
        for elements in itertools.chain.from_iterable(itertools.combinations(singletons, r) \
                for r in reversed(xrange(low, high+1))):
            if clow <= t-len(elements) <= chigh:
                yield elements
[ "def", "iter_referents_tuples", "(", "self", ")", ":", "tlow", ",", "thigh", "=", "self", "[", "'targetset_arity'", "]", ".", "get_tuple", "(", ")", "clow", ",", "chigh", "=", "self", "[", "'contrast_arity'", "]", ".", "get_tuple", "(", ")", "singletons", ...
50.923077
14.692308
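The enumeration pattern above, condensed into a runnable sketch (the arity bounds are hypothetical; Python 3 range stands in for the record's xrange):

import itertools

singletons = [0, 1, 2, 3]
t, low, high = len(singletons), 1, 3   # target-set size bounds
clow, chigh = 1, 3                     # contrast-set size bounds
for elements in itertools.chain.from_iterable(
        itertools.combinations(singletons, r) for r in reversed(range(low, high + 1))):
    if clow <= t - len(elements) <= chigh:
        print(elements)  # largest target sets first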
def tfds_dir(): """Path to tensorflow_datasets directory.""" return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
[ "def", "tfds_dir", "(", ")", ":", "return", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ")" ]
43
17.666667
def make_purge(parser): """ Remove Ceph packages from remote hosts and purge all data. """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph from', ) parser.set_defaults( func=purge, )
[ "def", "make_purge", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'host'", ",", "metavar", "=", "'HOST'", ",", "nargs", "=", "'+'", ",", "help", "=", "'hosts to purge Ceph from'", ",", ")", "parser", ".", "set_defaults", "(", "func", "=", ...
21.538462
16.769231
def parse_cmdln_opts(parser, cmdln_args):
    """Rather than have this all clutter main(), let's split this out.

    Clean arch decision: rather than parsing sys.argv directly, pass
    sys.argv[1:] to this function (or any iterable for testing.)
    """
    parser.set_defaults(
        hosts=[],
        cert=None,
        log_level=logging.INFO,
        output_dir=None,
        output_file=None,
        formats=[],
        includes=[],
        excludes=[],
        nsscmd=None,
        tokenfile=None,
        noncefile=None,
        cachedir=None,
    )
    parser.add_option(
        "-H", "--host", dest="hosts", action="append",
        help="format[:format]:hostname[:port]")
    parser.add_option("-c", "--server-cert", dest="cert")
    parser.add_option("-t", "--token-file", dest="tokenfile",
                      help="file where token is stored")
    parser.add_option("-n", "--nonce-file", dest="noncefile",
                      help="file where nonce is stored")
    parser.add_option("-d", "--output-dir", dest="output_dir",
                      help="output directory; if not set then files are "
                      "replaced with signed copies")
    parser.add_option("-o", "--output-file", dest="output_file",
                      help="output file; if not set then files are replaced with signed "
                      "copies. This can only be used when signing a single file")
    parser.add_option("-f", "--formats", dest="formats", action="append",
                      help="signing formats (one or more of %s)" % ", ".join(ALLOWED_FORMATS))
    parser.add_option("-q", "--quiet", dest="log_level", action="store_const",
                      const=logging.WARN)
    parser.add_option(
        "-v", "--verbose", dest="log_level", action="store_const",
        const=logging.DEBUG)
    parser.add_option("-i", "--include", dest="includes", action="append",
                      help="add to include patterns")
    parser.add_option("-x", "--exclude", dest="excludes", action="append",
                      help="add to exclude patterns")
    parser.add_option("--nsscmd", dest="nsscmd",
                      help="command to re-sign nss libraries, if required")
    parser.add_option("--cachedir", dest="cachedir",
                      help="local cache directory")
    # TODO: Concurrency?
    # TODO: Different certs per server?

    options, args = parser.parse_args(cmdln_args)

    if not options.hosts:
        parser.error("at least one host is required")
    if not options.cert:
        parser.error("certificate is required")
    if not os.path.exists(options.cert):
        parser.error("certificate not found")
    if not options.tokenfile:
        parser.error("token file is required")
    if not options.noncefile:
        parser.error("nonce file is required")
    # Convert nsscmd to win32 path if required
    if sys.platform == 'win32' and options.nsscmd:
        nsscmd = options.nsscmd.strip()
        if nsscmd.startswith("/"):
            drive = nsscmd[1]
            options.nsscmd = "%s:%s" % (drive, nsscmd[2:])

    # Handle format
    formats = []
    for fmt in options.formats:
        if "," in fmt:
            for fmt in fmt.split(","):
                if fmt not in ALLOWED_FORMATS:
                    parser.error("invalid format: %s" % fmt)
                formats.append(fmt)
        elif fmt not in ALLOWED_FORMATS:
            parser.error("invalid format: %s" % fmt)
        else:
            formats.append(fmt)
    # bug 1382882, 1164456
    # Widevine and GPG signing must happen last because they will be invalid if
    # done prior to any format that modifies the file in-place.
    for fmt in ("widevine", "widevine_blessed", "gpg"):
        if fmt in formats:
            formats.remove(fmt)
            formats.append(fmt)

    if options.output_file and (len(args) > 1 or os.path.isdir(args[0])):
        parser.error(
            "-o / --output-file can only be used when signing a single file")
    if options.output_dir:
        if os.path.exists(options.output_dir):
            if not os.path.isdir(options.output_dir):
                parser.error(
                    "output_dir (%s) must be a directory", options.output_dir)
        else:
            os.makedirs(options.output_dir)

    if not options.includes:
        # Do everything!
        options.includes.append("*")

    if not formats:
        parser.error("no formats specified")
    options.formats = formats

    format_urls = defaultdict(list)
    for h in options.hosts:
        # The last two parts of a host is the actual hostname:port. Any parts
        # before that are formats - there could be 0..n formats so this is
        # tricky to split.
        parts = h.split(":")
        h = parts[-2:]
        fmts = parts[:-2]
        # If no formats are specified, the host is assumed to support all of them.
        if not fmts:
            fmts = formats
        for f in fmts:
            format_urls[f].append("https://%s" % ":".join(h))
    options.format_urls = format_urls

    missing_fmt_hosts = set(formats) - set(format_urls.keys())
    if missing_fmt_hosts:
        parser.error("no hosts capable of signing formats: %s" %
                     " ".join(missing_fmt_hosts))

    return options, args
[ "def", "parse_cmdln_opts", "(", "parser", ",", "cmdln_args", ")", ":", "parser", ".", "set_defaults", "(", "hosts", "=", "[", "]", ",", "cert", "=", "None", ",", "log_level", "=", "logging", ".", "INFO", ",", "output_dir", "=", "None", ",", "output_file"...
37.108696
20.666667
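A sketch of the format[:format]:hostname:port splitting that parse_cmdln_opts performs (the host strings and formats here are hypothetical):

from collections import defaultdict

formats = ["gpg", "jar"]
hosts = ["gpg:signer1.example.com:9100", "signer2.example.com:9100"]
format_urls = defaultdict(list)
for h in hosts:
    parts = h.split(":")
    hostport, fmts = parts[-2:], parts[:-2] or formats  # no prefix -> all formats
    for f in fmts:
        format_urls[f].append("https://%s" % ":".join(hostport))
print(dict(format_urls))
# {'gpg': ['https://signer1.example.com:9100', 'https://signer2.example.com:9100'],
#  'jar': ['https://signer2.example.com:9100']}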
def rpr(s): """Create a representation of a Unicode string that can be used in both Python 2 and Python 3k, allowing for use of the u() function""" if s is None: return 'None' seen_unicode = False results = [] for cc in s: ccn = ord(cc) if ccn >= 32 and ccn < 127: if cc == "'": # escape single quote results.append('\\') results.append(cc) elif cc == "\\": # escape backslash results.append('\\') results.append(cc) else: results.append(cc) else: seen_unicode = True if ccn <= 0xFFFF: results.append('\\u') results.append("%04x" % ccn) else: # pragma no cover results.append('\\U') results.append("%08x" % ccn) result = "'" + "".join(results) + "'" if seen_unicode: return "u(" + result + ")" else: return result
[ "def", "rpr", "(", "s", ")", ":", "if", "s", "is", "None", ":", "return", "'None'", "seen_unicode", "=", "False", "results", "=", "[", "]", "for", "cc", "in", "s", ":", "ccn", "=", "ord", "(", "cc", ")", "if", "ccn", ">=", "32", "and", "ccn", ...
31.806452
11.806452
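The per-character rule rpr applies, condensed into a standalone Python 3 sketch (escape_char is a name introduced here, not from the source):

def escape_char(cc):
    ccn = ord(cc)
    if 32 <= ccn < 127:                       # printable ASCII
        return "\\" + cc if cc in ("'", "\\") else cc
    return "\\u%04x" % ccn if ccn <= 0xFFFF else "\\U%08x" % ccn

print("".join(escape_char(c) for c in "it's \u00e9"))  # it\'s \u00e9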
def element_background_color_should_be(self, locator, expected): """Verifies the element identified by `locator` has the expected background color (it verifies the CSS attribute background-color). Color should be in RGBA format. Example of rgba format: rgba(RED, GREEN, BLUE, ALPHA) | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected color | rgba(0, 128, 0, 1) |""" self._info("Verifying element '%s' has background color '%s'" % (locator, expected)) self._check_element_css_value(locator, 'background-color', expected)
[ "def", "element_background_color_should_be", "(", "self", ",", "locator", ",", "expected", ")", ":", "self", ".", "_info", "(", "\"Verifying element '%s' has background color '%s'\"", "%", "(", "locator", ",", "expected", ")", ")", "self", ".", "_check_element_css_val...
45.692308
24.076923
def parse_division(l, c, line, root_node, last_section_node):
    """
    Extracts a division node from a line

    :param l: The line number (starting from 0)
    :param c: The column number
    :param line: The line string (without indentation)
    :param root_node: The document root node.
    :param last_section_node: The section node that was last parsed, if any.
    :return: tuple(last_div_node, last_section_node)
    """
    name = line
    name = name.replace(".", "")
    # trim whitespaces/tabs between XXX and DIVISION tokens
    tokens = [t for t in name.split(' ') if t]
    node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
    root_node.add_child(node)
    last_div_node = node
    # do not take previous sections into account
    if last_section_node:
        last_section_node.end_line = l
        last_section_node = None
    return last_div_node, last_section_node
[ "def", "parse_division", "(", "l", ",", "c", ",", "line", ",", "root_node", ",", "last_section_node", ")", ":", "name", "=", "line", "name", "=", "name", ".", "replace", "(", "\".\"", ",", "\"\"", ")", "# trim whitespaces/tabs between XXX and DIVISION", "token...
30.730769
16.192308
def setAll(self, pairs): """ Set multiple parameters, passed as a list of key-value pairs. :param pairs: list of key-value pairs to set """ for (k, v) in pairs: self.set(k, v) return self
[ "def", "setAll", "(", "self", ",", "pairs", ")", ":", "for", "(", "k", ",", "v", ")", "in", "pairs", ":", "self", ".", "set", "(", "k", ",", "v", ")", "return", "self" ]
26.666667
16
def set_payload(self, payload):
        """Set stanza payload to a single item.

        All current content of the stanza will be dropped.
        Marks the stanza dirty.

        :Parameters:
            - `payload`: XML element or stanza payload object to use
        :Types:
            - `payload`: :etree:`ElementTree.Element` or `StanzaPayload`
        """
        if isinstance(payload, ElementClass):
            self._payload = [ XMLPayload(payload) ]
        elif isinstance(payload, StanzaPayload):
            self._payload = [ payload ]
        else:
            raise TypeError("Bad payload type")
        self._dirty = True
[ "def", "set_payload", "(", "self", ",", "payload", ")", ":", "if", "isinstance", "(", "payload", ",", "ElementClass", ")", ":", "self", ".", "_payload", "=", "[", "XMLPayload", "(", "payload", ")", "]", "elif", "isinstance", "(", "payload", ",", "StanzaP...
34.333333
16.111111
def generate(self): ''' Generate noise samples. Returns: `np.ndarray` of samples. ''' sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim)) for batch in range(self.__batch_size): for i in range(len(self.__program_list)): program_key = self.__program_list[i] key = np.random.randint(low=0, high=len(self.__midi_df_list)) midi_df = self.__midi_df_list[key] midi_df = midi_df[midi_df.program == program_key] if midi_df.shape[0] < self.__seq_len: continue row = np.random.uniform( low=midi_df.start.min(), high=midi_df.end.max() - (self.__seq_len * self.__time_fraction) ) for seq in range(self.__seq_len): start = row + (seq * self.__time_fraction) end = row + ((seq+1) * self.__time_fraction) df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)] sampled_arr[batch, i, seq] = self.__convert_into_feature(df) return sampled_arr
[ "def", "generate", "(", "self", ")", ":", "sampled_arr", "=", "np", ".", "zeros", "(", "(", "self", ".", "__batch_size", ",", "self", ".", "__channel", ",", "self", ".", "__seq_len", ",", "self", ".", "__dim", ")", ")", "for", "batch", "in", "range",...
42.37931
23.413793
def output(output_id, name, value_class=NumberValue): """Add output to controller""" def _init(): return value_class( name, input_id=output_id, is_input=False, index=-1 ) def _decorator(cls): setattr(cls, output_id, _init()) return cls return _decorator
[ "def", "output", "(", "output_id", ",", "name", ",", "value_class", "=", "NumberValue", ")", ":", "def", "_init", "(", ")", ":", "return", "value_class", "(", "name", ",", "input_id", "=", "output_id", ",", "is_input", "=", "False", ",", "index", "=", ...
29.615385
12.846154
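A self-contained sketch of the same class-decorator pattern; FakeValue stands in for NumberValue, whose constructor signature is assumed from the call above:

class FakeValue:
    def __init__(self, name, input_id, is_input, index):
        self.name, self.input_id, self.is_input, self.index = name, input_id, is_input, index

def output(output_id, name, value_class=FakeValue):
    """Attach a value object to the decorated class under output_id"""
    def _decorator(cls):
        setattr(cls, output_id, value_class(name, input_id=output_id, is_input=False, index=-1))
        return cls
    return _decorator

@output("temp_out", "Temperature")
class Controller:
    pass

print(Controller.temp_out.name, Controller.temp_out.is_input)  # Temperature False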
def _add_hypotheses_assuming_new_stroke(self, new_stroke, stroke_nr, new_beam): """ Get new guesses by assuming new_stroke is a new symbol. Parameters ---------- new_stroke : list of dicts A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which represent a point. stroke_nr : int Number of the stroke for segmentation new_beam : beam object """ guesses = single_clf.predict({'data': [new_stroke], 'id': None})[:self.m] for hyp in self.hypotheses: new_geometry = deepcopy(hyp['geometry']) most_right = new_geometry if len(hyp['symbols']) == 0: while 'right' in most_right: most_right = most_right['right'] most_right['right'] = {'symbol_index': len(hyp['symbols']), 'right': None} else: most_right = {'symbol_index': len(hyp['symbols']), 'right': None} for guess in guesses: sym = {'symbol': guess['semantics'], 'probability': guess['probability']} new_seg = deepcopy(hyp['segmentation']) new_seg.append([stroke_nr]) new_sym = deepcopy(hyp['symbols']) new_sym.append(sym) b = {'segmentation': new_seg, 'symbols': new_sym, 'geometry': new_geometry, 'probability': None } # spacial_rels = [] # TODO # for s1_indices, s2_indices in zip(b['segmentation'], # b['segmentation'][1:]): # tmp = [new_beam.history['data'][el] for el in s1_indices] # s1 = HandwrittenData(json.dumps(tmp)) # tmp = [new_beam.history['data'][el] for el in s2_indices] # s2 = HandwrittenData(json.dumps(tmp)) # rel = spacial_relationship.estimate(s1, s2) # spacial_rels.append(rel) # b['geometry'] = spacial_rels new_beam.hypotheses.append(b)
[ "def", "_add_hypotheses_assuming_new_stroke", "(", "self", ",", "new_stroke", ",", "stroke_nr", ",", "new_beam", ")", ":", "guesses", "=", "single_clf", ".", "predict", "(", "{", "'data'", ":", "[", "new_stroke", "]", ",", "'id'", ":", "None", "}", ")", "[...
45
14.698113
def restart_with_reloader(): """Create a new process and a subprocess in it with the same arguments as this one. """ cwd = os.getcwd() args = _get_args_for_reloading() new_environ = os.environ.copy() new_environ["SANIC_SERVER_RUNNING"] = "true" cmd = " ".join(args) worker_process = Process( target=subprocess.call, args=(cmd,), kwargs={"cwd": cwd, "shell": True, "env": new_environ}, ) worker_process.start() return worker_process
[ "def", "restart_with_reloader", "(", ")", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "args", "=", "_get_args_for_reloading", "(", ")", "new_environ", "=", "os", ".", "environ", ".", "copy", "(", ")", "new_environ", "[", "\"SANIC_SERVER_RUNNING\"", "]", ...
30.5
13.625
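The same spawn pattern in miniature: a multiprocessing.Process whose target is subprocess.call, with a marker variable injected into the child environment (the command and variable name here are illustrative, not from the source):

import os
import subprocess
from multiprocessing import Process

env = os.environ.copy()
env["DEMO_SERVER_RUNNING"] = "true"
worker = Process(target=subprocess.call, args=("echo child started",),
                 kwargs={"shell": True, "env": env})

if __name__ == "__main__":  # guard required for spawn-based platforms
    worker.start()
    worker.join()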
def _printM(self, messages):
        """print a list of strings - for the moment used only by stats printout"""
        if len(messages) == 2:
            print(Style.BRIGHT + messages[0] + Style.RESET_ALL + Fore.BLUE + messages[1] + Style.RESET_ALL)
        else:
            print("Not implemented")
[ "def", "_printM", "(", "self", ",", "messages", ")", ":", "if", "len", "(", "messages", ")", "==", "2", ":", "print", "(", "Style", ".", "BRIGHT", "+", "messages", "[", "0", "]", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLUE", "+", "mes...
44.285714
13.857143
def getIndices(self):
        """Returns a generator function over all the existing indexes

        @returns A generator function over all the Index objects"""
        for indexName in self.neograph.nodes.indexes.keys():
            indexObject = self.neograph.nodes.indexes.get(indexName)
            yield Index(indexName, "vertex", "manual", indexObject)
        for indexName in self.neograph.relationships.indexes.keys():
            indexObject = self.neograph.relationships.indexes.get(indexName)
            yield Index(indexName, "edge", "manual", indexObject)
[ "def", "getIndices", "(", "self", ")", ":", "for", "indexName", "in", "self", ".", "neograph", ".", "nodes", ".", "indexes", ".", "keys", "(", ")", ":", "indexObject", "=", "self", ".", "neograph", ".", "nodes", ".", "indexes", ".", "get", "(", "inde...
56.1
22.3
def ensure_utf8(image_tag): """wrapper for ensuring image_tag returns utf8-encoded str on Python 2""" if py3compat.PY3: # nothing to do on Python 3 return image_tag def utf8_image_tag(*args, **kwargs): s = image_tag(*args, **kwargs) if isinstance(s, unicode): s = s.encode('utf8') return s return utf8_image_tag
[ "def", "ensure_utf8", "(", "image_tag", ")", ":", "if", "py3compat", ".", "PY3", ":", "# nothing to do on Python 3", "return", "image_tag", "def", "utf8_image_tag", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "s", "=", "image_tag", "(", "*", "args...
31.083333
12
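The same conditional-wrapper idea expressed in Python 3 terms, purely illustrative (ensure_bytes is a hypothetical name; here str results are encoded to bytes):

def ensure_bytes(image_tag):
    def utf8_image_tag(*args, **kwargs):
        s = image_tag(*args, **kwargs)
        return s.encode("utf8") if isinstance(s, str) else s
    return utf8_image_tag

@ensure_bytes
def tag():
    return "<img src='x.png'/>"

print(tag())  # b"<img src='x.png'/>"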
def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). """ if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) def decorator(callback): # TODO: Documentation and tests if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.routes.append(route) self.router.add(rule, verb, route, name=name) if DEBUG: route.prepare() return callback return decorator(callback) if callback else decorator
[ "def", "route", "(", "self", ",", "path", "=", "None", ",", "method", "=", "'GET'", ",", "callback", "=", "None", ",", "name", "=", "None", ",", "apply", "=", "None", ",", "skip", "=", "None", ",", "*", "*", "config", ")", ":", "if", "callable", ...
51.348837
21.744186
def submit_recording(raw_data_json): """Submit a recording to the database on write-math.com. Parameters ---------- raw_data_json : str Raw data in JSON format Raises ------ requests.exceptions.ConnectionError If the internet connection is lost. """ url = "http://www.martin-thoma.de/write-math/classify/index.php" headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': 'application/x-www-form-urlencoded'} payload = {'drawnJSON': raw_data_json} s = requests.Session() req = requests.Request('POST', url, headers=headers, data=payload) prepared = req.prepare() s.send(prepared)
[ "def", "submit_recording", "(", "raw_data_json", ")", ":", "url", "=", "\"http://www.martin-thoma.de/write-math/classify/index.php\"", "headers", "=", "{", "'User-Agent'", ":", "'Mozilla/5.0'", ",", "'Content-Type'", ":", "'application/x-www-form-urlencoded'", "}", "payload",...
29.590909
18.272727
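The prepare-then-send flow used above, pointed at a placeholder URL (endpoint and payload are hypothetical; the actual network call is left commented out):

import requests

url = "https://example.com/classify"
headers = {'User-Agent': 'Mozilla/5.0',
           'Content-Type': 'application/x-www-form-urlencoded'}
payload = {'drawnJSON': '{"strokes": []}'}
s = requests.Session()
req = requests.Request('POST', url, headers=headers, data=payload)
prepared = req.prepare()
print(prepared.method, prepared.url)  # POST https://example.com/classify
# s.send(prepared)  # actually submits the recording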
def format(self, record): ''' Format the log record to include exc_info if the handler is enabled for a specific log level ''' formatted_record = super(ExcInfoOnLogLevelFormatMixIn, self).format(record) exc_info_on_loglevel = getattr(record, 'exc_info_on_loglevel', None) exc_info_on_loglevel_formatted = getattr(record, 'exc_info_on_loglevel_formatted', None) if exc_info_on_loglevel is None and exc_info_on_loglevel_formatted is None: return formatted_record # If we reached this far it means the log record was created with exc_info_on_loglevel # If this specific handler is enabled for that record, then we should format it to # include the exc_info details if self.level > exc_info_on_loglevel: # This handler is not enabled for the desired exc_info_on_loglevel, don't include exc_info return formatted_record # If we reached this far it means we should include exc_info if not record.exc_info_on_loglevel_instance and not exc_info_on_loglevel_formatted: # This should actually never occur return formatted_record if record.exc_info_on_loglevel_formatted is None: # Let's cache the formatted exception to avoid recurring conversions and formatting calls if self.formatter is None: # pylint: disable=access-member-before-definition self.formatter = logging._defaultFormatter record.exc_info_on_loglevel_formatted = self.formatter.formatException( record.exc_info_on_loglevel_instance ) # Let's format the record to include exc_info just like python's logging formatted does if formatted_record[-1:] != '\n': formatted_record += '\n' try: formatted_record += record.exc_info_on_loglevel_formatted except UnicodeError: # According to the standard library logging formatter comments: # # Sometimes filenames have non-ASCII chars, which can lead # to errors when s is Unicode and record.exc_text is str # See issue 8924. # We also use replace for when there are multiple # encodings, e.g. UTF-8 for the filesystem and latin-1 # for a script. See issue 13232. formatted_record += record.exc_info_on_loglevel_formatted.decode(sys.getfilesystemencoding(), 'replace') # Reset the record.exc_info_on_loglevel_instance because it might need # to "travel" through a multiprocessing process and it might contain # data which is not pickle'able record.exc_info_on_loglevel_instance = None return formatted_record
[ "def", "format", "(", "self", ",", "record", ")", ":", "formatted_record", "=", "super", "(", "ExcInfoOnLogLevelFormatMixIn", ",", "self", ")", ".", "format", "(", "record", ")", "exc_info_on_loglevel", "=", "getattr", "(", "record", ",", "'exc_info_on_loglevel'...
54.134615
29.711538
def dumps(self, cnf, **kwargs): """ Dump config 'cnf' to a string. :param cnf: Configuration data to dump :param kwargs: optional keyword parameters to be sanitized :: dict :return: string represents the configuration """ kwargs = anyconfig.utils.filter_options(self._dump_opts, kwargs) return self.dump_to_string(cnf, **kwargs)
[ "def", "dumps", "(", "self", ",", "cnf", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "anyconfig", ".", "utils", ".", "filter_options", "(", "self", ".", "_dump_opts", ",", "kwargs", ")", "return", "self", ".", "dump_to_string", "(", "cnf", ",", ...
34.909091
16.727273
def _run_server(self):
        """
        Start the HTTP server.
        """
        try:
            if __conf__.DEBUG:
                self._webapp.listen(self._port)
            else:
                server = HTTPServer(self._webapp)
                server.bind(self._port)
                server.start(0)
            IOLoop.current().start()
        except KeyboardInterrupt:
            print ("exit ...")
[ "def", "_run_server", "(", "self", ")", ":", "try", ":", "if", "__conf__", ".", "DEBUG", ":", "self", ".", "_webapp", ".", "listen", "(", "self", ".", "_port", ")", "else", ":", "server", "=", "HTTPServer", "(", "self", ".", "_webapp", ")", "server",...
27.2
11.466667
def get_ddo(self, ont_id: str) -> dict:
        """
        This interface is used to get a DDO object in the form of a dict.

        :param ont_id: the unique ID for identity.
        :return: a description object of the ONT ID in the form of a dict.
        """
        args = dict(ontid=ont_id.encode('utf-8'))
        invoke_code = build_vm.build_native_invoke_code(self.__contract_address, self.__version, 'getDDO', args)
        unix_time_now = int(time())
        tx = Transaction(0, 0xd1, unix_time_now, 0, 0, None, invoke_code, bytearray(), [])
        response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
        ddo = OntId.parse_ddo(ont_id, response['Result'])
        return ddo
[ "def", "get_ddo", "(", "self", ",", "ont_id", ":", "str", ")", "->", "dict", ":", "args", "=", "dict", "(", "ontid", "=", "ont_id", ".", "encode", "(", "'utf-8'", ")", ")", "invoke_code", "=", "build_vm", ".", "build_native_invoke_code", "(", "self", "...
48.428571
22.285714
def xception_exit(inputs): """Xception exit flow.""" with tf.variable_scope("xception_exit"): x = inputs x_shape = x.get_shape().as_list() if x_shape[1] is None or x_shape[2] is None: length_float = tf.to_float(tf.shape(x)[1]) length_float *= tf.to_float(tf.shape(x)[2]) spatial_dim_float = tf.sqrt(length_float) spatial_dim = tf.to_int32(spatial_dim_float) x_depth = x_shape[3] x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) elif x_shape[1] != x_shape[2]: spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2]))) if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]: raise ValueError("Assumed inputs were square-able but they were " "not. Shape: %s" % x_shape) x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME") return tf.nn.relu(x)
[ "def", "xception_exit", "(", "inputs", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"xception_exit\"", ")", ":", "x", "=", "inputs", "x_shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "if", "x_shape", "[", "1", "]", "...
43.952381
15.904762
def parent(self, index): """ Reimplements the :meth:`QAbstractItemModel.parent` method. :param index: Index. :type index: QModelIndex :return: Parent. :rtype: QModelIndex """ if not index.isValid(): return QModelIndex() node = self.get_node(index) parent_node = node.parent if not parent_node: return QModelIndex() if parent_node == self.__root_node: return QModelIndex() row = parent_node.row() return self.createIndex(row, 0, parent_node) if row is not None else QModelIndex()
[ "def", "parent", "(", "self", ",", "index", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "QModelIndex", "(", ")", "node", "=", "self", ".", "get_node", "(", "index", ")", "parent_node", "=", "node", ".", "parent", "if", ...
26.478261
17.869565
def speech(self) -> str: """ Report summary designed to be read by a text-to-speech program """ if not self.data: self.update() return speech.metar(self.data, self.units)
[ "def", "speech", "(", "self", ")", "->", "str", ":", "if", "not", "self", ".", "data", ":", "self", ".", "update", "(", ")", "return", "speech", ".", "metar", "(", "self", ".", "data", ",", "self", ".", "units", ")" ]
30.857143
12.285714
def get_security_group_dict(): """Returns dictionary of named security groups {name: securitygroup}.""" client = get_ec2_client() response = client.describe_security_groups() assert is_good_response(response) result = OrderedDict() ec2 = get_ec2_resource() for security_group_response in response['SecurityGroups']: key = get_name(security_group_response.get('Tags', [])) if not key or key == EMPTY_NAME: continue # ignore unnamed security groups # key = security_group_response['GroupName'] if key in result: util.log(f"Warning: Duplicate security group {key}") if DUPLICATE_CHECKING: assert key not in result, ("Duplicate security group " + key) result[key] = ec2.SecurityGroup(security_group_response['GroupId']) return result
[ "def", "get_security_group_dict", "(", ")", ":", "client", "=", "get_ec2_client", "(", ")", "response", "=", "client", ".", "describe_security_groups", "(", ")", "assert", "is_good_response", "(", "response", ")", "result", "=", "OrderedDict", "(", ")", "ec2", ...
37.047619
18.095238
def description(self): """This read-only attribute is a sequence of 7-item sequences. Each of these sequences contains information describing one result column: - name - type_code - display_size (None in current implementation) - internal_size (None in current implementation) - precision (None in current implementation) - scale (None in current implementation) - null_ok (always True in current implementation) The ``type_code`` can be interpreted by comparing it to the Type Objects specified in the section below. """ # Sleep until we're done or we got the columns if self._columns is None: return [] return [ # name, type_code, display_size, internal_size, precision, scale, null_ok (col[0], col[1], None, None, None, None, True) for col in self._columns ]
[ "def", "description", "(", "self", ")", ":", "# Sleep until we're done or we got the columns", "if", "self", ".", "_columns", "is", "None", ":", "return", "[", "]", "return", "[", "# name, type_code, display_size, internal_size, precision, scale, null_ok", "(", "col", "["...
39.391304
24.043478
def dms_to_degrees(v): """Convert degree/minute/second to decimal degrees.""" d = float(v[0][0]) / float(v[0][1]) m = float(v[1][0]) / float(v[1][1]) s = float(v[2][0]) / float(v[2][1]) return d + (m / 60.0) + (s / 3600.0)
[ "def", "dms_to_degrees", "(", "v", ")", ":", "d", "=", "float", "(", "v", "[", "0", "]", "[", "0", "]", ")", "/", "float", "(", "v", "[", "0", "]", "[", "1", "]", ")", "m", "=", "float", "(", "v", "[", "1", "]", "[", "0", "]", ")", "/...
33.857143
8.714286
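A worked example with EXIF-style (numerator, denominator) pairs; the coordinate is made up: 48° 51' 29.6" becomes 48 + 51/60 + 29.6/3600.

v = [(48, 1), (51, 1), (296, 10)]     # 48 deg, 51 min, 29.6 sec as rational pairs
d = float(v[0][0]) / float(v[0][1])
m = float(v[1][0]) / float(v[1][1])
s = float(v[2][0]) / float(v[2][1])
print(d + (m / 60.0) + (s / 3600.0))  # 48.8582222...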
def tags(self): """ FEFF job parameters. Returns: Tags """ if "RECIPROCAL" in self.config_dict: if self.small_system: self.config_dict["CIF"] = "{}.cif".format( self.structure.formula.replace(" ", "")) self.config_dict["TARGET"] = self.atoms.center_index + 1 self.config_dict["COREHOLE"] = "RPA" logger.warning("Setting COREHOLE = RPA for K-space calculation") if not self.config_dict.get("KMESH", None): abc = self.structure.lattice.abc mult = (self.nkpts * abc[0] * abc[1] * abc[2]) ** (1 / 3) self.config_dict["KMESH"] = [int(round(mult / l)) for l in abc] else: logger.warning("Large system(>=14 atoms) or EXAFS calculation, \ removing K-space settings") del self.config_dict["RECIPROCAL"] self.config_dict.pop("CIF", None) self.config_dict.pop("TARGET", None) self.config_dict.pop("KMESH", None) self.config_dict.pop("STRFAC", None) return Tags(self.config_dict)
[ "def", "tags", "(", "self", ")", ":", "if", "\"RECIPROCAL\"", "in", "self", ".", "config_dict", ":", "if", "self", ".", "small_system", ":", "self", ".", "config_dict", "[", "\"CIF\"", "]", "=", "\"{}.cif\"", ".", "format", "(", "self", ".", "structure",...
43.357143
19.571429
def run_until(self, endtime, timeunit='minutes', save=True):
        """ Run a case until the specified endtime """
        integrator = self.case.solver.Integrator
        integrator.rununtil(endtime, timeunit)
        if save is True:
            self.case.save()
[ "def", "run_until", "(", "self", ",", "endtime", ",", "timeunit", "=", "'minutes'", ",", "save", "=", "True", ")", ":", "integrator", "=", "self", ".", "case", ".", "solver", ".", "Integrator", "integrator", ".", "rununtil", "(", "endtime", ",", "timeuni...
34.5
8.75
def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None
[ "def", "unit_get", "(", "attribute", ")", ":", "_args", "=", "[", "'unit-get'", ",", "'--format=json'", ",", "attribute", "]", "try", ":", "return", "json", ".", "loads", "(", "subprocess", ".", "check_output", "(", "_args", ")", ".", "decode", "(", "'UT...
34.714286
18.857143
def perplexity(self): """ Compute perplexity for each sample. """ return samplers_lda.perplexity_comp(self.docid, self.tokens, self.tt, self.dt, self.N, self.K, self.samples)
[ "def", "perplexity", "(", "self", ")", ":", "return", "samplers_lda", ".", "perplexity_comp", "(", "self", ".", "docid", ",", "self", ".", "tokens", ",", "self", ".", "tt", ",", "self", ".", "dt", ",", "self", ".", "N", ",", "self", ".", "K", ",", ...
32
20.444444
def _desc_has_data(desc): """Returns true if there is any data set for a particular PhoneNumberDesc.""" if desc is None: return False # Checking most properties since we don't know what's present, since a custom build may have # stripped just one of them (e.g. liteBuild strips exampleNumber). We don't bother checking the # possibleLengthsLocalOnly, since if this is the only thing that's present we don't really # support the type at all: no type-specific methods will work with only this data. return ((desc.example_number is not None) or _desc_has_possible_number_data(desc) or (desc.national_number_pattern is not None))
[ "def", "_desc_has_data", "(", "desc", ")", ":", "if", "desc", "is", "None", ":", "return", "False", "# Checking most properties since we don't know what's present, since a custom build may have", "# stripped just one of them (e.g. liteBuild strips exampleNumber). We don't bother checking...
61.363636
27.636364
def set_opcode(self, opcode): """Set the opcode. @param opcode: the opcode @type opcode: int """ self.flags &= 0x87FF self.flags |= dns.opcode.to_flags(opcode)
[ "def", "set_opcode", "(", "self", ",", "opcode", ")", ":", "self", ".", "flags", "&=", "0x87FF", "self", ".", "flags", "|=", "dns", ".", "opcode", ".", "to_flags", "(", "opcode", ")" ]
28.714286
7.714286
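What the masking does, in a standalone sketch: in the DNS flags word the opcode occupies bits 11-14, so ANDing with 0x87FF zeroes exactly that field before the new opcode (which dns.opcode.to_flags shifts into place) is OR-ed in. The starting flags value is made up.

flags = 0x2801          # existing flags with an old opcode in bits 11-14
flags &= 0x87FF         # 1000011111111111: clears only the opcode field
flags |= 4 << 11        # what to_flags produces for opcode 4 (NOTIFY)
print(hex(flags))       # 0x2001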
def mapfivo(ol,*args,**kwargs):
    '''
        #mapfivo         each element decided by the (f, i, v, o) 4-tuple       fivo-4-tuple-engine
        #map_func        diff_func(index,value,*diff_args)
    '''
    args = list(args)
    lngth = args.__len__()
    if(lngth==0):
        diff_funcs_arr = kwargs['map_funcs']
        diff_args_arr = kwargs['map_func_args_array']
    elif(lngth==1):
        if('map_func_args_array' in kwargs):
            diff_funcs_arr = args[0]
            diff_args_arr = kwargs['map_func_args_array']
        else:
            diff_funcs_arr = kwargs['map_funcs']
            diff_args_arr = args[0]
    else:
        diff_funcs_arr = args[0]
        diff_args_arr = args[1]
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        index = i
        value = ol[i]
        func = diff_funcs_arr[i]
        args = diff_args_arr[i]
        ele = func(index,value,*args)
        rslt.append(ele)
    return(rslt)
[ "def", "mapfivo", "(", "ol", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "list", "(", "args", ")", "lngth", "=", "args", ".", "__len__", "(", ")", "if", "(", "lngth", "==", "0", ")", ":", "diff_funcs_arr", "=", "kwargs", "...
30.3
16.5
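A usage sketch of the per-element dispatch mapfivo implements, assuming mapfivo as defined above is in scope (the functions and argument tuples are made up):

ol = [1, 2, 3]
map_funcs = [lambda i, v, k: v + k,    # add a constant
             lambda i, v, k: v * k,    # scale by a constant
             lambda i, v: v + i]       # mix in the index
map_func_args_array = [(10,), (10,), ()]
print(mapfivo(ol, map_funcs, map_func_args_array))  # [11, 20, 5]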
def plot_multi(data, cols=None, spacing=.06, color_map=None, plot_kw=None, **kwargs):
    """
    Plot data with multiple scales together

    Args:
        data: DataFrame of data
        cols: columns to be plotted
        spacing: spacing between legends
        color_map: customized colors in map
        plot_kw: kwargs for each plot
        **kwargs: kwargs for the first plot

    Returns:
        ax for plot

    Examples:
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> idx = range(5)
        >>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
        >>> # plot_multi(data=data, cols=['a', 'b'], plot_kw=[dict(style='.-'), dict()])
    """
    import matplotlib.pyplot as plt
    from pandas import plotting

    if cols is None: cols = data.columns
    if plot_kw is None: plot_kw = [{}] * len(cols)
    if len(cols) == 0: return
    num_colors = len(utils.flatten(cols))

    # Get default color style from pandas
    colors = getattr(getattr(plotting, '_style'), '_get_standard_colors')(num_colors=num_colors)
    if color_map is None: color_map = dict()

    fig = plt.figure()
    ax, lines, labels, c_idx = None, [], [], 0
    for n, col in enumerate(cols):
        if isinstance(col, (list, tuple)):
            ylabel = ' / '.join(cols[n])
            color = [
                color_map.get(cols[n][_ - c_idx], colors[_ % len(colors)])
                for _ in range(c_idx, c_idx + len(cols[n]))
            ]
            c_idx += len(col)
        else:
            ylabel = col
            color = color_map.get(col, colors[c_idx % len(colors)])
            c_idx += 1
        if 'color' in plot_kw[n]: color = plot_kw[n].pop('color')

        if ax is None:
            # First y-axes
            legend = plot_kw[0].pop('legend', kwargs.pop('legend', False))
            ax = data.loc[:, col].plot(
                label=col, color=color, legend=legend, zorder=n, **plot_kw[0], **kwargs
            )
            ax.set_ylabel(ylabel=ylabel)
            line, label = ax.get_legend_handles_labels()
            ax.spines['left'].set_edgecolor('#D5C4A1')
            ax.spines['left'].set_alpha(.5)
        else:
            # Multiple y-axes
            legend = plot_kw[n].pop('legend', False)
            ax_new = ax.twinx()
            ax_new.spines['right'].set_position(('axes', 1 + spacing * (n - 1)))
            data.loc[:, col].plot(
                ax=ax_new, label=col, color=color, legend=legend, zorder=n, **plot_kw[n]
            )
            ax_new.set_ylabel(ylabel=ylabel)
            line, label = ax_new.get_legend_handles_labels()
            ax_new.spines['right'].set_edgecolor('#D5C4A1')
            ax_new.spines['right'].set_alpha(.5)
            ax_new.grid(False)

        # Proper legend position
        lines += line
        labels += label

    fig.legend(lines, labels, loc=8, prop=dict(), ncol=num_colors).set_zorder(len(cols))
    ax.set_xlabel(' \n ')

    return ax
[ "def", "plot_multi", "(", "data", ",", "cols", "=", "None", ",", "spacing", "=", ".06", ",", "color_map", "=", "None", ",", "plot_kw", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "pandas...
34.464286
19.75
def none(n, coef): """ Build a matrix of zeros for features that should go unpenalized Parameters ---------- n : int number of splines coef : unused for compatibility with constraints Returns ------- penalty matrix : sparse csc matrix of shape (n,n) """ return sp.sparse.csc_matrix(np.zeros((n, n)))
[ "def", "none", "(", "n", ",", "coef", ")", ":", "return", "sp", ".", "sparse", ".", "csc_matrix", "(", "np", ".", "zeros", "(", "(", "n", ",", "n", ")", ")", ")" ]
21.625
20.625
def get_error(self, block = True, timeout = None): """ Gets the next error message. Each error message is a 2-tuple of (status, identifier).""" return self._error_queue.get(block = block, timeout = timeout)
[ "def", "get_error", "(", "self", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_error_queue", ".", "get", "(", "block", "=", "block", ",", "timeout", "=", "timeout", ")" ]
35.333333
13.666667
def dispatch_write(self, buf): """Augment the buffer with stuff to write when possible""" self.write_buffer += buf if len(self.write_buffer) > self.MAX_BUFFER_SIZE: console_output('Buffer too big ({:d}) for {}\n'.format( len(self.write_buffer), str(self)).encode()) raise asyncore.ExitNow(1) return True
[ "def", "dispatch_write", "(", "self", ",", "buf", ")", ":", "self", ".", "write_buffer", "+=", "buf", "if", "len", "(", "self", ".", "write_buffer", ")", ">", "self", ".", "MAX_BUFFER_SIZE", ":", "console_output", "(", "'Buffer too big ({:d}) for {}\\n'", ".",...
46
13.25
def read(cls, data): """Reads data from URL, Dataframe, JSON string, JSON file or OrderedDict. Args: data: can be a Pandas Dataframe, a JSON string, a JSON file, an OrderedDict or a URL pointing to a JSONstat file. Returns: An object of class Dimension populated with data. """ if isinstance(data, pd.DataFrame): output = OrderedDict({}) output['version'] = '2.0' output['class'] = 'dimension' [label] = [x for x in list(data.columns.values) if x not in ['id', 'index']] output['label'] = label output['category'] = OrderedDict({}) output['category']['index'] = data.id.tolist() output['category']['label'] = OrderedDict( zip(data.id.values, data[label].values)) return cls(output) elif isinstance(data, OrderedDict): return cls(data) elif isinstance(data, basestring) and data.startswith(("http://", "https://", "ftp://", "ftps://")): return cls(request(data)) elif isinstance(data,basestring): try: json_dict = json.loads(data, object_pairs_hook=OrderedDict) return cls(json_dict) except ValueError: raise else: try: json_dict = json.load(data, object_pairs_hook=OrderedDict) return cls(json_dict) except ValueError: raise
[ "def", "read", "(", "cls", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", ":", "output", "=", "OrderedDict", "(", "{", "}", ")", "output", "[", "'version'", "]", "=", "'2.0'", "output", "[", "'class'", "]"...
40.809524
17.142857
def url2path(url): """ If url identifies a file on the local host, return the path to the file otherwise raise ValueError. """ scheme, host, path, nul, nul, nul = urlparse(url) if scheme.lower() in ("", "file") and host.lower() in ("", "localhost"): return path raise ValueError(url)
[ "def", "url2path", "(", "url", ")", ":", "scheme", ",", "host", ",", "path", ",", "nul", ",", "nul", ",", "nul", "=", "urlparse", "(", "url", ")", "if", "scheme", ".", "lower", "(", ")", "in", "(", "\"\"", ",", "\"file\"", ")", "and", "host", "...
31.555556
16
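The six-way unpacking url2path relies on, shown with Python 3's urllib.parse (the example URL is made up):

from urllib.parse import urlparse  # Python 3 home of the urlparse used above

scheme, host, path, _, _, _ = urlparse("file://localhost/tmp/data.csv")
print(scheme, host, path)  # file localhost /tmp/data.csv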
def eval(self, exp): "main dispatch for expression evaluation" # todo: this needs an AST-assert that all BaseX descendants are being handled if isinstance(exp,sqparse2.BinX): return evalop(exp.op.op, *map(self.eval, (exp.left, exp.right))) elif isinstance(exp,sqparse2.UnX): return self.eval_unx(exp) elif isinstance(exp,sqparse2.NameX): return self.nix.rowget(self.tables,self.c_row,exp) elif isinstance(exp,sqparse2.AsterX): return sum(self.c_row,[]) # todo doc: how does this get disassembled by caller? elif isinstance(exp,sqparse2.ArrayLit): return map(self.eval,exp.vals) elif isinstance(exp,(sqparse2.Literal,sqparse2.ArrayLit)): return exp.toliteral() elif isinstance(exp,sqparse2.CommaX): # todo: think about getting rid of CommaX everywhere; it complicates syntax tree navigation. # a lot of things that are CommaX now should become weval.Row. ret = [] for child in exp.children: (ret.extend if starlike(child) else ret.append)(self.eval(child)) return ret elif isinstance(exp,sqparse2.CallX): return self.eval_callx(exp) elif isinstance(exp,sqparse2.SelectX): raise NotImplementedError('subqueries should have been evaluated earlier') # todo: specific error class elif isinstance(exp,sqparse2.AttrX):return self.nix.rowget(self.tables,self.c_row,exp) elif isinstance(exp,sqparse2.CaseX): for case in exp.cases: if self.eval(case.when): return self.eval(case.then) return self.eval(exp.elsex) elif isinstance(exp,sqparse2.CastX): if exp.to_type.type.lower() in ('text','varchar'): return unicode(self.eval(exp.expr)) else: raise NotImplementedError('unhandled_cast_type',exp.to_type) elif isinstance(exp,(int,basestring,float,type(None))): return exp # I think Table.insert is creating this in expand_row # todo: why tuple, list, dict below? throw some asserts in here and see where these are coming from. elif isinstance(exp,tuple): return tuple(map(self.eval, exp)) elif isinstance(exp,list): return map(self.eval, exp) elif isinstance(exp,dict): return exp elif isinstance(exp,sqparse2.NullX): return None elif isinstance(exp,sqparse2.ReturnX): # todo: I think ReturnX is *always* CommaX now; revisit this ret=self.eval(exp.expr) print "warning: not sure what I'm doing here with cardinality tweak on CommaX" return [ret] if isinstance(exp.expr,(sqparse2.CommaX,sqparse2.AsterX)) else [[ret]] # todo: update parser so this is always * or a commalist elif isinstance(exp,sqparse2.AliasX): return self.eval(exp.name) # todo: rename AliasX 'name' to 'expr' else: raise NotImplementedError(type(exp), exp)
[ "def", "eval", "(", "self", ",", "exp", ")", ":", "# todo: this needs an AST-assert that all BaseX descendants are being handled", "if", "isinstance", "(", "exp", ",", "sqparse2", ".", "BinX", ")", ":", "return", "evalop", "(", "exp", ".", "op", ".", "op", ",", ...
65.219512
30.487805
def receive_device_value(self, raw_value: int): """ Set a new value, called from within the joystick implementation class when parsing the event queue. :param raw_value: the raw value from the joystick hardware :internal: """ new_value = self._input_to_raw_value(raw_value) if self.button is not None: if new_value > (self.button_trigger_value + 0.05) > self.__value: self.buttons.button_pressed(self.button.key_code) elif new_value < (self.button_trigger_value - 0.05) < self.__value: self.buttons.button_released(self.button.key_code) self.__value = new_value if new_value > self.max: self.max = new_value elif new_value < self.min: self.min = new_value
[ "def", "receive_device_value", "(", "self", ",", "raw_value", ":", "int", ")", ":", "new_value", "=", "self", ".", "_input_to_raw_value", "(", "raw_value", ")", "if", "self", ".", "button", "is", "not", "None", ":", "if", "new_value", ">", "(", "self", "...
42.052632
20.368421
def summarize(group, fs=None, include_source=True): """ Tabulate and write the results of ComparisonBenchmarks to a file or standard out. :param str group: name of the comparison group. :param fs: file-like object (Optional) """ _line_break = '{0:-<120}\n'.format('') tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds')) log = StringIO.StringIO() log.write('Call statement:\n\n') log.write('\t' + tests[0].stmt) log.write('\n\n\n') fmt = "{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\n" log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number')) log.write(_line_break) log.write('\n') for i, t in enumerate(tests): func_name = "{}.{}".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__ if i == len(tests)-1: time_percent = 'Slowest' else: time_percent = "{:.1f}".format(t.time_average_seconds / tests[-1].time_average_seconds * 100) log.write(fmt.format(i+1, func_name, convert_time_units(t.time_average_seconds), time_percent, t.timeit_repeat, t.timeit_number)) log.write(_line_break) if include_source: log.write('\n\n\nSource Code:\n') log.write(_line_break) for test in tests: log.write(test.log.getvalue()) log.write(_line_break) if isinstance(fs, str): with open(fs, 'w') as f: f.write(log.getvalue()) elif fs is None: print(log.getvalue()) else: try: fs.write(log.getvalue()) except AttributeError as e: print(e)
[ "def", "summarize", "(", "group", ",", "fs", "=", "None", ",", "include_source", "=", "True", ")", ":", "_line_break", "=", "'{0:-<120}\\n'", ".", "format", "(", "''", ")", "tests", "=", "sorted", "(", "ComparisonBenchmark", ".", "groups", "[", "group", ...
40.469388
17.612245