text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def dot2svg(dot):
    # type: (str) -> str
    """Render a Graphviz source string and return the SVG markup.

    The XML declaration and doctype emitted by Graphviz are stripped so
    that the returned string starts directly at the ``<svg`` element.
    """
    rendered = graphviz.Source(dot).pipe(format='svg')
    markup = rendered.decode('utf8')  # type: str
    # Drop everything before the opening <svg> tag.
    return markup[markup.index('<svg'):]
[ "def", "dot2svg", "(", "dot", ")", ":", "# type: (str) -> str", "svg", "=", "graphviz", ".", "Source", "(", "dot", ")", ".", "pipe", "(", "format", "=", "'svg'", ")", ".", "decode", "(", "'utf8'", ")", "# type: str", "# strip doctype and xml declaration", "s...
34.714286
15.714286
def getAllCols(self, sddsfile=None):
    """Get all available column names from an SDDS file.

    :param sddsfile: SDDS file name; when omitted, fall back to the file
        that was given to ``__init__()``
    :return: all SDDS data column names
    :rtype: list

    :Example:

    >>> dh = DataExtracter('test.out')
    >>> print(dh.getAllCols())
    ['x', 'xp', 'y', 'yp', 't', 'p', 'particleID']
    """
    if SDDS_:
        # Prefer the python sdds bindings when they are available.
        if sddsfile is None:
            sddsobj = self.sddsobj
        else:
            sddsobj = sdds.SDDS(2)
            sddsobj.load(sddsfile)
        return sddsobj.columnName
    # Otherwise shell out to the sddsquery command line tool.
    target = self.sddsfile if sddsfile is None else sddsfile
    return subprocess.check_output(['sddsquery', '-col', target]).split()
[ "def", "getAllCols", "(", "self", ",", "sddsfile", "=", "None", ")", ":", "if", "SDDS_", ":", "if", "sddsfile", "is", "not", "None", ":", "sddsobj", "=", "sdds", ".", "SDDS", "(", "2", ")", "sddsobj", ".", "load", "(", "sddsfile", ")", "else", ":",...
38.62963
18.888889
def validate_params(request):
    """Validate the optional ``params`` member of a request mapping.

    :param request: request mapping, possibly containing a ``params`` key
    :raises AssertionError: if ``params`` is present but is neither a
        ``list`` nor a ``dict``
    """
    if 'params' in request:
        # Raise explicitly instead of using the ``assert`` statement so the
        # validation is not stripped when Python runs with optimizations (-O).
        if not isinstance(request['params'], (list, dict)):
            raise AssertionError('Incorrect parameter values')
[ "def", "validate_params", "(", "request", ")", ":", "if", "'params'", "in", "request", ":", "correct_params", "=", "isinstance", "(", "request", "[", "'params'", "]", ",", "(", "list", ",", "dict", ")", ")", "error", "=", "'Incorrect parameter values'", "ass...
34
14.285714
def _handle_api_result(result: Optional[Dict[str, Any]]) -> Any: """ Retrieve 'data' field from the API result object. :param result: API result that received from HTTP API :return: the 'data' field in result object :raise ActionFailed: the 'status' field is 'failed' """ if isinstance(result, dict): if result.get('status') == 'failed': raise ActionFailed(retcode=result.get('retcode')) return result.get('data')
[ "def", "_handle_api_result", "(", "result", ":", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ")", "->", "Any", ":", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "if", "result", ".", "get", "(", "'status'", ")", "==", "'f...
38.25
12.916667
def start(self):
    '''
    Turn on the master server components.

    Forks all master side processes (publishers, event publisher,
    maintenance, request server, etc.) under a single ProcessManager and
    then blocks in its run loop.
    '''
    self._pre_flight()
    log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())

    enable_sigusr1_handler()
    enable_sigusr2_handler()

    self.__set_max_open_files()

    # Reset signals to default ones before adding processes to the process
    # manager. We don't want the processes being started to inherit those
    # signal handlers
    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
        # Setup the secrets here because the PubServerChannel may need
        # them as well.
        SMaster.secrets['aes'] = {
            'secret': multiprocessing.Array(
                ctypes.c_char,
                salt.utils.stringutils.to_bytes(
                    salt.crypt.Crypticle.generate_key_string()
                )
            ),
            # Callable used to regenerate the key on rotation.
            'reload': salt.crypt.Crypticle.generate_key_string
        }
        log.info('Creating master process manager')
        # Since there are children having their own ProcessManager we should wait for kill more time.
        self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
        pub_channels = []
        log.info('Creating master publisher process')
        log_queue = salt.log.setup.get_multiprocessing_logging_queue()
        # One publisher channel per configured transport.
        for _, opts in iter_transport_opts(self.opts):
            chan = salt.transport.server.PubServerChannel.factory(opts)
            chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue})
            pub_channels.append(chan)

        log.info('Creating master event publisher process')
        self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))

        if self.opts.get('reactor'):
            # Ensure the reactor engine is configured; 'engines' may be a
            # list of single-key dicts or a plain dict.
            if isinstance(self.opts['engines'], list):
                rine = False
                for item in self.opts['engines']:
                    if 'reactor' in item:
                        rine = True
                        break
                if not rine:
                    self.opts['engines'].append({'reactor': {}})
            else:
                if 'reactor' not in self.opts['engines']:
                    log.info('Enabling the reactor engine')
                    self.opts['engines']['reactor'] = {}
        salt.engines.start_engines(self.opts, self.process_manager)

        # must be after channels
        log.info('Creating master maintenance process')
        self.process_manager.add_process(Maintenance, args=(self.opts,))

        if self.opts.get('event_return'):
            log.info('Creating master event return process')
            self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))

        ext_procs = self.opts.get('ext_processes', [])
        for proc in ext_procs:
            log.info('Creating ext_processes process: %s', proc)
            try:
                # Resolve "pkg.module.Class" dotted path to a class object.
                mod = '.'.join(proc.split('.')[:-1])
                cls = proc.split('.')[-1]
                # NOTE(review): level=-1 in __import__ is Python 2 only —
                # confirm this module is not expected to run on Python 3.
                _tmp = __import__(mod, globals(), locals(), [cls], -1)
                cls = _tmp.__getattribute__(cls)
                self.process_manager.add_process(cls, args=(self.opts,))
            except Exception:
                log.error('Error creating ext_processes process: %s', proc)

        if HAS_HALITE and 'halite' in self.opts:
            log.info('Creating master halite process')
            self.process_manager.add_process(Halite, args=(self.opts['halite'],))

        # TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
        if self.opts['con_cache']:
            log.info('Creating master concache process')
            self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
            # workaround for issue #16315, race condition
            log.debug('Sleeping for two seconds to let concache rest')
            time.sleep(2)

        log.info('Creating master request server process')
        kwargs = {}
        if salt.utils.platform.is_windows():
            # Windows has no fork(); the spawned process needs logging and
            # secrets passed explicitly.
            kwargs['log_queue'] = log_queue
            kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
            kwargs['secrets'] = SMaster.secrets

        self.process_manager.add_process(
            ReqServer,
            args=(self.opts, self.key, self.master_key),
            kwargs=kwargs,
            name='ReqServer')

        self.process_manager.add_process(
            FileserverUpdate,
            args=(self.opts,))

        # Fire up SSDP discovery publisher
        if self.opts['discovery']:
            if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
                self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
                    port=self.opts['discovery']['port'],
                    listen_ip=self.opts['interface'],
                    answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
            else:
                log.error('Unable to load SSDP: asynchronous IO is not available.')
                if sys.version_info.major == 2:
                    log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')

    # Install the SIGINT/SIGTERM handlers if not done so far
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, self._handle_signals)

    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, self._handle_signals)

    self.process_manager.run()
[ "def", "start", "(", "self", ")", ":", "self", ".", "_pre_flight", "(", ")", "log", ".", "info", "(", "'salt-master is starting as user \\'%s\\''", ",", "salt", ".", "utils", ".", "user", ".", "get_user", "(", ")", ")", "enable_sigusr1_handler", "(", ")", ...
46.372093
24.55814
def highlightBlock(self, text): """Apply syntax highlighting to the given block of text. """ # Do other syntax formatting for expression, nth, format in self.rules: index = expression.indexIn(text, 0) while index >= 0: # We actually want the index of the nth match index = expression.pos(nth) length = len(expression.cap(nth)) self.setFormat(index, length, format) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) # Do multi-line strings in_multiline = self.match_multiline(text, *self.tri_single) if not in_multiline: in_multiline = self.match_multiline(text, *self.tri_double)
[ "def", "highlightBlock", "(", "self", ",", "text", ")", ":", "# Do other syntax formatting\r", "for", "expression", ",", "nth", ",", "format", "in", "self", ".", "rules", ":", "index", "=", "expression", ".", "indexIn", "(", "text", ",", "0", ")", "while",...
39.5
15.65
def write_gdf(gdf, fname):
    """
    Fast line-by-line gdf-file write function

    Parameters
    ----------
    gdf : numpy.ndarray
        Column 0 is gids, columns 1: are values.
    fname : str
        Path to gdf-file.

    Returns
    -------
    None
    """
    # Use a context manager so the file handle is closed even when a write
    # fails (the original left the handle open).
    with open(fname, 'w') as gdf_file:
        for line in gdf:
            # Each value is followed by a tab, matching the original
            # on-disk format (including the trailing tab before '\n').
            for value in line:
                gdf_file.write(str(value) + '\t')
            gdf_file.write('\n')
    return None
[ "def", "write_gdf", "(", "gdf", ",", "fname", ")", ":", "gdf_file", "=", "open", "(", "fname", ",", "'w'", ")", "for", "line", "in", "gdf", ":", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "line", ")", ")", ":", "gdf_file", ".", "wr...
18.28
20.68
def onConnect(self, client, userdata, flags, rc):
    """! The callback for when the client receives a CONNACK response
    from the server.

    @param client
    @param userdata
    @param flags
    @param rc
    """
    # (Re)subscribe to every configured topic once the connection is up.
    for topic in self.subsciption:
        result, mid = self.client.subscribe(topic)
[ "def", "onConnect", "(", "self", ",", "client", ",", "userdata", ",", "flags", ",", "rc", ")", ":", "for", "sub", "in", "self", ".", "subsciption", ":", "(", "result", ",", "mid", ")", "=", "self", ".", "client", ".", "subscribe", "(", "sub", ")" ]
29.818182
17.363636
def __rubberband(y, sr, **kwargs):
    '''Execute rubberband

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or (n, c)]
        Audio time series, either single or multichannel

    sr : int > 0
        sampling rate of y

    **kwargs
        keyword arguments to rubberband

    Returns
    -------
    y_mod : np.ndarray [shape=(n,) or (n, c)]
        `y` after rubberband transformation
    '''
    assert sr > 0

    # Get the input and output tempfile; close the descriptors immediately —
    # only the paths are needed, sf.write/rubberband reopen them.
    fd, infile = tempfile.mkstemp(suffix='.wav')
    os.close(fd)
    fd, outfile = tempfile.mkstemp(suffix='.wav')
    os.close(fd)

    # dump the audio
    sf.write(infile, y, sr)

    try:
        # Execute rubberband: each kwarg becomes a "key value" argument pair.
        arguments = [__RUBBERBAND_UTIL, '-q']
        for key, value in six.iteritems(kwargs):
            arguments.append(str(key))
            arguments.append(str(value))
        arguments.extend([infile, outfile])

        subprocess.check_call(arguments, stdout=DEVNULL, stderr=DEVNULL)

        # Load the processed audio.
        y_out, _ = sf.read(outfile, always_2d=True)

        # make sure that output dimensions matches input
        if y.ndim == 1:
            y_out = np.squeeze(y_out)
    except OSError as exc:
        # OSError here indicates the rubberband binary could not be run.
        six.raise_from(RuntimeError('Failed to execute rubberband. '
                                    'Please verify that rubberband-cli '
                                    'is installed.'),
                       exc)
    finally:
        # Remove temp files
        os.unlink(infile)
        os.unlink(outfile)

    return y_out
[ "def", "__rubberband", "(", "y", ",", "sr", ",", "*", "*", "kwargs", ")", ":", "assert", "sr", ">", "0", "# Get the input and output tempfile", "fd", ",", "infile", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.wav'", ")", "os", ".", "close", ...
23.84127
22.190476
def uint8_3(self, val1, val2, val3):
    """Append a frame containing three uint8 values.

    Raises ValueError when any value does not fit in an unsigned byte.
    Returns ``self`` so calls can be chained.
    """
    try:
        frame = pack("BBB", val1, val2, val3)
    except struct.error:
        raise ValueError("Expected uint8")
    self.msg += [frame]
    return self
[ "def", "uint8_3", "(", "self", ",", "val1", ",", "val2", ",", "val3", ")", ":", "try", ":", "self", ".", "msg", "+=", "[", "pack", "(", "\"BBB\"", ",", "val1", ",", "val2", ",", "val3", ")", "]", "except", "struct", ".", "error", ":", "raise", ...
34.714286
12.285714
def add_suffix(filename, suffix):
    """
    ADD suffix TO THE filename (NOT INCLUDING THE FILE EXTENSION)
    """
    directories = filename.split("/")
    name_parts = directories[-1].split(".")
    # Attach the suffix to the stem (the part before the last dot), or to
    # the whole name when there is no extension at all.
    stem_index = max(len(name_parts) - 2, 0)
    name_parts[stem_index] += suffix
    directories[-1] = ".".join(name_parts)
    return "/".join(directories)
[ "def", "add_suffix", "(", "filename", ",", "suffix", ")", ":", "path", "=", "filename", ".", "split", "(", "\"/\"", ")", "parts", "=", "path", "[", "-", "1", "]", ".", "split", "(", "\".\"", ")", "i", "=", "max", "(", "len", "(", "parts", ")", ...
32.6
7.4
def SetFillStyle(self, style):
    """
    *style* may be any fill style understood by ROOT or matplotlib.

    For full documentation of accepted *style* arguments, see
    :class:`rootpy.plotting.style.FillStyle`.
    """
    fill = FillStyle(style)
    self._fillstyle = fill
    # Forward to ROOT only when this object actually mixes in TAttFill.
    if isinstance(self, ROOT.TAttFill):
        ROOT.TAttFill.SetFillStyle(self, fill('root'))
[ "def", "SetFillStyle", "(", "self", ",", "style", ")", ":", "self", ".", "_fillstyle", "=", "FillStyle", "(", "style", ")", "if", "isinstance", "(", "self", ",", "ROOT", ".", "TAttFill", ")", ":", "ROOT", ".", "TAttFill", ".", "SetFillStyle", "(", "sel...
39.1
14.9
def get_user_pubkeys(users):
    '''
    Retrieve a set of public keys from GitHub for the specified list of users.
    Expects input in list format. Optionally, a value in the list may be a
    dict whose value is a list of key IDs to be returned. If this is not
    done, then all keys will be returned.

    Some example data structures that could be passed in would look like:

    .. code-block:: yaml

        ['user1', 'user2', 'user3']

        [
            'user1': [
                '12345',
                '67890',
            ],
            'user2',
            'user3',
        ]
    '''
    if not isinstance(users, list):
        return {'Error': 'A list of users is expected'}

    ret = {}
    for user in users:
        key_ids = []
        if isinstance(user, dict):
            # Dict entry: the single key is the username, its value is the
            # list of key IDs to restrict the result to.
            tmp_user = next(six.iterkeys(user))
            key_ids = user[tmp_user]
            user = tmp_user

        url = 'https://api.github.com/users/{0}/keys'.format(user)
        result = salt.utils.http.query(
            url,
            'GET',
            decode=False,
            text=True,
        )

        keys = salt.utils.json.loads(result['text'])

        ret[user] = {}
        for key in keys:
            if key_ids:
                # Only return the keys whose IDs were explicitly requested.
                if six.text_type(key['id']) in key_ids:
                    ret[user][key['id']] = key['key']
            else:
                ret[user][key['id']] = key['key']

    return ret
[ "def", "get_user_pubkeys", "(", "users", ")", ":", "if", "not", "isinstance", "(", "users", ",", "list", ")", ":", "return", "{", "'Error'", ":", "'A list of users is expected'", "}", "ret", "=", "{", "}", "for", "user", "in", "users", ":", "key_ids", "=...
26.423077
23.038462
def update_product(product_id, **kwargs):
    """
    Update a Product with new information
    """
    # Forward to the raw API call; format the payload only on success
    # (falsy content yields None, as before).
    raw = update_product_raw(product_id, **kwargs)
    return utils.format_json(raw) if raw else None
[ "def", "update_product", "(", "product_id", ",", "*", "*", "kwargs", ")", ":", "content", "=", "update_product_raw", "(", "product_id", ",", "*", "*", "kwargs", ")", "if", "content", ":", "return", "utils", ".", "format_json", "(", "content", ")" ]
29.428571
6
def get_service_reference(self, clazz, ldap_filter=None):
    # type: (Optional[str], Optional[str]) -> Optional[ServiceReference]
    """
    Returns a ServiceReference object for a service that implements and
    was registered under the specified class

    :param clazz: The class name with which the service was registered.
    :param ldap_filter: A filter on service properties
    :return: A service reference, None if not found
    """
    refs = self.__framework.find_service_references(clazz, ldap_filter, True)
    # find_service_references returns a non-subscriptable value (None) when
    # nothing matches, so indexing raises TypeError.
    try:
        return refs[0]
    except TypeError:
        return None
[ "def", "get_service_reference", "(", "self", ",", "clazz", ",", "ldap_filter", "=", "None", ")", ":", "# type: (Optional[str], Optional[str]) -> Optional[ServiceReference]", "result", "=", "self", ".", "__framework", ".", "find_service_references", "(", "clazz", ",", "l...
38.647059
19.352941
def getdarkcurrent(self, extver):
    """
    Return the dark current for the ACS detector.  This value will be
    contained within an instrument specific keyword.  The value in the
    image header will be converted to units of electrons.

    Parameters
    ----------
    extver : int
        Extension version of the science extension to read.

    Returns
    -------
    darkcurrent : float
        Dark current value for the ACS detector in **units of electrons**.

    Raises
    ------
    ValueError
        If the 'MEANDARK' keyword is missing from the image header.
    """
    try:
        return self._image[self.scienceExt, extver].header['MEANDARK']
    except KeyError:
        # Catch only the missing-keyword case; the original bare ``except``
        # also swallowed unrelated errors (including KeyboardInterrupt).
        # Also avoid shadowing the ``str`` builtin while building the banner.
        msg = (
            "#############################################\n"
            "#                                           #\n"
            "# Error:                                    #\n"
            "#   Cannot find the value for 'MEANDARK'    #\n"
            "#   in the image header.  ACS input images  #\n"
            "#   are expected to have this header        #\n"
            "#   keyword.                                #\n"
            "#                                           #\n"
            "# Error occured in the ACSInputImage class  #\n"
            "#                                           #\n"
            "#############################################\n"
        )
        raise ValueError(msg)
[ "def", "getdarkcurrent", "(", "self", ",", "extver", ")", ":", "darkcurrent", "=", "0.", "try", ":", "darkcurrent", "=", "self", ".", "_image", "[", "self", ".", "scienceExt", ",", "extver", "]", ".", "header", "[", "'MEANDARK'", "]", "except", ":", "s...
43.16129
24.580645
def invoke(client, method, **kwargs):
    """Invoke a method on the underlying soap service.

    :param client: suds client wrapper exposing a ``service`` attribute
    :param method: name of the SOAP method to call
    :param kwargs: keyword arguments forwarded to the SOAP method
    :return: result of the SOAP call
    :raises VimFault: when the server reports a detailed SOAP fault
    """
    try:
        # Proxy the method to the suds service
        result = getattr(client.service, method)(**kwargs)
    except AttributeError:
        logger.critical("Unknown method: %s", method)
        raise
    except URLError as e:
        logger.debug(pprint(e))
        logger.debug("A URL related error occurred while invoking the '%s' "
                     "method on the VIM server, this can be caused by "
                     "name resolution or connection problems.", method)
        logger.debug("The underlying error is: %s", e.reason[1])
        raise
    except suds.client.TransportError as e:
        logger.debug(pprint(e))
        logger.debug("TransportError: %s", e)
        # BUGFIX: re-raise after logging.  The original fell through here
        # and then hit ``return result`` with ``result`` unbound, raising a
        # confusing UnboundLocalError instead of the transport error.
        raise
    except suds.WebFault as e:
        # Get the type of fault
        logger.critical("SUDS Fault: %s" % e.fault.faultstring)
        if len(e.fault.faultstring) > 0:
            raise

        # No fault string: extract the typed fault detail and wrap it.
        detail = e.document.childAtPath("/Envelope/Body/Fault/detail")
        fault_type = detail.getChildren()[0].name
        fault = create(fault_type)
        if isinstance(e.fault.detail[0], list):
            for attr in e.fault.detail[0]:
                setattr(fault, attr[0], attr[1])
        else:
            fault["text"] = e.fault.detail[0]
        raise VimFault(fault)

    return result
[ "def", "invoke", "(", "client", ",", "method", ",", "*", "*", "kwargs", ")", ":", "try", ":", "# Proxy the method to the suds service", "result", "=", "getattr", "(", "client", ".", "service", ",", "method", ")", "(", "*", "*", "kwargs", ")", "except", "...
36.916667
16.722222
def _pelita_member_filter(parent_name, item_names): """ Filter a list of autodoc items for which to generate documentation. Include only imports that come from the documented module or its submodules. """ filtered_names = [] if parent_name not in sys.modules: return item_names module = sys.modules[parent_name] for item_name in item_names: item = getattr(module, item_name, None) location = getattr(item, '__module__', None) if location is None or (location + ".").startswith(parent_name + "."): filtered_names.append(item_name) return filtered_names
[ "def", "_pelita_member_filter", "(", "parent_name", ",", "item_names", ")", ":", "filtered_names", "=", "[", "]", "if", "parent_name", "not", "in", "sys", ".", "modules", ":", "return", "item_names", "module", "=", "sys", ".", "modules", "[", "parent_name", ...
28.181818
20.727273
def slice_sequence(self, start, end, directionless=False):
    """Slice the mapping by the position in the sequence

    First coordinate is 0-indexed start
    Second coordinate is 1-indexed finish

    Returns a new Transcript covering [start, end) in sequence coordinates,
    or None when the slice is empty.
    """
    # Clamp the requested window to the sequence bounds.
    if end > self.length:
        end = self.length
    if start < 0:
        start = 0
    if not directionless and self.direction == '-':
        # Minus strand: mirror the window to the opposite end.
        newend = self.length - start
        newstart = self.length - end
        end = newend
        start = newstart
    # find the sequence length
    l = self.length
    indexstart = start
    indexend = end
    ns = []
    tot = 0
    for r in self._rngs:
        # tot tracks cumulative sequence length consumed so far.
        tot += r.length
        n = r.copy()
        if indexstart > r.length:
            # The slice starts beyond this range entirely; skip it.
            indexstart -= r.length
            continue
        # Trim the front of the first contributing range.
        n.start = n.start + indexstart
        if tot > end:
            # Trim the back of the last contributing range.
            diff = tot - end
            n.end -= diff
            tot = end
        indexstart = 0
        ns.append(n)
        if tot == end:
            break
    if len(ns) == 0:
        return None
    return Transcript(ns, self._options)
[ "def", "slice_sequence", "(", "self", ",", "start", ",", "end", ",", "directionless", "=", "False", ")", ":", "if", "end", ">", "self", ".", "length", ":", "end", "=", "self", ".", "length", "if", "start", "<", "0", ":", "start", "=", "0", "if", ...
27.194444
14.611111
def _collect_variable_renaming(
        cls, expression: Expression, position: List[int]=None, variables: Dict[str, str]=None
) -> Dict[str, str]:
    """Return renaming for the variables in the expression.

    The variable names are generated according to the position of the
    variable in the expression. The goal is to rename variables in
    structurally identical patterns so that the automaton contains less
    redundant states.

    :param expression: expression tree to walk.
    :param position: mutable path of sibling indices (internal recursion
        state; do not pass externally).
    :param variables: accumulating mapping of old name -> new name.
    """
    # Mutable defaults are correctly guarded with None sentinels.
    if position is None:
        position = [0]
    if variables is None:
        variables = {}
    if getattr(expression, 'variable_name', False):
        if expression.variable_name not in variables:
            variables[expression.variable_name] = cls._get_name_for_position(position, variables.values())
    # Advance the sibling counter at the current depth.
    position[-1] += 1
    if isinstance(expression, Operation):
        if isinstance(expression, CommutativeOperation):
            # Commutative operands each get their own depth level.
            for operand in op_iter(expression):
                position.append(0)
                cls._collect_variable_renaming(operand, position, variables)
                position.pop()
        else:
            for operand in op_iter(expression):
                cls._collect_variable_renaming(operand, position, variables)

    return variables
[ "def", "_collect_variable_renaming", "(", "cls", ",", "expression", ":", "Expression", ",", "position", ":", "List", "[", "int", "]", "=", "None", ",", "variables", ":", "Dict", "[", "str", ",", "str", "]", "=", "None", ")", "->", "Dict", "[", "str", ...
47.814815
24.185185
def clean_whitespace(self, tree):
    """
    Cleans up whitespace around block open and close tags if they are the
    only thing on the line

    :param tree: The AST - will be modified in place
    """
    pointer = 0
    end = len(tree)
    while pointer < end:
        piece = tree[pointer]
        if piece[0] == 'block':
            # piece[3] holds the block's child AST.
            child_tree = piece[3]

            # Look at open tag, if the only other thing on the line is whitespace
            # then delete it so we don't introduce extra newlines to the output
            open_pre_whitespace = False
            open_pre_content = True
            if pointer > 1 and tree[pointer - 1][0] == 'whitespace' and (tree[pointer - 2][0] == 'newline' or tree[pointer - 2] == 'template'):
                open_pre_whitespace = True
                open_pre_content = False
            elif pointer > 0 and (tree[pointer - 1][0] == 'newline' or tree[pointer - 1] == 'template'):
                open_pre_content = False

            open_post_whitespace = False
            open_post_content = True
            child_len = len(child_tree)
            if child_len > 2 and child_tree[1][0] == 'whitespace' and child_tree[2][0] == 'newline':
                open_post_whitespace = True
                open_post_content = False
            elif child_len > 1 and child_tree[1][0] == 'newline':
                open_post_content = False

            if not open_pre_content and not open_post_content:
                # Remove whitespace/newline surrounding the open tag.
                if open_pre_whitespace:
                    tree.pop(pointer - 1)
                    pointer -= 1
                    end -= 1
                if open_post_whitespace:
                    child_tree.pop(1)
                child_tree.pop(1)  # trailing newline

            # Do the same thing, but for the close tag
            close_pre_whitespace = False
            close_pre_content = True
            child_len = len(child_tree)
            if child_len > 2 and child_tree[child_len - 1][0] == 'whitespace' and child_tree[child_len - 2][0] == 'newline':
                close_pre_whitespace = True
                close_pre_content = False
            elif child_len > 1 and child_tree[child_len - 1][0] == 'newline':
                close_pre_content = False

            close_post_whitespace = False
            close_post_content = True
            tree_len = len(tree)
            if tree_len > pointer + 2 and tree[pointer + 1][0] == 'whitespace' and tree[pointer + 2][0] == 'newline':
                close_post_whitespace = True
                close_post_content = False
            elif tree_len == pointer + 2 and tree[pointer + 1][0] == 'whitespace':
                close_post_whitespace = True
                close_post_content = False
            elif tree_len > pointer + 1 and tree[pointer + 1][0] == 'newline':
                close_post_content = False
            elif tree_len == pointer + 1:
                close_post_content = False

            if not close_pre_content and not close_post_content:
                # Remove whitespace/newline surrounding the close tag.
                if close_pre_whitespace:
                    child_tree.pop()
                child_tree.pop()  # preceeding newline
                if close_post_whitespace:
                    tree.pop(pointer + 1)
                    end -= 1

            # Recurse into the block's children.
            self.clean_whitespace(child_tree)

        pointer += 1
[ "def", "clean_whitespace", "(", "self", ",", "tree", ")", ":", "pointer", "=", "0", "end", "=", "len", "(", "tree", ")", "while", "pointer", "<", "end", ":", "piece", "=", "tree", "[", "pointer", "]", "if", "piece", "[", "0", "]", "==", "'block'", ...
43.95
19.25
def show_nontab_menu(self, event):
    """Show the context menu assigned to nontabs section."""
    # Reuse the application-wide popup menu, opened at the click position
    # translated to global (screen) coordinates.
    context_menu = self.main.createPopupMenu()
    global_position = self.dock_tabbar.mapToGlobal(event.pos())
    context_menu.exec_(global_position)
[ "def", "show_nontab_menu", "(", "self", ",", "event", ")", ":", "menu", "=", "self", ".", "main", ".", "createPopupMenu", "(", ")", "menu", ".", "exec_", "(", "self", ".", "dock_tabbar", ".", "mapToGlobal", "(", "event", ".", "pos", "(", ")", ")", ")...
50.25
7.25
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
    """
    Plots cumulative returns highlighting top drawdown periods.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        Amount of top drawdowns periods to plot (default 10).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    # NOTE(review): y_axis_formatter is itself a FuncFormatter and is
    # wrapped in another FuncFormatter below — looks redundant; confirm.
    y_axis_formatter = FuncFormatter(utils.two_dec_places)
    ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))

    df_cum_rets = ep.cum_returns(returns, starting_value=1.0)
    df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)

    df_cum_rets.plot(ax=ax, **kwargs)

    # Shade each drawdown period from peak to recovery; an unrecovered
    # drawdown is shaded through the last available date.
    lim = ax.get_ylim()
    colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
    for i, (peak, recovery) in df_drawdowns[
            ['Peak date', 'Recovery date']].iterrows():
        if pd.isnull(recovery):
            recovery = returns.index[-1]
        ax.fill_between((peak, recovery),
                        lim[0],
                        lim[1],
                        alpha=.4,
                        color=colors[i])
    # Restore the y-limits captured before shading.
    ax.set_ylim(lim)
    ax.set_title('Top %i drawdown periods' % top)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left',
              frameon=True, framealpha=0.5)
    ax.set_xlabel('')
    return ax
[ "def", "plot_drawdown_periods", "(", "returns", ",", "top", "=", "10", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "y_axis_formatter", "=", "FuncFormatter", "(", ...
31.078431
17.431373
def invoked(self, ctx):
    """
    Guacamole method used by the command ingredient.

    :param ctx: guacamole context; ``ctx.args`` holds the parsed command
        line arguments (added by the argparse ingredient).
    :returns: None, which guacamole translates to a successful exit status
        (return code zero).
    """
    x, y = ctx.args.x, ctx.args.y
    print("{} + {} = {}".format(x, y, x + y))
[ "def", "invoked", "(", "self", ",", "ctx", ")", ":", "print", "(", "\"{} + {} = {}\"", ".", "format", "(", "ctx", ".", "args", ".", "x", ",", "ctx", ".", "args", ".", "y", ",", "ctx", ".", "args", ".", "x", "+", "ctx", ".", "args", ".", "y", ...
37.941176
18.647059
def getItems(self, sort=False, reverse=False, selector=None):
    """Return items from the container via the module-level ``_getItems``.

    :param sort: whether the items should be sorted
    :param reverse: whether the ordering should be reversed
    :param selector: predicate applied to each item; defaults to keeping
        only valid items (``fgi.isValid``)
    """
    if selector is None:
        selector = lambda fgi: fgi.isValid
    # _getItems expects a mapping plus the key of the entry to process.
    wrapped = {'_': self.container}
    return _getItems(wrapped, '_', sort, reverse, selector)
[ "def", "getItems", "(", "self", ",", "sort", "=", "False", ",", "reverse", "=", "False", ",", "selector", "=", "None", ")", ":", "selector", "=", "(", "lambda", "fgi", ":", "fgi", ".", "isValid", ")", "if", "selector", "is", "None", "else", "selector...
47.833333
14.666667
def predict(self, times):
    """
    Predict the {0} at certain point in time. Uses a linear interpolation if
    points in time are not in the index.

    Parameters
    ----------
    times: a scalar or an array of times to predict the value of {0} at.

    Returns
    -------
    predictions: a scalar if time is a scalar, a numpy array if time in an array.
    """
    method = self._estimation_method
    if callable(method):
        times_arr = _to_array(times)
        frame = pd.DataFrame(method(times_arr), index=times_arr)
        return frame.loc[times].squeeze()
    # non-linear interpolations can push the survival curves above 1 and below 0.
    return dataframe_interpolate_at_times(getattr(self, method), times)
[ "def", "predict", "(", "self", ",", "times", ")", ":", "if", "callable", "(", "self", ".", "_estimation_method", ")", ":", "return", "pd", ".", "DataFrame", "(", "self", ".", "_estimation_method", "(", "_to_array", "(", "times", ")", ")", ",", "index", ...
42.555556
26.777778
def extend_safe(target, source):
    """
    Extends source list to target list only if elements doesn't exists in
    target list.

    :param target:
    :type target: list
    :param source:
    :type source: list
    """
    # ``extend`` consumes the generator lazily, so each membership test
    # sees elements appended earlier in this same call — duplicates within
    # ``source`` are therefore added only once, as in the original loop.
    target.extend(elt for elt in source if elt not in target)
[ "def", "extend_safe", "(", "target", ",", "source", ")", ":", "for", "elt", "in", "source", ":", "if", "elt", "not", "in", "target", ":", "target", ".", "append", "(", "elt", ")" ]
26.636364
15.727273
def set_dhw_on(self, until=None):
    """Sets the DHW on until a given time, or permanently."""
    if until is None:
        mode, until_time = "PermanentOverride", None
    else:
        # The API expects an ISO-8601 UTC-style timestamp string.
        mode, until_time = "TemporaryOverride", until.strftime('%Y-%m-%dT%H:%M:%SZ')
    self._set_dhw({"Mode": mode, "State": "On", "UntilTime": until_time})
[ "def", "set_dhw_on", "(", "self", ",", "until", "=", "None", ")", ":", "if", "until", "is", "None", ":", "data", "=", "{", "\"Mode\"", ":", "\"PermanentOverride\"", ",", "\"State\"", ":", "\"On\"", ",", "\"UntilTime\"", ":", "None", "}", "else", ":", "...
36.25
13.5
def default_facets_factory(search, index):
    """Add a default facets to query.

    :param search: Basic search object.
    :param index: Index name.
    :returns: A tuple containing the new search object and a dictionary
        with all fields and values used.
    """
    urlkwargs = MultiDict()
    facets = current_app.config['RECORDS_REST_FACETS'].get(index)

    # No facet configuration for this index: return the search unchanged.
    if facets is None:
        return (search, urlkwargs)

    # Aggregations.
    search = _aggregations(search, facets.get("aggs", {}))
    # Query filter
    search, urlkwargs = _query_filter(search, urlkwargs, facets.get("filters", {}))
    # Post filter
    search, urlkwargs = _post_filter(search, urlkwargs, facets.get("post_filters", {}))

    return (search, urlkwargs)
[ "def", "default_facets_factory", "(", "search", ",", "index", ")", ":", "urlkwargs", "=", "MultiDict", "(", ")", "facets", "=", "current_app", ".", "config", "[", "'RECORDS_REST_FACETS'", "]", ".", "get", "(", "index", ")", "if", "facets", "is", "not", "No...
29.72
19
def contains_list(longer, shorter):
    """Check if longer list starts with shorter list"""
    # ``longer`` must be strictly longer; equal-length lists return False,
    # matching the original behavior.
    if len(longer) <= len(shorter):
        return False
    return all(a == b for a, b in zip(shorter, longer))
[ "def", "contains_list", "(", "longer", ",", "shorter", ")", ":", "if", "len", "(", "longer", ")", "<=", "len", "(", "shorter", ")", ":", "return", "False", "for", "a", ",", "b", "in", "zip", "(", "shorter", ",", "longer", ")", ":", "if", "a", "!=...
29.875
12
def add_labels_to_pr(repo: GithubRepository,
                     pull_id: int,
                     *labels: str,
                     override_token: str = None) -> None:
    """
    Add the given labels to a pull request / issue.

    References:
        https://developer.github.com/v3/issues/labels/#add-labels-to-an-issue

    Args:
        repo: the github repository (organization, name, access token).
        pull_id: the issue/PR number the labels are added to.
        *labels: label names to add.
        override_token: access token used instead of ``repo.access_token``
            when provided.

    Raises:
        RuntimeError: the POST request did not return HTTP 200.
    """
    # NOTE(review): passing the token as an ``access_token`` query parameter
    # is deprecated by GitHub and leaks the secret into URLs/logs — consider
    # sending it via the Authorization header instead.
    url = ("https://api.github.com/repos/{}/{}/issues/{}/labels"
           "?access_token={}".format(repo.organization,
                                     repo.name,
                                     pull_id,
                                     override_token or repo.access_token))
    response = requests.post(url, json=list(labels))

    if response.status_code != 200:
        raise RuntimeError(
            'Add labels failed. Code: {}. Content: {}.'.format(
                response.status_code, response.content))
[ "def", "add_labels_to_pr", "(", "repo", ":", "GithubRepository", ",", "pull_id", ":", "int", ",", "*", "labels", ":", "str", ",", "override_token", ":", "str", "=", "None", ")", "->", "None", ":", "url", "=", "(", "\"https://api.github.com/repos/{}/{}/issues/{...
41.736842
15.210526
async def sync_all_new_events(self, sync_all_new_events_request):
    """List all events occurring at or after a timestamp.

    :param sync_all_new_events_request: a ``SyncAllNewEventsRequest``
        protobuf message.
    :return: the ``SyncAllNewEventsResponse`` protobuf message —
        presumably populated in place by ``_pb_request``; confirm against
        that helper's contract.
    """
    response = hangouts_pb2.SyncAllNewEventsResponse()
    await self._pb_request('conversations/syncallnewevents',
                           sync_all_new_events_request, response)
    return response
[ "async", "def", "sync_all_new_events", "(", "self", ",", "sync_all_new_events_request", ")", ":", "response", "=", "hangouts_pb2", ".", "SyncAllNewEventsResponse", "(", ")", "await", "self", ".", "_pb_request", "(", "'conversations/syncallnewevents'", ",", "sync_all_new...
57.166667
18.833333
def load_yaml_config(self, conf): """Load a YAML configuration file and recursively update the overall configuration.""" with open(conf) as fd: self.config = recursive_dict_update(self.config, yaml.load(fd, Loader=UnsafeLoader))
[ "def", "load_yaml_config", "(", "self", ",", "conf", ")", ":", "with", "open", "(", "conf", ")", "as", "fd", ":", "self", ".", "config", "=", "recursive_dict_update", "(", "self", ".", "config", ",", "yaml", ".", "load", "(", "fd", ",", "Loader", "="...
63.25
18.25
def first_items(self, index): """Meant to reproduce the results of the following grouper = pandas.Grouper(...) first_items = pd.Series(np.arange(len(index)), index).groupby(grouper).first() with index being a CFTimeIndex instead of a DatetimeIndex. """ datetime_bins, labels = _get_time_bins(index, self.freq, self.closed, self.label, self.base) if self.loffset is not None: if isinstance(self.loffset, datetime.timedelta): labels = labels + self.loffset else: labels = labels + to_offset(self.loffset) # check binner fits data if index[0] < datetime_bins[0]: raise ValueError("Value falls before first bin") if index[-1] > datetime_bins[-1]: raise ValueError("Value falls after last bin") integer_bins = np.searchsorted( index, datetime_bins, side=self.closed)[:-1] first_items = pd.Series(integer_bins, labels) # Mask duplicate values with NaNs, preserving the last values non_duplicate = ~first_items.duplicated('last') return first_items.where(non_duplicate)
[ "def", "first_items", "(", "self", ",", "index", ")", ":", "datetime_bins", ",", "labels", "=", "_get_time_bins", "(", "index", ",", "self", ".", "freq", ",", "self", ".", "closed", ",", "self", ".", "label", ",", "self", ".", "base", ")", "if", "sel...
39.645161
18.774194
def get(self, *args, **kwargs): """Handle reading of the model :param args: :param kwargs: """ # Create the model and fetch its data self.model = self.get_model(kwargs.get('id')) result = yield self.model.fetch() # If model is not found, return 404 if not result: LOGGER.debug('Not found') self.not_found() return # Stub to check for read permissions if not self.has_read_permission(): LOGGER.debug('Permission denied') self.permission_denied() return # Add the headers and return the content as JSON self.add_headers() self.finish(self.model_json())
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Create the model and fetch its data", "self", ".", "model", "=", "self", ".", "get_model", "(", "kwargs", ".", "get", "(", "'id'", ")", ")", "result", "=", "yield", "sel...
27.461538
15.115385
def copy_and_disconnect_tree(root, machine): """Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting nodes which are not connected in the machine. Note that if a dead chip is part of the input RoutingTree, no corresponding node will be included in the copy. The assumption behind this is that the only reason a tree would visit a dead chip is because a route passed through the chip and wasn't actually destined to arrive at that chip. This situation is impossible to confirm since the input routing trees have not yet been populated with vertices. The caller is responsible for being sensible. Parameters ---------- root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree` The root of the RoutingTree that contains nothing but RoutingTrees (i.e. no children which are vertices or links). machine : :py:class:`~rig.place_and_route.Machine` The machine in which the routes exist. Returns ------- (root, lookup, broken_links) Where: * `root` is the new root of the tree :py:class:`~rig.place_and_route.routing_tree.RoutingTree` * `lookup` is a dict {(x, y): :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...} * `broken_links` is a set ([(parent, child), ...]) containing all disconnected parent and child (x, y) pairs due to broken links. """ new_root = None # Lookup for copied routing tree {(x, y): RoutingTree, ...} new_lookup = {} # List of missing connections in the copied routing tree [(new_parent, # new_child), ...] broken_links = set() # A queue [(new_parent, direction, old_node), ...] to_visit = deque([(None, None, root)]) while to_visit: new_parent, direction, old_node = to_visit.popleft() if old_node.chip in machine: # Create a copy of the node new_node = RoutingTree(old_node.chip) new_lookup[new_node.chip] = new_node else: # This chip is dead, move all its children into the parent node assert new_parent is not None, \ "Net cannot be sourced from a dead chip." 
new_node = new_parent if new_parent is None: # This is the root node new_root = new_node elif new_node is not new_parent: # If this node is not dead, check connectivity to parent node (no # reason to check connectivity between a dead node and its parent). if direction in links_between(new_parent.chip, new_node.chip, machine): # Is connected via working link new_parent.children.append((direction, new_node)) else: # Link to parent is dead (or original parent was dead and the # new parent is not adjacent) broken_links.add((new_parent.chip, new_node.chip)) # Copy children for child_direction, child in old_node.children: to_visit.append((new_node, child_direction, child)) return (new_root, new_lookup, broken_links)
[ "def", "copy_and_disconnect_tree", "(", "root", ",", "machine", ")", ":", "new_root", "=", "None", "# Lookup for copied routing tree {(x, y): RoutingTree, ...}", "new_lookup", "=", "{", "}", "# List of missing connections in the copied routing tree [(new_parent,", "# new_child), .....
41.578947
21.881579
def update_distribution( name, config, tags=None, region=None, key=None, keyid=None, profile=None, ): ''' Update the config (and optionally tags) for the CloudFront distribution with the given name. name Name of the CloudFront distribution config Configuration for the distribution tags Tags to associate with the distribution region Region to connect to key Secret key to use keyid Access key to use profile A dict with region, key, and keyid, or a pillar key (string) that contains such a dict. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \ config='{"Comment":"partial configuration","Enabled":true}' ''' ### FIXME - BUG. This function can NEVER work as written... ### Obviously it was never actually tested. distribution_ret = get_distribution( name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in distribution_ret: return distribution_ret dist_with_tags = distribution_ret['result'] current_distribution = dist_with_tags['distribution'] current_config = current_distribution['DistributionConfig'] current_tags = dist_with_tags['tags'] etag = dist_with_tags['etag'] config_diff = __utils__['dictdiffer.deep_diff'](current_config, config) if tags: tags_diff = __utils__['dictdiffer.deep_diff'](current_tags, tags) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: if 'old' in config_diff or 'new' in config_diff: conn.update_distribution( DistributionConfig=config, Id=current_distribution['Id'], IfMatch=etag, ) if tags: arn = current_distribution['ARN'] if 'new' in tags_diff: tags_to_add = { 'Items': [ {'Key': k, 'Value': v} for k, v in six.iteritems(tags_diff['new']) ], } conn.tag_resource( Resource=arn, Tags=tags_to_add, ) if 'old' in tags_diff: tags_to_remove = { 'Items': list(tags_diff['old'].keys()), } conn.untag_resource( Resource=arn, TagKeys=tags_to_remove, ) except botocore.exceptions.ClientError as err: return {'error': 
__utils__['boto3.get_error'](err)} finally: _cache_id( 'cloudfront', sub_resource=name, invalidate=True, region=region, key=key, keyid=keyid, profile=profile, ) return {'result': True}
[ "def", "update_distribution", "(", "name", ",", "config", ",", "tags", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", ")", ":", "### FIXME - BUG. This function can NEVER work as...
27.311321
21.613208
def _BuildFindSpecsFromFileSourcePath( self, source_path, path_separator, environment_variables, user_accounts): """Builds find specifications from a file source type. Args: source_path (str): file system path defined by the source. path_separator (str): file system path segment separator. environment_variables (list[str]): environment variable attributes used to dynamically populate environment variables in key. user_accounts (list[str]): identified user accounts stored in the knowledge base. Returns: list[dfvfs.FindSpec]: find specifications for the file source type. """ find_specs = [] for path_glob in path_helper.PathHelper.ExpandRecursiveGlobs( source_path, path_separator): logger.debug('building find spec from path glob: {0:s}'.format( path_glob)) for path in path_helper.PathHelper.ExpandUsersVariablePath( path_glob, path_separator, user_accounts): logger.debug('building find spec from path: {0:s}'.format(path)) if '%' in path: path = path_helper.PathHelper.ExpandWindowsPath( path, environment_variables) logger.debug('building find spec from expanded path: {0:s}'.format( path)) if not path.startswith(path_separator): logger.warning(( 'The path filter must be defined as an absolute path: ' '"{0:s}"').format(path)) continue # Convert the path filters into a list of path segments and # strip the root path segment. path_segments = path.split(path_separator) # Remove initial root entry path_segments.pop(0) if not path_segments[-1]: logger.warning( 'Empty last path segment in path filter: "{0:s}"'.format(path)) path_segments.pop(-1) try: find_spec = file_system_searcher.FindSpec( location_glob=path_segments, case_sensitive=False) except ValueError as exception: logger.error(( 'Unable to build find specification for path: "{0:s}" with ' 'error: {1!s}').format(path, exception)) continue find_specs.append(find_spec) return find_specs
[ "def", "_BuildFindSpecsFromFileSourcePath", "(", "self", ",", "source_path", ",", "path_separator", ",", "environment_variables", ",", "user_accounts", ")", ":", "find_specs", "=", "[", "]", "for", "path_glob", "in", "path_helper", ".", "PathHelper", ".", "ExpandRec...
36.639344
22.262295
def init(self, ctxt, step_addr): """ Initialize the item. This calls the class constructor with the appropriate arguments and returns the initialized object. :param ctxt: The context object. :param step_addr: The address of the step in the test configuration. """ return self.cls(ctxt, self.name, self.conf, step_addr)
[ "def", "init", "(", "self", ",", "ctxt", ",", "step_addr", ")", ":", "return", "self", ".", "cls", "(", "ctxt", ",", "self", ".", "name", ",", "self", ".", "conf", ",", "step_addr", ")" ]
35.727273
17
def first_order_markov_process(t, variance, time_scale, rseed=None): """ Generates a correlated noise vector using a multivariate normal random number generator with zero mean and covariance Sigma_ij = s^2 exp(-|t_i - t_j|/l), where s is the variance and l is the time scale. The Power spectral density associated to this covariance is S(f) = 2*l*s^2/(4*pi^2*f^2*l^2 +1), red noise spectrum is defined as proportional to 1/f^2. This covariance is the one expected from a first order markov process (Reference?) Parameters --------- t: ndarray A time vector for which the red noise vector will be sampled variance: positive float variance of the resulting red noise vector time_scale: positive float Parameter of the covariance matrix Returns ------- red_noise: ndarray Vector containing the red noise realizations See also -------- power_law_noise """ if variance < 0.0: raise ValueError("Variance must be positive") if time_scale < 0.0: raise ValueError("Time scale must be positive") np.random.seed(rseed) N = len(t) mu = np.zeros(shape=(N,)) if variance == 0.0: return mu dt = np.repeat(np.reshape(t, (1, -1)), N, axis=0) dt = np.absolute(dt - dt.T) # This is NxN S = variance*np.exp(-np.absolute(dt)/time_scale) red_noise = np.random.multivariate_normal(mu, S) return red_noise
[ "def", "first_order_markov_process", "(", "t", ",", "variance", ",", "time_scale", ",", "rseed", "=", "None", ")", ":", "if", "variance", "<", "0.0", ":", "raise", "ValueError", "(", "\"Variance must be positive\"", ")", "if", "time_scale", "<", "0.0", ":", ...
30.428571
20.183673
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result): """ Construct a representation of the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str : ) coefs_array : numpy.ndarray problem : TwoPointBVPLike result : OptimizeResult Returns ------- solution : SolutionLike """ soln_coefs = self._array_to_list(coefs_array, problem.number_odes) soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs) soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs) soln_residual_func = self._interior_residuals_factory(soln_derivs, soln_funcs, problem) solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem, soln_residual_func, result) return solution
[ "def", "_solution_factory", "(", "self", ",", "basis_kwargs", ",", "coefs_array", ",", "nodes", ",", "problem", ",", "result", ")", ":", "soln_coefs", "=", "self", ".", "_array_to_list", "(", "coefs_array", ",", "problem", ".", "number_odes", ")", "soln_derivs...
40.88
24.48
def perform_command(self): """ Perform command and return the appropriate exit code. :rtype: int """ if len(self.actual_arguments) < 1: return self.print_help() audio_file_path = self.actual_arguments[0] if not self.check_input_file(audio_file_path): return self.ERROR_EXIT_CODE try: prober = FFPROBEWrapper(rconf=self.rconf, logger=self.logger) dictionary = prober.read_properties(audio_file_path) for key in sorted(dictionary.keys()): self.print_generic(u"%s %s" % (key, dictionary[key])) return self.NO_ERROR_EXIT_CODE except FFPROBEPathError: self.print_error(u"Unable to call the ffprobe executable '%s'" % (self.rconf[RuntimeConfiguration.FFPROBE_PATH])) self.print_error(u"Make sure the path to ffprobe is correct") except (FFPROBEUnsupportedFormatError, FFPROBEParsingError): self.print_error(u"Cannot read properties of file '%s'" % (audio_file_path)) self.print_error(u"Make sure the input file has a format supported by ffprobe") return self.ERROR_EXIT_CODE
[ "def", "perform_command", "(", "self", ")", ":", "if", "len", "(", "self", ".", "actual_arguments", ")", "<", "1", ":", "return", "self", ".", "print_help", "(", ")", "audio_file_path", "=", "self", ".", "actual_arguments", "[", "0", "]", "if", "not", ...
43.333333
23.333333
def get_log(self): """Gets the ``Log`` at this node. return: (osid.logging.Log) - the log represented by this node *compliance: mandatory -- This method must be implemented.* """ if self._lookup_session is None: mgr = get_provider_manager('LOGGING', runtime=self._runtime, proxy=self._proxy) self._lookup_session = mgr.get_log_lookup_session(proxy=getattr(self, "_proxy", None)) return self._lookup_session.get_log(Id(self._my_map['id']))
[ "def", "get_log", "(", "self", ")", ":", "if", "self", ".", "_lookup_session", "is", "None", ":", "mgr", "=", "get_provider_manager", "(", "'LOGGING'", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "self", ...
45.636364
26.727273
def find(self, pkg, flag): """Start to find packages and print """ print("\nPackages with name matching [ {0}{1}{2} ]\n".format( self.cyan, ", ".join(pkg), self.endc)) Msg().template(78) print("| {0} {1}{2}{3}".format("Repository", "Package", " " * 54, "Size")) Msg().template(78) for repo in _meta_.repositories: PACKAGES_TXT = PackageManager(pkg).list_lib(repo) packages, sizes = PackageManager(pkg).list_greps(repo, PACKAGES_TXT) for find, size in zip(packages, sizes): for p in pkg: if "--case-ins" in flag: self.p_cache = p.lower() self.find_cache = find.lower() else: self.p_cache = p self.find_cache = find if self.p_cache in self.find_cache: if self.cache != repo: self.count_repo += 1 self.cache = repo self.count_pkg += 1 ver = self.sbo_version(repo, find) print(" {0}{1}{2}{3}{4} {5}{6:>11}".format( self.cyan, repo, self.endc, " " * (12 - len(repo)), find + ver, " " * (53 - len(find + ver)), size)) print("\nFound summary") print("=" * 79) print("{0}Total found {1} packages in {2} repositories." "{3}\n".format(self.grey, self.count_pkg, self.count_repo, self.endc))
[ "def", "find", "(", "self", ",", "pkg", ",", "flag", ")", ":", "print", "(", "\"\\nPackages with name matching [ {0}{1}{2} ]\\n\"", ".", "format", "(", "self", ".", "cyan", ",", "\", \"", ".", "join", "(", "pkg", ")", ",", "self", ".", "endc", ")", ")", ...
46.944444
13.166667
def validate(self, tracking_number): "Return True if this is a valid USPS tracking number." tracking_num = tracking_number[:-1].replace(' ', '') odd_total = 0 even_total = 0 for ii, digit in enumerate(tracking_num): if ii % 2: odd_total += int(digit) else: even_total += int(digit) total = odd_total + even_total * 3 check = ((total - (total % 10) + 10) - total) % 10 return (check == int(tracking_number[-1:]))
[ "def", "validate", "(", "self", ",", "tracking_number", ")", ":", "tracking_num", "=", "tracking_number", "[", ":", "-", "1", "]", ".", "replace", "(", "' '", ",", "''", ")", "odd_total", "=", "0", "even_total", "=", "0", "for", "ii", ",", "digit", "...
39.923077
12.692308
def standard_reader_routine(reader, filename, attrs=None): """Use a given reader from the ``READERS`` mapping in the common VTK reading pipeline routine. Parameters ---------- reader : vtkReader Any instantiated VTK reader class filename : str The string filename to the data file to read. attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value. """ if attrs is None: attrs = {} if not isinstance(attrs, dict): raise TypeError('Attributes must be a dictionary of name and arguments.') reader.SetFileName(filename) # Apply any attributes listed for name, args in attrs.items(): attr = getattr(reader, name) if args is not None: if not isinstance(args, (list, tuple)): args = [args] attr(*args) else: attr() # Perform the read reader.Update() return vtki.wrap(reader.GetOutputDataObject(0))
[ "def", "standard_reader_routine", "(", "reader", ",", "filename", ",", "attrs", "=", "None", ")", ":", "if", "attrs", "is", "None", ":", "attrs", "=", "{", "}", "if", "not", "isinstance", "(", "attrs", ",", "dict", ")", ":", "raise", "TypeError", "(", ...
32.885714
19.428571
def _inherit_data(self): """ Inherits the data from the parent. """ LOG.debug("'%s' inheriting data from '%s'" % (self.get_name(), self.parent.get_name()), extra=dict(data=self.parent.data)) self.set_data(**self.parent.data)
[ "def", "_inherit_data", "(", "self", ")", ":", "LOG", ".", "debug", "(", "\"'%s' inheriting data from '%s'\"", "%", "(", "self", ".", "get_name", "(", ")", ",", "self", ".", "parent", ".", "get_name", "(", ")", ")", ",", "extra", "=", "dict", "(", "dat...
41.125
12.375
def get_max_runs(x) -> np.array: """ Given a list of numbers, return a NumPy array of pairs (start index, end index + 1) of the runs of max value. Example:: >>> get_max_runs([7, 1, 2, 7, 7, 1, 2]) array([[0, 1], [3, 5]]) Assume x is not empty. Recipe comes from `Stack Overflow <http://stackoverflow.com/questions/1066758/find-length-of-sequences-of-identical-values-in-a-numpy-array>`_. """ # Get 0-1 array where 1 marks the max values of x x = np.array(x) m = np.max(x) y = (x == m) * 1 # Bound y by zeros to detect runs properly bounded = np.hstack(([0], y, [0])) # Get 1 at run starts and -1 at run ends diffs = np.diff(bounded) run_starts = np.where(diffs > 0)[0] run_ends = np.where(diffs < 0)[0] return np.array([run_starts, run_ends]).T
[ "def", "get_max_runs", "(", "x", ")", "->", "np", ".", "array", ":", "# Get 0-1 array where 1 marks the max values of x", "x", "=", "np", ".", "array", "(", "x", ")", "m", "=", "np", ".", "max", "(", "x", ")", "y", "=", "(", "x", "==", "m", ")", "*...
31.884615
17.884615
def _reliure_worker(wnum, Qin, Qout, pipeline, options={}): """ a worker used by :func:`run_parallel` """ #pipeline = get_pipeline() logger = logging.getLogger("reliure.run_parallel.worker#%s" % wnum) logger.debug("worker created") if options is None: options = {} while True: chunk = Qin.get() # get an element (and wait for it if needed) logger.debug("Get %s elements to process" % len(chunk)) res = [output for output in pipeline(chunk, **options)] logger.debug("processing done, results len = %s" % len(res)) Qout.put(res) Qin.task_done()
[ "def", "_reliure_worker", "(", "wnum", ",", "Qin", ",", "Qout", ",", "pipeline", ",", "options", "=", "{", "}", ")", ":", "#pipeline = get_pipeline()", "logger", "=", "logging", ".", "getLogger", "(", "\"reliure.run_parallel.worker#%s\"", "%", "wnum", ")", "lo...
40.8
17.866667
def _download_rtd_zip(rtd_version=None, **kwargs): """ Download and extract HTML ZIP from RTD to installed doc data path. Download is skipped if content already exists. Parameters ---------- rtd_version : str or `None` RTD version to download; e.g., "latest", "stable", or "v2.6.0". If not given, download closest match to software version. kwargs : dict Keywords for ``urlretrieve()``. Returns ------- index_html : str Path to local "index.html". """ # https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134 if not toolkit.family.startswith('qt'): raise ValueError('Downloaded documentation not compatible with {} ' 'UI toolkit browser'.format(toolkit.family)) if rtd_version is None: rtd_version = _find_rtd_version() data_path = os.path.dirname( _find_pkg_data_path('help.html', package='ginga.doc')) index_html = os.path.join(data_path, 'index.html') # There is a previous download of documentation; Do nothing. # There is no check if downloaded version is outdated; The idea is that # this folder would be empty again when installing new version. if os.path.isfile(index_html): return index_html url = ('https://readthedocs.org/projects/ginga/downloads/htmlzip/' '{}/'.format(rtd_version)) local_path = urllib.request.urlretrieve(url, **kwargs)[0] with zipfile.ZipFile(local_path, 'r') as zf: zf.extractall(data_path) # RTD makes an undesirable sub-directory, so move everything there # up one level and delete it. subdir = os.path.join(data_path, 'ginga-{}'.format(rtd_version)) for s in os.listdir(subdir): src = os.path.join(subdir, s) if os.path.isfile(src): shutil.copy(src, data_path) else: # directory shutil.copytree(src, os.path.join(data_path, s)) shutil.rmtree(subdir) if not os.path.isfile(index_html): raise OSError( '{} is missing; Ginga doc download failed'.format(index_html)) return index_html
[ "def", "_download_rtd_zip", "(", "rtd_version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134", "if", "not", "toolkit", ".", "family", ".", "startswith", "(", "'qt'", ")", ":", "raise", "ValueErr...
34.016393
21.229508
def concat(*cols): """ Concatenates multiple input columns together into a single column. The function works with strings, binary and compatible array columns. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat(df.s, df.d).alias('s')).collect() [Row(s=u'abcd123')] >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c']) >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect() [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
[ "def", "concat", "(", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "concat", "(", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", ...
43.333333
23.6
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False): """Downloads a Sina video by its unique vkey. http://video.sina.com/ """ url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey type, ext, size = url_info(url) print_info(site_info, title, 'flv', size) if not info_only: download_urls([url], title, 'flv', size, output_dir = output_dir, merge = merge)
[ "def", "sina_download_by_vkey", "(", "vkey", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ")", ":", "url", "=", "'http://video.sina.com/v/flvideo/%s_0.flv'", "%", "vkey", "type", ",", "e...
38.090909
21.727273
def user_list(self, userid, cur_p=''): ''' List the entities of the user. ''' current_page_number = int(cur_p) if cur_p else 1 current_page_number = 1 if current_page_number < 1 else current_page_number kwd = { 'current_page': current_page_number } recs = MEntity2User.get_all_pager_by_username(userid, current_page_num=current_page_number).objects() self.render('misc/entity/entity_user_download.html', imgs=recs, cfg=config.CMS_CFG, kwd=kwd, userinfo=self.userinfo)
[ "def", "user_list", "(", "self", ",", "userid", ",", "cur_p", "=", "''", ")", ":", "current_page_number", "=", "int", "(", "cur_p", ")", "if", "cur_p", "else", "1", "current_page_number", "=", "1", "if", "current_page_number", "<", "1", "else", "current_pa...
34.277778
23.277778
def pluralize(singular): """Convert singular word to its plural form. Args: singular: A word in its singular form. Returns: The word in its plural form. """ if singular in UNCOUNTABLES: return singular for i in IRREGULAR: if i[0] == singular: return i[1] for i in PLURALIZE_PATTERNS: if re.search(i[0], singular): return re.sub(i[0], i[1], singular)
[ "def", "pluralize", "(", "singular", ")", ":", "if", "singular", "in", "UNCOUNTABLES", ":", "return", "singular", "for", "i", "in", "IRREGULAR", ":", "if", "i", "[", "0", "]", "==", "singular", ":", "return", "i", "[", "1", "]", "for", "i", "in", "...
25.117647
14.941176
def format_number(col, d): """ Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places with HALF_EVEN round mode, and returns the result as a string. :param col: the column name of the numeric value to be formatted :param d: the N decimal places >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect() [Row(v=u'5.0000')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
[ "def", "format_number", "(", "col", ",", "d", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "format_number", "(", "_to_java_column", "(", "col", ")", ",", "d", ")", "...
40.615385
23.846154
def get_tags(self, rev=None): """ Return the tags for the current revision as a set """ rev = rev or 'HEAD' return set(self._invoke('tag', '--points-at', rev).splitlines())
[ "def", "get_tags", "(", "self", ",", "rev", "=", "None", ")", ":", "rev", "=", "rev", "or", "'HEAD'", "return", "set", "(", "self", ".", "_invoke", "(", "'tag'", ",", "'--points-at'", ",", "rev", ")", ".", "splitlines", "(", ")", ")" ]
29.5
11.166667
def get_platform_by_name(self, name, for_target=None): """Finds the platform with the given name. If the name is empty or None, returns the default platform. If not platform with the given name is defined, raises an error. :param str name: name of the platform. :param JvmTarget for_target: optionally specified target we're looking up the platform for. Only used in error message generation. :return: The jvm platform object. :rtype: JvmPlatformSettings """ if not name: return self.default_platform if name not in self.platforms_by_name: raise self.UndefinedJvmPlatform(for_target, name, self.platforms_by_name) return self.platforms_by_name[name]
[ "def", "get_platform_by_name", "(", "self", ",", "name", ",", "for_target", "=", "None", ")", ":", "if", "not", "name", ":", "return", "self", ".", "default_platform", "if", "name", "not", "in", "self", ".", "platforms_by_name", ":", "raise", "self", ".", ...
43.5625
15.625
def RoundToSeconds(cls, timestamp): """Takes a timestamp value and rounds it to a second precision.""" leftovers = timestamp % definitions.MICROSECONDS_PER_SECOND scrubbed = timestamp - leftovers rounded = round(float(leftovers) / definitions.MICROSECONDS_PER_SECOND) return int(scrubbed + rounded * definitions.MICROSECONDS_PER_SECOND)
[ "def", "RoundToSeconds", "(", "cls", ",", "timestamp", ")", ":", "leftovers", "=", "timestamp", "%", "definitions", ".", "MICROSECONDS_PER_SECOND", "scrubbed", "=", "timestamp", "-", "leftovers", "rounded", "=", "round", "(", "float", "(", "leftovers", ")", "/...
50.142857
19.857143
def heightmap_add_hill( hm: np.ndarray, x: float, y: float, radius: float, height: float ) -> None: """Add a hill (a half spheroid) at given position. If height == radius or -radius, the hill is a half-sphere. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (float): The x position at the center of the new hill. y (float): The y position at the center of the new hill. radius (float): The size of the new hill. height (float): The height or depth of the new hill. """ lib.TCOD_heightmap_add_hill(_heightmap_cdata(hm), x, y, radius, height)
[ "def", "heightmap_add_hill", "(", "hm", ":", "np", ".", "ndarray", ",", "x", ":", "float", ",", "y", ":", "float", ",", "radius", ":", "float", ",", "height", ":", "float", ")", "->", "None", ":", "lib", ".", "TCOD_heightmap_add_hill", "(", "_heightmap...
41.533333
23.866667
def on_connect(self, client, userdata, flags, result_code): """ Callback when the MQTT client is connected. :param client: the client being connected. :param userdata: unused. :param flags: unused. :param result_code: result code. """ self.log_info("Connected with result code {}".format(result_code)) self.state_handler.set_state(State.welcome)
[ "def", "on_connect", "(", "self", ",", "client", ",", "userdata", ",", "flags", ",", "result_code", ")", ":", "self", ".", "log_info", "(", "\"Connected with result code {}\"", ".", "format", "(", "result_code", ")", ")", "self", ".", "state_handler", ".", "...
40.1
13.3
def on_task_status(self, task): """Ignore not processing error in interactive mode""" if not self.interactive: super(OneScheduler, self).on_task_status(task) try: procesok = task['track']['process']['ok'] except KeyError as e: logger.error("Bad status pack: %s", e) return None if procesok: ret = self.on_task_done(task) else: ret = self.on_task_failed(task) if task['track']['fetch'].get('time'): self._cnt['5m_time'].event((task['project'], 'fetch_time'), task['track']['fetch']['time']) if task['track']['process'].get('time'): self._cnt['5m_time'].event((task['project'], 'process_time'), task['track']['process'].get('time')) self.projects[task['project']].active_tasks.appendleft((time.time(), task)) return ret
[ "def", "on_task_status", "(", "self", ",", "task", ")", ":", "if", "not", "self", ".", "interactive", ":", "super", "(", "OneScheduler", ",", "self", ")", ".", "on_task_status", "(", "task", ")", "try", ":", "procesok", "=", "task", "[", "'track'", "]"...
41.347826
19.73913
def hideEvent(self, event): """ Sets the visible state for this widget. If it is the first time this widget will be visible, the initialized signal will be emitted. :param state | <bool> """ super(XView, self).hideEvent(event) # record the visible state for this widget to be separate of Qt's # system to know if this view WILL be visible or not once the # system is done processing. This will affect how signals are # validated as part of the visible slot delegation self._visibleState = False if not self.signalsBlocked(): self.visibleStateChanged.emit(False) QTimer.singleShot(0, self.hidden)
[ "def", "hideEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XView", ",", "self", ")", ".", "hideEvent", "(", "event", ")", "# record the visible state for this widget to be separate of Qt's", "# system to know if this view WILL be visible or not once the ", "# s...
40.722222
17.722222
def validateInt(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, min=None, max=None, lessThan=None, greaterThan=None, excMsg=None): """Raises ValidationException if value is not a int. Returns value, so it can be used inline in an expression: print(2 + validateInt(your_number)) Note that since int() and ignore leading or trailing whitespace when converting a string to a number, so does this validateNum(). * value (str): The value being validated as an int or float. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * _numType (str): One of 'num', 'int', or 'float' for the kind of number to validate against, where 'num' means int or float. * min (int, float): The (inclusive) minimum value for the value to pass validation. * max (int, float): The (inclusive) maximum value for the value to pass validation. * lessThan (int, float): The (exclusive) minimum value for the value to pass validation. * greaterThan (int, float): The (exclusive) maximum value for the value to pass validation. * excMsg (str): A custom message to use in the raised ValidationException. If you specify min or max, you cannot also respectively specify lessThan or greaterThan. Doing so will raise PySimpleValidateException. >>> import pysimplevalidate as pysv >>> pysv.validateInt('42') 42 >>> pysv.validateInt('forty two') Traceback (most recent call last): ... pysimplevalidate.ValidationException: 'forty two' is not an integer. 
""" return validateNum(value=value, blank=blank, strip=strip, allowlistRegexes=None, blocklistRegexes=blocklistRegexes, _numType='int', min=min, max=max, lessThan=lessThan, greaterThan=greaterThan)
[ "def", "validateInt", "(", "value", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "min", "=", "None", ",", "max", "=", "None", ",", "lessThan", "=", "None", ",", ...
60.783784
38.486486
def get_identity(self, subject_id, entities=None, check_not_on_or_after=True): """ Get all the identity information that has been received and are still valid about the subject. :param subject_id: The identifier of the subject :param entities: The identifiers of the entities whoes assertions are interesting. If the list is empty all entities are interesting. :return: A 2-tuple consisting of the identity information (a dictionary of attributes and values) and the list of entities whoes information has timed out. """ res = {} oldees = [] if not entities: for item in self._cache.find({"subject_id": subject_id}): try: info = self._get_info(item, check_not_on_or_after) except ToOld: oldees.append(item["entity_id"]) continue for key, vals in info["ava"].items(): try: tmp = set(res[key]).union(set(vals)) res[key] = list(tmp) except KeyError: res[key] = vals else: for entity_id in entities: try: info = self.get(subject_id, entity_id, check_not_on_or_after) except ToOld: oldees.append(entity_id) continue for key, vals in info["ava"].items(): try: tmp = set(res[key]).union(set(vals)) res[key] = list(tmp) except KeyError: res[key] = vals return res, oldees
[ "def", "get_identity", "(", "self", ",", "subject_id", ",", "entities", "=", "None", ",", "check_not_on_or_after", "=", "True", ")", ":", "res", "=", "{", "}", "oldees", "=", "[", "]", "if", "not", "entities", ":", "for", "item", "in", "self", ".", "...
39.288889
16.666667
def loss(loss_value): """Calculates aggregated mean loss.""" total_loss = tf.Variable(0.0, False) loss_count = tf.Variable(0, False) total_loss_update = tf.assign_add(total_loss, loss_value) loss_count_update = tf.assign_add(loss_count, 1) loss_op = total_loss / tf.cast(loss_count, tf.float32) return [total_loss_update, loss_count_update], loss_op
[ "def", "loss", "(", "loss_value", ")", ":", "total_loss", "=", "tf", ".", "Variable", "(", "0.0", ",", "False", ")", "loss_count", "=", "tf", ".", "Variable", "(", "0", ",", "False", ")", "total_loss_update", "=", "tf", ".", "assign_add", "(", "total_l...
44.5
10.75
def _array2cstr(arr): """ Serializes a numpy array to a compressed base64 string """ out = StringIO() np.save(out, arr) return b64encode(out.getvalue())
[ "def", "_array2cstr", "(", "arr", ")", ":", "out", "=", "StringIO", "(", ")", "np", ".", "save", "(", "out", ",", "arr", ")", "return", "b64encode", "(", "out", ".", "getvalue", "(", ")", ")" ]
32.8
12.4
def close(self): """Closes the serial port.""" if self.pyb and self.pyb.serial: self.pyb.serial.close() self.pyb = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "pyb", "and", "self", ".", "pyb", ".", "serial", ":", "self", ".", "pyb", ".", "serial", ".", "close", "(", ")", "self", ".", "pyb", "=", "None" ]
30.2
9.2
def copy_dependency_images(tile): """Copy all documentation from dependencies into build/output/doc folder""" env = Environment(tools=[]) outputbase = os.path.join('build', 'output') depbase = os.path.join('build', 'deps') for dep in tile.dependencies: depdir = os.path.join(depbase, dep['unique_id']) outputdir = os.path.join(outputbase) deptile = IOTile(depdir) for image in deptile.find_products('firmware_image'): name = os.path.basename(image) input_path = os.path.join(depdir, name) output_path = os.path.join(outputdir, name) env.Command([output_path], [input_path], Copy("$TARGET", "$SOURCE"))
[ "def", "copy_dependency_images", "(", "tile", ")", ":", "env", "=", "Environment", "(", "tools", "=", "[", "]", ")", "outputbase", "=", "os", ".", "path", ".", "join", "(", "'build'", ",", "'output'", ")", "depbase", "=", "os", ".", "path", ".", "joi...
38.222222
17.277778
def transform(self, X, y=None, copy=None): """ Perform standardization by centering and scaling using the parameters. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for scikit-learn ``Pipeline`` compatibility. :type y: None :param bool copy: Copy the X matrix. :return: Scaled version of the X data matrix. :rtype: numpy.ndarray, shape [n_samples, n_features] """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ",", "copy", "=", "None", ")", ":", "check_is_fitted", "(", "self", ",", "'scale_'", ")", "copy", "=", "copy", "if", "copy", "is", "not", "None", "else", "self", ".", "copy", "X", ...
38.03125
18.46875
def export(self): """ Make the actual request to the Import API (exporting is part of the Import API) to export a map visualization as a .carto file :return: A URL pointing to the .carto file :rtype: str :raise: CartoException .. warning:: Non-public API. It may change with no previous notice .. note:: The export is asynchronous, but this method waits for the export to complete. See `MAX_NUMBER_OF_RETRIES` and `INTERVAL_BETWEEN_RETRIES_S` """ export_job = ExportJob(self.client, self.get_id()) export_job.run() export_job.refresh() count = 0 while export_job.state in ("exporting", "enqueued", "pending"): if count >= MAX_NUMBER_OF_RETRIES: raise CartoException(_("Maximum number of retries exceeded \ when polling the import API for \ visualization export")) time.sleep(INTERVAL_BETWEEN_RETRIES_S) export_job.refresh() count += 1 if export_job.state == "failure": raise CartoException(_("Visualization export failed")) if (export_job.state != "complete" and export_job.state != "created"): raise CartoException(_("Unexpected problem on visualization export \ (state: {state})"). format(state=export_job.state)) return export_job.url
[ "def", "export", "(", "self", ")", ":", "export_job", "=", "ExportJob", "(", "self", ".", "client", ",", "self", ".", "get_id", "(", ")", ")", "export_job", ".", "run", "(", ")", "export_job", ".", "refresh", "(", ")", "count", "=", "0", "while", "...
38.921053
26.868421
def random_numbers(n): """ Generate a random string from 0-9 :param n: length of the string :return: the random string """ return ''.join(random.SystemRandom().choice(string.digits) for _ in range(n))
[ "def", "random_numbers", "(", "n", ")", ":", "return", "''", ".", "join", "(", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "n", ")", ")" ]
31.142857
11.142857
def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record): """ method computes new unit_of_work and transfers the job to STATE_FINAL_RUN it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method""" source_collection_name = context.process_context[process_name].source start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod) end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod) uow, transfer_to_final = self.insert_and_publish_uow(job_record, start_id, end_id) self.update_job(job_record, uow, job.STATE_FINAL_RUN) if transfer_to_final: self._process_state_final_run(job_record)
[ "def", "_compute_and_transfer_to_final_run", "(", "self", ",", "process_name", ",", "start_timeperiod", ",", "end_timeperiod", ",", "job_record", ")", ":", "source_collection_name", "=", "context", ".", "process_context", "[", "process_name", "]", ".", "source", "star...
73.454545
32.909091
def start(self): """Open sockets to the server and start threads""" if not self.writeThread.isAlive() and not self.readThread.isAlive(): self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.client.connect(self.ADDR) self.running = True self.writeThread.start() self.readThread.start()
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "writeThread", ".", "isAlive", "(", ")", "and", "not", "self", ".", "readThread", ".", "isAlive", "(", ")", ":", "self", ".", "client", "=", "socket", ".", "socket", "(", "socket", "....
46.125
14.375
def get_file_array(self, start, end): """Return a list of filenames between and including start and end. Parameters ---------- start: array_like or single string filenames for start of returned filelist stop: array_like or single string filenames inclusive end of list Returns ------- list of filenames between and including start and end over all intervals. """ if hasattr(start, '__iter__') & hasattr(end, '__iter__'): files = [] for (sta,stp) in zip(start, end): id1 = self.get_index(sta) id2 = self.get_index(stp) files.extend(self.files.iloc[id1 : id2+1]) elif hasattr(start, '__iter__') | hasattr(end, '__iter__'): estr = 'Either both or none of the inputs need to be iterable' raise ValueError(estr) else: id1 = self.get_index(start) id2 = self.get_index(end) files = self.files[id1:id2+1].to_list() return files
[ "def", "get_file_array", "(", "self", ",", "start", ",", "end", ")", ":", "if", "hasattr", "(", "start", ",", "'__iter__'", ")", "&", "hasattr", "(", "end", ",", "'__iter__'", ")", ":", "files", "=", "[", "]", "for", "(", "sta", ",", "stp", ")", ...
37.433333
15.466667
def set_index(self, index): """Set or reset the index. Parameters ---------- index : string or pair of strings, optional Names of columns to use for positional index, e.g., 'POS' if table contains a 'POS' column and records from a single chromosome/contig, or ('CHROM', 'POS') if table contains records from multiple chromosomes/contigs. """ if index is None: pass elif isinstance(index, str): index = SortedIndex(self[index], copy=False) elif isinstance(index, (tuple, list)) and len(index) == 2: index = SortedMultiIndex(self[index[0]], self[index[1]], copy=False) else: raise ValueError('invalid index argument, expected string or ' 'pair of strings, found %s' % repr(index)) self.index = index
[ "def", "set_index", "(", "self", ",", "index", ")", ":", "if", "index", "is", "None", ":", "pass", "elif", "isinstance", "(", "index", ",", "str", ")", ":", "index", "=", "SortedIndex", "(", "self", "[", "index", "]", ",", "copy", "=", "False", ")"...
39.956522
20.695652
def _get_html_response(url, session): # type: (str, PipSession) -> Response """Access an HTML page with GET, and return the response. This consists of three parts: 1. If the URL looks suspiciously like an archive, send a HEAD first to check the Content-Type is HTML, to avoid downloading a large file. Raise `_NotHTTP` if the content type cannot be determined, or `_NotHTML` if it is not HTML. 2. Actually perform the request. Raise HTTP exceptions on network failures. 3. Check the Content-Type header to make sure we got HTML, and raise `_NotHTML` otherwise. """ if _is_url_like_archive(url): _ensure_html_response(url, session=session) logger.debug('Getting page %s', url) resp = session.get( url, headers={ "Accept": "text/html", # We don't want to blindly returned cached data for # /simple/, because authors generally expecting that # twine upload && pip install will function, but if # they've done a pip install in the last ~10 minutes # it won't. Thus by setting this to zero we will not # blindly use any cached data, however the benefit of # using max-age=0 instead of no-cache, is that we will # still support conditional requests, so we will still # minimize traffic sent in cases where the page hasn't # changed at all, we will just always incur the round # trip for the conditional GET now instead of only # once per 10 minutes. # For more information, please see pypa/pip#5670. "Cache-Control": "max-age=0", }, ) resp.raise_for_status() # The check for archives above only works if the url ends with # something that looks like an archive. However that is not a # requirement of an url. Unless we issue a HEAD request on every # url we cannot know ahead of time for sure if something is HTML # or not. However we can check after we've downloaded it. _ensure_html_header(resp) return resp
[ "def", "_get_html_response", "(", "url", ",", "session", ")", ":", "# type: (str, PipSession) -> Response", "if", "_is_url_like_archive", "(", "url", ")", ":", "_ensure_html_response", "(", "url", ",", "session", "=", "session", ")", "logger", ".", "debug", "(", ...
42.387755
21.693878
def _propagate_down(self, handle, target_id): """ For DEL_ROUTE, we additionally want to broadcast the message to any stream that has ever communicated with the disconnecting ID, so core.py's :meth:`mitogen.core.Router._on_del_route` can turn the message into a disconnect event. :param int handle: :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE` :param int target_id: ID of the connecting or disconnecting context. """ for stream in self.router.get_streams(): if target_id in stream.egress_ids and ( (self.parent is None) or (self.parent.context_id != stream.remote_id) ): self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None)
[ "def", "_propagate_down", "(", "self", ",", "handle", ",", "target_id", ")", ":", "for", "stream", "in", "self", ".", "router", ".", "get_streams", "(", ")", ":", "if", "target_id", "in", "stream", ".", "egress_ids", "and", "(", "(", "self", ".", "pare...
45.444444
18.333333
def vera_request(self, **kwargs): """Perfom a vera_request for this scene.""" request_payload = { 'output_format': 'json', 'SceneNum': self.scene_id, } request_payload.update(kwargs) return self.vera_controller.data_request(request_payload)
[ "def", "vera_request", "(", "self", ",", "*", "*", "kwargs", ")", ":", "request_payload", "=", "{", "'output_format'", ":", "'json'", ",", "'SceneNum'", ":", "self", ".", "scene_id", ",", "}", "request_payload", ".", "update", "(", "kwargs", ")", "return",...
33
13.777778
def instantiate_for_read_and_search(handle_server_url, reverselookup_username, reverselookup_password, **config): ''' Initialize client with read access and with search function. :param handle_server_url: The URL of the Handle Server. May be None (then, the default 'https://hdl.handle.net' is used). :param reverselookup_username: The username to authenticate at the reverse lookup servlet. :param reverselookup_password: The password to authenticate at the reverse lookup servlet. :param \**config: More key-value pairs may be passed that will be passed on to the constructor as config. Config options from the credentials object are overwritten by this. :return: An instance of the client. ''' if handle_server_url is None and 'reverselookup_baseuri' not in config.keys(): raise TypeError('You must specify either "handle_server_url" or "reverselookup_baseuri".' + \ ' Searching not possible without the URL of a search servlet.') inst = EUDATHandleClient( handle_server_url, reverselookup_username=reverselookup_username, reverselookup_password=reverselookup_password, **config ) return inst
[ "def", "instantiate_for_read_and_search", "(", "handle_server_url", ",", "reverselookup_username", ",", "reverselookup_password", ",", "*", "*", "config", ")", ":", "if", "handle_server_url", "is", "None", "and", "'reverselookup_baseuri'", "not", "in", "config", ".", ...
48.296296
28.814815
def airing_today(self, **kwargs): """ Get the list of TV shows that air today. Without a specified timezone, this query defaults to EST (Eastern Time UTC-05:00). Args: page: (optional) Minimum 1, maximum 1000. language: (optional) ISO 639 code. timezone: (optional) Valid value from the list of timezones. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('airing_today') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "airing_today", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'airing_today'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "...
34.166667
19.055556
def write_config_file_value(key, value): """ Writes an environment variable configuration to the current config file. This will be read in on the next restart. The config file is created if not present. Note: The variables will not take effect until after restart. """ filename = get_config_file() config = _ConfigParser.SafeConfigParser() config.read(filename) __section = "Environment" if not(config.has_section(__section)): config.add_section(__section) config.set(__section, key, value) with open(filename, 'w') as config_file: config.write(config_file)
[ "def", "write_config_file_value", "(", "key", ",", "value", ")", ":", "filename", "=", "get_config_file", "(", ")", "config", "=", "_ConfigParser", ".", "SafeConfigParser", "(", ")", "config", ".", "read", "(", "filename", ")", "__section", "=", "\"Environment...
26.565217
17.869565
def shared_databases(self): """ Retrieves a list containing the names of databases shared with this account. :returns: List of database names """ endpoint = '/'.join(( self.server_url, '_api', 'v2', 'user', 'shared_databases')) resp = self.r_session.get(endpoint) resp.raise_for_status() data = response_to_json_dict(resp) return data.get('shared_databases', [])
[ "def", "shared_databases", "(", "self", ")", ":", "endpoint", "=", "'/'", ".", "join", "(", "(", "self", ".", "server_url", ",", "'_api'", ",", "'v2'", ",", "'user'", ",", "'shared_databases'", ")", ")", "resp", "=", "self", ".", "r_session", ".", "get...
34.076923
11.923077
def _is_auto_field(self, cursor, table_name, column_name): """ Checks whether column is Identity """ # COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx #from django.db import connection #cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')", # (connection.ops.quote_name(table_name), column_name)) cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')", (self.connection.ops.quote_name(table_name), column_name)) return cursor.fetchall()[0][0]
[ "def", "_is_auto_field", "(", "self", ",", "cursor", ",", "table_name", ",", "column_name", ")", ":", "# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx", "#from django.db import connection", "#cursor.execute(\"SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')...
50.25
22.083333
def hybrid_meco_velocity(m1, m2, chi1, chi2, qm1=None, qm2=None): """Return the velocity of the hybrid MECO Parameters ---------- m1 : float Mass of the primary object in solar masses. m2 : float Mass of the secondary object in solar masses. chi1: float Dimensionless spin of the primary object. chi2: float Dimensionless spin of the secondary object. qm1: {None, float}, optional Quadrupole-monopole term of the primary object (1 for black holes). If None, will be set to qm1 = 1. qm2: {None, float}, optional Quadrupole-monopole term of the secondary object (1 for black holes). If None, will be set to qm2 = 1. Returns ------- v: float The velocity (dimensionless) of the hybrid MECO """ if qm1 is None: qm1 = 1 if qm2 is None: qm2 = 1 # Set bounds at 0.1 to skip v=0 and at the lightring velocity chi = (chi1 * m1 + chi2 * m2) / (m1 + m2) vmax = kerr_lightring_velocity(chi) - 0.01 return minimize(hybridEnergy, 0.2, args=(m1, m2, chi1, chi2, qm1, qm2), bounds=[(0.1, vmax)]).x.item()
[ "def", "hybrid_meco_velocity", "(", "m1", ",", "m2", ",", "chi1", ",", "chi2", ",", "qm1", "=", "None", ",", "qm2", "=", "None", ")", ":", "if", "qm1", "is", "None", ":", "qm1", "=", "1", "if", "qm2", "is", "None", ":", "qm2", "=", "1", "# Set ...
30.837838
21.216216
def _getBlobFromURL(cls, url, exists=False): """ Gets the blob specified by the url. caution: makes no api request. blob may not ACTUALLY exist :param urlparse.ParseResult url: the URL :param bool exists: if True, then syncs local blob object with cloud and raises exceptions if it doesn't exist remotely :return: the blob requested :rtype: :class:`~google.cloud.storage.blob.Blob` """ bucketName = url.netloc fileName = url.path # remove leading '/', which can cause problems if fileName is a path if fileName.startswith('/'): fileName = fileName[1:] storageClient = storage.Client() bucket = storageClient.get_bucket(bucketName) blob = bucket.blob(bytes(fileName)) if exists: if not blob.exists(): raise NoSuchFileException # sync with cloud so info like size is available blob.reload() return blob
[ "def", "_getBlobFromURL", "(", "cls", ",", "url", ",", "exists", "=", "False", ")", ":", "bucketName", "=", "url", ".", "netloc", "fileName", "=", "url", ".", "path", "# remove leading '/', which can cause problems if fileName is a path", "if", "fileName", ".", "s...
31.774194
18.225806
def tags(self): """Access the auxillary data here""" if self._tags: return self._tags tags = {} if not tags: return {} for m in [[y.group(1),y.group(2),y.group(3)] for y in [re.match('([^:]{2,2}):([^:]):(.+)$',x) for x in self.entries.optional_fields.split("\t")]]: if m[1] == 'i': m[2] = int(m[2]) elif m[1] == 'f': m[2] = float(m[2]) tags[m[0]] = TAGDatum(m[1],m[2]) self._tags = tags return self._tags
[ "def", "tags", "(", "self", ")", ":", "if", "self", ".", "_tags", ":", "return", "self", ".", "_tags", "tags", "=", "{", "}", "if", "not", "tags", ":", "return", "{", "}", "for", "m", "in", "[", "[", "y", ".", "group", "(", "1", ")", ",", "...
41.181818
19.818182
def clized_default_shorts(p1, p2, first_option='default_value', second_option=5, third_option=[4, 3], last_option=False): """Help docstring """ print('%s %s %s %s %s %s' % (p1, p2, first_option, second_option, third_option, last_option))
[ "def", "clized_default_shorts", "(", "p1", ",", "p2", ",", "first_option", "=", "'default_value'", ",", "second_option", "=", "5", ",", "third_option", "=", "[", "4", ",", "3", "]", ",", "last_option", "=", "False", ")", ":", "print", "(", "'%s %s %s %s %s...
43.25
11.5
def get_kgXref_hg19(self): """ Get UCSC kgXref table for Build 37. Returns ------- pandas.DataFrame kgXref table if loading was successful, else None """ if self._kgXref_hg19 is None: self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19()) return self._kgXref_hg19
[ "def", "get_kgXref_hg19", "(", "self", ")", ":", "if", "self", ".", "_kgXref_hg19", "is", "None", ":", "self", ".", "_kgXref_hg19", "=", "self", ".", "_load_kgXref", "(", "self", ".", "_get_path_kgXref_hg19", "(", ")", ")", "return", "self", ".", "_kgXref_...
28.916667
19.25
def _set_protobuf_value(value_pb, val): """Assign 'val' to the correct subfield of 'value_pb'. The Protobuf API uses different attribute names based on value types rather than inferring the type. Some value types (entities, keys, lists) cannot be directly assigned; this function handles them correctly. :type value_pb: :class:`.entity_pb2.Value` :param value_pb: The value protobuf to which the value is being assigned. :type val: :class:`datetime.datetime`, boolean, float, integer, string, :class:`google.cloud.datastore.key.Key`, :class:`google.cloud.datastore.entity.Entity` :param val: The value to be assigned. """ attr, val = _pb_attr_value(val) if attr == "key_value": value_pb.key_value.CopyFrom(val) elif attr == "timestamp_value": value_pb.timestamp_value.CopyFrom(val) elif attr == "entity_value": entity_pb = entity_to_protobuf(val) value_pb.entity_value.CopyFrom(entity_pb) elif attr == "array_value": if len(val) == 0: array_value = entity_pb2.ArrayValue(values=[]) value_pb.array_value.CopyFrom(array_value) else: l_pb = value_pb.array_value.values for item in val: i_pb = l_pb.add() _set_protobuf_value(i_pb, item) elif attr == "geo_point_value": value_pb.geo_point_value.CopyFrom(val) else: # scalar, just assign setattr(value_pb, attr, val)
[ "def", "_set_protobuf_value", "(", "value_pb", ",", "val", ")", ":", "attr", ",", "val", "=", "_pb_attr_value", "(", "val", ")", "if", "attr", "==", "\"key_value\"", ":", "value_pb", ".", "key_value", ".", "CopyFrom", "(", "val", ")", "elif", "attr", "==...
38.684211
14.026316
def eqdate(y): """ Like eq but compares datetime with y,m,d tuple. Also accepts magic string 'TODAY'. """ y = datetime.date.today() if y == 'TODAY' else datetime.date(*y) return lambda x: x == y
[ "def", "eqdate", "(", "y", ")", ":", "y", "=", "datetime", ".", "date", ".", "today", "(", ")", "if", "y", "==", "'TODAY'", "else", "datetime", ".", "date", "(", "*", "y", ")", "return", "lambda", "x", ":", "x", "==", "y" ]
30.285714
11.428571
def _default_ising_beta_range(h, J): """Determine the starting and ending beta from h J Args: h (dict) J (dict) Assume each variable in J is also in h. We use the minimum bias to give a lower bound on the minimum energy gap, such at the final sweeps we are highly likely to settle into the current valley. """ # Get nonzero, absolute biases abs_h = [abs(hh) for hh in h.values() if hh != 0] abs_J = [abs(jj) for jj in J.values() if jj != 0] abs_biases = abs_h + abs_J if not abs_biases: return [0.1, 1.0] # Rough approximation of min change in energy when flipping a qubit min_delta_energy = min(abs_biases) # Combine absolute biases by variable abs_bias_dict = {k: abs(v) for k, v in h.items()} for (k1, k2), v in J.items(): abs_bias_dict[k1] += abs(v) abs_bias_dict[k2] += abs(v) # Find max change in energy when flipping a single qubit max_delta_energy = max(abs_bias_dict.values()) # Selecting betas based on probability of flipping a qubit # Hot temp: We want to scale hot_beta so that for the most unlikely qubit flip, we get at least # 50% chance of flipping.(This means all other qubits will have > 50% chance of flipping # initially.) Most unlikely flip is when we go from a very low energy state to a high energy # state, thus we calculate hot_beta based on max_delta_energy. # 0.50 = exp(-hot_beta * max_delta_energy) # # Cold temp: Towards the end of the annealing schedule, we want to minimize the chance of # flipping. Don't want to be stuck between small energy tweaks. Hence, set cold_beta so that # at minimum energy change, the chance of flipping is set to 1%. # 0.01 = exp(-cold_beta * min_delta_energy) hot_beta = np.log(2) / max_delta_energy cold_beta = np.log(100) / min_delta_energy return [hot_beta, cold_beta]
[ "def", "_default_ising_beta_range", "(", "h", ",", "J", ")", ":", "# Get nonzero, absolute biases", "abs_h", "=", "[", "abs", "(", "hh", ")", "for", "hh", "in", "h", ".", "values", "(", ")", "if", "hh", "!=", "0", "]", "abs_J", "=", "[", "abs", "(", ...
38.9375
24.083333
def async_or_eager(self, **options): """ Attempt to call self.apply_async, or if that fails because of a problem with the broker, run the task eagerly and return an EagerResult. """ args = options.pop("args", None) kwargs = options.pop("kwargs", None) possible_broker_errors = self._get_possible_broker_errors_tuple() try: return self.apply_async(args, kwargs, **options) except possible_broker_errors: return self.apply(args, kwargs, **options)
[ "def", "async_or_eager", "(", "self", ",", "*", "*", "options", ")", ":", "args", "=", "options", ".", "pop", "(", "\"args\"", ",", "None", ")", "kwargs", "=", "options", ".", "pop", "(", "\"kwargs\"", ",", "None", ")", "possible_broker_errors", "=", "...
44.166667
14.666667
def DirContains(self,f) : """ Matches dirs that have a child that matches filter f""" def match(fsNode) : if not fsNode.isdir() : return False for c in fsNode.children() : if f(c) : return True return False return self.make_return(match)
[ "def", "DirContains", "(", "self", ",", "f", ")", ":", "def", "match", "(", "fsNode", ")", ":", "if", "not", "fsNode", ".", "isdir", "(", ")", ":", "return", "False", "for", "c", "in", "fsNode", ".", "children", "(", ")", ":", "if", "f", "(", "...
38.375
7.25
def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0
[ "def", "_time_to_expiry", "(", "expires", ")", ":", "try", ":", "expires_dt", "=", "datetime", ".", "strptime", "(", "str", "(", "expires", ")", ",", "'%a, %d %b %Y %H:%M:%S %Z'", ")", "delta", "=", "expires_dt", "-", "datetime", ".", "utcnow", "(", ")", "...
37
15.333333
def take_action(self, production_rule: str) -> 'GrammarStatelet': """ Takes an action in the current grammar state, returning a new grammar state with whatever updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS". This will update the non-terminal stack. Updating the non-terminal stack involves popping the non-terminal that was expanded off of the stack, then pushing on any non-terminals in the production rule back on the stack. For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and ``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e", "<e,d>"]``. If ``self._reverse_productions`` is set to ``False`` then we push the non-terminals on in in their given order, which means that the first non-terminal in the production rule gets popped off the stack `last`. """ left_side, right_side = production_rule.split(' -> ') assert self._nonterminal_stack[-1] == left_side, (f"Tried to expand {self._nonterminal_stack[-1]}" f"but got rule {left_side} -> {right_side}") new_stack = self._nonterminal_stack[:-1] productions = self._get_productions_from_string(right_side) if self._reverse_productions: productions = list(reversed(productions)) for production in productions: if self._is_nonterminal(production): new_stack.append(production) return GrammarStatelet(nonterminal_stack=new_stack, valid_actions=self._valid_actions, is_nonterminal=self._is_nonterminal, reverse_productions=self._reverse_productions)
[ "def", "take_action", "(", "self", ",", "production_rule", ":", "str", ")", "->", "'GrammarStatelet'", ":", "left_side", ",", "right_side", "=", "production_rule", ".", "split", "(", "' -> '", ")", "assert", "self", ".", "_nonterminal_stack", "[", "-", "1", ...
52.057143
31.428571
def user_can_add_attachments(self): """Checks if the current logged in user is allowed to add attachments """ if not self.global_attachments_allowed(): return False context = self.context pm = api.get_tool("portal_membership") return pm.checkPermission(AddAttachment, context)
[ "def", "user_can_add_attachments", "(", "self", ")", ":", "if", "not", "self", ".", "global_attachments_allowed", "(", ")", ":", "return", "False", "context", "=", "self", ".", "context", "pm", "=", "api", ".", "get_tool", "(", "\"portal_membership\"", ")", ...
41.125
7.875
def _get_timestamp_tuple(ts): """ Internal method to get a timestamp tuple from a value. Handles input being a datetime or a Timestamp. """ if isinstance(ts, datetime.datetime): return Timestamp.from_datetime(ts).tuple() elif isinstance(ts, Timestamp): return ts raise TypeError('Timestamp or datetime.datetime required')
[ "def", "_get_timestamp_tuple", "(", "ts", ")", ":", "if", "isinstance", "(", "ts", ",", "datetime", ".", "datetime", ")", ":", "return", "Timestamp", ".", "from_datetime", "(", "ts", ")", ".", "tuple", "(", ")", "elif", "isinstance", "(", "ts", ",", "T...
36.4
10
def check_membership(self, groups): ''' Allows for objects with no required groups ''' if not groups or groups == ['']: return True if self.request.user.is_superuser: return True user_groups = self.request.user.groups.values_list("name", flat=True) return set(groups).intersection(set(user_groups))
[ "def", "check_membership", "(", "self", ",", "groups", ")", ":", "if", "not", "groups", "or", "groups", "==", "[", "''", "]", ":", "return", "True", "if", "self", ".", "request", ".", "user", ".", "is_superuser", ":", "return", "True", "user_groups", "...
45.25
14.25
def _header(self): """ Default html header """ html = """ <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>Report</title> """ if "bokeh" in self.report_engines: html += self.bokeh_header_() if "altair" in self.report_engines: html += self.altair_header_() if "chartjs" in self.report_engines: html += self.chartjs_header_() html += """ </head> <body> """ return html
[ "def", "_header", "(", "self", ")", ":", "html", "=", "\"\"\"\n\t\t<!DOCTYPE html>\n\t\t<html lang=\"en\">\n\t\t<head>\n\t\t\t<meta charset=\"utf-8\">\n\t\t\t<title>Report</title>\n\t\t\"\"\"", "if", "\"bokeh\"", "in", "self", ".", "report_engines", ":", "html", "+=", "self", "...
22.454545
10.863636
def update_shelf(self, shelf_id, shelf_data): """ 修改货架 :param shelf_id: 货架ID :param shelf_data: 货架详情 :return: 返回的 JSON 数据包 """ shelf_data['shelf_id'] = shelf_id return self._post( 'merchant/shelf/mod', data=shelf_data )
[ "def", "update_shelf", "(", "self", ",", "shelf_id", ",", "shelf_data", ")", ":", "shelf_data", "[", "'shelf_id'", "]", "=", "shelf_id", "return", "self", ".", "_post", "(", "'merchant/shelf/mod'", ",", "data", "=", "shelf_data", ")" ]
23.384615
13.076923