code: string, lengths 75 to 104k
docstring: string, lengths 1 to 46.9k
def ignore_file_extension(self, extension):
    """
    Configure a file extension to be ignored.

    :param extension: file extension to be ignored (ex. .less, .scss, etc)
    """
    logger.info('Ignoring file extension: {}'.format(extension))
    self.watcher.ignore_file_extension(extension)
Configure a file extension to be ignored. :param extension: file extension to be ignored (ex. .less, .scss, etc)
def pricing(sort='cost', **kwargs):
    '''
    Print pricing tables for all enabled providers
    '''
    for name, provider in env.providers.items():
        print name
        provider.pricing(sort, **kwargs)
        print
Print pricing tables for all enabled providers
def nextstate(self, newstate, treenode=None, user_data=None):
    """ Manage transition of state. """
    if newstate is None:
        return self
    if isinstance(newstate, State) and id(newstate) != id(self):
        return newstate
    elif isinstance(newstate, StateEvent):
        self.state_register.named_events[newstate.name] = True
        return newstate.st
    elif isinstance(newstate, StatePrecond):
        return newstate.st
    elif isinstance(newstate, StateHook):
        # final API using PSL
        newstate.call(treenode, user_data)
        return newstate.st
    return self
Manage transition of state.
def validate_address(self, address_deets):
    """Validates a customer address and returns back a collection of address matches."""
    request = self._post('addresses/validate', address_deets)
    return self.responder(request)
Validates a customer address and returns back a collection of address matches.
def send_cmd(self, command, connId='default'):
    """ Sends any command to FTP server.

    Returns server output.
    Parameters:
    - command - any valid command to be sent (invalid will result in exception).
    - connId(optional) - connection identifier. By default equals 'default'
    Example:
    | send cmd | HELP |
    """
    thisConn = self.__getConnection(connId)
    outputMsg = ""
    try:
        outputMsg += str(thisConn.sendcmd(command))
    except ftplib.all_errors as e:
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(outputMsg)
    return outputMsg
Sends any command to FTP server. Returns server output. Parameters: - command - any valid command to be sent (invalid will result in exception). - connId(optional) - connection identifier. By default equals 'default' Example: | send cmd | HELP |
def get_proxies(self, quantity, type):
    '''
    quantity: number of proxies to fetch
    type: proxy type
        1. domestic high-anonymity proxies
        2. domestic regular proxies
        3. foreign high-anonymity proxies
        4. foreign regular proxies
    '''
    url_queue = Queue()
    need_pages = int(math.ceil(quantity / 15))
    # decide which type of proxy list to fetch
    if type == 1:
        # domestic high-anonymity proxies
        base_url = self.domestic_gn_url
    elif type == 2:
        # domestic regular proxies
        base_url = self.domestic_pt_url
    elif type == 3:
        # foreign high-anonymity proxies
        base_url = self.abroad_gn_url
    elif type == 4:
        # foreign regular proxies
        base_url = self.abroad_pt_url
    # build the URLs of the pages we need
    for index in range(need_pages):
        url = base_url.format(index + 1)
        url_queue.put(url)
    # process all URLs with spawned greenlets
    gevent_list = []
    gevent_list.append(
        gevent.spawn(self.fetch_urls, url_queue, quantity)
    )
    gevent.joinall(gevent_list)
quantity: number of proxies to fetch. type: proxy type. 1. domestic high-anonymity proxies 2. domestic regular proxies 3. foreign high-anonymity proxies 4. foreign regular proxies
def _update_libdata(self, line): """Update the library meta data from the current line being parsed Args: line (str): The current line of the of the file being parsed """ #################################################### # parse MONA Comments line #################################################### # The mona msp files contain a "comments" line that contains lots of other information normally separated # into by "" if re.match('^Comment.*$', line, re.IGNORECASE): comments = re.findall('"([^"]*)"', line) for c in comments: self._parse_meta_info(c) self._parse_compound_info(c) #################################################### # parse meta and compound info lines #################################################### # check the current line for both general meta data # and compound information self._parse_meta_info(line) self._parse_compound_info(line) #################################################### # End of meta data #################################################### # Most MSP files have the a standard line of text before the spectra information begins. Here we check # for this line and store the relevant details for the compound and meta information to be ready for insertion # into the database if self.collect_meta and (re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE) or re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE)): self._store_compound_info() self._store_meta_info() # Reset the temp meta and compound information self.meta_info = get_blank_dict(self.meta_regex) self.compound_info = get_blank_dict(self.compound_regex) self.other_names = [] self.collect_meta = False # ignore additional information in the 3rd column if using the MassBank spectra schema if re.match('^PK\$PEAK: m/z int\. rel\.int\.$', line, re.IGNORECASE): self.ignore_additional_spectra_info = True # Check if annnotation or spectra is to be in the next lines to be parsed if re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE): self.start_spectra = True return elif re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE): self.start_spectra_annotation = True match = re.match('^PK\$ANNOTATION:(.*)', line, re.IGNORECASE) columns = match.group(1) cl = columns.split() self.spectra_annotation_indexes = {i: cl.index(i) for i in cl} return #################################################### # Process annotation details #################################################### # e.g. molecular formula for each peak in the spectra if self.start_spectra_annotation: self._parse_spectra_annotation(line) #################################################### # Process spectra #################################################### if self.start_spectra: self._parse_spectra(line)
Update the library meta data from the current line being parsed Args: line (str): The current line of the of the file being parsed
def send(self, sender: PytgbotApiBot):
    """
    Send the message via pytgbot.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :rtype: PytgbotApiMessage
    """
    return sender.send_sticker(
        # receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
        sticker=self.sticker, chat_id=self.receiver, reply_to_message_id=self.reply_id,
        disable_notification=self.disable_notification, reply_markup=self.reply_markup
    )
Send the message via pytgbot. :param sender: The bot instance to send with. :type sender: pytgbot.bot.Bot :rtype: PytgbotApiMessage
def update(self, callback=None, errback=None, parent=True, **kwargs):
    """
    Update address configuration. Pass a list of keywords and their values to
    update. For the list of keywords available for address configuration, see
    :attr:`ns1.rest.ipam.Addresses.INT_FIELDS` and
    :attr:`ns1.rest.ipam.Addresses.PASSTHRU_FIELDS`
    """
    if not self.data:
        raise AddressException('Address not loaded')

    def success(result, *args):
        self.data = result
        self.id = result['id']
        self.prefix = result['prefix']
        self.type = result['type']
        self.network = Network(self.config, id=result['network_id'])
        # self.scope_group = Scopegroup(config=self.config, id=result['scope_group_id'])
        if self.type != 'host':
            self.report = self._rest.report(self.id)
        children = self._rest.retrieve_children(self.id)
        self.children = [Address(self.config, id=child['id'])
                         for child in children if len(children) > 0]
        try:
            parent = self._rest.retrieve_parent(self.id)
            self.parent = Address(self.config, id=parent['id'])
        except ResourceException:
            pass
        if callback:
            return callback(self)
        else:
            return self

    return self._rest.update(self.id, callback=success, errback=errback,
                             parent=parent, **kwargs)
Update address configuration. Pass a list of keywords and their values to update. For the list of keywords available for address configuration, see :attr:`ns1.rest.ipam.Addresses.INT_FIELDS` and :attr:`ns1.rest.ipam.Addresses.PASSTHRU_FIELDS`
def notify(self, data):
    """Notify this channel of inbound data"""
    string_channels = {
        ChannelIdentifiers.de_registrations,
        ChannelIdentifiers.registrations_expired
    }
    if data['channel'] in string_channels:
        message = {'device_id': data["value"], 'channel': data["channel"]}
    else:
        message = DeviceStateChanges._map_endpoint_data(data)
    return super(DeviceStateChanges, self).notify(message)
Notify this channel of inbound data
def add_type_struct_or_union(self, name, interp, node):
    """Store the node with the name. When it is instantiated,
    the node itself will be handled.

    :name: name of the typedefd struct/union
    :node: the union/struct node
    :interp: the 010 interpreter
    """
    self.add_type_class(name, StructUnionDef(name, interp, node))
Store the node with the name. When it is instantiated, the node itself will be handled. :name: name of the typedefd struct/union :node: the union/struct node :interp: the 010 interpreter
def show(ctx, project_id, backend):
    """
    Shows the details of the given project id.
    """
    try:
        project = ctx.obj['projects_db'].get(project_id, backend)
    except IOError:
        raise Exception("Error: the projects database file doesn't exist. "
                        "Please run `taxi update` to create it")

    if project is None:
        ctx.obj['view'].err(
            "Could not find project `%s`" % (project_id)
        )
    else:
        ctx.obj['view'].project_with_activities(project)
Shows the details of the given project id.
def generation_time(self):
    """A :class:`datetime.datetime` instance representing the time of
    generation for this :class:`ObjectId`.

    The :class:`datetime.datetime` is timezone aware, and represents the
    generation time in UTC. It is precise to the second.
    """
    timestamp = struct.unpack(">I", self.__id[0:4])[0]
    return datetime.datetime.fromtimestamp(timestamp, utc)
A :class:`datetime.datetime` instance representing the time of generation for this :class:`ObjectId`. The :class:`datetime.datetime` is timezone aware, and represents the generation time in UTC. It is precise to the second.
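As a quick, self-contained illustration of the decoding above (the ObjectId hex value here is purely hypothetical), the first four bytes are a big-endian Unix timestamp:

import datetime
import struct

oid_bytes = bytes.fromhex("5f0e1a2b" + "00" * 8)  # hypothetical 12-byte ObjectId
timestamp = struct.unpack(">I", oid_bytes[0:4])[0]
print(datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc))
# -> 2020-07-14 20:48:43+00:00, the generation time encoded in the id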
def effsnr(snr, reduced_x2, fac=250.):
    """Calculate the effective SNR statistic. See (S5y1 paper) for definition.
    """
    snr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
    rchisq = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
    esnr = snr / (1 + snr ** 2 / fac) ** 0.25 / rchisq ** 0.25

    # If snr input is float, return a float. Otherwise return numpy array.
    if hasattr(snr, '__len__'):
        return esnr
    else:
        return esnr[0]
Calculate the effective SNR statistic. See (S5y1 paper) for definition.
def cli(env, name, public):
    """List images."""

    image_mgr = SoftLayer.ImageManager(env.client)
    images = []
    if public in [False, None]:
        for image in image_mgr.list_private_images(name=name, mask=image_mod.MASK):
            images.append(image)

    if public in [True, None]:
        for image in image_mgr.list_public_images(name=name, mask=image_mod.MASK):
            images.append(image)

    table = formatting.Table(['id', 'name', 'type', 'visibility', 'account'])

    images = [image for image in images if image['parentId'] == '']
    for image in images:
        visibility = (image_mod.PUBLIC_TYPE if image['publicFlag']
                      else image_mod.PRIVATE_TYPE)
        table.add_row([
            image.get('id', formatting.blank()),
            formatting.FormattedItem(image['name'],
                                     click.wrap_text(image['name'], width=50)),
            formatting.FormattedItem(
                utils.lookup(image, 'imageType', 'keyName'),
                utils.lookup(image, 'imageType', 'name')),
            visibility,
            image.get('accountId', formatting.blank()),
        ])

    env.fout(table)
List images.
def get_autoregressive_bias(max_length: int, dtype: str = C.DTYPE_FP32) -> mx.sym.Symbol:
    """
    Returns bias/mask to ensure position i can only attend to positions <i.

    :param max_length: Sequence length.
    :param dtype: dtype of bias
    :return: Bias symbol of shape (1, max_length, max_length).
    """
    length_array = mx.sym.arange(max_length, dtype=dtype)
    # matrix with lower triangle and main diagonal set to 0, upper triangle set to 1
    bias = mx.sym.broadcast_greater(mx.sym.reshape(length_array, shape=(1, -1)),
                                    mx.sym.reshape(length_array, shape=(-1, 1)))
    bias = bias * -C.LARGE_VALUES[dtype]
    bias = mx.sym.reshape(bias, shape=(1, max_length, max_length))
    return mx.sym.BlockGrad(bias)
Returns bias/mask to ensure position i can only attend to positions <i. :param max_length: Sequence length. :param dtype: dtype of bias :return: Bias symbol of shape (1, max_length, max_length).
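A NumPy sketch of the same mask construction may help visualize the result for max_length = 3 (the MXNet symbols above build this lazily; -1e8 here is only a stand-in for -C.LARGE_VALUES[dtype]):

import numpy as np

max_length = 3
length_array = np.arange(max_length)
# 1 where the attended position j lies in the future of position i (j > i), else 0
mask = (length_array[None, :] > length_array[:, None]).astype(np.float32)
bias = mask * -1e8  # large negative bias blocks attention to future positions
print(bias.reshape(1, max_length, max_length))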
def touch(self, key, expire=0, noreply=None):
    """
    The memcached "touch" command.

    Args:
      key: str, see class docs for details.
      expire: optional int, number of seconds until the item is expired
              from the cache, or zero for no expiry (the default).
      noreply: optional bool, True to not wait for the reply (defaults to
               self.default_noreply).

    Returns:
      True if the expiration time was updated, False if the key wasn't
      found.
    """
    if noreply is None:
        noreply = self.default_noreply
    key = self.check_key(key)
    cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'touch', noreply)
    if noreply:
        return True
    return results[0] == b'TOUCHED'
The memcached "touch" command. Args: key: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True if the expiration time was updated, False if the key wasn't found.
def __CheckValid(self, value):
    "check for validity of value"
    val = self.__val
    self.is_valid = True
    try:
        val = set_float(value)
        if self.__min is not None and (val < self.__min):
            self.is_valid = False
            val = self.__min
        if self.__max is not None and (val > self.__max):
            self.is_valid = False
            val = self.__max
    except:
        self.is_valid = False
    self.__bound_val = self.__val = val
    fgcol, bgcol = self.fgcol_valid, self.bgcol_valid
    if not self.is_valid:
        fgcol, bgcol = self.fgcol_invalid, self.bgcol_invalid
    self.SetForegroundColour(fgcol)
    self.SetBackgroundColour(bgcol)
    self.Refresh()
check for validity of value
def get_range(self, i):
    """
    Check and retrieve range if value is a valid range.

    Here we are looking to see if the value is series or range.
    We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
    """
    try:
        m = i.match(RE_INT_ITER)
        if m:
            return self.get_int_range(*m.groups())
        m = i.match(RE_CHR_ITER)
        if m:
            return self.get_char_range(*m.groups())
    except Exception:  # pragma: no cover
        # TODO: We really should never fail here,
        # but if we do, assume the sequence range
        # was invalid. This catch can probably
        # be removed in the future with more testing.
        pass
    return None
Check and retrieve range if value is a valid range. Here we are looking to see if the value is series or range. We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
def _begin(self, retry_id=None):
    """Begin the transaction.

    Args:
        retry_id (Optional[bytes]): Transaction ID of a transaction to be
            retried.

    Raises:
        ValueError: If the current transaction has already begun.
    """
    if self.in_progress:
        msg = _CANT_BEGIN.format(self._id)
        raise ValueError(msg)

    transaction_response = self._client._firestore_api.begin_transaction(
        self._client._database_string,
        options_=self._options_protobuf(retry_id),
        metadata=self._client._rpc_metadata,
    )
    self._id = transaction_response.transaction
Begin the transaction. Args: retry_id (Optional[bytes]): Transaction ID of a transaction to be retried. Raises: ValueError: If the current transaction has already begun.
def getfieldcomm(bunchdt, data, commdct, idfobject, fieldname):
    """get the idd comment for the field"""
    key = idfobject.obj[0].upper()
    keyi = data.dtls.index(key)
    fieldi = idfobject.objls.index(fieldname)
    thiscommdct = commdct[keyi][fieldi]
    return thiscommdct
get the idd comment for the field
def create_circuit(name, provider_id, circuit_type, description=None): ''' .. versionadded:: 2019.2.0 Create a new Netbox circuit name Name of the circuit provider_id The netbox id of the circuit provider circuit_type The name of the circuit type asn The ASN of the circuit provider description The description of the circuit CLI Example: .. code-block:: bash salt myminion netbox.create_circuit NEW_CIRCUIT_01 Telia Transit 1299 "New Telia circuit" ''' nb_circuit_provider = get_('circuits', 'providers', provider_id) nb_circuit_type = get_('circuits', 'circuit-types', slug=slugify(circuit_type)) if nb_circuit_provider and nb_circuit_type: payload = { 'cid': name, 'provider': nb_circuit_provider['id'], 'type': nb_circuit_type['id'] } if description: payload['description'] = description nb_circuit = get_('circuits', 'circuits', cid=name) if nb_circuit: return False circuit = _add('circuits', 'circuits', payload) if circuit: return {'circuits': {'circuits': {circuit['id']: payload}}} else: return circuit else: return False
.. versionadded:: 2019.2.0 Create a new Netbox circuit name Name of the circuit provider_id The netbox id of the circuit provider circuit_type The name of the circuit type asn The ASN of the circuit provider description The description of the circuit CLI Example: .. code-block:: bash salt myminion netbox.create_circuit NEW_CIRCUIT_01 Telia Transit 1299 "New Telia circuit"
def render_select_site_form(self, request, context, form_url=''):
    """
    Render the site choice form.
    """
    app_label = self.opts.app_label
    context.update({
        'has_change_permission': self.has_change_permission(request),
        'form_url': mark_safe(form_url),
        'opts': self.opts,
        'add': True,
        'save_on_top': self.save_on_top,
    })
    return render_to_response(self.select_site_form_template or [
        'admin/%s/%s/select_site_form.html' % (app_label, self.opts.object_name.lower()),
        'admin/%s/select_site_form.html' % app_label,
        'admin/usersettings/select_site_form.html',  # added default here
        'admin/select_site_form.html'
    ], context)
Render the site choice form.
def _get_alpha(self, C, vs30, pga_rock):
    """
    Returns the alpha, the linearised functional relationship between the
    site amplification and the PGA on rock. Equation 31.
    """
    alpha = np.zeros(len(pga_rock))
    idx = vs30 < C["k1"]
    if np.any(idx):
        af1 = pga_rock[idx] +\
            self.CONSTS["c"] * ((vs30[idx] / C["k1"]) ** self.CONSTS["n"])
        af2 = pga_rock[idx] + self.CONSTS["c"]
        alpha[idx] = C["k2"] * pga_rock[idx] * ((1.0 / af1) - (1.0 / af2))
    return alpha
Returns the alpha, the linearised functional relationship between the site amplification and the PGA on rock. Equation 31.
def headpart_types(self, method, input=True):
    """
    Get a list of header I{parameter definitions} (pdefs) defined for the
    specified method. An input I{pdef} is a (I{name},
    L{xsd.sxbase.SchemaObject}) tuple, while an output I{pdef} is a
    L{xsd.sxbase.SchemaObject}.

    @param method: A service method.
    @type method: I{service.Method}
    @param input: Defines input/output message.
    @type input: boolean
    @return: A list of parameter definitions
    @rtype: [I{pdef},...]
    """
    if input:
        headers = method.soap.input.headers
    else:
        headers = method.soap.output.headers
    return [self.__part_type(h.part, input) for h in headers]
Get a list of header I{parameter definitions} (pdefs) defined for the specified method. An input I{pdef} is a (I{name}, L{xsd.sxbase.SchemaObject}) tuple, while an output I{pdef} is a L{xsd.sxbase.SchemaObject}. @param method: A service method. @type method: I{service.Method} @param input: Defines input/output message. @type input: boolean @return: A list of parameter definitions @rtype: [I{pdef},...]
def _send_command(self, command, expected_bytes):
    """
    Send an XID command to the device
    """
    response = self.con.send_xid_command(command, expected_bytes)
    return response
Send an XID command to the device
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = winchester_config['database']['url']
    context.configure(url=url)
    with context.begin_transaction():
        context.run_migrations()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
def process_management_config_section(config, management_config):
    """ Processes the management section from a configuration data dict.

    :param config: The config reference of the object that will hold the
                   configuration data from the config_data.
    :param management_config: Management section from a config data dict.
    """
    if 'commands' in management_config:
        for command in management_config['commands']:
            config.management['commands'].append(command)
Processes the management section from a configuration data dict. :param config: The config reference of the object that will hold the configuration data from the config_data. :param management_config: Management section from a config data dict.
def defUtilityFuncs(self):
    '''
    Defines CRRA utility function for this period (and its derivatives),
    saving them as attributes of self for other methods to use.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    self.u = lambda c: utility(c, gam=self.CRRA)     # utility function
    self.uP = lambda c: utilityP(c, gam=self.CRRA)   # marginal utility function
    self.uPP = lambda c: utilityPP(c, gam=self.CRRA)
Defines CRRA utility function for this period (and its derivatives), saving them as attributes of self for other methods to use. Parameters ---------- none Returns ------- none
def main(output):
    """Output the HBP knowledge graph to the desktop"""
    from hbp_knowledge import get_graph
    graph = get_graph()
    text = to_html(graph)
    print(text, file=output)
Output the HBP knowledge graph to the desktop
def run(self):
    """
    Performs actual NWCHEM2FIESTA run
    """
    init_folder = os.getcwd()
    os.chdir(self.folder)

    with zopen(self.log_file, 'w') as fout:
        subprocess.call([self._NWCHEM2FIESTA_cmd, self._nwcheminput_fn,
                         self._nwchemoutput_fn, self._nwchemmovecs_fn],
                        stdout=fout)

    os.chdir(init_folder)
Performs actual NWCHEM2FIESTA run
def get_footnote_backreferences(text, markdown_obj):
    """Retrieves all footnote backreferences within the text.

    Footnote backreferences can be defined anywhere in the text, and look
    like this:

        [^id]: text

    The corresponding footnote reference can then be placed anywhere in the
    text

        This is some text.[^id]

    Footnote IDs are case insensitive.

    Footnote references are also removed from the text after they have been
    retrieved.

    RETURNS:
    text       -- str; text with all link labels removed
    references -- dict; link ids to (URL, title), where title is the empty
                  string if it is omitted.
    """
    footnotes = OrderedDict()
    for footnote_id, footnote in re_footnote_backreferences.findall(text):
        footnote_id = re.sub(r'<(.*?)>', r'\1', footnote_id).lower().strip()
        footnote = re.sub(r'^[ ]{0,4}', '', footnote, flags=re.M)
        footnotes[footnote_id] = footnote
    text = re_footnote_backreferences.sub('', text)
    return text, footnotes
Retrieves all footnote backreferences within the text. Footnote backreferences can be defined anywhere in the text, and look like this: [^id]: text The corresponding footnote reference can then be placed anywhere in the text This is some text.[^id] Footnote IDs are case insensitive. Footnote references are also removed from the text after they have been retrieved. RETURNS: text -- str; text with all link labels removed references -- dict; link ids to (URL, title), where title is the empty string if it is omitted.
def update_payload(self, fields=None):
    """Wrap submitted data within an extra dict."""
    payload = super(DiscoveryRule, self).update_payload(fields)
    if 'search_' in payload:
        payload['search'] = payload.pop('search_')
    return {u'discovery_rule': payload}
Wrap submitted data within an extra dict.
def state_get(self, block_id, addresses): '''Returns a list of address/data pairs (str, bytes)''' block = self._get_blocks([block_id.hex()])[0] block_header = BlockHeader() block_header.ParseFromString(block.header) try: state_view = self._state_view_factory.create_view( block_header.state_root_hash) except KeyError: LOGGER.error( 'State from block %s requested, but root hash %s was missing. ' 'Returning empty state.', block_id.hex(), block_header.state_root_hash) # The state root does not exist, which may indicate a pruned root # from a dropped fork or an invalid state. return [] result = [] for address in addresses: # a fully specified address if len(address) == 70: try: value = state_view.get(address) except KeyError: # if the key is missing, leave it out of the response continue result.append((address, value)) continue # an address prefix leaves = state_view.leaves(address) for leaf in leaves: result.append(leaf) return result
Returns a list of address/data pairs (str, bytes)
def p_statement_expr(self, t):
    '''statement : node_expression PLUS node_expression
                 | node_expression MINUS node_expression'''
    if len(t) < 3:
        self.accu.add(Term('input', [t[1]]))
        print('input', t[1])
    else:
        # print(t[1], t[2], t[3])
        self.accu.add(Term('edge', ["gen(\"" + t[1] + "\")", "gen(\"" + t[3] + "\")"]))
        self.accu.add(Term('obs_elabel', ["gen(\"" + t[1] + "\")", "gen(\"" + t[3] + "\")", t[2]]))
statement : node_expression PLUS node_expression | node_expression MINUS node_expression
def do_cli(ctx, host, port, template, env_vars, debug_port, debug_args, # pylint: disable=R0914 debugger_path, docker_volume_basedir, docker_network, log_file, layer_cache_basedir, skip_pull_image, force_image_build, parameter_overrides): """ Implementation of the ``cli`` method, just separated out for unit testing purposes """ LOG.debug("local start_lambda command is called") # Pass all inputs to setup necessary context to invoke function locally. # Handler exception raised by the processor for invalid args and print errors try: with InvokeContext(template_file=template, function_identifier=None, # Don't scope to one particular function env_vars_file=env_vars, docker_volume_basedir=docker_volume_basedir, docker_network=docker_network, log_file=log_file, skip_pull_image=skip_pull_image, debug_port=debug_port, debug_args=debug_args, debugger_path=debugger_path, parameter_overrides=parameter_overrides, layer_cache_basedir=layer_cache_basedir, force_image_build=force_image_build, aws_region=ctx.region) as invoke_context: service = LocalLambdaService(lambda_invoke_context=invoke_context, port=port, host=host) service.start() except (InvalidSamDocumentException, OverridesNotWellDefinedError, InvalidLayerReference, DebuggingNotSupported) as ex: raise UserException(str(ex))
Implementation of the ``cli`` method, just separated out for unit testing purposes
def split_(self, col: str) -> "list(Ds)":
    """
    Split the main dataframe according to a column's unique values and
    return a dict of dataswim instances

    :return: list of dataswim instances
    :rtype: list(Ds)

    :example: ``dss = ds.split_("Col 1")``
    """
    try:
        dss = {}
        unique = self.df[col].unique()
        for key in unique:
            df2 = self.df.loc[self.df[col] == key]
            ds2 = self._duplicate_(df2)
            dss[key] = ds2
        return dss
    except Exception as e:
        self.err(e, "Can not split dataframe")
Split the main dataframe according to a column's unique values and return a dict of dataswim instances :return: list of dataswim instances :rtype: list(Ds) :example: ``dss = ds.split_("Col 1")``
def show_rich_text(self, text, collapse=False, img_path=''):
    """Show text in rich mode"""
    self.switch_to_plugin()
    self.switch_to_rich_text()
    context = generate_context(collapse=collapse, img_path=img_path,
                               css_path=self.css_path)
    self.render_sphinx_doc(text, context)
Show text in rich mode
def _req_files_move(self, pid, fids):
    """
    Move files or directories

    :param str pid: destination directory id
    :param list fids: a list of ids of files or directories to be moved
    """
    url = self.web_api_url + '/move'
    data = {}
    data['pid'] = pid
    for i, fid in enumerate(fids):
        data['fid[%d]' % i] = fid
    req = Request(method='POST', url=url, data=data)
    res = self.http.send(req)
    if res.state:
        return True
    else:
        raise RequestFailure('Failed to access files API.')
Move files or directories :param str pid: destination directory id :param list fids: a list of ids of files or directories to be moved
def apply_raw(self):
    """ apply to the values as a numpy array """
    try:
        result = reduction.reduce(self.values, self.f, axis=self.axis)
    except Exception:
        result = np.apply_along_axis(self.f, self.axis, self.values)

    # TODO: mixed type case
    if result.ndim == 2:
        return self.obj._constructor(result, index=self.index, columns=self.columns)
    else:
        return self.obj._constructor_sliced(result, index=self.agg_axis)
apply to the values as a numpy array
def auth(**kwargs):
    ''' Authorize device synchronization. '''

    """
    kodrive auth <path> <device_id (client)>

    1. make sure path has been added to config.xml, server
    2. make sure path is not shared by someone
    3. add device_id to folder in config.xml, server
    4. add device to devices in config.xml, server
    """

    option = 'add'
    path = kwargs['path']
    key = kwargs['key']

    if kwargs['remove']:
        option = 'remove'

    if kwargs['yes']:
        output, err = cli_syncthing_adapter.auth(option, key, path)
        click.echo("%s" % output, err=err)
    else:
        verb = 'authorize' if not kwargs['remove'] else 'de-authorize'
        if click.confirm("Are you sure you want to %s this device to access %s?" % (verb, path)):
            output, err = cli_syncthing_adapter.auth(option, key, path)
            if output:
                click.echo("%s" % output, err=err)
Authorize device synchronization.
def getsinIm(alat):
    """Computes sinIm from modified apex latitude.

    Parameters
    ==========
    alat : array_like
        Modified apex latitude

    Returns
    =======
    sinIm : ndarray or float
    """
    alat = np.float64(alat)
    return 2 * np.sin(np.radians(alat)) / np.sqrt(4 - 3 * np.cos(np.radians(alat)) ** 2)
Computes sinIm from modified apex latitude. Parameters ========== alat : array_like Modified apex latitude Returns ======= sinIm : ndarray or float
def bulk_record_workunits(self, engine_workunits):
    """A collection of workunits from v2 engine part"""
    for workunit in engine_workunits:
        duration = workunit['end_timestamp'] - workunit['start_timestamp']
        span = zipkin_span(
            service_name="pants",
            span_name=workunit['name'],
            duration=duration,
            span_storage=self.span_storage,
        )
        span.zipkin_attrs = ZipkinAttrs(
            trace_id=self.trace_id,
            span_id=workunit['span_id'],
            # TODO change it when we properly pass parent_id to the v2 engine Nodes
            # TODO Pass parent_id with ExecutionRequest when v2 engine is called by a workunit
            # TODO pass parent_id when v2 engine Node is called by another v2 engine Node
            parent_span_id=workunit.get("parent_id", self.parent_id),
            flags='0',  # flags: stores flags header. Currently unused
            is_sampled=True,
        )
        span.start()
        span.start_timestamp = workunit['start_timestamp']
        span.stop()
A collection of workunits from v2 engine part
def shape_offset_y(self):
    """Return y distance of shape origin from local coordinate origin.

    The returned integer represents the topmost extent of the freeform
    shape, in local coordinates. Note that the bounding box of the shape
    need not start at the local origin.
    """
    min_y = self._start_y
    for drawing_operation in self:
        if hasattr(drawing_operation, 'y'):
            min_y = min(min_y, drawing_operation.y)
    return min_y
Return y distance of shape origin from local coordinate origin. The returned integer represents the topmost extent of the freeform shape, in local coordinates. Note that the bounding box of the shape need not start at the local origin.
def to_decimal(alpha_number, alphabet=ALPHABET, default=_marker):
    """Converts an alphanumeric code (e.g AB12) to an integer

    :param alpha_number: representation of an alphanumeric code
    :param alphabet: alphabet to use when alpha_number is a non-int string
    :type number: int, string, Alphanumber, float
    :type alphabet: string
    """
    num = api.to_int(alpha_number, default=None)
    if num is not None:
        return num

    alpha_number = str(alpha_number)
    regex = re.compile(r"([A-Z]+)(\d+)", re.IGNORECASE)
    matches = re.findall(regex, alpha_number)
    if not matches:
        if default is not _marker:
            return default
        raise ValueError("Not a valid alpha number: {}".format(alpha_number))

    alpha = matches[0][0]
    number = int(matches[0][1])
    max_num = 10 ** len(matches[0][1]) - 1
    len_alphabet = len(alphabet)

    for pos_char, alpha_char in enumerate(reversed(alpha)):
        index_char = alphabet.find(alpha_char)
        number += (index_char * max_num * len_alphabet ** pos_char)

    return number
Converts an alphanumeric code (e.g AB12) to an integer :param alpha_number: representation of an alphanumeric code :param alphabet: alphabet to use when alpha_number is a non-int string :type number: int, string, Alphanumber, float :type alphabet: string
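As a worked example of the arithmetic above (assuming the default ALPHABET is the uppercase letters A-Z and that non-integer input falls through to the regex branch), a stripped-down standalone sketch:

import re

ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # assumed default alphabet

def to_decimal_sketch(alpha_number, alphabet=ALPHABET):
    """Minimal re-implementation of the alpha-to-int conversion, for illustration."""
    alpha, digits = re.findall(r"([A-Z]+)(\d+)", alpha_number, re.IGNORECASE)[0]
    number = int(digits)
    max_num = 10 ** len(digits) - 1
    for pos, char in enumerate(reversed(alpha)):
        number += alphabet.find(char) * max_num * len(alphabet) ** pos
    return number

print(to_decimal_sketch("AB12"))  # -> 111, i.e. 12 + index('B') * 99 * 26**0 + index('A') * 99 * 26**1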
def set_children(self, child_ids):
    """Sets the children.

    arg:    child_ids (osid.id.Id[]): the children``Ids``
    raise:  InvalidArgument - ``child_ids`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    if not isinstance(child_ids, list):
        raise errors.InvalidArgument()
    if self.get_children_metadata().is_read_only():
        raise errors.NoAccess()
    idstr_list = []
    for object_id in child_ids:
        if not self._is_valid_id(object_id):
            raise errors.InvalidArgument()
        if str(object_id) not in idstr_list:
            idstr_list.append(str(object_id))
    self._my_map['childIds'] = idstr_list
Sets the children. arg: child_ids (osid.id.Id[]): the children``Ids`` raise: InvalidArgument - ``child_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def create( cls, model, parent = None, uifile = '', commit = True ): """ Prompts the user to create a new record for the inputed table. :param model | <subclass of orb.Table> parent | <QWidget> :return <orb.Table> || None/ | instance of the inputed table class """ # create the dialog dlg = QDialog(parent) dlg.setWindowTitle('Create %s' % model.schema().name()) # create the widget cls = model.schema().property('widgetClass', cls) widget = cls(dlg) if ( uifile ): widget.setUiFile(uifile) widget.setModel(model) widget.layout().setContentsMargins(0, 0, 0, 0) # create buttons opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel btns = QDialogButtonBox(opts, Qt.Horizontal, dlg) # create layout layout = QVBoxLayout() layout.addWidget(widget) layout.addWidget(btns) dlg.setLayout(layout) dlg.adjustSize() # create connections btns.accepted.connect(widget.save) btns.rejected.connect(dlg.reject) widget.saved.connect(dlg.accept) if ( dlg.exec_() ): record = widget.record() if ( commit ): record.commit() return record return None
Prompts the user to create a new record for the inputed table. :param model | <subclass of orb.Table> parent | <QWidget> :return <orb.Table> || None/ | instance of the inputed table class
def write_all_files(filename, data, wref_data=None, params=None): """ Creates an LCModel control file for processing the supplied MRSData, and optional water reference data, updating the default parameters with any values supplied through params. :param filename: the location where the control file should be saved. :param data: MRSData to be processed. :param wref_data: Optional MRSData containing water reference. :param params: Optional dictionary containing non-default parameter values. :return: """ # we assume that the data has one spectral dimension, any others must be # spatial if len(data.shape) == 1: shape = (1, 1, 1) elif len(data.shape) == 2: shape = (data.shape[0], 1, 1) elif len(data.shape) == 3: shape = (data.shape[0], data.shape[1], 1) elif len(data.shape) == 4: shape = data.shape[0:3] elif len(data.shape) > 4: raise ValueError("LCModel cannot handle data with more than 4 dimensions") # We need to save a bunch of files for LCModel to process: a raw file for # the data, possibly a raw file for the wref and a control file for each # slice. In addition, in the absence of information in the params file # about where to save the output (.ps, .csv, .table etc.) that should also # be saved in the same folder as the input data for LCModel. folder, file_root = os.path.split(filename) # make sure that the folder exists before trying to save things to it if not os.path.isdir(folder): os.makedirs(folder) file_root, ext = os.path.splitext(file_root) base_params = { "FILBAS": "/home/spectre/.lcmodel/basis-sets/provencher/press_te30_3t_gsh_v3.basis", "ICOLST": 1, "ICOLEN": shape[0], "NDCOLS": shape[0], "IROWST": 1, "IROWEN": shape[1], "NDROWS": shape[1], "NDSLIC": shape[2], "DOWS": "T" if wref_data is not None else "F", "DOECC": "T" if wref_data is not None else "F", "FILRAW": os.path.join(folder, file_root + ".RAW"), "FILPS": os.path.join(folder, file_root + ".PS") } if wref_data is not None: base_params["FILH2O"] = os.path.join(folder, file_root + ".H2O") # add the user supplied parameters to the list if params is not None: base_params.update(params) # make a few modifications based on user edits if "FILTAB" in base_params: base_params["LTABLE"] = 7 base_params["FILTAB"] = "'{}'".format(base_params["FILTAB"]) elif "LTABLE" in base_params: base_params["LTABLE"] = 7 base_params["FILTAB"] = "'{}'".format(os.path.join(folder, file_root + ".TABLE")) if "FILCSV" in base_params: base_params["LCSV"] = 11 base_params["FILCSV"] = "'{}'".format(base_params["FILCSV"]) elif "LCSV" in base_params: base_params["LCSV"] = 11 base_params["FILCSV"] = "'{}'".format(os.path.join(folder, file_root + ".CSV")) if "FILCOO" in base_params: base_params["LCOORD"] = 9 base_params["FILCOO"] = "'{}'".format(base_params["FILCOO"]) elif "LCOORD" in base_params: base_params["LCOORD"] = 9 base_params["FILCOO"] = "'{}'".format(os.path.join(folder, file_root + ".COORD")) if "FILCOR" in base_params: base_params["LCORAW"] = 10 base_params["FILCOR"] = "'{}'".format(base_params["FILCOR"]) elif "LCORAW" in base_params: base_params["LCORAW"] = 10 base_params["FILCOR"] = "'{}'".format(os.path.join(folder, file_root + ".CORAW")) save_raw(base_params["FILRAW"], data) if wref_data is not None: save_raw(base_params["FILH2O"], wref_data) # have to add single quotes to the various paths base_params["FILRAW"] = "'{}'".format(base_params["FILRAW"]) base_params["FILBAS"] = "'{}'".format(base_params["FILBAS"]) base_params["FILPS"] = "'{}'".format(base_params["FILPS"]) if wref_data is not None: base_params["FILH2O"] = 
"'{}'".format(base_params["FILH2O"]) for slice_index in range(shape[2]): control_filename = "{0}_sl{1}.CONTROL".format(file_root, slice_index) control_filepath = os.path.join(folder, control_filename) with open(control_filepath, 'wt') as fout: fout.write(" $LCMODL\n") fout.write(" OWNER = ''\n") fout.write(" KEY = 123456789\n") fout.write(" DELTAT = {}\n".format(data.dt)) fout.write(" HZPPPM = {}\n".format(data.f0)) fout.write(" NUNFIL = {}\n".format(data.np)) for key, value in base_params.items(): fout.write(" {0} = {1}\n".format(key, value)) fout.write(" $END\n")
Creates an LCModel control file for processing the supplied MRSData, and optional water reference data, updating the default parameters with any values supplied through params. :param filename: the location where the control file should be saved. :param data: MRSData to be processed. :param wref_data: Optional MRSData containing water reference. :param params: Optional dictionary containing non-default parameter values. :return:
def get_average_record(self, n):
    """Returns a list of average current numbers, each representing the
    average over the last n data points.

    Args:
        n: Number of data points to average over.

    Returns:
        A list of average current values.
    """
    history_deque = collections.deque()
    averages = []
    for d in self.data_points:
        history_deque.appendleft(d)
        if len(history_deque) > n:
            history_deque.pop()
        avg = sum(history_deque) / len(history_deque)
        averages.append(round(avg, self.lr))
    return averages
Returns a list of average current numbers, each representing the average over the last n data points. Args: n: Number of data points to average over. Returns: A list of average current values.
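For example, with data points [1, 2, 4, 8] and n = 2 the deque never holds more than the two most recent samples, giving averages [1.0, 1.5, 3.0, 6.0]. A standalone sketch of the same windowed averaging (the rounding precision here is a hypothetical stand-in for self.lr):

import collections

def running_average(data_points, n, precision=2):
    """Average each point with up to n - 1 preceding points."""
    history = collections.deque()
    averages = []
    for d in data_points:
        history.appendleft(d)
        if len(history) > n:
            history.pop()
        averages.append(round(sum(history) / len(history), precision))
    return averages

print(running_average([1, 2, 4, 8], n=2))  # -> [1.0, 1.5, 3.0, 6.0]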
def p_encaps_var_object_property(p):
    'encaps_var : VARIABLE OBJECT_OPERATOR STRING'
    p[0] = ast.ObjectProperty(ast.Variable(p[1], lineno=p.lineno(1)), p[3],
                              lineno=p.lineno(2))
encaps_var : VARIABLE OBJECT_OPERATOR STRING
def default_to_hashed_rows(self, default=None):
    """
    Gets the current setting with no parameters, sets it if a boolean is passed in

    :param default: the value to set
    :return: the current value, or new value if default is set to True or False
    """
    if default is not None:
        self._default_to_hashed_rows = (default is True)
    return self._default_to_hashed_rows
Gets the current setting with no parameters, sets it if a boolean is passed in :param default: the value to set :return: the current value, or new value if default is set to True or False
def mk_examples_menu(text, root_dir=None, depth=0): """ :return: base_item, rel_paths """ # 3.12+ menus examples_dir = ide_utils.get_example_dir() if not examples_dir: return None, [] root_dir = root_dir or examples_dir file_actions = [] menu = Gio.Menu.new() base_item = Gio.MenuItem.new_submenu(text, menu) for fn in sorted(os.listdir(root_dir)): path = os.path.join(root_dir, fn) rel_path = path[len(examples_dir):] if os.path.isdir(path): label = fn.capitalize() item, sm_file_actions = mk_examples_menu(label, os.path.join(root_dir, fn)) menu.append_item(item) file_actions.extend(sm_file_actions) elif os.path.splitext(path)[1] in ['.bot', '.py'] and not fn.startswith('_'): label = ide_utils.make_readable_filename(fn) # the only way I could work out to attach the data to the menu item is in the name :/ action_name = "win.open_example__%s" % encode_relpath(rel_path) menu.append(label, action_name) file_actions.append(rel_path) return base_item, file_actions
:return: base_item, rel_paths
def add_reporter(self, reporter):
    """Add a MetricReporter"""
    with self._lock:
        reporter.init(list(self.metrics.values()))
        self._reporters.append(reporter)
Add a MetricReporter
def orthogonal(name, shape, scale=1.1, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True): r"""Creates a tensor variable of which initial values are of an orthogonal ndarray. See [Saxe et al. 2014.](http://arxiv.org/pdf/1312.6120.pdf) Args: name: The name of new variable. shape: A tuple/list of integers. scale: A Python scalar. dtype: Either float32 or float64. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`. """ flat_shape = (shape[0], np.prod(shape[1:])) a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) # pick the one with the correct shape q = u if u.shape == flat_shape else v q = q.reshape(shape) # create variable x = tf.get_variable(name, initializer=tf.constant(scale * q[:shape[0], :shape[1]], dtype=dtype), regularizer=regularizer, trainable=trainable) # add summary if summary: tf.sg_summary_param(x) return x
r"""Creates a tensor variable of which initial values are of an orthogonal ndarray. See [Saxe et al. 2014.](http://arxiv.org/pdf/1312.6120.pdf) Args: name: The name of new variable. shape: A tuple/list of integers. scale: A Python scalar. dtype: Either float32 or float64. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`.
def get_init(self, filename="__init__.py"):
    """
    Get various info from the package without importing them
    """
    import ast

    with open(filename) as init_file:
        module = ast.parse(init_file.read())

    itr = lambda x: (ast.literal_eval(node.value) for node in ast.walk(module)
                     if isinstance(node, ast.Assign) and node.targets[0].id == x)

    try:
        return next(itr("__author__")), \
            next(itr("__email__")), \
            next(itr("__license__")), \
            next(itr("__version__"))
    except StopIteration:
        raise ValueError("One of author, email, license, or version"
                         " cannot be found in {}".format(filename))
Get various info from the package without importing them
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
    """Construct a URL for the GitHub API.

    The URL may be absolute or relative. In the latter case the appropriate
    domain will be added. This is to help when copying the relative URL
    directly from the GitHub developer documentation.

    The dict provided in url_vars is used in URI template formatting.
    """
    url = urllib.parse.urljoin(DOMAIN, url)  # Works even if 'url' is fully-qualified.
    expanded_url: str = uritemplate.expand(url, var_dict=url_vars)
    return expanded_url
Construct a URL for the GitHub API. The URL may be absolute or relative. In the latter case the appropriate domain will be added. This is to help when copying the relative URL directly from the GitHub developer documentation. The dict provided in url_vars is used in URI template formatting.
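A hedged usage sketch of that expansion (DOMAIN is assumed to be the GitHub API root; the owner, repo, and issue number values are illustrative only):

import urllib.parse
import uritemplate

DOMAIN = "https://api.github.com"  # assumed value of the module-level constant

url = urllib.parse.urljoin(DOMAIN, "/repos/{owner}/{repo}/issues{/number}")
print(uritemplate.expand(url, var_dict={"owner": "octocat", "repo": "hello-world", "number": 42}))
# -> https://api.github.com/repos/octocat/hello-world/issues/42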
def generate_method(service_module, service_name, method_name): """Generate a method for the given Thrift service. :param service_module: Thrift-generated service module :param service_name: Name of the Thrift service :param method_name: Method being called """ assert service_module assert service_name assert method_name args_type = getattr(service_module, method_name + '_args') result_type = getattr(service_module, method_name + '_result', None) serializer = ThriftSerializer(result_type) # oneway not currently supported # TODO - write test for this if result_type is None: def not_supported(self, *args, **kwags): raise OneWayNotSupportedError( 'TChannel+Thrift does not currently support oneway procedues' ) return not_supported result_spec = result_type.thrift_spec # result_spec is a tuple of tuples in the form: # # (fieldId, fieldType, fieldName, ...) # # Where "..." is other information we don't care about right now. # # result_spec will be empty if there is no return value or exception for # the method. # # Its first element, with field ID 0, contains the spec for the return # value. It is None if the result type is void but the method may still # throw exceptions. # # Elements after the first one are specs for the exceptions. endpoint = '%s::%s' % (service_name, method_name) @gen.coroutine def send(self, *args, **kwargs): params = inspect.getcallargs( getattr(service_module.Iface, method_name), self, *args, **kwargs ) params.pop('self') # self is already known # $methodName_args is the implicit struct containing the various # method parameters. call_args = args_type() for name, value in params.items(): setattr(call_args, name, value) tracer = tracing.ClientTracer(channel=self.tchannel) span, headers = tracer.start_span( service=service_name, endpoint=method_name, headers={} ) body = serializer.serialize_body(call_args) header = serializer.serialize_header(headers) # Glue for old API. if hasattr(self.tchannel, 'request'): tracing.apply_trace_flag(span, self.trace, True) with span: response = yield self.tchannel.request( hostport=self.hostport, service=self.service ).send( arg1=endpoint, arg2=header, arg3=body, # body headers=self.protocol_headers, ) body = yield response.get_body() else: with span: response = yield self.tchannel.call( scheme=schemes.THRIFT, service=self.service, arg1=endpoint, arg2=header, arg3=body, hostport=self.hostport, trace=self.trace, tracing_span=span # TODO: Need to handle these! # headers=self.protocol_headers, ) body = response.body call_result = serializer.deserialize_body(body) if not result_spec: # void return type and no exceptions allowed raise gen.Return(None) for exc_spec in result_spec[1:]: # May have failed with an exception exc = getattr(call_result, exc_spec[2]) if exc is not None: raise exc if result_spec[0]: # Non-void return type. Return the result. success = getattr(call_result, result_spec[0][2]) if success is not None: raise gen.Return(success) else: # No return type specified and no exceptions raised. raise gen.Return(None) # Expected a result but nothing was present in the object. Something # went wrong. from thrift import Thrift raise Thrift.TApplicationException( Thrift.TApplicationException.MISSING_RESULT, '%s failed: did not receive a result as expected' % method_name ) # TODO: We should probably throw a custom exception instead. send.__name__ = method_name return send
Generate a method for the given Thrift service. :param service_module: Thrift-generated service module :param service_name: Name of the Thrift service :param method_name: Method being called
def _get_stack_info_for_trace(
    self,
    frames,
    library_frame_context_lines=None,
    in_app_frame_context_lines=None,
    with_locals=True,
    locals_processor_func=None,
):
    """If the stacktrace originates within the elasticapm module, it will skip
    frames until some other module comes up."""
    return list(
        iterate_with_template_sources(
            frames,
            with_locals=with_locals,
            library_frame_context_lines=library_frame_context_lines,
            in_app_frame_context_lines=in_app_frame_context_lines,
            include_paths_re=self.include_paths_re,
            exclude_paths_re=self.exclude_paths_re,
            locals_processor_func=locals_processor_func,
        )
    )
If the stacktrace originates within the elasticapm module, it will skip frames until some other module comes up.
def __setLock(self, command):
    """Set lock on requests."""
    if command in (TURN_ON, TURN_OFF):
        self._operation = command
    elif command in INV_SOURCES:
        self._operation = SOURCE
    else:
        self._operation = ALL

    self._isLocked = True
    self._timer = time.time()
Set lock on requests.
def _create_more_application(): """ Create an `Application` instance that displays the "--MORE--". """ from prompt_toolkit.shortcuts import create_prompt_application registry = Registry() @registry.add_binding(' ') @registry.add_binding('y') @registry.add_binding('Y') @registry.add_binding(Keys.ControlJ) @registry.add_binding(Keys.ControlI) # Tab. def _(event): event.cli.set_return_value(True) @registry.add_binding('n') @registry.add_binding('N') @registry.add_binding('q') @registry.add_binding('Q') @registry.add_binding(Keys.ControlC) def _(event): event.cli.set_return_value(False) return create_prompt_application( '--MORE--', key_bindings_registry=registry, erase_when_done=True)
Create an `Application` instance that displays the "--MORE--".
def retrieveVals(self):
    """Retrieve values for graphs."""
    name = 'diskspace'
    if self.hasGraph(name):
        for fspath in self._fslist:
            if self._statsSpace.has_key(fspath):
                self.setGraphVal(name, fspath,
                                 self._statsSpace[fspath]['inuse_pcent'])
    name = 'diskinode'
    if self.hasGraph(name):
        for fspath in self._fslist:
            if self._statsInode.has_key(fspath):
                self.setGraphVal(name, fspath,
                                 self._statsInode[fspath]['inuse_pcent'])
Retrieve values for graphs.
def argsort(self, axis=-1, kind='quicksort', order=None): """ Returns the indices that would sort an array. .. note:: This method wraps `numpy.argsort`. This documentation is modified from that of `numpy.argsort`. Perform an indirect sort along the given axis using the algorithm specified by the `kind` keyword. It returns an array of indices of the same shape as the original array that index data along the given axis in sorted order. **Parameters** **axis** : int or None, optional Axis along which to sort. The default is -1 (the last axis). If `None`, the flattened array is used. **kind** : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. **order** : list, optional This argument specifies which fields to compare first, second, etc. Not all fields need be specified. **Returns** **index_array** : ndarray, int Array of indices that sort the tabarray along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. **See Also** sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. ndarray.sort : Inplace sort. **Notes** See `numpy.sort` for notes on the different sorting algorithms. **Examples** Sorting with keys: >>> x = tabarray([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) >>> x tabarray([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) >>> x.argsort(order=('x','y')) array([1, 0]) >>> x.argsort(order=('y','x')) array([0, 1]) """ index_array = np.core.fromnumeric._wrapit(self, 'argsort', axis, kind, order) index_array = index_array.view(np.ndarray) return index_array
Returns the indices that would sort an array. .. note:: This method wraps `numpy.argsort`. This documentation is modified from that of `numpy.argsort`. Perform an indirect sort along the given axis using the algorithm specified by the `kind` keyword. It returns an array of indices of the same shape as the original array that index data along the given axis in sorted order. **Parameters** **axis** : int or None, optional Axis along which to sort. The default is -1 (the last axis). If `None`, the flattened array is used. **kind** : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. **order** : list, optional This argument specifies which fields to compare first, second, etc. Not all fields need be specified. **Returns** **index_array** : ndarray, int Array of indices that sort the tabarray along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. **See Also** sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. ndarray.sort : Inplace sort. **Notes** See `numpy.sort` for notes on the different sorting algorithms. **Examples** Sorting with keys: >>> x = tabarray([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) >>> x tabarray([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) >>> x.argsort(order=('x','y')) array([1, 0]) >>> x.argsort(order=('y','x')) array([0, 1])
def query_random(num=6, kind='1'):
    '''
    Query wikis randomly.
    '''
    return TabWiki.select().where(
        TabWiki.kind == kind
    ).order_by(
        peewee.fn.Random()
    ).limit(num)
Query wikis randomly.
def sign(self, value):
    """
    :type value: any
    :rtype: bytes
    """
    payload = struct.pack('>cQ', self.version, int(time.time()))
    payload += force_bytes(value)
    return payload + self.signature(payload).finalize()
:type value: any :rtype: bytes
def limits_hydrate(db, lims):
    """
    Helper function to hydrate a list of limits.

    :param db: A database handle.
    :param lims: A list of limit strings, as retrieved from the database.
    """
    return [limits.Limit.hydrate(db, lim) for lim in lims]
Helper function to hydrate a list of limits. :param db: A database handle. :param lims: A list of limit strings, as retrieved from the database.
def isdir(self, dirname):
    """Returns whether the path is a directory or not."""
    client = boto3.client("s3")
    bucket, path = self.bucket_and_path(dirname)
    if not path.endswith("/"):
        path += "/"  # This will now only retrieve subdir content
    r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="/")
    if r.get("Contents") or r.get("CommonPrefixes"):
        return True
    return False
Returns whether the path is a directory or not.
def SplitMeta(meta):
    """Split and validate metacharacters.

    Example: '{}' -> ('{', '}')

    This is public so the syntax highlighter and other tools can use it.
    """
    n = len(meta)
    if n % 2 == 1:
        raise ConfigurationError(
            '%r has an odd number of metacharacters' % meta)
    return meta[:n // 2], meta[n // 2:]
Split and validate metacharacters. Example: '{}' -> ('{', '}') This is public so the syntax highlighter and other tools can use it.
def cmd_link_add(self, args):
    '''add new link'''
    descriptor = args[0]
    print("Adding link %s" % descriptor)
    self.link_add(descriptor)
add new link
def concat_same_type(self, to_concat, placement=None):
    """
    Concatenate list of single blocks of the same type.
    """
    values = self._concatenator([blk.values for blk in to_concat],
                                axis=self.ndim - 1)
    return self.make_block_same_class(
        values, placement=placement or slice(0, len(values), 1))
Concatenate list of single blocks of the same type.
def is_callable(instance, attribute, value):
    """Raises a :exc:`TypeError` if the value is not a callable."""
    if not callable(value):
        raise TypeError("'{}' must be callable".format(attribute.name))
Raises a :exc:`TypeError` if the value is not a callable.
def initialize_communities_bucket():
    """Initialize the communities file bucket.

    :raises: `invenio_files_rest.errors.FilesException`
    """
    bucket_id = UUID(current_app.config['COMMUNITIES_BUCKET_UUID'])

    if Bucket.query.get(bucket_id):
        raise FilesException("Bucket with UUID {} already exists.".format(
            bucket_id))
    else:
        storage_class = current_app.config['FILES_REST_DEFAULT_STORAGE_CLASS']
        location = Location.get_default()
        bucket = Bucket(id=bucket_id,
                        location=location,
                        default_storage_class=storage_class)
        db.session.add(bucket)
        db.session.commit()
Initialize the communities file bucket. :raises: `invenio_files_rest.errors.FilesException`
def _rd_dat_signals(file_name, dir_name, pb_dir, fmt, n_sig, sig_len, byte_offset, samps_per_frame, skew, sampfrom, sampto, smooth_frames): """ Read all signals from a WFDB dat file. Parameters ---------- file_name : str The name of the dat file * other params See docstring for `_rd_segment`. Returns ------- signals : numpy array, or list See docstring for `_rd_segment`. Notes ----- See docstring notes for `_rd_segment`. """ # Total number of samples per frame tsamps_per_frame = sum(samps_per_frame) # The signal length to read (per channel) read_len = sampto - sampfrom # Calculate parameters used to read and process the dat file (start_byte, n_read_samples, block_floor_samples, extra_flat_samples, nan_replace) = _dat_read_params(fmt, sig_len, byte_offset, skew, tsamps_per_frame, sampfrom, sampto) # Number of bytes to be read from the dat file total_read_bytes = _required_byte_num('read', fmt, n_read_samples) # Total samples to be processed in intermediate step. Includes extra # padded samples beyond dat file total_process_samples = n_read_samples + extra_flat_samples # Total number of bytes to be processed in intermediate step. total_process_bytes = _required_byte_num('read', fmt, total_process_samples) # Get the intermediate bytes or samples to process. Bit of a # discrepancy. Recall special formats load uint8 bytes, other formats # already load samples. # Read values from dat file. Append bytes/samples if needed. if extra_flat_samples: if fmt in UNALIGNED_FMTS: # Extra number of bytes to append onto the bytes read from # the dat file. n_extra_bytes = total_process_bytes - total_read_bytes sig_data = np.concatenate((_rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_read_samples), np.zeros(n_extra_bytes, dtype=np.dtype(DATA_LOAD_TYPES[fmt])))) else: sig_data = np.concatenate((_rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_read_samples), np.zeros(extra_flat_samples, dtype=np.dtype(DATA_LOAD_TYPES[fmt])))) else: sig_data = _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_read_samples) # Finish processing the read data into proper samples if not already # For unaligned fmts, turn the uint8 blocks into actual samples if fmt in UNALIGNED_FMTS: sig_data = _blocks_to_samples(sig_data, total_process_samples, fmt) # Remove extra leading sample read within the byte block if any if block_floor_samples: sig_data = sig_data[block_floor_samples:] # Adjust samples values for byte offset formats if fmt in OFFSET_FMTS: if fmt == '80': sig_data = (sig_data.astype('int16') - 128).astype('int8') elif fmt == '160': sig_data = (sig_data.astype('int32') - 32768).astype('int16') # At this point, dtype of sig_data is the minimum integer format # required for storing the final digital samples. # No extra samples/frame. Obtain original uniform numpy array if tsamps_per_frame == n_sig: # Reshape into multiple channels signal = sig_data.reshape(-1, n_sig) # Skew the signal signal = _skew_sig(signal, skew, n_sig, read_len, fmt, nan_replace) # Extra frames present to be smoothed. Obtain averaged uniform numpy array elif smooth_frames: # Allocate memory for smoothed signal. 
signal = np.zeros((int(len(sig_data) / tsamps_per_frame) , n_sig), dtype=sig_data.dtype) # Transfer and average samples for ch in range(n_sig): if samps_per_frame[ch] == 1: signal[:, ch] = sig_data[sum(([0] + samps_per_frame)[:ch + 1])::tsamps_per_frame] else: if ch == 0: startind = 0 else: startind = np.sum(samps_per_frame[:ch]) signal[:,ch] = [np.average(sig_data[ind:ind+samps_per_frame[ch]]) for ind in range(startind,len(sig_data),tsamps_per_frame)] # Skew the signal signal = _skew_sig(signal, skew, n_sig, read_len, fmt, nan_replace) # Extra frames present without wanting smoothing. Return all # expanded samples. else: # List of 1d numpy arrays signal = [] # Transfer over samples for ch in range(n_sig): # Indices of the flat signal that belong to the channel ch_indices = np.concatenate([np.array(range(samps_per_frame[ch])) + sum([0] + samps_per_frame[:ch]) + tsamps_per_frame * framenum for framenum in range(int(len(sig_data)/tsamps_per_frame))]) signal.append(sig_data[ch_indices]) # Skew the signal signal = _skew_sig(signal, skew, n_sig, read_len, fmt, nan_replace, samps_per_frame) # Integrity check of signal shape after reading _check_sig_dims(signal, read_len, n_sig, samps_per_frame) return signal
Read all signals from a WFDB dat file. Parameters ---------- file_name : str The name of the dat file * other params See docstring for `_rd_segment`. Returns ------- signals : numpy array, or list See docstring for `_rd_segment`. Notes ----- See docstring notes for `_rd_segment`.
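A minimal, self-contained sketch of the frame-averaging step used when smooth_frames is set: flat interleaved samples are collapsed to one averaged value per channel per frame. The sample values and frame layout below are invented purely for illustration and do not come from a real dat file.

import numpy as np

# channel 0 carries 1 sample per frame, channel 1 carries 3 samples per frame
samps_per_frame = [1, 3]
tsamps_per_frame = sum(samps_per_frame)
n_sig = len(samps_per_frame)

# two frames of flat interleaved data: [ch0, ch1, ch1, ch1, ch0, ch1, ch1, ch1]
sig_data = np.array([10, 1, 2, 3, 20, 4, 5, 6], dtype='int16')

signal = np.zeros((len(sig_data) // tsamps_per_frame, n_sig), dtype='float64')
for ch in range(n_sig):
    start = sum(samps_per_frame[:ch])
    for frame, ind in enumerate(range(start, len(sig_data), tsamps_per_frame)):
        # average the extra samples of this channel within the frame
        signal[frame, ch] = np.mean(sig_data[ind:ind + samps_per_frame[ch]])

print(signal)   # [[10.  2.]  [20.  5.]]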
def rest_api_url(self, *url_parts, **kwargs):
    """Join the URL of REST_API

    parameters:
        url_parts: strings that are joined to the url by "/".
            a REST url like https://na1.salesforce.com/services/data/v44.0/
            is usually added, but not if the first string starts with https://
        api_ver: API version that should be used instead of connection.api_ver
            default. A special api_ver="" can be used to omit the api version
            (for a request that asks for the available api versions)
        relative: If `relative` is true then the url is without domain
    Examples:
        self.rest_api_url("query?q=select+id+from+Organization")
        self.rest_api_url("sobject", "Contact", id, api_ver="45.0")
        self.rest_api_url(api_ver="")  # versions request
        self.rest_api_url("sobject", relative=True)
        self.rest_api_url("/services/data/v45.0")
    Output:
        https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization
        https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA
        https://na1.salesforce.com/services/data
        /services/data/v44.0/sobject
        https://na1.salesforce.com/services/data/v45.0
    """
    url_parts = list(url_parts)
    if url_parts and re.match(r'^(?:https|mock)://', url_parts[0]):
        return '/'.join(url_parts)
    relative = kwargs.pop('relative', False)
    api_ver = kwargs.pop('api_ver', None)
    api_ver = api_ver if api_ver is not None else self.api_ver
    assert not kwargs
    if not relative:
        base = [self.sf_session.auth.instance_url]
    else:
        base = ['']
    if url_parts and url_parts[0].startswith('/'):
        prefix = []
        url_parts[0] = url_parts[0][1:]
    else:
        prefix = ['services/data']
        if api_ver:
            prefix += ['v{api_ver}'.format(api_ver=api_ver)]
    return '/'.join(base + prefix + url_parts)
Join the URL of REST_API

parameters:
    url_parts: strings that are joined to the url by "/".
        a REST url like https://na1.salesforce.com/services/data/v44.0/
        is usually added, but not if the first string starts with https://
    api_ver: API version that should be used instead of connection.api_ver
        default. A special api_ver="" can be used to omit the api version
        (for a request that asks for the available api versions)
    relative: If `relative` is true then the url is without domain
Examples:
    self.rest_api_url("query?q=select+id+from+Organization")
    self.rest_api_url("sobject", "Contact", id, api_ver="45.0")
    self.rest_api_url(api_ver="")  # versions request
    self.rest_api_url("sobject", relative=True)
    self.rest_api_url("/services/data/v45.0")
Output:
    https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization
    https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA
    https://na1.salesforce.com/services/data
    /services/data/v44.0/sobject
    https://na1.salesforce.com/services/data/v45.0
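A standalone sketch of the path-assembly rules documented above (absolute URLs pass through, a leading "/" suppresses the services/data/vXX.X prefix, relative=True drops the instance URL). build_url, the instance URL and the API version below are hypothetical stand-ins used only to make the behaviour runnable outside the real connection object.

import re

def build_url(instance_url, api_ver, *url_parts, relative=False):
    # hypothetical re-implementation of the join rules, for illustration only
    url_parts = list(url_parts)
    if url_parts and re.match(r'^https://', url_parts[0]):
        return '/'.join(url_parts)
    base = [''] if relative else [instance_url]
    if url_parts and url_parts[0].startswith('/'):
        prefix = []
        url_parts[0] = url_parts[0][1:]
    else:
        prefix = ['services/data']
        if api_ver:
            prefix.append('v{}'.format(api_ver))
    return '/'.join(base + prefix + url_parts)

print(build_url('https://na1.salesforce.com', '44.0',
                'query?q=select+id+from+Organization'))
# https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization
print(build_url('https://na1.salesforce.com', '44.0', 'sobject', relative=True))
# /services/data/v44.0/sobject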
def _build_youtube_dl(worker, destdir, site):
    '''
    Builds a `youtube_dl.YoutubeDL` for brozzling `site` with `worker`.

    The `YoutubeDL` instance does a few special brozzler-specific things:

    - keeps track of urls fetched using a `YoutubeDLSpy`
    - periodically updates `site.last_claimed` in rethinkdb
    - if brozzling through warcprox and downloading segmented videos (e.g.
      HLS), pushes the stitched-up video created by youtube-dl to warcprox
      using a WARCPROX_WRITE_RECORD request
    - some logging

    Args:
        worker (brozzler.BrozzlerWorker): the calling brozzler worker
        destdir (str): where to save downloaded videos
        site (brozzler.Site): the site we are brozzling

    Returns:
        a `youtube_dl.YoutubeDL` instance
    '''

    class _YoutubeDL(youtube_dl.YoutubeDL):
        logger = logging.getLogger(__module__ + "." + __qualname__)

        def urlopen(self, req):
            try:
                url = req.full_url
            except AttributeError:
                url = req
            self.logger.debug('fetching %r', url)
            return super().urlopen(req)

        def add_default_extra_info(self, ie_result, ie, url):
            # hook in some logging
            super().add_default_extra_info(ie_result, ie, url)
            if ie_result.get('_type') == 'playlist':
                self.logger.info(
                        'extractor %r found playlist in %s', ie.IE_NAME, url)
                if ie.IE_NAME == 'youtube:playlist':
                    # At this point ie_result['entries'] is an iterator that
                    # will fetch more metadata from youtube to list all the
                    # videos. We unroll that iterator here partly because
                    # otherwise `process_ie_result()` will clobber it, and we
                    # use it later to extract the watch pages as outlinks.
                    ie_result['entries_no_dl'] = list(ie_result['entries'])
                    ie_result['entries'] = []
                    self.logger.info(
                            'not downloading %s videos from this youtube '
                            'playlist because we expect to capture them from '
                            'individual watch pages',
                            len(ie_result['entries_no_dl']))
            else:
                self.logger.info(
                        'extractor %r found a video in %s', ie.IE_NAME, url)

        def _push_stitched_up_vid_to_warcprox(self, site, info_dict, ctx):
            # XXX Don't know how to get the right content-type. Youtube-dl
            # doesn't supply it. Sometimes (with --hls-prefer-native)
            # youtube-dl produces a stitched-up video that /usr/bin/file fails
            # to identify (says "application/octet-stream"). `ffprobe` doesn't
            # give us a mimetype.
if info_dict.get('ext') == 'mp4': mimetype = 'video/mp4' else: try: import magic mimetype = magic.from_file(ctx['filename'], mime=True) except ImportError as e: mimetype = 'video/%s' % info_dict['ext'] self.logger.warn( 'guessing mimetype %s because %r', mimetype, e) url = 'youtube-dl:%05d:%s' % ( info_dict.get('playlist_index') or 1, info_dict['webpage_url']) size = os.path.getsize(ctx['filename']) self.logger.info( 'pushing %r video stitched-up as %s (%s bytes) to ' 'warcprox at %s with url %s', info_dict['format'], mimetype, size, worker._proxy_for(site), url) with open(ctx['filename'], 'rb') as f: # include content-length header to avoid chunked # transfer, which warcprox currently rejects extra_headers = dict(site.extra_headers()) extra_headers['content-length'] = size request, response = worker._warcprox_write_record( warcprox_address=worker._proxy_for(site), url=url, warc_type='resource', content_type=mimetype, payload=f, extra_headers=extra_headers) # consulted by _remember_videos() self.stitch_ups.append({ 'url': url, 'response_code': response.code, 'content-type': mimetype, 'content-length': size, }) def process_info(self, info_dict): ''' See comment above on `_finish_frag_download()` ''' def ffd_callback(ffd_self, ctx): if worker._using_warcprox(site): self._push_stitched_up_vid_to_warcprox(site, info_dict, ctx) try: thread_local.finish_frag_download_callback = ffd_callback return super().process_info(info_dict) finally: delattr(thread_local, 'finish_frag_download_callback') def maybe_heartbeat_site_last_claimed(*args, **kwargs): # in case youtube-dl takes a long time, heartbeat site.last_claimed # to prevent another brozzler-worker from claiming the site try: if site.rr and doublethink.utcnow() - site.last_claimed > datetime.timedelta(minutes=worker.SITE_SESSION_MINUTES): worker.logger.debug( 'heartbeating site.last_claimed to prevent another ' 'brozzler-worker claiming this site id=%r', site.id) site.last_claimed = doublethink.utcnow() site.save() except: worker.logger.debug( 'problem heartbeating site.last_claimed site id=%r', site.id, exc_info=True) ydl_opts = { "outtmpl": "{}/ydl%(autonumber)s.out".format(destdir), "retries": 1, "nocheckcertificate": True, "hls_prefer_native": True, "noprogress": True, "nopart": True, "no_color": True, "progress_hooks": [maybe_heartbeat_site_last_claimed], # https://github.com/rg3/youtube-dl/blob/master/README.md#format-selection # "best: Select the best quality format represented by a single # file with video and audio." "format": "best/bestvideo+bestaudio", ### we do our own logging # "logger": logging.getLogger("youtube_dl"), "verbose": False, "quiet": True, } if worker._proxy_for(site): ydl_opts["proxy"] = "http://{}".format(worker._proxy_for(site)) ydl = _YoutubeDL(ydl_opts) if site.extra_headers(): ydl._opener.add_handler(ExtraHeaderAdder(site.extra_headers())) ydl.fetch_spy = YoutubeDLSpy() ydl.stitch_ups = [] ydl._opener.add_handler(ydl.fetch_spy) return ydl
Builds a `youtube_dl.YoutubeDL` for brozzling `site` with `worker`. The `YoutubeDL` instance does a few special brozzler-specific things: - keeps track of urls fetched using a `YoutubeDLSpy` - periodically updates `site.last_claimed` in rethinkdb - if brozzling through warcprox and downloading segmented videos (e.g. HLS), pushes the stitched-up video created by youtube-dl to warcprox using a WARCPROX_WRITE_RECORD request - some logging Args: worker (brozzler.BrozzlerWorker): the calling brozzler worker destdir (str): where to save downloaded videos site (brozzler.Site): the site we are brozzling Returns: a `youtube_dl.YoutubeDL` instance
def send_zipfile(request, fileList): """ Create a ZIP file on disk and transmit it in chunks of 8KB, without loading the whole file into memory. A similar approach can be used for large dynamic PDF files. """ temp = tempfile.TemporaryFile() archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED) for artist,files in fileList.iteritems(): for f in files: archive.write(f[0], '%s/%s' % (artist, f[1])) archive.close() wrapper = FixedFileWrapper(temp) response = HttpResponse(wrapper, content_type='application/zip') response['Content-Disposition'] = 'attachment; filename=FrogSources.zip' response['Content-Length'] = temp.tell() temp.seek(0) return response
Create a ZIP file on disk and transmit it in chunks of 8KB, without loading the whole file into memory. A similar approach can be used for large dynamic PDF files.
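The same build-a-zip-in-a-temporary-file pattern can be exercised outside Django; a small self-contained sketch with placeholder archive members:

import tempfile
import zipfile

# build the archive in a temporary file instead of in memory
temp = tempfile.TemporaryFile()
archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED)
archive.writestr('artist_a/track1.txt', b'example payload')   # placeholder content
archive.writestr('artist_b/track2.txt', b'another payload')
archive.close()

content_length = temp.tell()   # size of the finished archive, for Content-Length
temp.seek(0)                   # rewind before streaming it out in chunks
print(content_length, zipfile.ZipFile(temp).namelist())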
def _cons(self, word, i): """cons(i) is TRUE <=> b[i] is a consonant.""" if word[i] in self.vowels: return False if word[i] == 'y': if i == 0: return True else: return (not self._cons(word, i - 1)) return True
cons(i) is TRUE <=> b[i] is a consonant.
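A quick standalone check of the rule above: 'y' acts as a consonant at the start of a word or right after a vowel, and as a vowel right after a consonant. The helper below copies the logic so the examples can run on their own.

VOWELS = frozenset('aeiou')

def cons(word, i):
    # True if word[i] acts as a consonant under the rule described above
    if word[i] in VOWELS:
        return False
    if word[i] == 'y':
        return True if i == 0 else not cons(word, i - 1)
    return True

print(cons('yellow', 0))   # True  -> leading 'y' is a consonant
print(cons('happy', 4))    # False -> 'y' after the consonant 'p' acts as a vowel
print(cons('toy', 2))      # True  -> 'y' after the vowel 'o' is a consonant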
def delete_relationship(self, session, data, api_type, obj_id, rel_key): """ Delete a resource or multiple resources from a to-many relationship. :param session: SQLAlchemy session :param data: JSON data provided with the request :param api_type: Type of the resource :param obj_id: ID of the resource :param rel_key: Key of the relationship to fetch """ model = self._fetch_model(api_type) resource = self._fetch_resource(session, api_type, obj_id, Permissions.EDIT) relationship = self._get_relationship(resource, rel_key, Permissions.DELETE) self._check_json_data(data) if not isinstance(data['data'], list): raise ValidationError('Provided data must be an array.') if relationship.direction == MANYTOONE: return ToManyExpectedError(model, resource, relationship) response = JSONAPIResponse() response.data = {'data': []} session.add(resource) remove = get_rel_desc(resource, relationship.key, RelationshipActions.DELETE) reverse_side = relationship.back_populates for item in data['data']: item = self._fetch_resource(session, item['type'], item['id'], Permissions.EDIT) if reverse_side: reverse_rel = item.__mapper__.relationships[reverse_side] if reverse_rel.direction == MANYTOONE: permission = Permissions.EDIT else: permission = Permissions.DELETE check_permission(item, reverse_side, permission) remove(resource, item) session.commit() session.refresh(resource) get = get_rel_desc(resource, relationship.key, RelationshipActions.GET) for item in get(resource): response.data['data'].append(self._render_short_instance(item)) return response
Delete a resource or multiple resources from a to-many relationship. :param session: SQLAlchemy session :param data: JSON data provided with the request :param api_type: Type of the resource :param obj_id: ID of the resource :param rel_key: Key of the relationship to fetch
def draw_graph(graph_instance, map_coloring = None):
    """!
    @brief Draw graph.

    @param[in] graph_instance (graph): Graph that should be drawn.
    @param[in] map_coloring (list): List of color indexes for each vertex. Size of this list should be equal to size of graph (number of vertices). If it's not specified (None) then the graph will be drawn without coloring.

    @warning The graph can be drawn only if a space representation exists for it.

    """

    if (graph_instance.space_description is None):
        raise NameError("The graph has no representation in space")

    if (map_coloring is not None):
        if (len(graph_instance) != len(map_coloring)):
            raise NameError("Size of graph should be equal to the size of the coloring map")

    fig = plt.figure()
    axes = fig.add_subplot(111)

    available_colors = ['#00a2e8', '#22b14c', '#ed1c24', '#fff200', '#000000',
                        '#a349a4', '#ffaec9', '#7f7f7f', '#b97a57', '#c8bfe7',
                        '#880015', '#ff7f27', '#3f48cc', '#c3c3c3', '#ffc90e',
                        '#efe4b0', '#b5e61d', '#99d9ea', '#7092b4', '#ffffff']

    if (map_coloring is not None):
        if (len(map_coloring) > len(available_colors)):
            raise NameError('Impossible to represent colored graph due to number of specified colors.')

    x_maximum = -float('inf')
    x_minimum = float('inf')
    y_maximum = -float('inf')
    y_minimum = float('inf')

    for i in range(0, len(graph_instance.space_description), 1):
        if (graph_instance.type_graph_descr == type_graph_descr.GRAPH_MATRIX_DESCR):
            for j in range(i, len(graph_instance.space_description), 1):
                # draw connection between two points only one time
                if (graph_instance.data[i][j] == 1):
                    axes.plot([graph_instance.space_description[i][0], graph_instance.space_description[j][0]], [graph_instance.space_description[i][1], graph_instance.space_description[j][1]], 'k-', linewidth = 1.5)

        elif (graph_instance.type_graph_descr == type_graph_descr.GRAPH_VECTOR_DESCR):
            for j in graph_instance.data[i]:
                if (i > j):
                    # draw connection between two points only one time
                    axes.plot([graph_instance.space_description[i][0], graph_instance.space_description[j][0]], [graph_instance.space_description[i][1], graph_instance.space_description[j][1]], 'k-', linewidth = 1.5)

        color_node = 'b'
        if (map_coloring is not None):
            color_node = colors.hex2color(available_colors[map_coloring[i]])

        axes.plot(graph_instance.space_description[i][0], graph_instance.space_description[i][1], color = color_node, marker = 'o', markersize = 20)

        if (x_maximum < graph_instance.space_description[i][0]): x_maximum = graph_instance.space_description[i][0]
        if (x_minimum > graph_instance.space_description[i][0]): x_minimum = graph_instance.space_description[i][0]
        if (y_maximum < graph_instance.space_description[i][1]): y_maximum = graph_instance.space_description[i][1]
        if (y_minimum > graph_instance.space_description[i][1]): y_minimum = graph_instance.space_description[i][1]

    plt.xlim(x_minimum - 0.5, x_maximum + 0.5)
    plt.ylim(y_minimum - 0.5, y_maximum + 0.5)

    plt.show()
! @brief Draw graph. @param[in] graph_instance (graph): Graph that should be drawn. @param[in] map_coloring (list): List of color indexes for each vertex. Size of this list should be equal to size of graph (number of vertices). If it's not specified (None) then the graph will be drawn without coloring. @warning The graph can be drawn only if a space representation exists for it.
def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None):
    """
    Get slice of time, z and (if provided) zdot from timeStart to timeEnd.

    Parameters
    ----------
    time : ndarray
        array of time values
    z : ndarray
        array of z values
    zdot : ndarray, optional
        array of zdot (velocity) values.
    timeStart : float, optional
        time at which to start the slice.
        Defaults to beginning of time trace
    timeEnd : float, optional
        time at which to end the slice.
        Defaults to end of time trace

    Returns
    -------
    time_sliced : ndarray
        array of time values from timeStart to timeEnd
    z_sliced : ndarray
        array of z values from timeStart to timeEnd
    zdot_sliced : ndarray
        array of zdot values from timeStart to timeEnd.
        None if zdot not provided
    """
    if timeStart is None:
        timeStart = time[0]
    if timeEnd is None:
        timeEnd = time[-1]

    StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
    EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]

    time_sliced = time[StartIndex:EndIndex]
    z_sliced = z[StartIndex:EndIndex]

    if zdot is not None:
        zdot_sliced = zdot[StartIndex:EndIndex]
    else:
        zdot_sliced = None

    return time_sliced, z_sliced, zdot_sliced
Get slice of time, z and (if provided) zdot from timeStart to timeEnd.

Parameters
----------
time : ndarray
    array of time values
z : ndarray
    array of z values
zdot : ndarray, optional
    array of zdot (velocity) values.
timeStart : float, optional
    time at which to start the slice.
    Defaults to beginning of time trace
timeEnd : float, optional
    time at which to end the slice.
    Defaults to end of time trace

Returns
-------
time_sliced : ndarray
    array of time values from timeStart to timeEnd
z_sliced : ndarray
    array of z values from timeStart to timeEnd
zdot_sliced : ndarray
    array of zdot values from timeStart to timeEnd.
    None if zdot not provided
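A minimal numpy sketch of the same slicing idea, with np.argmin over the absolute difference standing in for the take_closest helper used above; the arrays are synthetic.

import numpy as np

time = np.linspace(0.0, 1.0, 11)          # 0.0, 0.1, ..., 1.0
z = np.sin(2 * np.pi * time)

def slice_by_time(time, z, t_start, t_end):
    # nearest-sample lookup in place of take_closest
    start = int(np.argmin(np.abs(time - t_start)))
    end = int(np.argmin(np.abs(time - t_end)))
    return time[start:end], z[start:end]

t_sliced, z_sliced = slice_by_time(time, z, 0.18, 0.62)
print(t_sliced)   # [0.2 0.3 0.4 0.5]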
def push(self, files, run=None, entity=None, project=None, description=None, force=True, progress=False): """Uploads multiple files to W&B Args: files (list or dict): The filenames to upload run (str, optional): The run to upload to entity (str, optional): The entity to scope this project to. Defaults to wandb models project (str, optional): The name of the project to upload to. Defaults to the one in settings. description (str, optional): The description of the changes force (bool, optional): Whether to prevent push if git has uncommitted changes progress (callable, or stream): If callable, will be called with (chunk_bytes, total_bytes) as argument else if True, renders a progress bar to stream. Returns: The requests library response object """ if project is None: project = self.get_project() if project is None: raise CommError("No project configured.") if run is None: run = self.current_run_id # TODO(adrian): we use a retriable version of self.upload_file() so # will never retry self.upload_urls() here. Instead, maybe we should # make push itself retriable. run_id, result = self.upload_urls( project, files, run, entity, description) responses = [] for file_name, file_info in result.items(): try: # To handle Windows paths # TODO: this doesn't handle absolute paths... normal_name = os.path.join(*file_name.split("/")) open_file = files[normal_name] if isinstance( files, dict) else open(normal_name, "rb") except IOError: print("%s does not exist" % file_name) continue if progress: if hasattr(progress, '__call__'): responses.append(self.upload_file_retry( file_info['url'], open_file, progress)) else: length = os.fstat(open_file.fileno()).st_size with click.progressbar(file=progress, length=length, label='Uploading file: %s' % (file_name), fill_char=click.style('&', fg='green')) as bar: responses.append(self.upload_file_retry( file_info['url'], open_file, lambda bites, _: bar.update(bites))) else: responses.append(self.upload_file_retry(file_info['url'], open_file)) open_file.close() return responses
Uploads multiple files to W&B Args: files (list or dict): The filenames to upload run (str, optional): The run to upload to entity (str, optional): The entity to scope this project to. Defaults to wandb models project (str, optional): The name of the project to upload to. Defaults to the one in settings. description (str, optional): The description of the changes force (bool, optional): Whether to prevent push if git has uncommitted changes progress (callable, or stream): If callable, will be called with (chunk_bytes, total_bytes) as argument else if True, renders a progress bar to stream. Returns: The requests library response object
def remove_config(chassis_id=None, community=None, contact=None, location=None, test=False, commit=True, **kwargs): # pylint: disable=unused-argument ''' Removes a configuration element from the SNMP configuration. :param chassis_id: (optional) Chassis ID :param community: (optional) A dictionary having the following optional keys: - acl (if any policy / ACL need to be set) - mode: rw or ro. Default: ro :param contact: Contact details :param location: Location :param test: Dry run? If set as True, will apply the config, discard and return the changes. Default: False :param commit: Commit? (default: True) Sometimes it is not needed to commit the config immediately after loading the changes. E.g.: a state loads a couple of parts (add / remove / update) and would not be optimal to commit after each operation. Also, from the CLI when the user needs to apply the similar changes before committing, can specify commit=False and will not discard the config. :raise MergeConfigException: If there is an error on the configuration sent. :return: A dictionary having the following keys: - result (bool): if the config was applied successfully. It is `False` only in case of failure. In case there are no changes to be applied and successfully performs all operations it is still `True` and so will be the `already_configured` flag (example below) - comment (str): a message for the user - already_configured (bool): flag to check if there were no changes applied - diff (str): returns the config changes applied CLI Example: .. code-block:: bash salt '*' snmp.remove_config community='abcd' ''' dic = { 'template_name': 'delete_snmp_config', 'test': test, 'commit': commit } if chassis_id: dic['chassis_id'] = chassis_id if community: dic['community'] = community if contact: dic['contact'] = contact if location: dic['location'] = location dic['inherit_napalm_device'] = napalm_device # pylint: disable=undefined-variable return __salt__['net.load_template'](**dic)
Removes a configuration element from the SNMP configuration. :param chassis_id: (optional) Chassis ID :param community: (optional) A dictionary having the following optional keys: - acl (if any policy / ACL need to be set) - mode: rw or ro. Default: ro :param contact: Contact details :param location: Location :param test: Dry run? If set as True, will apply the config, discard and return the changes. Default: False :param commit: Commit? (default: True) Sometimes it is not needed to commit the config immediately after loading the changes. E.g.: a state loads a couple of parts (add / remove / update) and would not be optimal to commit after each operation. Also, from the CLI when the user needs to apply the similar changes before committing, can specify commit=False and will not discard the config. :raise MergeConfigException: If there is an error on the configuration sent. :return: A dictionary having the following keys: - result (bool): if the config was applied successfully. It is `False` only in case of failure. In case there are no changes to be applied and successfully performs all operations it is still `True` and so will be the `already_configured` flag (example below) - comment (str): a message for the user - already_configured (bool): flag to check if there were no changes applied - diff (str): returns the config changes applied CLI Example: .. code-block:: bash salt '*' snmp.remove_config community='abcd'
def create(self, timeout=values.unset, priority=values.unset, task_channel=values.unset, workflow_sid=values.unset, attributes=values.unset): """ Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance """ data = values.of({ 'Timeout': timeout, 'Priority': priority, 'TaskChannel': task_channel, 'WorkflowSid': workflow_sid, 'Attributes': attributes, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TaskInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance
def run(dest, router, args, deadline=None, econtext=None):
    """
    Run the command specified by `args` with ``PATH`` arranged so that any
    attempt by the command to invoke SSH to execute a remote program is
    redirected to use mitogen to execute that program using the context
    `dest` instead.

    :param mitogen.core.Context dest:
        The destination context to execute the SSH command line in.

    :param mitogen.core.Router router:

    :param list[str] args:
        Command line arguments for local program, e.g.
        ``['rsync', '/tmp', 'remote:/tmp']``

    :returns:
        Exit status of the child process.
    """
    if econtext is not None:
        mitogen.parent.upgrade_router(econtext)

    context_id = router.allocate_id()
    fakessh = mitogen.parent.Context(router, context_id)
    fakessh.name = u'fakessh.%d' % (context_id,)

    sock1, sock2 = socket.socketpair()

    stream = mitogen.core.Stream(router, context_id)
    stream.name = u'fakessh'
    stream.accept(sock1.fileno(), sock1.fileno())
    router.register(fakessh, stream)

    # Held in socket buffer until process is booted.
    fakessh.call_async(_fakessh_main, dest.context_id)

    tmp_path = tempfile.mkdtemp(prefix='mitogen_fakessh')
    try:
        ssh_path = os.path.join(tmp_path, 'ssh')
        fp = open(ssh_path, 'w')
        try:
            fp.write('#!%s\n' % (mitogen.parent.get_sys_executable(),))
            fp.write(inspect.getsource(mitogen.core))
            fp.write('\n')
            fp.write('ExternalContext(%r).main()\n' % (
                _get_econtext_config(context, sock2),
            ))
        finally:
            fp.close()

        os.chmod(ssh_path, int('0755', 8))
        env = os.environ.copy()
        env.update({
            'PATH': '%s:%s' % (tmp_path, env.get('PATH', '')),
            'ARGV0': mitogen.parent.get_sys_executable(),
            'SSH_PATH': ssh_path,
        })

        proc = subprocess.Popen(args, env=env)
        return proc.wait()
    finally:
        shutil.rmtree(tmp_path)
Run the command specified by `args` with ``PATH`` arranged so that any attempt by the command to invoke SSH to execute a remote program is redirected to use mitogen to execute that program using the context `dest` instead.

:param mitogen.core.Context dest:
    The destination context to execute the SSH command line in.

:param mitogen.core.Router router:

:param list[str] args:
    Command line arguments for local program, e.g.
    ``['rsync', '/tmp', 'remote:/tmp']``

:returns:
    Exit status of the child process.
def set_resize_parameters( self, degrad=6, labels=None, resize_mm=None, resize_voxel_number=None, ): """ set_input_data() should be called before :param degrad: :param labels: :param resize_mm: :param resize_voxel_number: :return: """ # from . import show_segmentation logger.debug("set_resize_parameters(\ndegrad={}, \nlabels={}\nresize_mm={}\nresize_voxel_number={}".format( degrad, labels, resize_mm, resize_voxel_number )) degrad = int(degrad) # import ipdb; ipdb.set_trace() # return voxelsize_mm, degrad self.degrad = degrad self.labels = labels segmentation = self._select_labels(self.segmentation, labels) if resize_voxel_number is not None: nvoxels = np.sum(segmentation > 0) volume = nvoxels * np.prod(self.voxelsize_mm) voxel_volume = volume / float(resize_voxel_number) resize_mm = voxel_volume ** (1.0 / 3.0) else: resize_mm = np.mean(self.voxelsize_mm) # self.working_voxelsize_mm = voxelsize_mm # self.working_segmentation = segmentation if np.sum(np.abs(self.resize_mm_1d - resize_mm)) != 0: # resize parameter changed self.resized_segmentation = None self.resized_binar_segmentation = None self.resize_mm_1d = resize_mm
set_input_data() should be called before :param degrad: :param labels: :param resize_mm: :param resize_voxel_number: :return:
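A short sketch of the resize_voxel_number arithmetic above: the target isotropic voxel edge is the cube root of (segmented volume / requested voxel count). The segmentation and spacing below are invented for illustration.

import numpy as np

voxelsize_mm = np.array([1.0, 0.5, 0.5])        # current voxel spacing
segmentation = np.zeros((40, 40, 40), dtype=np.uint8)
segmentation[10:30, 10:30, 10:30] = 1           # a 20x20x20 voxel block of label 1

resize_voxel_number = 1000                      # desired number of voxels after resampling

nvoxels = np.sum(segmentation > 0)              # 8000 labelled voxels
volume = nvoxels * np.prod(voxelsize_mm)        # 2000 mm^3
voxel_volume = volume / float(resize_voxel_number)
resize_mm = voxel_volume ** (1.0 / 3.0)         # ~1.26 mm isotropic edge

print(nvoxels, volume, round(resize_mm, 3))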
def load_policy_config(filters=None, prepend=True, pillar_key='acl', pillarenv=None, saltenv=None, merge_pillar=True, only_lower_merge=False, revision_id=None, revision_no=None, revision_date=True, revision_date_format='%Y/%m/%d', test=False, commit=True, debug=False, **kwargs): # pylint: disable=unused-argument ''' Generate and load the configuration of the whole policy. .. note:: The order of the filters and their terms is very important. The configuration loaded on the device respects the order defined in the ``filters`` and/or inside the pillar. When merging the ``filters`` with the pillar data, consider the ``prepend`` argument to make sure the order is correct! filters List of filters for this policy. If not specified or empty, will try to load the configuration from the pillar, unless ``merge_pillar`` is set as ``False``. prepend: ``True`` When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended at the beginning, while existing ones will preserve the position. To add the new filters at the end of the list, set this argument to ``False``. pillar_key: ``acl`` The key in the pillar containing the default attributes values. Default: ``acl``. pillarenv Query the master to generate fresh pillar data on the fly, specifically from the requested pillar environment. saltenv Included only for compatibility with :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored. merge_pillar: ``True`` Merge the CLI variables with the pillar. Default: ``True``. The merge logic depends on the ``prepend`` argument and the CLI has higher priority than the pillar. only_lower_merge: ``False`` Specify if it should merge only the filters and terms fields. Otherwise it will try to merge everything at the policy level. Default: ``False``. This option requires ``merge_pillar``, otherwise it is ignored. revision_id Add a comment in the policy config having the description for the changes applied. revision_no The revision count. revision_date: ``True`` Boolean flag: display the date when the policy configuration was generated. Default: ``True``. revision_date_format: ``%Y/%m/%d`` The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>). test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: ``True`` Commit? Default: ``True``. debug: ``False`` Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`. CLI Example: .. code-block:: bash salt 'edge01.flw01' netacl.load_policy_config debug=True Output Example: .. code-block:: text edge01.flw01: ---------- already_configured: False comment: diff: --- +++ @@ -1228,9 +1228,24 @@ ! +ipv4 access-list my-filter + 10 remark my-term + 20 deny tcp host 1.2.3.4 eq 1234 any + 30 deny udp host 1.2.3.4 eq 1234 any + 40 deny tcp host 1.2.3.4 eq 1235 any + 50 deny udp host 1.2.3.4 eq 1235 any + 60 remark my-other-term + 70 permit tcp any range 5678 5680 any +! +! +ipv4 access-list block-icmp + 10 remark first-term + 20 deny icmp any any ! loaded_config: ! 
$Date: 2017/03/22 $ no ipv4 access-list my-filter ipv4 access-list my-filter remark my-term deny tcp host 1.2.3.4 eq 1234 any deny udp host 1.2.3.4 eq 1234 any deny tcp host 1.2.3.4 eq 1235 any deny udp host 1.2.3.4 eq 1235 any remark my-other-term permit tcp any range 5678 5680 any exit no ipv4 access-list block-icmp ipv4 access-list block-icmp remark first-term deny icmp any any exit result: True The policy configuration has been loaded from the pillar, having the following structure: .. code-block:: yaml acl: - my-filter: terms: - my-term: source_port: - 1234 - 1235 protocol: - tcp - udp source_address: 1.2.3.4 action: reject - my-other-term: source_port: - [5678, 5680] protocol: tcp action: accept - block-icmp: terms: - first-term: protocol: - icmp action: reject ''' if not filters: filters = [] platform = _get_capirca_platform() policy_config = __salt__['capirca.get_policy_config'](platform, filters=filters, prepend=prepend, pillar_key=pillar_key, pillarenv=pillarenv, saltenv=saltenv, merge_pillar=merge_pillar, only_lower_merge=only_lower_merge, revision_id=revision_id, revision_no=revision_no, revision_date=revision_date, revision_date_format=revision_date_format) return __salt__['net.load_config'](text=policy_config, test=test, commit=commit, debug=debug, inherit_napalm_device=napalm_device)
Generate and load the configuration of the whole policy. .. note:: The order of the filters and their terms is very important. The configuration loaded on the device respects the order defined in the ``filters`` and/or inside the pillar. When merging the ``filters`` with the pillar data, consider the ``prepend`` argument to make sure the order is correct! filters List of filters for this policy. If not specified or empty, will try to load the configuration from the pillar, unless ``merge_pillar`` is set as ``False``. prepend: ``True`` When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended at the beginning, while existing ones will preserve the position. To add the new filters at the end of the list, set this argument to ``False``. pillar_key: ``acl`` The key in the pillar containing the default attributes values. Default: ``acl``. pillarenv Query the master to generate fresh pillar data on the fly, specifically from the requested pillar environment. saltenv Included only for compatibility with :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored. merge_pillar: ``True`` Merge the CLI variables with the pillar. Default: ``True``. The merge logic depends on the ``prepend`` argument and the CLI has higher priority than the pillar. only_lower_merge: ``False`` Specify if it should merge only the filters and terms fields. Otherwise it will try to merge everything at the policy level. Default: ``False``. This option requires ``merge_pillar``, otherwise it is ignored. revision_id Add a comment in the policy config having the description for the changes applied. revision_no The revision count. revision_date: ``True`` Boolean flag: display the date when the policy configuration was generated. Default: ``True``. revision_date_format: ``%Y/%m/%d`` The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>). test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: ``True`` Commit? Default: ``True``. debug: ``False`` Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`. CLI Example: .. code-block:: bash salt 'edge01.flw01' netacl.load_policy_config debug=True Output Example: .. code-block:: text edge01.flw01: ---------- already_configured: False comment: diff: --- +++ @@ -1228,9 +1228,24 @@ ! +ipv4 access-list my-filter + 10 remark my-term + 20 deny tcp host 1.2.3.4 eq 1234 any + 30 deny udp host 1.2.3.4 eq 1234 any + 40 deny tcp host 1.2.3.4 eq 1235 any + 50 deny udp host 1.2.3.4 eq 1235 any + 60 remark my-other-term + 70 permit tcp any range 5678 5680 any +! +! +ipv4 access-list block-icmp + 10 remark first-term + 20 deny icmp any any ! loaded_config: ! 
$Date: 2017/03/22 $ no ipv4 access-list my-filter ipv4 access-list my-filter remark my-term deny tcp host 1.2.3.4 eq 1234 any deny udp host 1.2.3.4 eq 1234 any deny tcp host 1.2.3.4 eq 1235 any deny udp host 1.2.3.4 eq 1235 any remark my-other-term permit tcp any range 5678 5680 any exit no ipv4 access-list block-icmp ipv4 access-list block-icmp remark first-term deny icmp any any exit result: True The policy configuration has been loaded from the pillar, having the following structure: .. code-block:: yaml acl: - my-filter: terms: - my-term: source_port: - 1234 - 1235 protocol: - tcp - udp source_address: 1.2.3.4 action: reject - my-other-term: source_port: - [5678, 5680] protocol: tcp action: accept - block-icmp: terms: - first-term: protocol: - icmp action: reject
def promote(self, name): """Promote to a PartitionName by combining with a bundle Name.""" return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items())))
Promote to a PartitionName by combining with a bundle Name.
def scheduled_status_delete(self, id): """ Deletes a scheduled status. """ id = self.__unpack_id(id) url = '/api/v1/scheduled_statuses/{0}'.format(str(id)) self.__api_request('DELETE', url)
Deletes a scheduled status.
def create_api_key(self, body, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`_ :arg body: The api key request to create an API key :arg refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., valid choices are: 'true', 'false', 'wait_for' """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "PUT", "/_security/api_key", params=params, body=body )
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`_ :arg body: The api key request to create an API key :arg refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., valid choices are: 'true', 'false', 'wait_for'
async def process_ltd_doc(session, github_api_token, ltd_product_url,
                          mongo_collection=None):
    """Ingest any kind of LSST document hosted on LSST the Docs from its
    source.

    Parameters
    ----------
    session : `aiohttp.ClientSession`
        Your application's aiohttp client session.
        See http://aiohttp.readthedocs.io/en/stable/client.html.
    github_api_token : `str`
        A GitHub personal API token. See the `GitHub personal access token
        guide`_.
    ltd_product_url : `str`
        URL of the technote's product resource in the LTD Keeper API.
    mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
        MongoDB collection. This should be the common MongoDB collection for
        LSST projectmeta JSON-LD records. If provided, the JSON-LD is
        upserted into the MongoDB collection.

    Returns
    -------
    metadata : `dict`
        JSON-LD-formatted dictionary.

    .. _`GitHub personal access token guide`: https://ls.st/41d
    """
    logger = logging.getLogger(__name__)

    ltd_product_data = await get_ltd_product(session, url=ltd_product_url)

    # Ensure the LTD product is a document
    product_name = ltd_product_data['slug']
    doc_handle_match = DOCUMENT_HANDLE_PATTERN.match(product_name)
    if doc_handle_match is None:
        logger.debug('%s is not a document repo', product_name)
        return

    # Figure out the format of the document by probing for metadata files.
    # reStructuredText-based Sphinx documents have a metadata.yaml file.
    try:
        return await process_sphinx_technote(session,
                                             github_api_token,
                                             ltd_product_data,
                                             mongo_collection=mongo_collection)
    except NotSphinxTechnoteError:
        # Catch error so we can try the next format
        logger.debug('%s is not a Sphinx-based technote.', product_name)
    except Exception:
        # Something bad happened trying to process the technote.
        # Log and just move on.
        logger.exception('Unexpected error trying to process %s', product_name)
        return

    # Try interpreting it as a Lander page with a /metadata.jsonld document
    try:
        return await process_lander_page(session,
                                         github_api_token,
                                         ltd_product_data,
                                         mongo_collection=mongo_collection)
    except NotLanderPageError:
        # Catch error so we can try the next format
        logger.debug('%s is not a Lander page with a metadata.jsonld file.',
                     product_name)
    except Exception:
        # Something bad happened; log and move on
        logger.exception('Unexpected error trying to process %s', product_name)
        return
Ingest any kind of LSST document hosted on LSST the Docs from its source.

Parameters
----------
session : `aiohttp.ClientSession`
    Your application's aiohttp client session.
    See http://aiohttp.readthedocs.io/en/stable/client.html.
github_api_token : `str`
    A GitHub personal API token. See the `GitHub personal access token
    guide`_.
ltd_product_url : `str`
    URL of the technote's product resource in the LTD Keeper API.
mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional
    MongoDB collection. This should be the common MongoDB collection for
    LSST projectmeta JSON-LD records. If provided, the JSON-LD is upserted
    into the MongoDB collection.

Returns
-------
metadata : `dict`
    JSON-LD-formatted dictionary.

.. _`GitHub personal access token guide`: https://ls.st/41d
def add_header_callback(self, cb, port, channel, port_mask=0xFF, channel_mask=0xFF):
    """
    Add a callback for a specific port/channel header, with optional port
    and channel masks so that a single callback can match multiple headers.
    """
    self.cb.append(_CallbackContainer(port, port_mask, channel, channel_mask, cb))
Add a callback for a specific port/channel header, with optional port and channel masks so that a single callback can match multiple headers.
def _check_required_settings(batches): """Ensure that all settings required at genesis are set.""" required_settings = [ 'sawtooth.consensus.algorithm.name', 'sawtooth.consensus.algorithm.version'] for batch in batches: for txn in batch.transactions: txn_header = TransactionHeader() txn_header.ParseFromString(txn.header) if txn_header.family_name == 'sawtooth_settings': settings_payload = SettingsPayload() settings_payload.ParseFromString(txn.payload) if settings_payload.action == SettingsPayload.PROPOSE: proposal = SettingProposal() proposal.ParseFromString(settings_payload.data) if proposal.setting in required_settings: required_settings.remove(proposal.setting) if required_settings: raise CliException( 'The following setting(s) are required at genesis, but were not ' 'included in the genesis batches: {}'.format(required_settings))
Ensure that all settings required at genesis are set.
def kernel_command_line(self, kernel_command_line): """ Sets the kernel command line for this QEMU VM. :param kernel_command_line: QEMU kernel command line """ log.info('QEMU VM "{name}" [{id}] has set the QEMU kernel command line to {kernel_command_line}'.format(name=self._name, id=self._id, kernel_command_line=kernel_command_line)) self._kernel_command_line = kernel_command_line
Sets the kernel command line for this QEMU VM. :param kernel_command_line: QEMU kernel command line
def get_plugin(self, service_provider=None, auth_url=None, plugins=None,
               **kwargs):
    """Authenticate using keystone to keystone federation.

    This plugin uses other v3 plugins to authenticate a user to an
    identity provider in order to authenticate the user to a service
    provider.

    :param service_provider: service provider ID
    :param auth_url: Keystone auth url
    :param plugins: list of openstack_auth plugins to check
    :returns: Keystone2Keystone keystone auth plugin
    """
    # Avoid mutable default arg for plugins
    plugins = plugins or []

    # service_provider being None prevents infinite recursion
    if utils.get_keystone_version() < 3 or not service_provider:
        return None

    keystone_idp_id = getattr(settings, 'KEYSTONE_PROVIDER_IDP_ID',
                              'localkeystone')
    if service_provider == keystone_idp_id:
        return None

    for plugin in plugins:
        unscoped_idp_auth = plugin.get_plugin(plugins=plugins,
                                              auth_url=auth_url, **kwargs)
        if unscoped_idp_auth:
            break
    else:
        LOG.debug('Could not find base authentication backend for '
                  'K2K plugin with the provided credentials.')
        return None

    idp_exception = None
    scoped_idp_auth = None
    unscoped_auth_ref = base.BasePlugin.get_access_info(
        self, unscoped_idp_auth)
    try:
        scoped_idp_auth, __ = self.get_project_scoped_auth(
            unscoped_idp_auth, unscoped_auth_ref,
            recent_project=kwargs['recent_project'])
    except exceptions.KeystoneAuthException as idp_excp:
        idp_exception = idp_excp

    if not scoped_idp_auth or idp_exception:
        msg = _('Identity provider authentication failed.')
        raise exceptions.KeystoneAuthException(msg)

    session = utils.get_session()

    if scoped_idp_auth.get_sp_auth_url(session, service_provider) is None:
        msg = _('Could not find service provider ID on keystone.')
        raise exceptions.KeystoneAuthException(msg)

    unscoped_auth = v3_auth.Keystone2Keystone(
        base_plugin=scoped_idp_auth,
        service_provider=service_provider)
    return unscoped_auth
Authenticate using keystone to keystone federation.

This plugin uses other v3 plugins to authenticate a user to an identity provider in order to authenticate the user to a service provider.

:param service_provider: service provider ID
:param auth_url: Keystone auth url
:param plugins: list of openstack_auth plugins to check
:returns: Keystone2Keystone keystone auth plugin
def v1(self): """Return voltage phasors at the "from buses" (bus1)""" Vm = self.system.dae.y[self.v] Va = self.system.dae.y[self.a] return polar(Vm[self.a1], Va[self.a1])
Return voltage phasors at the "from buses" (bus1)
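A small numpy sketch of forming the complex bus-voltage phasors described above; here polar is assumed to mean Vm * exp(j*Va), which is the usual convention, and the magnitudes, angles and bus indices are made up.

import numpy as np

def polar(magnitude, angle):
    # assumed helper: rectangular phasor from polar coordinates
    return magnitude * np.exp(1j * angle)

Vm = np.array([1.02, 0.98, 1.00])    # per-unit voltage magnitudes of all buses
Va = np.array([0.0, -0.05, 0.03])    # voltage angles in radians
a1 = np.array([0, 2])                # "from bus" indices of two lines

v1 = polar(Vm[a1], Va[a1])
print(v1)                            # complex phasors at buses 0 and 2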
def revoke_permission(user, permission_name):
    """
    Revoke a specified permission from a user.

    Permissions are only revoked if they are in the scope of any of the
    user's roles. If the permission is out of scope, a
    RolePermissionScopeException is raised.
    """
    roles = get_user_roles(user)

    for role in roles:
        if permission_name in role.permission_names_list():
            permission = get_permission(permission_name)
            user.user_permissions.remove(permission)
            return

    raise RolePermissionScopeException(
        "This permission isn't in the scope of "
        "any of this user's roles.")
Revoke a specified permission from a user.

Permissions are only revoked if they are in the scope of any of the user's roles. If the permission is out of scope, a RolePermissionScopeException is raised.
def _downgrade_v4(op): """ Downgrades assets db by copying the `exchange_full` column to `exchange`, then dropping the `exchange_full` column. """ op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') op.execute("UPDATE equities SET exchange = exchange_full") with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('exchange_full') op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol'])
Downgrades assets db by copying the `exchange_full` column to `exchange`, then dropping the `exchange_full` column.
def Drop(self: Iterable, n): """ [ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ] """ con = tuple(self) n = len(con) - n if n <= 0: yield from con else: for i, e in enumerate(con): if i == n: break yield e
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ]
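The spec-style docstring above says the result keeps everything except the last n items; a standalone generator with the same behaviour, checked against that example:

def drop_last(iterable, n):
    # keep everything except the last n items
    con = tuple(iterable)
    keep = len(con) - n
    if keep <= 0:
        # mirror the implementation above, which yields the whole tuple here
        yield from con
    else:
        for i, e in enumerate(con):
            if i == keep:
                break
            yield e

assert list(drop_last([1, 2, 3, 4, 5], 3)) == [1, 2]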
def _gen_records(self, record, zone_id, creating=False): ''' Turns an octodns.Record into one or more `_Route53*`s ''' return _Route53Record.new(self, record, zone_id, creating)
Turns an octodns.Record into one or more `_Route53*`s
def plus_dora(tile, dora_indicators): """ :param tile: int 136 tiles format :param dora_indicators: array of 136 tiles format :return: int count of dora """ tile_index = tile // 4 dora_count = 0 for dora in dora_indicators: dora //= 4 # sou, pin, man if tile_index < EAST: # with indicator 9, dora will be 1 if dora == 8: dora = -1 elif dora == 17: dora = 8 elif dora == 26: dora = 17 if tile_index == dora + 1: dora_count += 1 else: if dora < EAST: continue dora -= 9 * 3 tile_index_temp = tile_index - 9 * 3 # dora indicator is north if dora == 3: dora = -1 # dora indicator is hatsu if dora == 6: dora = 3 if tile_index_temp == dora + 1: dora_count += 1 return dora_count
:param tile: int 136 tiles format :param dora_indicators: array of 136 tiles format :return: int count of dora
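A worked example of the counting above, assuming the plus_dora function from the previous entry is in scope and that EAST is the 34-format index of the East wind tile (27), as in the mahjong library this code appears to come from.

EAST = 27   # assumed constant: 34-format index of the East wind

# the indicator is 1 man (136-format tiles 0-3), so the dora is 2 man (tiles 4-7)
dora_indicators = [0]
print(plus_dora(4, dora_indicators))   # 1 -> a 2 man counts as one dora
print(plus_dora(8, dora_indicators))   # 0 -> a 3 man is not a dora

# an indicator of 9 man (tile index 8) wraps around, making 1 man the dora
print(plus_dora(0, [8 * 4]))           # 1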
def read_numpy_text (self, dfcols=None, **kwargs):
    """Read this path into a :class:`numpy.ndarray` as a text file using
    :func:`numpy.loadtxt`.

    In normal conditions the returned array is two-dimensional, with the
    first axis spanning the rows in the file and the second axis columns
    (but see the *unpack* and *dfcols* keywords).

    If *dfcols* is not None, the return value is a
    :class:`pandas.DataFrame` constructed from the array. *dfcols* should
    be an iterable of column names, one for each of the columns returned
    by the :func:`numpy.loadtxt` call. For convenience, if *dfcols* is a
    single string, it will be turned into an iterable by a call to
    :func:`str.split`.

    The remaining *kwargs* are passed to :func:`numpy.loadtxt`; they
    likely are:

    dtype : data type
      The data type of the resulting array.
    comments : str
      If specified, a character indicating the start of a comment.
    delimiter : str
      The string that separates values. If unspecified, any span of
      whitespace works.
    converters : dict
      A dictionary mapping zero-based column *number* to a function that
      will turn the cell text into a number.
    skiprows : int (default=0)
      Skip this many lines at the top of the file
    usecols : sequence
      Which columns to keep, by number, starting at zero.
    unpack : bool (default=False)
      If true, the return value is transposed to be of shape ``(cols, rows)``.
    ndmin : int (default=0)
      The returned array will have at least this many dimensions; otherwise
      mono-dimensional axes will be squeezed.

    """
    import numpy as np

    if dfcols is not None:
        kwargs['unpack'] = True

    retval = np.loadtxt (text_type (self), **kwargs)

    if dfcols is not None:
        import pandas as pd
        if isinstance (dfcols, six.string_types):
            dfcols = dfcols.split ()
        retval = pd.DataFrame (dict (zip (dfcols, retval)))

    return retval
Read this path into a :class:`numpy.ndarray` as a text file using :func:`numpy.loadtxt`.

In normal conditions the returned array is two-dimensional, with the first axis spanning the rows in the file and the second axis columns (but see the *unpack* and *dfcols* keywords).

If *dfcols* is not None, the return value is a :class:`pandas.DataFrame` constructed from the array. *dfcols* should be an iterable of column names, one for each of the columns returned by the :func:`numpy.loadtxt` call. For convenience, if *dfcols* is a single string, it will be turned into an iterable by a call to :func:`str.split`.

The remaining *kwargs* are passed to :func:`numpy.loadtxt`; they likely are:

dtype : data type
  The data type of the resulting array.
comments : str
  If specified, a character indicating the start of a comment.
delimiter : str
  The string that separates values. If unspecified, any span of whitespace works.
converters : dict
  A dictionary mapping zero-based column *number* to a function that will turn the cell text into a number.
skiprows : int (default=0)
  Skip this many lines at the top of the file
usecols : sequence
  Which columns to keep, by number, starting at zero.
unpack : bool (default=False)
  If true, the return value is transposed to be of shape ``(cols, rows)``.
ndmin : int (default=0)
  The returned array will have at least this many dimensions; otherwise mono-dimensional axes will be squeezed.
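A self-contained illustration of the unpack + dfcols path described above, reading from an in-memory text buffer instead of a file on disk.

import io

import numpy as np
import pandas as pd

text = "1 10.0\n2 20.5\n3 30.1\n"

# unpack=True transposes the result to shape (columns, rows), so each
# unpacked column lines up with one of the requested DataFrame column names
cols = np.loadtxt(io.StringIO(text), unpack=True)
df = pd.DataFrame(dict(zip(['sample', 'value'], cols)))
print(df)
#    sample  value
# 0     1.0   10.0
# 1     2.0   20.5
# 2     3.0   30.1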