code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def clean_bytes(line):
    """Decode a byte line and strip shell directives.

    Decodes UTF-8, drops carriage returns and trailing newlines, removes
    ANSI escape sequences, and splits double backticks with a zero-width
    space so they cannot be parsed as markup.
    """
    decoded = line.decode('utf-8').replace('\r', '').strip('\n')
    without_ansi = re.sub(r'\x1b[^m]*m', '', decoded)
    return without_ansi.replace("``", "`\u200b`").strip('\n')
Cleans a byte sequence of shell directives and decodes it.
def get_index(self, filename):
    """Return the proxy-model index associated with *filename*.

    Returns None when the filesystem model produces no valid index for
    the file, or when the index belongs to a different model.
    """
    source_index = self.fsmodel.index(filename)
    if not (source_index.isValid() and source_index.model() is self.fsmodel):
        return None
    return self.proxymodel.mapFromSource(source_index)
Return index associated with filename
def generate_and_merge_schemas(samples):
    """Generate a schema for every sample and merge them into one.

    NOTE(review): the first sample's schema seeds the merge and is then
    merged again inside the loop — presumably merge_schema is
    idempotent; behavior kept from the original.
    """
    merged = generate_schema_for_sample(next(iter(samples)))
    for sample in samples:
        sample_schema = generate_schema_for_sample(sample)
        merged = merge_schema(merged, sample_schema)
    return merged
Iterates through the given samples, generating schemas and merging them, returning the resulting merged schema.
def all_elements_equal(value):
    """Check whether all elements in *value* are equal.

    Args:
        value (ndarray or number): a numpy array or a single number.

    Returns:
        bool: True when every element equals the first one; trivially
        True for a scalar input.
    """
    if is_scalar(value):
        return True
    reference = value.flatten()[0]
    return np.array(value == reference).all()
Checks if all elements in the given value are equal to each other. If the input is a single value the result is trivial. If not, we compare all the values to see if they are exactly the same. Args: value (ndarray or number): a numpy array or a single number. Returns: bool: true if all elements are equal to each other, false otherwise
def _parse_scale(scale_exp):
    """Parse a scale expression such as ``"days{1 2 3}"``.

    :param scale_exp: expression of the form ``scale{ranges}``.
    :return: tuple ``(scale, ranges)`` where *ranges* is the
        whitespace-split content of the braces.
    :raises InvalidFormat: when the expression cannot be parsed or the
        scale is not listed in ``SCALES``.
    """
    # Raw strings for the regexes (were plain strings with escapes).
    match = re.search(r"(\w+?)\{(.*?)\}", scale_exp)
    if match is None:
        raise InvalidFormat('Unable to parse the given time period.')
    scale = match.group(1)
    range_str = match.group(2)  # renamed: no longer shadows builtin `range`
    if scale not in SCALES:
        raise InvalidFormat('%s is not a valid scale.' % scale)
    ranges = re.split(r"\s", range_str)
    return scale, ranges
Parses a scale expression and returns the scale, and a list of ranges.
def pipe_value(self, message):
    """Send a new value into the ws pipe as a JSON payload."""
    self.send(json.dumps(message))
Send a new value into the ws pipe
async def get(self, cmd, daap_data=True, timeout=None, **args):
    """Perform a DAAP GET command.

    :param cmd: command/path used to build the request URL
    :param daap_data: whether the response is parsed as DAAP data
    :param timeout: optional timeout forwarded to the HTTP layer
    :param args: extra values interpolated into the URL
    """
    def _get_request():
        # Deferred as a closure so _do() can re-issue the request.
        # NOTE(review): *args unpacks the keyword *names* (dict keys)
        # positionally into _mkurl — looks odd; verify against _mkurl.
        return self.http.get_data(
            self._mkurl(cmd, *args),
            headers=_DMAP_HEADERS,
            timeout=timeout)
    await self._assure_logged_in()
    return await self._do(_get_request, is_daap=daap_data)
Perform a DAAP GET command.
def clear_published_date(self):
    """Remove the published date, restoring the stored default.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Hoisted: the metadata object was fetched twice in the original.
    metadata = self.get_published_date_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['publishedDate'] = self._published_date_default
Removes the published date. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def find_next(lines, find_str, start_index):
    """Find the next instance of *find_str* in *lines* from *start_index*.

    :param lines: lines to look through
    :param find_str: string (regex or literal) or Invert to look for
    :param start_index: index to start from
    :return: tuple ``(matched_normally, index, line)``
    :raises TypeError: for unsupported message types
    :raises LookupError: when a normal search finds nothing
    """
    # Fixed: `basestring` is Python 2 only and raises NameError on
    # Python 3; `str` covers the same text inputs there.
    if isinstance(find_str, str):
        mode = 'normal'
        message = find_str
    elif isinstance(find_str, Invert):
        mode = 'invert'
        message = str(find_str)
    else:
        raise TypeError("Unsupported message type")
    for i in range(start_index, len(lines)):
        # Try a regex match first, then a plain substring match.
        if re.search(message, lines[i]):
            return mode == 'normal', i, lines[i]
        elif message in lines[i]:
            return mode == 'normal', i, lines[i]
    if mode == 'invert':
        # Inverted searches succeed when the pattern never appears.
        return True, len(lines), None
    raise LookupError("Not found")
Find the next instance of find_str from lines starting from start_index. :param lines: Lines to look through :param find_str: String or Invert to look for :param start_index: Index to start from :return: (boolean, index, line)
def stop_daemon(self, payload=None):
    """Kill current processes and initiate daemon shutdown.

    The daemon shuts down after a final check on all killed processes.
    """
    self.process_handler.kill_all(signals['9'], True)
    self.running = False
    answer = {'message': 'Pueue daemon shutting down',
              'status': 'success'}
    return answer
Kill current processes and initiate daemon shutdown. The daemon will shut down after a last check on all killed processes.
def BuildDefaultValue(self, value_cls):
    """Render the default value of a given class.

    Args:
        value_cls: class whose no-argument default will be constructed.

    Returns:
        An initialized default value.

    Raises:
        DefaultValueError: if construction fails.
    """
    try:
        default = value_cls()
    except Exception as error:
        logging.exception(error)
        message = "Can't create default for value %s: %s" % (
            value_cls.__name__, error)
        raise DefaultValueError(message)
    return default
Renders default value of a given class. Args: value_cls: Default value of this class will be rendered. This class has to be (or to be a subclass of) a self.value_class (i.e. a class that this renderer is capable of rendering). Returns: An initialized default value. Raises: DefaultValueError: if something goes wrong.
def close(self):
    """Close the plot and release its memory."""
    from matplotlib.pyplot import close
    # Reset each axis to linear scale and clear it, last axis first —
    # presumably to release scale-related resources; behavior kept.
    for ax in self.axes[::-1]:
        ax.set_xscale('linear')
        ax.set_yscale('linear')
        ax.cla()
    close(self)
Close the plot and release its memory.
def from_sds(var, *args, **kwargs):
    """Create a dask array from a SD dataset.

    Patches ``dtype`` and ``shape`` onto the dataset object so
    ``da.from_array`` can treat it as array-like.
    """
    # info()[3] is mapped through HTYPE_TO_DTYPE, info()[2] holds the
    # dims — assumes pyhdf-style SDS.info() layout; TODO confirm.
    var.__dict__['dtype'] = HTYPE_TO_DTYPE[var.info()[3]]
    shape = var.info()[2]
    # A scalar dims value is wrapped into a tuple for dask.
    var.__dict__['shape'] = shape if isinstance(shape, (tuple, list)) else tuple(shape)
    return da.from_array(var, *args, **kwargs)
Create a dask array from a SD dataset.
def close(self) -> None:
    """Close the gRPC channel and free the acquired resources.

    Using a closed client is not supported.
    """
    self._channel.close()
    self._channel = None
    self._stub_v1 = None
    self._stub_v2 = None
Close the gRPC channel and free the acquired resources. Using a closed client is not supported.
def retract(self, idx_or_declared_fact):
    """Retract a specific fact, using its index or instance.

    .. note:: updates the agenda when the engine is not running.
    """
    self.facts.retract(idx_or_declared_fact)
    if self.running:
        return
    added, removed = self.get_activations()
    self.strategy.update_agenda(self.agenda, added, removed)
Retracts a specific fact, using its index .. note:: This updates the agenda
def run_in_terminal(self, func, render_cli_done=False, cooked_mode=True):
    """Run *func* on the terminal above the prompt.

    Hides (or finalizes) the prompt, runs the callable — which may
    safely write to the terminal — then re-renders the prompt so the
    function's output scrolls above it.

    :param func: the callable to execute.
    :param render_cli_done: when True, render the interface in the
        'Done' state first; when False, erase the interface first.
    :param cooked_mode: when True (default), switch input to cooked
        mode while executing the function.
    :returns: the result of ``func``.
    """
    if render_cli_done:
        # Render a final 'Done' frame, then reset the renderer.
        self._return_value = True
        self._redraw()
        self.renderer.reset()
    else:
        self.renderer.erase()
    self._return_value = None
    if cooked_mode:
        # Restore normal terminal input while the callable runs.
        with self.input.cooked_mode():
            result = func()
    else:
        result = func()
    # Re-render the prompt below the function's output.
    self.renderer.reset()
    self.renderer.request_absolute_cursor_position()
    self._redraw()
    return result
Run function on the terminal above the prompt. What this does is first hiding the prompt, then running this callable (which can safely output to the terminal), and then again rendering the prompt which causes the output of this function to scroll above the prompt. :param func: The callable to execute. :param render_cli_done: When True, render the interface in the 'Done' state first, then execute the function. If False, erase the interface first. :param cooked_mode: When True (the default), switch the input to cooked mode while executing the function. :returns: the result of `func`.
def create_session(self, ticket, payload=None, expires=None):
    """Create a session record from a service ticket.

    :param ticket: the CAS service ticket used as the session key
    :param payload: optional data stored with the session
    :param expires: optional expiry passed to the storage adapter
    """
    # Guard against misconfigured storage backends.
    assert isinstance(self.session_storage_adapter, CASSessionAdapter)
    logging.debug('[CAS] Creating session for ticket {}'.format(ticket))
    self.session_storage_adapter.create(
        ticket,
        payload=payload,
        expires=expires,
    )
Create a session record from a service ticket.
def save_model(self, filename, num_iteration=None, start_iteration=0):
    """Save Booster to file.

    Parameters
    ----------
    filename : string
        Filename to save Booster.
    num_iteration : int or None, optional (default=None)
        Index of the iteration that should be saved. If None, the best
        iteration (if it exists) is saved; otherwise all iterations.
        If <= 0, all iterations are saved.
    start_iteration : int, optional (default=0)
        Start index of the iteration that should be saved.

    Returns
    -------
    self : Booster
        Returns self.
    """
    if num_iteration is None:
        num_iteration = self.best_iteration
    _safe_call(_LIB.LGBM_BoosterSaveModel(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(num_iteration),
        c_str(filename)))
    # Pandas categorical mappings are stored alongside the model file.
    _dump_pandas_categorical(self.pandas_categorical, filename)
    return self
Save Booster to file. Parameters ---------- filename : string Filename to save Booster. num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. Returns ------- self : Booster Returns self.
def close(self):
    """Close the database, releasing the lock when writable."""
    super(LockingDatabase, self).close()
    # Read-only connections never acquired the lock.
    if not self.readonly:
        self.release_lock()
Closes the database, releasing lock.
def prev(self, n=1):
    """Get the previous n data structs from the file.

    Keyword argument:
    n -- number of structs to retrieve (default 1); must be > 0.

    Return: a single struct when n == 1, otherwise a list of structs.
    """
    # NOTE(review): abs() mirrors a negative position back into the
    # positive range instead of clamping at 0 — verify self.tell - n
    # can never meaningfully go negative.
    i = abs(self.tell - n)
    return self.get(i, n)
Get the previous n data from file. Keyword argument: n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise.
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
    """Plot each signal channel on its matching axis."""
    # A single style string is broadcast across all channels.
    if len(sig_style) == 1:
        sig_style = n_sig * sig_style
    if time_units == 'samples':
        t = np.linspace(0, sig_len-1, sig_len)
    else:
        # Convert sample indices into the requested time unit.
        samples_per_unit = {'seconds': fs,
                            'minutes': fs * 60,
                            'hours': fs * 3600}
        t = np.linspace(0, sig_len-1, sig_len) / samples_per_unit[time_units]
    if signal.ndim == 1:
        axes[0].plot(t, signal, sig_style[0], zorder=3)
        return
    for ch in range(n_sig):
        axes[ch].plot(t, signal[:, ch], sig_style[ch], zorder=3)
Plot signal channels
def _parse_description(self, description_text):
    """Turn a colon-delimited description into a dictionary.

    Lines of the form ``key: value`` create entries; lines without a
    colon are treated as continuations of the most recent key, turning
    its value into a list of strings.
    """
    data = {}
    key = None
    value = None
    for line in description_text.strip().split('\n'):
        if ":" in line:
            idx = line.index(":")
            key = line[:idx]
            value = line[idx + 1:].strip()
            data[key] = value
        elif key is not None:
            # Fixed: a continuation line before any keyed line used to
            # raise NameError (`value` unbound); such lines are skipped.
            if not isinstance(value, list):
                value = [value]
            value.append(line.strip())
            data[key] = value
    return data
Turn description to dictionary.
def addToStore(store, identifier, name):
    """Add a persisted factory with *identifier* and *name* to *store*.

    When a persisted factory with the same identifier already exists,
    its name is updated instead.
    """
    factory = store.findOrCreate(_PersistedFactory, identifier=identifier)
    factory.name = name
    return factory
Adds a persisted factory with given identifier and object name to the given store. This is intended to have the identifier and name partially applied, so that a particular module with an exercise in it can just have an ``addToStore`` function that remembers it in the store. If a persisted factory with the same identifier already exists, the name will be updated.
def start_tcp_server(self, ip, port, name=None, timeout=None, protocol=None, family='ipv4'):
    """Start a new TCP server bound to *ip* and *port*.

    The server may be given a `name`, a default `timeout` and a
    `protocol`; `family` is either 'ipv4' (default) or 'ipv6'.
    Delegates to the shared _start_server helper with the TCPServer class.
    """
    self._start_server(TCPServer, ip, port, name, timeout, protocol, family)
Starts a new TCP server to given `ip` and `port`. Server can be given a `name`, default `timeout` and a `protocol`. `family` can be either ipv4 (default) or ipv6. Notice that you have to use `Accept Connection` keyword for server to receive connections. Examples: | Start TCP server | 10.10.10.2 | 53 | | Start TCP server | 10.10.10.2 | 53 | Server1 | | Start TCP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start TCP server | 10.10.10.2 | 53 | timeout=5 | | Start TCP server | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
def get_arrays(self, type_img):
    """Return the image arrays for the region of interest.

    Args:
        type_img (str): either 'lola' or 'wac' (case-insensitive).

    Raises:
        ValueError: for any other image type.
    """
    kind = type_img.lower()
    if kind == 'lola':
        img_map = LolaMap(self.ppdlola, *self.window,
                          path_pdsfile=self.path_pdsfiles)
    elif kind == 'wac':
        img_map = WacMap(self.ppdwac, *self.window,
                         path_pdsfile=self.path_pdsfiles)
    else:
        raise ValueError('The img type has to be either "Lola" or "Wac"')
    return img_map.image()
Return arrays the region of interest Args: type_img (str): Either lola or wac. Returns: A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the longitudes, ``Y`` contains the latitude and ``Z`` the values extracted for the region of interest. Note: The argument has to be either lola or wac. Note case sensitive. All return arrays have the same size. All coordinates are in degree.
def get_class_from_settings_from_apps(settings_key):
    """Resolve a class from a settings path by looking in installed apps.

    :param settings_key: name of the setting holding 'app_label.model_name'
    :raises NotImplementedError: when the setting is missing or empty
    :raises ImproperlyConfigured: for a malformed path or unknown app
    """
    cls_path = getattr(settings, settings_key, None)
    if not cls_path:
        raise NotImplementedError()
    try:
        app_label = cls_path.split('.')[-2]
        model_name = cls_path.split('.')[-1]
    # Fixed: a dotless path raises IndexError on [-2], which the old
    # `except ValueError` could never catch.
    except (IndexError, ValueError):
        raise ImproperlyConfigured("{0} must be of the form "
                                   "'app_label.model_name'".format(
                                       settings_key))
    app = apps.get_app_config(app_label).models_module
    if not app:
        raise ImproperlyConfigured("{0} setting refers to an app that has not "
                                   "been installed".format(settings_key))
    return getattr(app, model_name)
Try and get a class from a settings path by looking in installed apps.
def configure_logging(verbosity):
    """Set up the global logging level and a stream handler.

    Args:
        verbosity (int): 0 = CRITICAL up to 4+ = DEBUG.
    """
    root = logging.getLogger()
    formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',
        '%y-%m-%d %H:%M:%S')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    loglevels = [logging.CRITICAL, logging.ERROR, logging.WARNING,
                 logging.INFO, logging.DEBUG]
    # Clamp to the valid range; a negative verbosity used to index from
    # the end of the list and silently pick a very verbose level.
    index = min(max(verbosity, 0), len(loglevels) - 1)
    root.setLevel(loglevels[index])
    root.addHandler(handler)
Set up the global logging level. Args: verbosity (int): The logging verbosity
def parse_content(self, content, allow_no_value=False):
    """Parse the config-file content into ``self.data``.

    Child classes may overload this and call super with
    ``allow_no_value=True`` to allow keys with no value::

        def parse_content(self, content):
            super(YourClass, self).parse_content(content,
                                                 allow_no_value=True)
    """
    super(IniConfigFile, self).parse_content(content)
    config = RawConfigParser(allow_no_value=allow_no_value)
    # content is a sequence of lines; rebuild the file text for the parser.
    fp = io.StringIO(u"\n".join(content))
    # readfp is the Python 2 spelling (deprecated in 3; read_file).
    config.readfp(fp, filename=self.file_name)
    self.data = config
Parses content of the config file. In child class overload and call super to set flag ``allow_no_values`` and allow keys with no value in config file:: def parse_content(self, content): super(YourClass, self).parse_content(content, allow_no_values=True)
def commuting_sets_by_indices(pauli_sums, commutation_check):
    """Find commuting sets across a list of pauli sums, tracking origins.

    :param pauli_sums: a list of PauliSum
    :param commutation_check: function that checks whether all elements
        of a list and a single pauli term commute
    :return: a list of commuting sets; each set is a list of tuples
        (i, j) where i indexes the pauli sum and j the term within it.
    """
    assert isinstance(pauli_sums, list)
    group_inds = []
    group_terms = []
    for i, pauli_sum in enumerate(pauli_sums):
        for j, term in enumerate(pauli_sum):
            # The first term seeds the first group.
            if len(group_inds) == 0:
                group_inds.append([(i, j)])
                group_terms.append([term])
                continue
            for k, group in enumerate(group_terms):
                if commutation_check(group, term):
                    group_inds[k] += [(i, j)]
                    group_terms[k] += [term]
                    break
            else:
                # No existing group commutes: start a new one.
                group_inds.append([(i, j)])
                group_terms.append([term])
    return group_inds
For a list of pauli sums, find commuting sets and keep track of which pauli sum they came from. :param pauli_sums: A list of PauliSum :param commutation_check: a function that checks if all elements of a list and a single pauli term commute. :return: A list of commuting sets. Each set is a list of tuples (i, j) to find the particular commuting term. i is the index of the pauli sum from whence the term came. j is the index within the set.
def register(self, es, append=None, modulo=None):
    """Register a `CMAEvolutionStrategy` instance for logging.

    ``append=True`` appends to previous data logged under the same
    name; by default previous data are overwritten.
    """
    if not isinstance(es, CMAEvolutionStrategy):
        raise TypeError("only class CMAEvolutionStrategy can be "
                        "registered for logging")
    self.es = es
    for attr, val in (("append", append), ("modulo", modulo)):
        if val is not None:
            setattr(self, attr, val)
    self.registered = True
    return self
register a `CMAEvolutionStrategy` instance for logging, ``append=True`` appends to previous data logged under the same name, by default previous data are overwritten.
def _get_arg_tokens(cli):
    """Tokens for the arg-prompt, e.g. ``(arg: 3)``."""
    arg = cli.input_processor.arg
    return [
        (Token.Prompt.Arg, '(arg: '),
        (Token.Prompt.Arg.Text, str(arg)),
        (Token.Prompt.Arg, ') '),
    ]
Tokens for the arg-prompt.
def remove_sites_from_neighbours(self, remove_labels):
    """Remove neighbouring sites whose labels are in *remove_labels*.

    Args:
        remove_labels (str or list(str)): label(s) of sites to remove
            from the cluster neighbour set.

    Returns:
        None
    """
    # isinstance instead of `type(...) is str`: also accepts str subclasses.
    if isinstance(remove_labels, str):
        remove_labels = [remove_labels]
    self.neighbours = set(n for n in self.neighbours
                          if n.label not in remove_labels)
Removes sites from the set of neighbouring sites if these have labels in remove_labels. Args: Remove_labels (List) or (Str): List of Site labels to be removed from the cluster neighbour set. Returns: None
def get_environments():
    """Collect every environment defined by a .json file under
    the 'environments' directory, sorted by name."""
    suffix = '.json'
    prefix_len = len('environments')
    envs = []
    for root, _subfolders, files in os.walk('environments'):
        for filename in files:
            if not filename.endswith(suffix):
                continue
            # Strip the leading 'environments' prefix and the extension.
            rel_path = os.path.join(root[prefix_len:], filename[:-len(suffix)])
            envs.append(get_environment(rel_path))
    return sorted(envs, key=lambda env: env['name'])
Gets all environments found in the 'environments' directory
def update_contact_of_client(self, contact_id, contact_dict):
    """Update a contact.

    :param contact_id: the id of the contact
    :param contact_dict: dict of fields to update
    :return: dict with the API response
    """
    return self._create_put_request(resource=CONTACTS, billomat_id=contact_id, send_data=contact_dict)
Updates a contact :param contact_id: the id of the contact :param contact_dict: dict :return: dict
def check_who_read(self, messages):
    """Annotate each message with a ``readers`` list of participant ids
    whose last thread check happened after the message was sent."""
    for message in messages:
        readers = []
        for participation in message.thread.participation_set.all():
            last_check = participation.date_last_check
            if last_check is not None and last_check > message.sent_at:
                readers.append(participation.participant.id)
        setattr(message, "readers", readers)
    return messages
Check who read each message.
def _wrap_rpc_behavior(handler, fn):
    """Return a new rpc handler that wraps the given function.

    Picks the behavior and factory that match the handler's
    streaming/unary combination, then rebuilds the handler around
    ``fn(behavior, request_streaming, response_streaming)``.
    """
    if handler is None:
        return None
    if handler.request_streaming and handler.response_streaming:
        behavior_fn = handler.stream_stream
        handler_factory = grpc.stream_stream_rpc_method_handler
    elif handler.request_streaming and not handler.response_streaming:
        behavior_fn = handler.stream_unary
        handler_factory = grpc.stream_unary_rpc_method_handler
    elif not handler.request_streaming and handler.response_streaming:
        behavior_fn = handler.unary_stream
        handler_factory = grpc.unary_stream_rpc_method_handler
    else:
        behavior_fn = handler.unary_unary
        handler_factory = grpc.unary_unary_rpc_method_handler
    # Serializers are carried over unchanged from the original handler.
    return handler_factory(
        fn(behavior_fn, handler.request_streaming, handler.response_streaming),
        request_deserializer=handler.request_deserializer,
        response_serializer=handler.response_serializer
    )
Returns a new rpc handler that wraps the given function
def close(self):
    """Flush and close the record writer; a no-op when already closed."""
    if self._writer is None:
        return
    self.flush()
    self._writer.close()
    self._writer = None
Closes the record writer.
def add_volume(self, colorchange=True, column=None, name='', str='{name}', **kwargs):
    """Add a 'volume' study to QuantFigure.studies.

    Parameters:
        colorchange (bool): when True each bar is colored by whether the
            'base' column ('close') rose or fell versus the previous value.
        column (str): data column holding the volume data; defaults to
            the figure's configured volume column.
        name (str): name given to the study.
        str (str): label factory for studies ({name}/{study}/{period}).
        kwargs: up_color / down_color override the theme colors.
    """
    if not column:
        column = self._d['volume']
    # Colors fall back to the active theme when not supplied.
    up_color = kwargs.pop('up_color', self.theme['up_color'])
    down_color = kwargs.pop('down_color', self.theme['down_color'])
    study = {'kind': 'volume',
             'name': name,
             'params': {'colorchange': colorchange, 'base': 'close', 'column': column,
                        'str': None},
             'display': utils.merge_dict({'up_color': up_color, 'down_color': down_color}, kwargs)}
    self._add_study(study)
Add 'volume' study to QuantFigure.studies Parameters: colorchange : bool If True then each volume bar will have a fill color depending on if 'base' had a positive or negative change compared to the previous value If False then each volume bar will have a fill color depending on if the volume data itself had a positive or negative change compared to the previous value column :string Defines the data column name that contains the volume data. Default: 'volume' name : string Name given to the study str : string Label factory for studies The following wildcards can be used: {name} : Name of the column {study} : Name of the study {period} : Period used Examples: 'study: {study} - period: {period}' kwargs : base : string Defines the column which will define the positive/negative changes (if colorchange=True). Default = 'close' up_color : string Color for positive bars down_color : string Color for negative bars
def create_topics(self, new_topics, timeout_ms=None, validate_only=False):
    """Create new topics in the cluster.

    :param new_topics: A list of NewTopic objects.
    :param timeout_ms: Milliseconds to wait for new topics to be created
        before the broker returns.
    :param validate_only: If True, don't actually create new topics.
        Not supported by all versions. Default: False
    :return: Appropriate version of CreateTopicResponse class.
    """
    version = self._matching_api_version(CreateTopicsRequest)
    timeout_ms = self._validate_timeout(timeout_ms)
    if version == 0:
        # v0 of the request has no validate_only flag at all.
        if validate_only:
            raise IncompatibleBrokerVersion(
                "validate_only requires CreateTopicsRequest >= v1, which is not supported by Kafka {}."
                .format(self.config['api_version']))
        request = CreateTopicsRequest[version](
            create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
            timeout=timeout_ms
        )
    elif version <= 2:
        request = CreateTopicsRequest[version](
            create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
            timeout=timeout_ms,
            validate_only=validate_only
        )
    else:
        raise NotImplementedError(
            "Support for CreateTopics v{} has not yet been added to KafkaAdminClient."
            .format(version))
    # Topic creation must go through the controller broker.
    return self._send_request_to_controller(request)
Create new topics in the cluster. :param new_topics: A list of NewTopic objects. :param timeout_ms: Milliseconds to wait for new topics to be created before the broker returns. :param validate_only: If True, don't actually create new topics. Not supported by all versions. Default: False :return: Appropriate version of CreateTopicResponse class.
def get_column_index(self, header):
    """Get the index of a column from its header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError (via raise_suppressed) when no column matches.
    NOTE(review): the upstream docstring says ValueError, but the code
    builds a KeyError — confirm which is the intended contract.
    """
    try:
        index = self._column_headers.index(header)
        return index
    except ValueError:
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
Get index of a column from it's header. Parameters ---------- header: str header of the column. Raises ------ ValueError: If no column could be found corresponding to `header`.
def __create_entry(self, entrytype, data, index, ttl=None):
    """Create a handle entry of any type except HS_ADMIN.

    :param entrytype: the type of entry, e.g. 'URL' or 'checksum'.
        HS_ADMIN entries must be created via __create_admin_entry().
    :param data: the value for the entry (string or dict).
    :param index: the integer used as index.
    :param ttl: optional TTL; omitted from the entry when not given.
    :return: the entry as a dict.
    """
    if entrytype == 'HS_ADMIN':
        op = 'creating HS_ADMIN entry'
        msg = 'This method can not create HS_ADMIN entries.'
        raise IllegalOperationException(operation=op, msg=msg)
    entry = {'index': index, 'type': entrytype, 'data': data}
    if ttl is not None:
        entry['ttl'] = ttl
    return entry
Create an entry of any type except HS_ADMIN. :param entrytype: The type of entry to create, e.g. 'URL' or 'checksum' or ... Note: For entries of type 'HS_ADMIN', please use __create_admin_entry(). For type '10320/LOC', please use 'add_additional_URL()' :param data: The actual value for the entry. Can be a simple string, e.g. "example", or a dict {"format":"string", "value":"example"}. :param index: The integer to be used as index. :param ttl: Optional. If not set, the library's default is set. If there is no default, it is not set by this library, so Handle System sets it. :return: The entry as a dict.
def is_schema_of_common_names(schema: "GraphQLSchema") -> bool:
    """Check whether this schema uses the common naming convention.

    The convention names the root types Query, Mutation and
    Subscription; a missing root type does not break the convention.
    """
    expected = (("query_type", "Query"),
                ("mutation_type", "Mutation"),
                ("subscription_type", "Subscription"))
    for attr, conventional_name in expected:
        root_type = getattr(schema, attr)
        if root_type and root_type.name != conventional_name:
            return False
    return True
Check whether this schema uses the common naming convention. GraphQL schema define root types for each type of operation. These types are the same as any other type and can be named in any manner, however there is a common naming convention: schema { query: Query mutation: Mutation } When using this naming convention, the schema description can be omitted.
def random_filename(path=None):
    """Make a UUID-based file name which is extremely unlikely to
    exist already, optionally joined under *path*."""
    name = uuid4().hex
    return name if path is None else os.path.join(path, name)
Make a UUID-based file name which is extremely unlikely to exist already.
def contribute_to_class(self, cls, name):
    """Attach this M2M field to *cls*, creating our own through table.

    Django's default through-table creation is bypassed so the
    FKToVersion field can be used for the 'from' side.
    """
    self.update_rel_to(cls)
    self.set_attributes_from_name(name)
    self.model = cls
    # Abstract models get their through table on the concrete subclass.
    if not self.remote_field.through and not cls._meta.abstract:
        self.remote_field.through = create_many_to_many_intermediary_model(
            self, cls)
    super(M2MFromVersion, self).contribute_to_class(cls, name)
Because django doesn't give us a nice way to provide a through table without losing functionality. We have to provide our own through table creation that uses the FKToVersion field to be used for the from field.
def register_trading_control(self, control):
    """Register a new TradingControl to be checked prior to order calls.

    Controls can only be registered before initialization.
    """
    if not self.initialized:
        self.trading_controls.append(control)
        return
    raise RegisterTradingControlPostInit()
Register a new TradingControl to be checked prior to order calls.
def shape(cls, dataset):
    """Return the shape of all subpaths as one concatenated array.

    Subpaths are treated as if joined by NaN separator rows, so the row
    count is the sum of subpath rows plus one separator per boundary.
    """
    if not dataset.data:
        return (0, len(dataset.dimensions()))
    rows, cols = 0, 0
    ds = cls._inner_dataset_template(dataset)
    for d in dataset.data:
        # Reuse one template dataset, swapping in each subpath's data.
        ds.data = d
        r, cols = ds.interface.shape(ds)
        rows += r
    # len(data)-1 accounts for the NaN separator rows between subpaths.
    return rows+len(dataset.data)-1, cols
Returns the shape of all subpaths, making it appear like a single array of concatenated subpaths separated by NaN values.
def _set_init_params(self, qrs_amp_recent, noise_amp_recent, rr_recent,
                     last_qrs_ind):
    """Set initial online detection parameters.

    :param qrs_amp_recent: recent QRS amplitude estimate
    :param noise_amp_recent: recent noise amplitude estimate
    :param rr_recent: recent RR interval estimate
    :param last_qrs_ind: index of the last detected QRS
    """
    self.qrs_amp_recent = qrs_amp_recent
    self.noise_amp_recent = noise_amp_recent
    # Threshold is a weighted mix of QRS and noise amplitudes, floored
    # at the configured minimum scaled by the transform gain.
    self.qrs_thr = max(0.25*self.qrs_amp_recent
                       + 0.75*self.noise_amp_recent,
                       self.qrs_thr_min * self.transform_gain)
    self.rr_recent = rr_recent
    self.last_qrs_ind = last_qrs_ind
    # No peak has been classified as QRS yet.
    self.last_qrs_peak_num = None
Set initial online parameters
def importFile(self, srcUrl, sharedFileName=None):
    """Import the file at the given URL into the job store.

    See AbstractJobStore.importFile for a full description.
    """
    # Importing outside the context manager would bypass setup/teardown.
    self._assertContextManagerUsed()
    return self._jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
Imports the file at the given URL into job store. See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.importFile` for a full description
def loadSessions(self, callback, bare_jid, device_ids):
    """Return a dict containing the session for each device id.

    Asynchronous backends deliver the result through *callback*;
    synchronous backends return it directly.
    """
    if not self.is_async:
        return self.__loadSessionsSync(bare_jid, device_ids)
    self.__loadSessionsAsync(callback, bare_jid, device_ids, {})
Return a dict containing the session for each device id. By default, this method calls loadSession for each device id.
def bitstring_probs_to_z_moments(p):
    """Convert bitstring probabilities to joint Z moment expectations.

    :param np.array p: array of bitstring probabilities with one
        length-2 axis per qubit.
    :return: np.array of the same shape holding expectations of all
        monomials in {I, Z_0, ..., Z_{n-1}}.
    """
    # The 2x2 Hadamard-like matrix maps {p0, p1} -> {p0+p1, p0-p1},
    # i.e. normalization and <Z>, applied along every qubit axis.
    zmat = np.array([[1, 1],
                     [1, -1]])
    return _apply_local_transforms(p, (zmat for _ in range(p.ndim)))
Convert between bitstring probabilities and joint Z moment expectations. :param np.array p: An array that enumerates bitstring probabilities. When flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that ``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``. :return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each monomial can be accessed via:: <Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m] :rtype: np.array
def _reduce_age(self, now):
    """Reduce size of cache by date, dropping entries older than max_age.

    :param datetime.datetime now: Current time
    """
    if self.max_age:
        # Collect keys first so deletion does not mutate the dict while
        # iterating; iteritems is the six/Python-2 style iterator.
        keys = [
            key for key, value in iteritems(self.data)
            if now - value['date'] > self.max_age
        ]
        for key in keys:
            del self.data[key]
Reduce size of cache by date. :param datetime.datetime now: Current time
def files(self):
    """Return the list of (non-hidden) files in the root directories.

    Walks one level of each directory in self.directory, appending every
    regular file to self.filepaths, then returns self._get_filepaths().
    """
    self._printer('\tFiles Walk')
    for directory in self.directory:
        for path in os.listdir(directory):
            full_path = os.path.join(directory, path)
            if os.path.isfile(full_path):
                # Dotfiles are skipped.
                if not path.startswith('.'):
                    self.filepaths.append(full_path)
    # NOTE(review): the result comes from _get_filepaths(), not directly
    # from self.filepaths — verify they are kept in sync.
    return self._get_filepaths()
Return list of files in root directory
def grammatical_join(l, initial_joins=", ", final_join=" and "):
    """Join list items for display, with a different separator before
    the final item.

    >>> grammatical_join(['apples', 'pears', 'bananas'])
    'apples, pears and bananas'

    :param l: list of strings to join
    :param initial_joins: separator between non-ultimate items
    :param final_join: separator before the final item
    :return: the joined string
    """
    tail = final_join.join(l[-2:])
    return initial_joins.join(l[:-2] + [tail])
Display a list of items nicely, with a different string before the final item. Useful for using lists in sentences. >>> grammatical_join(['apples', 'pears', 'bananas']) 'apples, pears and bananas' >>> grammatical_join(['apples', 'pears', 'bananas'], initial_joins=";", final_join="; or ") 'apples; pears; or bananas' :param l: List of strings to join :param initial_joins: the string to join the non-ultimate items with :param final_join: the string to join the final item with :return: items joined with commas except " and " before the final one.
def get_output_content(job_id, max_size=1024, conn=None):
    """Return the content buffer for *job_id* if that job output exists.

    :param job_id: <str> id for the job
    :param max_size: <int> truncate after [max_size] bytes
    :param conn: (optional)<connection> to run on
    :return: <str> or <bytes>, or None when no output exists
    """
    content = None
    # Prefer the secondary index when it exists; fall back to a filter.
    if RBO.index_list().contains(IDX_OUTPUT_JOB_ID).run(conn):
        check_status = RBO.get_all(job_id, index=IDX_OUTPUT_JOB_ID).run(conn)
    else:
        check_status = RBO.filter({OUTPUTJOB_FIELD: {ID_FIELD: job_id}}).run(conn)
    # The last matching row wins when several exist.
    for status_item in check_status:
        content = _truncate_output_content_if_required(status_item, max_size)
    return content
returns the content buffer for a job_id if that job output exists :param job_id: <str> id for the job :param max_size: <int> truncate after [max_size] bytes :param conn: (optional)<connection> to run on :return: <str> or <bytes>
def ReadAllClientActionRequests(self, client_id, cursor=None):
    """Read all client action requests available for a given client_id.

    Returns requests sorted by (flow_id, request_id); leased requests
    carry their lease info, unleased ones a remaining TTL.
    """
    query = ("SELECT request, UNIX_TIMESTAMP(leased_until), leased_by, "
             "leased_count "
             "FROM client_action_requests "
             "WHERE client_id = %s")
    cursor.execute(query, [db_utils.ClientIDToInt(client_id)])
    ret = []
    for req, leased_until, leased_by, leased_count in cursor.fetchall():
        request = rdf_flows.ClientActionRequest.FromSerializedString(req)
        if leased_until is not None:
            request.leased_by = leased_by
            request.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
        else:
            request.leased_by = None
            request.leased_until = None
        # Each past lease consumes one unit of the request's TTL.
        request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count
        ret.append(request)
    return sorted(ret, key=lambda req: (req.flow_id, req.request_id))
Reads all client messages available for a given client_id.
def confirm(prompt_str, default=False):
    """Show a confirmation prompt to a command-line user.

    :param string prompt_str: prompt to give to the user
    :param bool default: default answer when the user just hits enter
    """
    default_str = 'y' if default else 'n'
    suffix = '[Y/n]' if default else '[y/N]'
    prompt = '%s %s' % (prompt_str, suffix)
    ans = click.prompt(prompt, default=default_str, show_default=False)
    return ans.lower() in ('y', 'yes', 'yeah', 'yup', 'yolo')
Show a confirmation prompt to a command-line user. :param string prompt_str: prompt to give to the user :param bool default: Default value to True or False
def update_generators():
    """Update the context of all generators.

    Adds useful variables and translations into the template context
    and interlinks translations.
    """
    # Iterate the dict directly; the explicit .keys() was redundant.
    for generator in _GENERATOR_DB:
        install_templates_translations(generator)
        add_variables_to_context(generator)
        interlink_static_files(generator)
        interlink_removed_content(generator)
        interlink_translated_content(generator)
Update the context of all generators. Adds useful variables and translations into the template context and interlinks translations.
def cluster(
    self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None
):
    """Factory to create a cluster associated with this instance.

    :param cluster_id: (str) the ID of the cluster.
    :param location_id: (str, creation only) location of the cluster's
        nodes and storage.
    :param serve_nodes: (int, optional) number of nodes in the cluster.
    :param default_storage_type: (int, optional) storage type constant.
    :returns: a Cluster owned by this instance.
    """
    return Cluster(
        cluster_id,
        self,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=default_storage_type,
    )
Factory to create a cluster associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_cluster] :end-before: [END bigtable_create_cluster] :type cluster_id: str :param cluster_id: The ID of the cluster. :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. :type location_id: str :param location_id: (Creation Only) The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. :type default_storage_type: int :param default_storage_type: (Optional) The type of storage Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. :data:`google.cloud.bigtable.enums.StorageType.SHD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance.
def remove_negative_entries(A):
    r"""Remove all negative entries from a sparse matrix.

    Aplus = max(0, A)

    Parameters
    ----------
    A : (M, M) scipy.sparse matrix
        Input matrix

    Returns
    -------
    Aplus : (M, M) scipy.sparse matrix
        Input matrix with non-positive entries set to zero.
    """
    A = A.tocoo()
    keep = A.data > 0.0
    return coo_matrix((A.data[keep], (A.row[keep], A.col[keep])),
                      shape=A.shape)
r"""Remove all negative entries from sparse matrix. Aplus=max(0, A) Parameters ---------- A : (M, M) scipy.sparse matrix Input matrix Returns ------- Aplus : (M, M) scipy.sparse matrix Input matrix with negative entries set to zero.
def get(cls, *args, **kwargs):
    """Retrieve one instance from db according to given kwargs.

    Optionally, one positional arg can be used to retrieve it by pk.

    Raises DoesNotExist when nothing matches and ValueError when the
    filter is ambiguous or the call signature is invalid.
    """
    if len(args) == 1:
        # Direct primary-key lookup.
        pk = args[0]
    elif kwargs:
        # A single kwarg naming the pk field is also a direct lookup.
        if len(kwargs) == 1 and cls._field_is_pk(list(kwargs.keys())[0]):
            pk = list(kwargs.values())[0]
        else:
            # Otherwise filter the collection; exactly one hit expected.
            result = cls.collection(**kwargs).sort(by='nosort')
            if len(result) == 0:
                raise DoesNotExist(u"No object matching filter: %s" % kwargs)
            elif len(result) > 1:
                raise ValueError(u"More than one object matching filter: %s" % kwargs)
            else:
                try:
                    pk = result[0]
                except IndexError:
                    # Race: the row vanished between len() and indexing.
                    raise DoesNotExist(u"No object matching filter: %s" % kwargs)
    else:
        raise ValueError("Invalid `get` usage with args %s and kwargs %s" % (args, kwargs))
    return cls(pk)
Retrieve one instance from db according to given kwargs. Optionally, one arg could be used to retrieve it from pk.
def _create_netmap_config(self):
    """Create the NETMAP file in the working directory.

    Maps every bay/unit pair of the ubridge side (application_id + 512)
    to the same bay/unit on the IOU side.
    """
    netmap_path = os.path.join(self.working_dir, "NETMAP")
    try:
        with open(netmap_path, "w", encoding="utf-8") as f:
            # 16 bays with 4 units each.
            for bay in range(0, 16):
                for unit in range(0, 4):
                    f.write("{ubridge_id}:{bay}/{unit}{iou_id:>5d}:{bay}/{unit}\n".format(ubridge_id=str(self.application_id + 512),
                                                                                          bay=bay,
                                                                                          unit=unit,
                                                                                          iou_id=self.application_id))
        log.info("IOU {name} [id={id}]: NETMAP file created".format(name=self._name,
                                                                    id=self._id))
    except OSError as e:
        raise IOUError("Could not create {}: {}".format(netmap_path, e))
Creates the NETMAP file.
def add_modifier(self, modifier, keywords, relative_pos, action, parameter=None):
    """Modify existing tasks based on presence of a keyword.

    Parameters
    ----------
    modifier : str
        String which triggers the given modifier.
    keywords : iterable of str
        Keywords of the tasks to be modified.
    relative_pos : int
        Relative position of the task to modify; never 0.
    action : str
        Action to perform on the task.
    parameter : object
        Value required by the action (default None).
    """
    if relative_pos == 0:
        raise ValueError("relative_pos cannot be 0")
    entry = (action, parameter, relative_pos)
    keyword_map = self._modifiers.get(modifier, {})
    for keyword in keywords:
        keyword_map[keyword] = tuple(keyword_map.get(keyword, ())) + (entry,)
    self._modifiers[modifier] = keyword_map
Modify existing tasks based on presence of a keyword. Parameters ---------- modifier : str A string value which would trigger the given Modifier. keywords : iterable of str sequence of strings which are keywords for some task, which has to be modified. relative_pos : int Relative position of the task which should be modified in the presence of `modifier`. It's value can never be 0. Data fields should also be considered when calculating the relative position. action : str String value representing the action which should be performed on the task. Action represents calling a arbitrary function to perform th emodification. parameter : object value required by the `action`.(Default None)
def refit(self, data, label, decay_rate=0.9, **kwargs):
    """Refit the existing Booster on new data.

    Predicts leaf indices for ``data`` with the current model, merges
    the current model into a fresh Booster, then refits the leaf
    outputs via the LightGBM C API.

    NOTE(review): ``decay_rate`` is accepted but never used in this
    body — presumably it should be forwarded to the refit call; confirm
    against upstream LightGBM.
    """
    if self.__set_objective_to_none:
        raise LightGBMError('Cannot refit due to null objective function.')
    predictor = self._to_predictor(copy.deepcopy(kwargs))
    # Leaf index per (row, tree) for the new data under the old model.
    leaf_preds = predictor.predict(data, -1, pred_leaf=True)
    nrow, ncol = leaf_preds.shape
    train_set = Dataset(data, label, silent=True)
    new_booster = Booster(self.params, train_set, silent=True)
    # Copy the tree structure from the current model into the new one.
    _safe_call(_LIB.LGBM_BoosterMerge(
        new_booster.handle,
        predictor.handle))
    leaf_preds = leaf_preds.reshape(-1)
    ptr_data, type_ptr_data, _ = c_int_array(leaf_preds)
    _safe_call(_LIB.LGBM_BoosterRefit(
        new_booster.handle,
        ptr_data,
        ctypes.c_int(nrow),
        ctypes.c_int(ncol)))
    new_booster.network = self.network
    new_booster.__attr = self.__attr.copy()
    return new_booster
Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster.
def warn(self, collection):
    """Check the module for documentation and best-practice warnings.

    Delegates to the base class first, then appends a warning when the
    'implicit none' modifier is missing.

    :param collection: list that warning messages are appended to.
    """
    super(CodeElement, self).warn(collection)
    # Fortran best practice: every module should declare 'implicit none'.
    # (Idiom fix: use the `not in` operator rather than `not ... in`.)
    if "implicit none" not in self.modifiers:
        collection.append("WARNING: implicit none not set in {}".format(self.name))
Checks the module for documentation and best-practice warnings.
def regions(self):
    """Return a list of all region names from the region dimension."""
    option_elems = self.dimensions["region"].elem.find_all("option")
    return [option.text.strip() for option in option_elems]
Get a list of all regions
def sets(self, keyword, value):
    """Set the element after *keyword*, coercing str values to KQMLString.

    Plain strings are wrapped in ``KQMLString`` so they serialize as
    KQML strings rather than tokens; other values pass through
    unchanged to ``self.set``.
    """
    if isinstance(value, str):
        value = KQMLString(value)
    self.set(keyword, value)
Set the element of the list after the given keyword as string. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). value : str The value is instantiated as KQMLString and added to the list. Example: kl = KQMLList.from_string('(FAILURE)') kl.sets('reason', 'this is a custom string message, not a token')
def bet_place(
    self,
    betting_market_id,
    amount_to_bet,
    backer_multiplier,
    back_or_lay,
    account=None,
    **kwargs
):
    """Place a bet on a betting market.

    :param str betting_market_id: The identifier for the market to bet in
    :param peerplays.amount.Amount amount_to_bet: Amount to bet with
    :param int backer_multiplier: Multiplier for the backer (odds)
    :param str back_or_lay: "back" or "lay" the bet
    :param str account: (optional) the account to bet
        (defaults to ``default_account``)
    """
    from . import GRAPHENE_BETTING_ODDS_PRECISION
    assert isinstance(amount_to_bet, Amount)
    assert back_or_lay in ["back", "lay"]
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    bm = BettingMarket(betting_market_id)
    op = operations.Bet_place(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "bettor_id": account["id"],
            "betting_market_id": bm["id"],
            "amount_to_bet": amount_to_bet.json(),
            "backer_multiplier": (
                # Odds are stored as an integer at a fixed precision.
                int(backer_multiplier * GRAPHENE_BETTING_ODDS_PRECISION)
            ),
            "back_or_lay": back_or_lay,
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Place a bet :param str betting_market_id: The identifier for the market to bet in :param peerplays.amount.Amount amount_to_bet: Amount to bet with :param int backer_multiplier: Multiplier for the backer :param str back_or_lay: "back" or "lay" the bet :param str account: (optional) the account to bet (defaults to ``default_account``)
def init_properties(env='dev', app='unnecessary', **_):
    """Make sure the Archaius _application.properties_ file exists in S3.

    Args:
        env (str): Deployment environment/account, i.e. dev, stage, prod.
        app (str): GitLab Project name.

    Returns:
        bool: True when application.properties was found; False when it
        had to be created.
    """
    aws_env = boto3.session.Session(profile_name=env)
    s3client = aws_env.resource('s3')
    generated = get_details(app=app, env=env)
    archaius = generated.archaius()
    archaius_file = ('{path}/application.properties').format(path=archaius['path'])
    try:
        # A successful GET means the file already exists.
        s3client.Object(archaius['bucket'], archaius_file).get()
        LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})
        return True
    except boto3.exceptions.botocore.client.ClientError:
        # Missing object: create an empty placeholder.
        s3client.Object(archaius['bucket'], archaius_file).put()
        LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})
        return False
Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created.
def record_content_length(self):
    """Return the length of this record's string form, excluding metadata."""
    as_dict = py2dict(self)
    # Drop the 'meta' key (if present) before measuring.
    as_dict.pop('meta', None)
    return len(str(as_dict))
Calculate length of record, excluding metadata.
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True):
    """Unify ``fit_pole`` and ``fit_girdle``.

    Converts the measurements to lon/lat, computes covariance
    eigenvectors, and returns the strike/dip of eigenvector *vec*.
    """
    lon, lat = _convert_measurements(data, measurement)
    vals, vecs = cov_eig(lon, lat, bidirectional)
    x, y, z = vecs[:, vec]
    s, d = stereonet_math.geographic2pole(*stereonet_math.cart2sph(x, y, z))
    # geographic2pole returns sequences; unwrap the single values.
    return s[0], d[0]
Unifies ``fit_pole`` and ``fit_girdle``.
def list_datacenters(conn=None, call=None):
    """List all the data centers.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_datacenters my-profitbricks-config
    """
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_datacenters function must be called with '
            '-f or --function.'
        )
    if not conn:
        conn = get_conn()
    datacenters = []
    for item in conn.list_datacenters()['items']:
        entry = {'id': item['id']}
        entry.update(item['properties'])
        datacenters.append({item['properties']['name']: entry})
    return {'Datacenters': datacenters}
List all the data centers CLI Example: .. code-block:: bash salt-cloud -f list_datacenters my-profitbricks-config
def _contains_policies(self, resource_properties): return resource_properties is not None \ and isinstance(resource_properties, dict) \ and self.POLICIES_PROPERTY_NAME in resource_properties
Is there policies data in this resource? :param dict resource_properties: Properties of the resource :return: True if we can process this resource. False, otherwise
def configure(self, options, conf):
    """Turn style-forcing on if bar-forcing is on.

    Also warns about the known bad interaction between --with-id and
    verbosity >= 2.
    """
    super(ProgressivePlugin, self).configure(options, conf)
    if (getattr(options, 'verbosity', 0) > 1 and
            getattr(options, 'enable_plugin_id', False)):
        print ('Using --with-id and --verbosity=2 or higher with '
               'nose-progressive causes visualization errors. Remove one '
               'or the other to avoid a mess.')
    if options.with_bar:
        # The bar cannot be drawn without terminal styling enabled.
        options.with_styling = True
Turn style-forcing on if bar-forcing is on. It'd be messy to position the bar but still have the rest of the terminal capabilities emit empty strings.
def insertPrimaryDataset(self):
    """API to insert a primary dataset in DBS.

    Reads the JSON request body, validates it as a "primds" payload,
    stamps creation metadata, and delegates to the DAO layer. Errors are
    routed through ``dbsExceptionHandler`` with appropriate codes.
    """
    try :
        body = request.body.read()
        indata = cjson.decode(body)
        indata = validateJSONInputNoCopy("primds", indata)
        # Stamp server-side creation metadata before insert.
        indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
        self.dbsPrimaryDataset.insertPrimaryDataset(indata)
    except cjson.DecodeError as dc:
        dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert PrimaryDataset input",  self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        # Let cherrypy HTTP errors propagate untouched.
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
                % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error',  dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
API to insert A primary dataset in DBS :param primaryDSObj: primary dataset object :type primaryDSObj: dict :key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required) :key primary_ds_name: Name of the primary dataset (Required)
def kill(self, container, signal=None):
    """Kill a container or send a signal to a container.

    Args:
        container (str): The container to kill
        signal (str or int): The signal to send. Defaults to ``SIGKILL``

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url("/containers/{0}/kill", container)
    params = {}
    if signal is not None:
        if not isinstance(signal, six.string_types):
            # Coerce e.g. signal enum values to a plain int.
            signal = int(signal)
        params['signal'] = signal
    res = self._post(url, params=params)

    self._raise_for_status(res)
Kill a container or send a signal to a container. Args: container (str): The container to kill signal (str or int): The signal to send. Defaults to ``SIGKILL`` Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def add_item(self, assessment_id, item_id):
    """Add an existing ``Item`` to an assessment.

    arg: assessment_id (osid.id.Id): the ``Id`` of the ``Assessment``
    arg: item_id (osid.id.Id): the ``Id`` of the ``Item``
    raise: InvalidArgument - ``assessment_id`` is not in the
        assessment.Assessment namespace
    """
    if assessment_id.get_identifier_namespace() != 'assessment.Assessment':
        raise errors.InvalidArgument
    # The item is attached to the assessment's first part.
    self._part_item_design_session.add_item(item_id, self._get_first_part_id(assessment_id))
Adds an existing ``Item`` to an assessment. arg: assessment_id (osid.id.Id): the ``Id`` of the ``Assessment`` arg: item_id (osid.id.Id): the ``Id`` of the ``Item`` raise: NotFound - ``assessment_id`` or ``item_id`` not found raise: NullArgument - ``assessment_id`` or ``item_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
def _surpress_formatting_errors(fn): @wraps(fn) def inner(*args, **kwargs): try: return fn(*args, **kwargs) except ValueError: return "" return inner
I know this is dangerous and the wrong way to solve the problem, but when using both row and columns summaries it's easier to just swallow errors so users can format their tables how they need.
def NumRegressors(npix, pld_order, cross_terms=True):
    """Return the number of regressors for `npix` pixels and PLD order
    `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True`
    """
    if cross_terms:
        # Multisets of size k from npix pixels, summed over orders 1..pld_order.
        total = sum(comb(npix + k - 1, k) for k in range(1, pld_order + 1))
    else:
        # npix regressors contributed at each order.
        total = npix * pld_order
    return int(total)
Return the number of regressors for `npix` pixels and PLD order `pld_order`. :param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True`
async def set_action(self, on=None, bri=None, hue=None, sat=None, xy=None,
                     ct=None, alert=None, effect=None, transitiontime=None,
                     bri_inc=None, sat_inc=None, hue_inc=None, ct_inc=None,
                     xy_inc=None, scene=None):
    """Change the action of a group.

    Only parameters that are not ``None`` are sent to the bridge, so
    unspecified attributes remain untouched.
    """
    # Build the request payload, dropping all unset (None) values.
    data = {
        key: value for key, value in {
            'on': on,
            'bri': bri,
            'hue': hue,
            'sat': sat,
            'xy': xy,
            'ct': ct,
            'alert': alert,
            'effect': effect,
            'transitiontime': transitiontime,
            'bri_inc': bri_inc,
            'sat_inc': sat_inc,
            'hue_inc': hue_inc,
            'ct_inc': ct_inc,
            'xy_inc': xy_inc,
            'scene': scene,
        }.items() if value is not None
    }
    await self._request('put', 'groups/{}/action'.format(self.id), json=data)
Change action of a group.
def _fixedpoint(D, tol=1e-7, maxiter=None):
    """Simple fixed point iteration method for MLE of Dirichlet distribution.

    Iterates ``a1 = _ipsi(psi(a0.sum()) + logp)`` until the change in
    log-likelihood drops below *tol*.

    NOTE: uses ``xrange``, so this code targets Python 2.
    """
    N, K = D.shape
    logp = log(D).mean(axis=0)
    a0 = _init_a(D)
    if maxiter is None:
        maxiter = MAXINT
    for i in xrange(maxiter):
        a1 = _ipsi(psi(a0.sum()) + logp)
        # Converged once the log-likelihood stops improving.
        if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol:
            return a1
        a0 = a1
    raise Exception('Failed to converge after {} iterations, values are {}.'
                    .format(maxiter, a1))
Simple fixed point iteration method for MLE of Dirichlet distribution
def download_file(self, regex, dest_dir):
    """Download a file by regex from the specified S3 bucket.

    The downloaded file keeps the same name it has on S3.

    :param regex: (str) Regular expression matching the S3 key for the
        file to be downloaded.
    :param dest_dir: (str) Full path destination directory
    :return: (str) Downloaded file destination on success, None otherwise.
    """
    log = logging.getLogger(self.cls_logger + '.download_file')
    # NOTE: basestring implies this module targets Python 2.
    if not isinstance(regex, basestring):
        log.error('regex argument is not a string')
        return None
    if not isinstance(dest_dir, basestring):
        log.error('dest_dir argument is not a string')
        return None
    if not os.path.isdir(dest_dir):
        log.error('Directory not found on file system: %s', dest_dir)
        return None
    key = self.find_key(regex)
    if key is None:
        log.warn('Could not find a matching S3 key for: %s', regex)
        return None
    return self.__download_from_s3(key, dest_dir)
Downloads a file by regex from the specified S3 bucket This method takes a regular expression as the arg, and attempts to download the file to the specified dest_dir as the destination directory. This method sets the downloaded filename to be the same as it is on S3. :param regex: (str) Regular expression matching the S3 key for the file to be downloaded. :param dest_dir: (str) Full path destination directory :return: (str) Downloaded file destination if the file was downloaded successfully, None otherwise.
def tally(self, name, value):
    """Add *value* to the "used" metric for quota *name* and refresh it.

    A falsy *value* (None, 0, "") counts as 0.
    """
    amount = int(value or 0)
    usage = self.usages[name]
    # Initialize the counter lazily on first use.
    usage.setdefault('used', 0)
    usage['used'] += amount
    self.update_available(name)
Adds to the "used" metric for the given quota.
def dysmetria_score(self, data_frame):
    """Calculate accuracy of target taps in pixels.

    :param data_frame: the data frame (expects columns ``action_type``,
        ``x``, ``y``, ``x_target``, ``y_target``, ``td``)
    :type data_frame: pandas.DataFrame
    :return: (ds, duration) where ds is the mean Euclidean distance in
        pixels between taps and their targets
    :rtype: (float, int)
    """
    # action_type == 0 presumably marks tap events — TODO confirm upstream.
    tap_data = data_frame[data_frame.action_type == 0]
    ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2))
    # NOTE(review): td[-1] is label-based Series indexing; assumes the
    # index contains -1 or relies on legacy positional fallback — verify.
    duration = math.ceil(data_frame.td[-1])
    return ds, duration
This method calculates accuracy of target taps in pixels :param data_frame: the data frame :type data_frame: pandas.DataFrame :return ds: dysmetria score in pixels :rtype ds: float
def queue_purge(self, queue, **kwargs):
    """Discard all messages in the queue.

    This will delete the messages and results in an empty queue.

    :return: the number of messages purged.
    """
    return self.channel.queue_purge(queue=queue).message_count
Discard all messages in the queue. This will delete the messages and results in an empty queue.
def get_prefix_source(cls):
    """Return the prefix source, by default derived from ``site``."""
    try:
        # A subclass may provide an explicit override.
        return cls.override_prefix()
    except AttributeError:
        pass
    if hasattr(cls, '_prefix_source'):
        return cls.site + cls._prefix_source
    return cls.site
Return the prefix source, by default derived from site.
def create(self, auth, type, desc, defer=False):
    """Create something in Exosite.

    Args:
        auth: <cik>
        type: What thing to create.
        desc: Information about thing.
        defer: If True, defer the RPC call (see ``_call``).
    """
    return self._call('create', auth, [type, desc], defer)
Create something in Exosite. Args: auth: <cik> type: What thing to create. desc: Information about thing.
def classify_elements(self, file, file_content_type=None, model=None, **kwargs):
    """Classify the elements of a document.

    Analyzes the structural and semantic elements of a document.

    :param file file: The document to classify.
    :param str file_content_type: The content type of file.
    :param str model: The analysis model to be used by the service.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and
        HTTP status code.
    :rtype: DetailedResponse
    """
    if file is None:
        raise ValueError('file must be provided')

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('compare-comply', 'V1', 'classify_elements')
    headers.update(sdk_headers)

    params = {'version': self.version, 'model': model}

    form_data = {}
    # Upload as multipart form data; default to a generic binary type.
    form_data['file'] = (None, file, file_content_type or
                         'application/octet-stream')

    url = '/v1/element_classification'
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        files=form_data,
        accept_json=True)
    return response
Classify the elements of a document. Analyzes the structural and semantic elements of a document. :param file file: The document to classify. :param str file_content_type: The content type of file. :param str model: The analysis model to be used by the service. For the **Element classification** and **Compare two documents** methods, the default is `contracts`. For the **Extract tables** method, the default is `tables`. These defaults apply to the standalone methods as well as to the methods' use in batch-processing requests. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def start(self):
    """Start the configfile change monitoring thread.

    Reads the route spec file once at startup (pushing it onto the
    queue if valid), then starts a watchdog observer on the file's
    parent directory for subsequent changes.
    """
    fname = self.conf['file']

    logging.info("Configfile watcher plugin: Starting to watch route spec "
                 "file '%s' for changes..." % fname)

    # Initial content of file needs to be processed at least once, before
    # watching for any changes to it.
    route_spec = {}
    try:
        route_spec = read_route_spec_config(fname)
        if route_spec:
            self.last_route_spec_update = datetime.datetime.now()
            self.q_route_spec.put(route_spec)
    except ValueError as e:
        # A bad spec is logged but not fatal; the watcher still starts.
        logging.warning("Cannot parse route spec: %s" % str(e))

    # Now prepare to watch for any changes in that file. Find the parent
    # directory of the config file, since this is where we will attach the
    # watcher.
    abspath    = os.path.abspath(fname)
    parent_dir = os.path.dirname(abspath)

    handler = RouteSpecChangeEventHandler(
                                    route_spec_fname   = fname,
                                    route_spec_abspath = abspath,
                                    q_route_spec       = self.q_route_spec,
                                    plugin             = self)
    self.observer_thread = watchdog.observers.Observer()
    self.observer_thread.name = "ConfMon"
    self.observer_thread.schedule(handler, parent_dir)
    self.observer_thread.start()
Start the configfile change monitoring thread.
def is_product_owner(self, team_id):
    """Return True if the user is a PRODUCT_OWNER of *team_id*.

    Super admins are treated as product owners of every team.
    """
    if self.is_super_admin():
        return True
    # Normalize the id to a UUID before membership testing.
    return uuid.UUID(str(team_id)) in self.child_teams_ids
Ensure the user is a PRODUCT_OWNER.
def gen_file_jinja(self, template_file, data, output, dest_path):
    """Fill data into the project template, using jinja2.

    Args:
        template_file: Template filename relative to ``self.TEMPLATE_DIR``.
        data: Mapping passed to the template as the render context.
        output: Output filename, created inside *dest_path*.
        dest_path: Destination directory; created if missing.

    Returns:
        Tuple of (directory of the output file, full output path).
    """
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    output = join(dest_path, output)
    logger.debug("Generating: %s" % output)
    env = Environment()
    env.loader = FileSystemLoader(self.TEMPLATE_DIR)
    template = env.get_template(template_file)
    target_text = template.render(data)
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original left it for the GC to close).
    with open(output, "w") as out_file:
        out_file.write(target_text)
    return dirname(output), output
Fills data to the project template, using jinja2.
def list_targets_by_instance(self, instance_id, target_list=None):
    """Return a list of FloatingIpTarget objects for a FIP association.

    :param instance_id: ID of target VM instance
    :param target_list: (optional) a list returned by list_targets().
        If specified, lookup is done against this list to save extra
        API calls to the back-end. Otherwise the target list is
        retrieved from the back-end inside the method.
    """
    if target_list is not None:
        # No need to pass/filter networks because we're filtering the
        # pre-fetched list by instance_id.
        return [target for target in target_list
                if target['instance_id'] == instance_id]
    else:
        ports = self._target_ports_by_instance(instance_id)
        reachable_subnets = self._get_reachable_subnets(
            ports, fetch_router_ports=True)
        name = self._get_server_name(instance_id)
        targets = []
        for p in ports:
            for ip in p.fixed_ips:
                # Skip subnets not reachable from the external network.
                if ip['subnet_id'] not in reachable_subnets:
                    continue
                # Only IPv4 addresses can be associated with a FIP.
                if netaddr.IPAddress(ip['ip_address']).version != 4:
                    continue
                targets.append(FloatingIpTarget(p, ip['ip_address'], name))
        return targets
Returns a list of FloatingIpTarget objects of FIP association. :param instance_id: ID of target VM instance :param target_list: (optional) a list returned by list_targets(). If specified, looking up is done against the specified list to save extra API calls to a back-end. Otherwise target list is retrieved from a back-end inside the method.
def db(self, entity, query_filters="size=10"):
    """Allow an entity to access the historic data.

    Args:
        entity (string): Name of the device to query
        query_filters (string): Elasticsearch response format string,
            e.g. "pretty=true&size=10"
    """
    if self.entity_api_key == "":
        return {'status': 'failure', 'response': 'No API key found in request'}

    historic_url = self.base_url + "api/0.1.0/historicData?" + query_filters
    historic_headers = {
        "apikey": self.entity_api_key,
        "Content-Type": "application/json"
    }

    historic_query_data = json.dumps({
        "query": {
            "match": {
                "key": entity
            }
        }
    })

    # NOTE(review): SSL verification is disabled for this request via
    # the no_ssl_verification context — confirm this is intentional.
    with self.no_ssl_verification():
        r = requests.get(historic_url, data=historic_query_data, headers=historic_headers)
    response = dict()
    if "No API key" in str(r.content.decode("utf-8")):
        response["status"] = "failure"
    else:
        # On success the raw (decoded) body is returned, not a dict.
        r = r.content.decode("utf-8")
        response = r
    return response
This function allows an entity to access the historic data. Args: entity (string): Name of the device to listen to query_filters (string): Elastic search response format string example, "pretty=true&size=10"
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.

    Raise ValueError if not a valid ImageJ hyperstack shape.

    >>> imagej_shape((2, 3, 4, 5, 3), False)
    (2, 3, 4, 5, 3, 1)
    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    # BUG FIX: the original `if 1 > ndim > 6:` can never be true, so the
    # dimensionality check was dead code. A hyperstack must be 2..6-D.
    if ndim < 2 or ndim > 6:
        raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
    if rgb is None:
        # Guess RGB from a trailing samples axis of size 3 or 4.
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError('invalid ImageJ hyperstack: not a RGB image')
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError('invalid ImageJ hyperstack: not a non-RGB image')
    if rgb or shape[-1] == 1:
        # Samples axis already present; pad leading axes up to 6D.
        return (1, ) * (6 - ndim) + shape
    # Append a singleton samples axis, padding leading axes to 6D.
    return (1, ) * (5 - ndim) + shape + (1,)
Return shape normalized to 6D ImageJ hyperstack TZCYXS. Raise ValueError if not a valid ImageJ hyperstack shape. >>> imagej_shape((2, 3, 4, 5, 3), False) (2, 3, 4, 5, 3, 1)
def write_array_empty(self, key, value):
    """Write a 0-len array.

    A zero-length ndarray cannot be stored directly, so a dummy
    placeholder array is written and the real dtype and shape are
    recorded as node attributes for reconstruction on read.
    """
    # ugly hack for length 0 axes
    arr = np.empty((1,) * value.ndim)
    self._handle.create_array(self.group, key, arr)
    getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
    getattr(self.group, key)._v_attrs.shape = value.shape
write a 0-len array
def pad(obj, pad_length):
    """Return a copy of the object with its piano-roll padded with zeros
    at the end along the time axis.

    Parameters
    ----------
    pad_length : int
        The length to pad along the time axis with zeros.
    """
    _check_supported(obj)
    padded = deepcopy(obj)
    padded.pad(pad_length)
    return padded
Return a copy of the object with piano-roll padded with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad along the time axis with zeros.
def map_block_storage(service, pool, image):
    """Map a RADOS block device for local use.

    Runs ``rbd map`` authenticating as *service* with its keyfile.
    Raises CalledProcessError (via check_call) if the command fails.
    """
    cmd = [
        'rbd',
        'map',
        '{}/{}'.format(pool, image),
        '--user',
        service,
        '--secret',
        _keyfile_path(service),
    ]
    check_call(cmd)
Map a RADOS block device for local use.
def walk(self):
    """Get the non-overlapping read intervals that match the subject.

    @return: A generator that produces (TYPE, (START, STOP)) tuples,
        where TYPE is either self.EMPTY or self.FULL. STOP is exclusive,
        i.e. the interval is really [START, STOP).
    """
    intervals = sorted(self._intervals)

    def nextFull():
        # Pop and merge all overlapping intervals into one full span.
        start, stop = intervals.pop(0)
        while intervals:
            if intervals[0][0] <= stop:
                _, thisStop = intervals.pop(0)
                if thisStop > stop:
                    stop = thisStop
            else:
                break
        return (start, stop)

    if intervals:
        # Leading empty gap before the first interval, if any.
        if intervals[0][0] > 0:
            yield (self.EMPTY, (0, intervals[0][0]))
        while intervals:
            lastFull = nextFull()
            yield (self.FULL, lastFull)
            if intervals:
                # Empty gap between merged full spans.
                yield (self.EMPTY, (lastFull[1], intervals[0][0]))

        # Trailing empty gap up to the target length, if any.
        if lastFull[1] < self._targetLength:
            yield (self.EMPTY, (lastFull[1], self._targetLength))
    else:
        # No intervals at all: the whole range is empty.
        yield (self.EMPTY, (0, self._targetLength))
Get the non-overlapping read intervals that match the subject. @return: A generator that produces (TYPE, (START, END)) tuples, where where TYPE is either self.EMPTY or self.FULL and (START, STOP) is the interval. The endpoint (STOP) of the interval is not considered to be in the interval. I.e., the interval is really [START, STOP).
def str_from_file(path):
    """Return file contents as a stripped string."""
    with open(path) as handle:
        return handle.read().strip()
Return file contents as string.
def xpathNsLookup(self, prefix):
    """Search the namespace declaration array of the context for the
    namespace name associated to the given prefix."""
    ret = libxml2mod.xmlXPathNsLookup(self._o, prefix)
    return ret
Search in the namespace declaration array of the context for the given namespace name associated to the given prefix
def _read_para_hip_mac_2(self, code, cbit, clen, *, desc, length, version):
    """Read a HIP HIP_MAC_2 parameter [RFC 7401].

    Consumes ``clen`` bytes of HMAC from the stream, returning a dict
    with the parameter type, critical bit, length and HMAC bytes; any
    trailing padding up to ``length`` is consumed as well.
    """
    _hmac = self._read_fileng(clen)

    hip_mac_2 = dict(
        type=desc,
        critical=cbit,
        length=clen,
        hmac=_hmac,
    )

    # Skip padding up to the parameter's full (padded) length.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return hip_mac_2
Read HIP HIP_MAC_2 parameter. Structure of HIP HIP_MAC_2 parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | HMAC | / / / +-------------------------------+ | | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hip_mac_2.type Parameter Type 1 15 hip_mac_2.critical Critical Bit 2 16 hip_mac_2.length Length of Contents 4 32 hip_mac_2.hmac HMAC ? ? - Padding