code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def summary(doc):
    """Generate markdown for the summary section.

    Parameters
    ----------
    doc : dict
        Output from numpydoc.

    Returns
    -------
    list of str
        Markdown strings.
    """
    out = []
    # Both summary sections are handled identically; each non-empty one
    # is joined, footnote-fixed, and followed by a blank separator line.
    for section in ("Summary", "Extended Summary"):
        if section in doc and len(doc[section]) > 0:
            out.append(fix_footnotes(" ".join(doc[section])))
            out.append("\n")
    return out
def split_sentences_regex(text):
    """Use a dead-simple regex to split text into sentences. Very poor accuracy.

    A sentence boundary is any alphanumeric character followed by ``.``,
    ``?`` or ``!`` and then a whitespace character.

    >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson")
    ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson']
    """
    # The stdlib `re` module fully supports this pattern; the third-party
    # `regex` module adds nothing here.
    import re

    # NOTE: `$` inside the character class is a literal dollar sign, not
    # end-of-string -- kept as-is to preserve the original behavior.
    parts = re.split(r'([a-zA-Z0-9][.?!])[\s$]', text)
    # Capture groups alternate with the split chunks; re-glue each pair.
    sentences = [''.join(pair) for pair in zip(parts[0::2], parts[1::2])]
    # An odd part count means trailing text with no sentence terminator.
    return sentences + [parts[-1]] if len(parts) % 2 else sentences
def remove_option(self, mask):
    """Unset arbitrary query flags using a bitmask.

    To unset the tailable flag: ``cursor.remove_option(2)``.
    Returns self to allow chaining.
    """
    if not isinstance(mask, int):
        raise TypeError("mask must be an int")
    self.__check_okay_to_chain()

    # Clearing the exhaust bit also turns off exhaust mode on this cursor.
    if mask & _QUERY_OPTIONS["exhaust"]:
        self.__exhaust = False

    self.__query_flags &= ~mask
    return self
def check_tool(command):
    """Check that a tool can be invoked by 'command'.

    If command is not an absolute path, checks if it can be found in 'path'.
    If command is an absolute path, checks that it exists.  Returns 'command'
    if OK.

    NOTE(review): on failure this falls through and returns None, although
    the original docs said "empty string" -- confirm what callers expect.
    """
    assert is_iterable_typed(command, basestring)
    # Accept the tool if either the first or last element of the command
    # line resolves to an invokable program.
    if check_tool_aux(command[0]) or check_tool_aux(command[-1]):
        return command
def driverDebugRequest(self, unDeviceIndex, pchRequest, pchResponseBuffer, unResponseBufferSize):
    """Send a request to the driver for the specified device; return the response.

    Per the upstream docs: the maximum response size is 32k, but a smaller
    buffer may be supplied; an oversized response is truncated. The return
    value is the size of the response including its terminating null.
    """
    # Thin dispatch through the OpenVR C function table.
    fn = self.function_table.driverDebugRequest
    return fn(unDeviceIndex, pchRequest, pchResponseBuffer, unResponseBufferSize)
def copy(self):
    """Create an exact copy of this quaternion.

    NOTE(review): the trailing False presumably suppresses re-normalization
    so the components are copied verbatim -- confirm against Quaternion's
    constructor signature.
    """
    w, x, y, z = self.w, self.x, self.y, self.z
    return Quaternion(w, x, y, z, False)
def classes(self):
    """Iterate over the defined Classes."""
    # Walk the CLIPS environment's defclass linked list; NULL ends it.
    current = lib.EnvGetNextDefclass(self._env, ffi.NULL)
    while current != ffi.NULL:
        yield Class(self._env, current)
        current = lib.EnvGetNextDefclass(self._env, current)
def _max_weight_state(states: Iterable[TensorProductState]) -> Union[None, TensorProductState]:
    """Construct a TensorProductState from the single-qubit state at each qubit.

    Returns ``None`` if the input states are not compatible, i.e. two states
    assign different single-qubit states to the same qubit.

    For example, the max_weight_state of ["(+X, q0)", "(-Z, q1)"] is
    "(+X, q0; -Z q1)"; asking for ["(+X, q0)", "(+Z, q0)"] returns None.
    """
    qubit_states = dict()
    for state in states:
        for oneq_state in state.states:
            existing = qubit_states.get(oneq_state.qubit)
            if existing is None:
                qubit_states[oneq_state.qubit] = oneq_state
            elif existing != oneq_state:
                # Conflicting assignments for the same qubit: incompatible.
                return None
    return TensorProductState(list(qubit_states.values()))
def list_path_traversal(path):
    """Return a full list of directories leading up to, and including, *path*.

    ``list_path_traversal('/path/to/salt')`` returns
    ``['/', '/path', '/path/to', '/path/to/salt']`` in that order.
    Works on Windows paths as well (``'c:\\path\\to\\salt'``).
    """
    out = [path]
    head, tail = os.path.split(path)
    if not tail:
        # *path* ended with a separator; start from its parent instead.
        out = [head]
        head, tail = os.path.split(head)
    # Walk upward until os.path.split stops making progress (the root).
    while head != out[0]:
        out.insert(0, head)
        head, tail = os.path.split(head)
    return out
def voltage(self):
    """Return the voltage from the ADC pin as a floating point value."""
    # Scale the raw signed ADC code by the gain's full-scale range over
    # the maximum positive code for the configured bit depth.
    full_scale = _ADS1X15_PGA_RANGE[self._ads.gain]
    max_code = (2 ** (self._ads.bits - 1)) - 1
    return self.value * (full_scale / max_code)
def namfrm(frname):
    """Look up the frame ID code associated with a string.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/namfrm_c.html

    :param frname: The name of some reference frame.
    :type frname: str
    :return: The SPICE ID code of the frame.
    :rtype: int
    """
    name_p = stypes.stringToCharP(frname)
    # The C routine writes the result into this out-parameter.
    frcode = ctypes.c_int()
    libspice.namfrm_c(name_p, ctypes.byref(frcode))
    return frcode.value
def clamp(self, clampVal):
    """Clamp all components in the vector to at most *clampVal*.

    Only an upper bound is applied; components below clampVal are untouched.
    """
    for name in ('x', 'y', 'z', 'w'):
        if getattr(self, name) > clampVal:
            setattr(self, name, clampVal)
def append_code(original, codefile):
    """Append the contents of one file to another.

    :param original: name of the file that will be appended to
    :type original: str
    :param codefile: name of the file whose contents will be appended
    :type codefile: str

    Particularly useful to replace a function in student code with your own
    implementation: in Python the latter of two same-named definitions wins,
    so overriding a function is as simple as appending a new definition.

    Example usage::

        check50.include("lookup.py")
        check50.py.append_code("helpers.py", "lookup.py")
    """
    # codefile is opened first so a missing codefile does not create or
    # touch the destination file.
    with open(codefile) as source, open(original, "a") as destination:
        destination.write("\n")
        destination.writelines(source)
def copy_update(pb_message, **kwds):
    """Return a copy of the protobuf message with some fields updated.

    Args:
        pb_message: the protobuf message to copy.
        **kwds: field-name/value pairs to set on the copy.

    Returns:
        A new message of the same class with the requested fields replaced;
        the input message is left unmodified.
    """
    duplicate = type(pb_message)()
    duplicate.CopyFrom(pb_message)
    for field, value in kwds.items():
        setattr(duplicate, field, value)
    return duplicate
def query_tags(order=None, orderby=None, limit=None):
    """Query the tags, with usage count included.

    This interface is mainly used by the ``get_tags`` template tag.
    """
    # Local import: taggit is an optional dependency of this module.
    from taggit.models import Tag, TaggedItem
    EntryModel = get_entry_model()
    ct = ContentType.objects.get_for_model(EntryModel)

    # Only count published entries; optionally restrict to the current site.
    entry_filter = {
        'status': EntryModel.PUBLISHED
    }
    if appsettings.FLUENT_BLOGS_FILTER_SITE_ID:
        entry_filter['parent_site'] = settings.SITE_ID
    entry_qs = EntryModel.objects.filter(**entry_filter).values_list('pk')

    # Tags attached to those entries, annotated with their usage count.
    queryset = Tag.objects.filter(
        taggit_taggeditem_items__content_type=ct,
        taggit_taggeditem_items__object_id__in=entry_qs
    ).annotate(
        count=Count('taggit_taggeditem_items')
    )

    if orderby:
        queryset = queryset.order_by(*_get_order_by(order, orderby, TAG_ORDER_BY_FIELDS))
    else:
        # Default ordering: most-used tags first.
        queryset = queryset.order_by('-count')

    if limit:
        queryset = queryset[:limit]

    return queryset
def get_day_end(config):
    """Get the day end time given the day start, assuming a full 24 h day.

    Args:
        config (dict): Config dict. Only ``day_start`` is used.

    Returns:
        datetime.time: one second before ``day_start``.

    Note:
        Merely a convenience function so we do not have to deduce this from
        ``day_start`` by hand all the time.
    """
    start = datetime.datetime.combine(datetime.date.today(),
                                      config['day_start'])
    # Going back one second wraps correctly across midnight via timedelta.
    return (start - datetime.timedelta(seconds=1)).time()
def get_token_network_identifiers(
        chain_state: ChainState,
        payment_network_id: PaymentNetworkID,
) -> List[TokenNetworkID]:
    """Return the list of token networks registered with the given payment network."""
    payment_network = chain_state.identifiers_to_paymentnetworks.get(payment_network_id)
    if payment_network is None:
        # Unknown payment network: nothing registered.
        return []
    token_networks = payment_network.tokenidentifiers_to_tokennetworks.values()
    return [token_network.address for token_network in token_networks]
def get_file_handler(file_path="out.log", level=logging.INFO,
                     log_format=log_formats.easy_read,
                     handler=logging.FileHandler, **handler_kwargs):
    """Set up a file handler to add to a logger.

    :param file_path: file to write the log to, defaults to out.log
    :param level: logging level to set the handler at
    :param log_format: formatter to use
    :param handler: logging handler class to use, defaults to FileHandler
    :param handler_kwargs: options to pass to the handler
    :return: configured handler instance
    """
    file_handler = handler(file_path, **handler_kwargs)
    file_handler.setLevel(level)
    file_handler.setFormatter(logging.Formatter(log_format))
    return file_handler
def norm_package_version(version):
    """Normalize a version specifier by removing extra spaces and parentheses."""
    if not version:
        # None / empty input normalizes to an empty string.
        return ''
    # Re-join around commas to collapse whitespace around each clause.
    version = ','.join(part.strip() for part in version.split(',')).strip()
    if version.startswith('(') and version.endswith(')'):
        version = version[1:-1]
    # Strip any remaining whitespace characters inside the string.
    return ''.join(ch for ch in version if ch.strip())
def verify_checksum(self):
    """Verify the checksums recorded in the header for this HDU.

    Raises ValueError if either the data or the HDU checksum fails.
    """
    # cfitsio extension numbers are 1-based, hence the +1.
    res = self._FITS.verify_checksum(self._ext + 1)
    for key, what in (('dataok', 'data'), ('hduok', 'hdu')):
        if res[key] != 1:
            raise ValueError("%s checksum failed" % what)
def stream(self, status=values.unset, phone_number=values.unset,
           incoming_phone_number_sid=values.unset,
           friendly_name=values.unset, unique_name=values.unset,
           limit=None, page_size=None):
    """Stream DependentHostedNumberOrderInstance records from the API as a generator.

    Records are loaded lazily and efficiently until the limit is reached;
    the result is a generator, so this operation is memory efficient.

    :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
    :param unicode phone_number: An E164 formatted phone number.
    :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
    :param unicode friendly_name: A human readable description of this resource.
    :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
    :param int limit: Upper limit for the number of records to return.
                      stream() guarantees to never return more than limit.
                      Default is no limit.
    :param int page_size: Number of records to fetch per request; when not
                          set the default of 50 records is used. If only a
                          limit is set, the most efficient page size is
                          chosen, i.e. min(limit, 1000).

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
    """
    # Resolve the effective page size / total limit for this read.
    limits = self._version.read_limits(limit, page_size)

    # Fetch the first page; the version streamer pulls the rest lazily.
    page = self.page(
        status=status,
        phone_number=phone_number,
        incoming_phone_number_sid=incoming_phone_number_sid,
        friendly_name=friendly_name,
        unique_name=unique_name,
        page_size=limits['page_size'],
    )

    return self._version.stream(page, limits['limit'], limits['page_limit'])
def calcOffset(self, x, y):
    """Calculate the offset into the data array for coordinates (x, y).

    Rows are stored top-to-bottom, so *y* is flipped. Only used to test
    correctness of the formula.
    """
    row = self.size - y - 1
    return self.size * row + x
def search(self, keyword):
    """Return all buildings related to the provided query.

    :param keyword: The keyword for your map search

    >>> results = n.search('Harrison')
    """
    payload = {
        "source": "map",
        "description": keyword
    }
    data = self._request(ENDPOINTS['SEARCH'], payload)
    # Drop any malformed (non-dict) entries from the result list.
    data['result_data'] = [item for item in data['result_data']
                           if isinstance(item, dict)]
    return data
def get_model_url_base():
    """Return the root URL folder for pretrained models.

    Honors the NNBLA_MODELS_URL_BASE environment override when present.
    """
    url_base = get_model_url_base_from_env()
    if url_base is None:
        return 'https://nnabla.org/pretrained-models/nnp_models/'
    logger.info('NNBLA_MODELS_URL_BASE is set as {}.'.format(url_base))
    return url_base
def parse_column_names(text):
    """Extract column names from a comma-separated, backtick-quoted list.

    :param text: Line extracted from MySQL's ``INSERT INTO`` statement
        containing quoted and comma separated column names.
    :type text: str
    :return: Tuple containing just the column names.
    :rtype: tuple[str]
    """
    names = []
    for chunk in text.split(","):
        # Strip whitespace, then any surrounding backticks.
        names.append(re.sub(r"^`(.*)`$", r"\1", chunk.strip()))
    return tuple(names)
def _queue_models(self, models, context):
    """Work out an appropriate ordering for the models.

    This isn't essential, but makes the generated script look nicer because
    more instances can be defined on their first try.
    """
    model_queue = []
    number_remaining_models = len(models)
    # Allow at most one full pass over the remaining models without any
    # progress before giving up on dependency ordering.
    MAX_CYCLES = number_remaining_models
    allowed_cycles = MAX_CYCLES

    while number_remaining_models > 0:
        previous_number_remaining_models = number_remaining_models

        model = models.pop(0)

        # Queue the model once all its dependencies are already queued;
        # otherwise push it to the back and try again later.
        if check_dependencies(model, model_queue, context["__avaliable_models"]):
            model_class = ModelCode(model=model, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options)
            model_queue.append(model_class)
        else:
            models.append(model)

        number_remaining_models = len(models)
        if number_remaining_models == previous_number_remaining_models:
            allowed_cycles -= 1
            if allowed_cycles <= 0:
                # Deadlocked on circular/missing dependencies: append the
                # remaining models as-is and stop trying.
                missing_models = [ModelCode(model=m, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options) for m in models]
                model_queue += missing_models
                models[:] = missing_models
                break
        else:
            # Progress was made; reset the cycle budget.
            allowed_cycles = MAX_CYCLES

    return model_queue
def admin_password(self, environment, target_name, password):
    """Set the admin password on the target; return True if it succeeded."""
    cmd = ["ssh", environment.deploy_target, "admin_password",
           target_name, password]
    try:
        remote_server_command(cmd, environment, self, clean_up=True)
    except WebCommandError:
        return False
    return True
def getBufferedFiles(self, block_id):
    """Get some files from the insert buffer.

    The connection is always closed, even when acquiring it or executing
    the query raises.
    """
    # BUG FIX: conn must be pre-bound; previously, if dbi.connection()
    # raised, the finally clause hit an unbound local name.
    conn = None
    try:
        conn = self.dbi.connection()
        return self.buflist.execute(conn, block_id)
    finally:
        if conn:
            conn.close()
def _read_file(self, filename):
    """Read a Python object from a cache file.

    Reads a pickled object from disk and returns it.

    :param filename: Name of the file that should be read.
    :type filename: str
    :rtype: object
    """
    # BUG FIX: use context managers so the file handle is closed even when
    # pickle.load raises; the original leaked the handle on error.
    opener = gzip.GzipFile if self.__compression else open
    with opener(filename, "rb") as handle:
        return pickle.load(handle)
def _load(self, files_in, files_out, urlpath, meta=True):
    """Download a set of files in parallel via dask, caching them locally.

    Returns the list of local cache paths; files already on disk are not
    re-downloaded.
    """
    import dask
    out = []
    outnames = []
    for file_in, file_out in zip(files_in, files_out):
        cache_path = file_out.path
        outnames.append(cache_path)

        # If the file is the same as the source, skip it.
        if cache_path == urlpath:
            continue

        # Only schedule a download when the file is not already cached.
        if not os.path.isfile(cache_path):
            logger.debug("Caching file: {}".format(file_in.path))
            logger.debug("Original path: {}".format(urlpath))
            logger.debug("Cached at: {}".format(cache_path))
            if meta:
                self._log_metadata(urlpath, file_in.path, cache_path)
            # Defer the actual download so dask can run them in parallel.
            ddown = dask.delayed(_download)
            out.append(ddown(file_in, file_out, self.blocksize, self.output))
    # Execute all pending downloads.
    dask.compute(*out)
    return outnames
def connected_component_labels(edges, node_count=None):
    """Label graph nodes from an edge list, using scipy.sparse.csgraph.

    Parameters
    ----------
    edges : (n, 2) int
        Edges of a graph.
    node_count : int, or None
        The largest node in the graph, or None to infer it.

    Returns
    --------
    labels : (node_count,) int
        Component labels for each node.
    """
    matrix = edges_to_coo(edges, node_count)
    body_count, labels = csgraph.connected_components(
        matrix, directed=False)

    # BUG FIX: only run the sanity check when node_count was supplied;
    # `len(labels) == None` is always False, so the assert used to fire
    # unconditionally for the documented node_count=None case.
    if node_count is not None:
        assert len(labels) == node_count

    return labels
def update_authorization(self, authorization_form):
    """Update an existing authorization.

    arg:    authorization_form (osid.authorization.AuthorizationForm):
            the form previously returned from
            ``get_authorization_form_for_update()``
    raise:  IllegalState - ``authorization_form`` already used in an
            update transaction
    raise:  InvalidArgument - one or more of the form elements is invalid
    raise:  NullArgument - ``authorization_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``authorization_form`` did not originate from
            ``get_authorization_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('authorization',
                                     collection='Authorization',
                                     runtime=self._runtime)
    if not isinstance(authorization_form, ABCAuthorizationForm):
        raise errors.InvalidArgument('argument type is not an AuthorizationForm')
    if not authorization_form.is_for_update():
        raise errors.InvalidArgument('the AuthorizationForm is for update only, not create')
    try:
        # Each form may be used at most once; the session tracks its state.
        if self._forms[authorization_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('authorization_form already used in an update transaction')
    except KeyError:
        # Unknown form id: it was not handed out by this session.
        raise errors.Unsupported('authorization_form did not originate from this session')
    if not authorization_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')

    collection.save(authorization_form._my_map)

    # Mark the form as consumed so it cannot be reused.
    self._forms[authorization_form.get_id().get_identifier()] = UPDATED

    return objects.Authorization(
        osid_object_map=authorization_form._my_map,
        runtime=self._runtime,
        proxy=self._proxy)
def fire_master(self, data, tag, preload=None):
    """Fire an event off on the master server.

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire_master 'stuff to be in the event' 'tag'
    """
    load = {}
    if preload:
        load.update(preload)
    load['id'] = self.opts['id']
    load['tag'] = tag
    load['data'] = data
    load['cmd'] = '_minion_event'
    load['tok'] = self.auth.gen_token(b'salt')

    channel = salt.transport.client.ReqChannel.factory(self.opts)
    try:
        channel.send(load)
    except Exception:
        # Best-effort delivery: send failures are deliberately ignored.
        pass
    finally:
        channel.close()
    return True
def flatten(l, unique=True):
    """Flatten a list of lists.

    Parameters
    ----------
    l : list of lists
    unique : boolean
        whether or not only unique items are wanted (default=True)

    Returns
    -------
    list of single items

    Examples
    --------
    >>> flatten([[1, 2], [3, 4], [5, 6]])
    [1, 2, 3, 4, 5, 6]
    """
    # BUG FIX: reduce() with no initial value raised TypeError on an empty
    # outer list; chain.from_iterable handles it naturally.
    from itertools import chain

    flat = list(chain.from_iterable(l))
    if not unique:
        return flat
    # dict.fromkeys dedupes while preserving first-seen order, unlike the
    # previous list(set(...)) whose ordering was arbitrary and contradicted
    # the documented example.
    return list(dict.fromkeys(flat))
def call_workflow_event(instance, event, after=True):
    """Call the before/after workflow event handler for *instance*.

    Returns True when a handler module and function were found and invoked.
    """
    if not event.transition:
        return False

    portal_type = instance.portal_type
    module_name = '{}.events'.format(portal_type.lower())
    wf_module = _load_wf_module(module_name)
    if not wf_module:
        return False

    # Handlers are named after_<transition> / before_<transition>.
    prefix = "after" if after else "before"
    func_name = "{}_{}".format(prefix, event.transition.id)
    func = getattr(wf_module, func_name, False)
    if not func:
        return False

    logger.info('WF event: {0}.events.{1}'
                .format(portal_type.lower(), func_name))
    func(instance)
    return True
def results_to_csv(query_name, **kwargs):
    """Generate CSV from the result data of a stored query."""
    query = get_result_set(query_name, **kwargs)
    rows = query.result

    dataset = tablib.Dataset()
    # Header names come from the first result row.
    dataset.headers = list(rows[0].keys())
    for row in rows:
        dataset.append(tuple(row.values()))
    return dataset.export('csv')
def fetch(self, minion_id, pillar, *args, **kwargs):
    """Execute the configured queries, merge and return the result as a dict."""
    db_name = self._db_name()
    log.info('Querying %s for information for %s', db_name, minion_id)
    # Build the ordered list of (root, query-details) pairs to run.
    qbuffer = self.extract_queries(args, kwargs)
    with self._get_cursor() as cursor:
        for root, details in qbuffer:
            # Each query is parameterized on the minion id only.
            cursor.execute(details['query'], (minion_id,))

            # Map the result columns onto the nested-dict depth configured
            # for this query.
            self.process_fields([row[0] for row in cursor.description], details['depth'])
            self.enter_root(root)
            self.as_list = details['as_list']
            if details['with_lists']:
                self.with_lists = details['with_lists']
            else:
                self.with_lists = []
            self.ignore_null = details['ignore_null']
            self.process_results(cursor.fetchall())

    log.debug('ext_pillar %s: Return data: %s', db_name, self)
    return self.result
def installedOn(self):
    """If this item is installed on another item, return the install target.

    Otherwise return None.
    """
    try:
        connector = self.store.findUnique(
            _DependencyConnector,
            _DependencyConnector.installee == self)
        return connector.target
    except ItemNotFound:
        # Not installed anywhere.
        return None
def check_vpc(vpc_id=None, vpc_name=None, region=None, key=None, keyid=None,
              profile=None):
    """Check whether a VPC with the given name or id exists.

    Returns the vpc_id or None. Raises SaltInvocationError unless exactly
    one of vpc_id or vpc_name is provided.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.check_vpc vpc_name=myvpc profile=awsprofile
    """
    if not _exactly_one((vpc_name, vpc_id)):
        raise SaltInvocationError('One (but not both) of vpc_id or vpc_name '
                                  'must be provided.')
    if vpc_name:
        # Resolve the name to an id (None when no match).
        return _get_id(vpc_name=vpc_name, region=region, key=key,
                       keyid=keyid, profile=profile)
    if not _find_vpcs(vpc_id=vpc_id, region=region, key=key, keyid=keyid,
                      profile=profile):
        log.info('VPC %s does not exist.', vpc_id)
        return None
    return vpc_id
def optimizer(name):
    """Get a pre-registered optimizer keyed by *name*.

    `name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and
    UpperCamelCase -> snake_case conversions are included for legacy support.

    Args:
        name: name of optimizer used in registration (snake case preferred).

    Returns:
        optimizer
    """
    warn_msg = ("Please update `registry.optimizer` callsite "
                "(likely due to a `HParams.optimizer` value)")
    legacy = {"SGD": "sgd", "RMSProp": "rms_prop"}
    if name in legacy:
        new_name = legacy[name]
        tf.logging.warning(
            "'%s' optimizer now keyed by '%s'. %s" % (name, new_name, warn_msg))
        name = new_name
    else:
        snake_name = misc_utils.camelcase_to_snakecase(name)
        if name != snake_name:
            tf.logging.warning(
                "optimizer names now keyed by snake_case names. %s" % warn_msg)
            name = snake_name
    return Registries.optimizers[name]
def split_scene(geometry):
    """Given a geometry, list of geometries, or a Scene, return a single Scene.

    Parameters
    ----------
    geometry : splittable

    Returns
    ---------
    scene: trimesh.Scene
    """
    # Already a scene: nothing to do.
    if util.is_instance_named(geometry, 'Scene'):
        return geometry

    # A sequence of geometries: bundle them with their merged metadata.
    if util.is_sequence(geometry):
        metadata = {}
        for g in geometry:
            try:
                metadata.update(g.metadata)
            except BaseException:
                # Geometry without usable metadata is still included.
                continue
        return Scene(geometry, metadata=metadata)

    # Single geometry: split it into its connected bodies.
    split = collections.deque()
    metadata = {}
    for g in util.make_sequence(geometry):
        split.extend(g.split())
        metadata.update(g.metadata)

    # A lone body keeps its source file name as the scene node name.
    if len(split) == 1 and 'file_name' in metadata:
        split = {metadata['file_name']: split[0]}

    scene = Scene(split, metadata=metadata)

    return scene
def CloseCHM(self):
    """Close the CHM archive.

    Closes the CHM file if it is open; all state variables are also reset.
    NOTE(review): filename is reset to '' rather than None afterwards, so a
    second call re-enters the close path -- confirm this is intended.
    """
    if self.filename is None:
        return
    chmlib.chm_close(self.file)
    self.file = None
    self.filename = ''
    self.title = ""
    self.home = "/"
    self.index = None
    self.topics = None
    self.encoding = None
def _updateRepo(self, func, *args, **kwargs):
    """Run *func* against the repo with the specified arguments, transactionally.

    The repo is opened for writing, committed only on success, and always
    closed, so a failed update makes no changes to the repo.
    """
    self._repo.open(datarepo.MODE_WRITE)
    try:
        func(*args, **kwargs)
        self._repo.commit()
    finally:
        self._repo.close()
def token(self):
    """Get the AGS server token from the portal token handler."""
    handler = self._portalTokenHandler
    return handler.servertoken(serverURL=self._serverUrl,
                               referer=self._referer)
def get_operator_cloud(auth=None):
    """Return an operator_cloud, cached in __context__ keyed by its auth dict."""
    if auth is None:
        # Fall back to the keystone auth options from the minion config.
        auth = __salt__['config.option']('keystone', {})
    # Reuse the cached cloud only when it was built with the same credentials.
    if 'shade_opcloud' in __context__:
        if __context__['shade_opcloud'].auth == auth:
            return __context__['shade_opcloud']
    __context__['shade_opcloud'] = shade.operator_cloud(**auth)
    return __context__['shade_opcloud']
def start_container(self):
    """Add a node to the tree that represents the start of a container.

    Until end_container is called, any nodes added through add_scalar_value
    or start_container will be children of this new node.
    """
    # Save the enclosing container's accumulated length and start fresh.
    self.__container_lengths.append(self.current_container_length)
    self.current_container_length = 0
    # The new node becomes the current container; remember its parent.
    child = _Node()
    self.__container_node.add_child(child)
    self.__container_nodes.append(self.__container_node)
    self.__container_node = child
def create(self, name, incident_preference):
    """Create an alert policy via the API.

    :type name: str
    :param name: The name of the policy
    :type incident_preference: str
    :param incident_preference: Can be PER_POLICY, PER_CONDITION or
        PER_CONDITION_AND_TARGET
    :rtype: dict
    :return: The JSON response of the API
    """
    payload = {
        "policy": {
            "name": name,
            "incident_preference": incident_preference
        }
    }
    return self._post(
        url='{0}alerts_policies.json'.format(self.URL),
        headers=self.headers,
        data=payload
    )
def from_non_aligned_residue_IDs(Chain, StartResidueID, EndResidueID, Sequence = None):
    """A more forgiving constructor that does not care about residue ID padding."""
    # Normalize both IDs to the padded PDB string form before constructing.
    start = PDB.ResidueID2String(StartResidueID)
    end = PDB.ResidueID2String(EndResidueID)
    return PDBSection(Chain, start, end, Sequence=Sequence)
def skull_strip(dset, suffix='_ns', prefix=None, unifize=True):
    """Attempt to cleanly remove the skull from ``dset``.

    Dispatches to whichever registered skull-strip implementation is
    available.
    """
    method = available_method('skull_strip')
    return method(dset, suffix, prefix, unifize)
def reset_parameter_group(self, name, reset_all_params=False, parameters=None):
    """Reset some or all of the parameters of a ParameterGroup to defaults.

    :type name: string
    :param name: The name of the ParameterGroup to reset

    :type reset_all_params: bool
    :param reset_all_params: Reset every parameter to its default instead
        of a subset.

    :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
    :param parameters: The parameters to reset; ignored when
        ``reset_all_params`` is True.
    """
    params = {'DBParameterGroupName': name}
    if reset_all_params:
        params['ResetAllParameters'] = 'true'
    else:
        params['ResetAllParameters'] = 'false'
        # BUG FIX: the loop previously ran unconditionally, so calling with
        # parameters=None raised TypeError (len(None)); individual
        # parameters are only merged when resetting a subset.
        for i, parameter in enumerate(parameters or []):
            parameter.merge(params, i + 1)
    return self.get_status('ResetDBParameterGroup', params)
def next_frame_ae_tiny():
    """Conv autoencoder, tiny set for testing."""
    hp = next_frame_tiny()
    # Bitwise bottom/top modalities turn frames into discrete AE targets.
    hp.bottom["inputs"] = modalities.video_bitwise_bottom
    hp.top["inputs"] = modalities.video_top
    hp.batch_size = 8
    hp.dropout = 0.4
    return hp
def kml(self):
    """Return the KML helper bound to this server's /kml endpoint."""
    kml_url = self._url + "/kml"
    return _kml.KML(url=kml_url,
                    securityHandler=self._securityHandler,
                    proxy_url=self._proxy_url,
                    proxy_port=self._proxy_port,
                    initialize=True)
def last_message(self, timeout=5):
    """Wait up to *timeout* seconds for the worker thread, then return the
    task's last message.

    :rtype: str
    """
    worker = self._thread
    if worker is not None:
        worker.join(timeout=timeout)
    return self._task.last_message
def _make_one_char_uppercase(string: str) -> str:
    """Make a single char from the string uppercase.

    A randomly chosen lowercase character is uppercased; if the string has
    no lowercase characters it is returned unchanged.
    """
    if not isinstance(string, str):
        raise TypeError('string must be a string')

    if Aux.lowercase_count(string) > 0:
        # Keep sampling positions until we land on a lowercase character.
        while True:
            index = randbelow(len(string))
            if string[index].islower():
                chars = list(string)
                chars[index] = chars[index].upper()
                string = ''.join(chars)
                break

    return string
def parse_bool(val):
    """Parse a bool value.

    Handles a series of spellings, but you should probably standardize on
    "true" and "false".

    >>> parse_bool('y')
    True
    >>> parse_bool('FALSE')
    False
    """
    lowered = val.lower()
    if lowered in ('t', 'true', 'yes', 'y', '1', 'on'):
        return True
    if lowered in ('f', 'false', 'no', 'n', '0', 'off'):
        return False
    # Note: the message echoes the lowercased value, as before.
    raise ValueError('"%s" is not a valid bool value' % lowered)
def api(f):
    """A decorator to label an endpoint as an API.

    Uncaught exceptions are logged and returned as a JSON error response.
    """
    def wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except Exception as e:
            logging.exception(e)
            return json_error_response(get_error_msg())

    return functools.update_wrapper(wrapped, f)
def verify(self, signature, msg):
    """Verify *msg* against *signature* with this key; return True/False."""
    if not self.key:
        # No key loaded: nothing can be verified.
        return False
    try:
        self.key.verify(signature + msg)
        return True
    except ValueError:
        # Signature did not match.
        return False
def all_stop_places_quays(self) -> list:
    """Get all stop places and quays as a single list."""
    # Copy so the stored stops list is never mutated.
    combined = list(self.stops)
    combined.extend(self.quays)
    return combined
def get_table(table_name):
    """Get a registered table.

    Decorated functions will be converted to `DataFrameWrapper`.

    Parameters
    ----------
    table_name : str

    Returns
    -------
    table : `DataFrameWrapper`
    """
    table = get_raw_table(table_name)
    # Call wrapped table functions to materialize the DataFrame wrapper.
    return table() if isinstance(table, TableFuncWrapper) else table
def get_lab_managers_formatted_emails(self):
    """Return lab managers' formatted emails.

    NOTE(review): under Python 3 ``map`` returns a lazy iterator, not a
    list -- confirm callers iterate rather than index.
    """
    managers = api.get_users_by_roles("LabManager")
    name_email_pairs = map(
        lambda user: (user.getProperty("fullname"),
                      user.getProperty("email")),
        managers)
    return map(self.get_formatted_email, name_email_pairs)
async def serviceViewChanger(self, limit) -> int:
    """Service the view_changer's inBox, outBox and action queues.

    :return: the number of messages successfully serviced
    """
    if not self.isReady():
        return 0
    outbox_count = self.serviceViewChangerOutBox(limit)
    inbox_count = await self.serviceViewChangerInbox(limit)
    return outbox_count + inbox_count
def read(self, config_dir=None, clear=False, config_file=None):
    """Read configuration.

    munge's Config.read only allows reading from a config directory; this
    override additionally supports reading straight from a single config
    file via ``config_file``.
    """
    if config_file:
        data_file = os.path.basename(config_file)
        data_path = os.path.dirname(config_file)

        if clear:
            self.clear()

        config = munge.load_datafile(data_file, data_path, default=None)
        if not config:
            raise IOError("Config file not found: %s" % config_file)
        # Merge the loaded file into the existing config data.
        munge.util.recursive_update(self.data, config)
        self._meta_config_dir = data_path
        return
    else:
        # No explicit file: defer to the directory-based implementation.
        return super(Config, self).read(config_dir=config_dir, clear=clear)
def __generate_string(length):
    """Generate a random alphanumeric byte string for password creation."""
    alphabet = string.ascii_letters + string.digits
    rng = SystemRandom()
    # SystemRandom draws from the OS entropy source.
    return ''.join(rng.choice(alphabet) for _ in range(length)).encode()
def addcomment(self, comment, private=False):
    """Add the given comment to this bug.

    Set private to True to mark this comment as private.
    """
    vals = self.bugzilla.build_update(comment=comment,
                                      comment_private=private)
    log.debug("addcomment: update=%s", vals)
    return self.bugzilla.update_bugs(self.bug_id, vals)
def build_args(cmd, src, dst):
    """Build an arguments list for passing to subprocess.check_call.

    :param cmd str: Command string to interpolate src and dst filepaths
        into. Typically the output of `config.Config.uic_command` or
        `config.Config.rcc_command`.
    :param src str: Source filepath.
    :param dst str: Destination filepath.
    """
    # shlex.quote is the supported stdlib spelling (the bare `quote` came
    # from the deprecated pipes module); behavior is identical.
    command = cmd % (shlex.quote(src), shlex.quote(dst))
    return [arg for arg in shlex.split(command) if arg]
def _process_params(self, params):
    """Process param names/values as needed using the Airtable param filters."""
    processed = OrderedDict()
    # Deterministic ordering: iterate parameter names sorted alphabetically.
    for name in sorted(params):
        ParamClass = AirtableParams._get(name)
        processed.update(ParamClass(params[name]).to_param_dict())
    return processed
def parse_escape_sequences(string):
    """Parse a string for possible escape sequences.

    Sample usage:

    >>> parse_escape_sequences('foo\\nbar')
    'foo\nbar'
    >>> parse_escape_sequences('foo\\\\u0256')
    'foo\\u0256'

    :param string: Any string.
    :type string: `basestring`
    :raises: :class:`ValueError` if a backslash character is found, but it
        doesn't form a proper escape sequence with the character(s) that
        follow.
    :return: The parsed string. Parses the standard escape sequences plus
        basic \\uxxxx sequences; \\uxxxxxxxxxx sequences are not supported.
    :rtype: `unicode`
    """
    string = safe_unicode(string)
    characters = []
    i = 0
    string_len = len(string)
    while i < string_len:
        character = string[i]
        if character == '\\':
            # \uXXXX consumes six characters; any other escape two.
            if string[(i + 1):(i + 2)] == 'u':
                offset = 6
            else:
                offset = 2
            try:
                # Let the JSON string scanner decode the escape for us.
                json_string = '"' + string[i:(i + offset)] + '"'
                character = scanstring(json_string, 1)[0]
                characters.append(character)
                i += offset
            except ValueError:
                # Re-raise without chaining the internal JSON error.
                raise_from(ValueError(string), None)
        else:
            characters.append(character)
            i += 1
    return ''.join(characters)
def dateJDN(year, month, day, calendar):
    """Convert a date to a Julian Day Number.

    Standard integer-arithmetic JDN conversion; the Gregorian branch adds
    the century leap-year corrections.
    """
    a = (14 - month) // 12
    y = year + 4800 - a
    m = month + 12 * a - 3
    base = day + (153 * m + 2) // 5 + 365 * y + y // 4
    if calendar == GREGORIAN:
        return base - y // 100 + y // 400 - 32045
    return base - 32083
def _piped_input_cl(data, region, tmp_dir, out_base_file, prep_params):
    """Retrieve the commandline for streaming input into the preparation step."""
    extract_cl = _gatk_extract_reads_cl(data, region, prep_params, tmp_dir)
    return data["work_bam"], extract_cl
def run(self):
    """Run when button is pressed: Monte-Carlo estimate of pi with live updates."""
    inside = 0
    for draws in range(1, self.data['samples']):

        # Draw a point in the unit square; count hits inside the quarter circle.
        r1, r2 = (random(), random())
        if r1 ** 2 + r2 ** 2 < 1.0:
            inside += 1

        # Only report progress every 1000 draws.
        if draws % 1000 != 0:
            continue

        yield self.emit('log', {'draws': draws, 'inside': inside})

        p = inside / draws
        pi = {
            'estimate': 4.0 * inside / draws,
            # Binomial standard error, propagated to the pi estimate.
            'uncertainty': 4.0 * math.sqrt(draws * p * (1.0 - p)) / draws,
        }
        yield self.set_state(pi=pi)

    yield self.emit('log', {'action': 'done'})
def force_list(data):
    """Force ``data`` to become a list.

    Useful whenever you don't want to deal with the fact that ``NoneType``
    can't be iterated over, e.g.::

        for el in force_list(foo.get('bar')):
            ...

    Args:
        data: any Python object.

    Returns:
        list: a list representation of ``data``.  ``None`` yields an empty
        list, tuples and sets are converted, lists are returned unchanged
        (same object), anything else is wrapped in a one-element list.

    Examples:
        >>> force_list(None)
        []
        >>> force_list('foo')
        ['foo']
        >>> force_list(('foo', 'bar'))
        ['foo', 'bar']
    """
    if data is None:
        return []
    if isinstance(data, list):
        return data
    if isinstance(data, (tuple, set)):
        return list(data)
    return [data]
def make_codon_list(protein_seq, template_dna=None, include_stop=True):
    """Return a list of codons that would be translated to the given protein
    sequence.

    Codons are picked first to minimize the mutations relative to a template
    DNA sequence and second to prefer "optimal" codons.

    Fixes over the previous version:
    - Slicing a too-short template never raises IndexError, so the old
      ``try/except IndexError`` was dead code and short templates produced
      ''/partial codons; now any incomplete codon falls back to '---'.
    - ``sort()`` was called on the list stored in
      ``dna.ecoli_reverse_translate``, mutating the shared module-level
      table; ``sorted()`` now leaves it untouched.
    - An empty ``protein_seq`` no longer raises IndexError; the stop codon
      is still appended when requested.
    """
    codon_list = []
    if template_dna is None:
        template_dna = []
    for i, res in enumerate(protein_seq.upper()):
        template_codon = template_dna[3 * i:3 * i + 3]
        if len(template_codon) < 3:
            # Template exhausted (or partial): use a neutral placeholder.
            template_codon = '---'
        # sorted() copies, so the shared reverse-translation table is safe.
        possible_codons = sorted(
            dna.ecoli_reverse_translate[res],
            key=lambda x: dna.num_mutations(x, template_codon))
        codon_list.append(possible_codons[0])
    if include_stop:
        stop_codons = dna.ecoli_reverse_translate['.']
        if not codon_list or codon_list[-1] not in stop_codons:
            codon_list.append(stop_codons[0])
    return codon_list
def args_from_config(func):
    """Decorator that injects parameters from the configuration.

    Any parameter of *func* that the caller did not supply (positionally or
    by keyword) and that exists in ``get_config()`` is filled in from the
    configuration as a keyword argument.
    """
    func_args = signature(func).parameters

    @wraps(func)
    def wrapper(*args, **kwargs):
        config = get_config()
        for i, argname in enumerate(func_args):
            # Skip parameters already provided by the caller.
            if len(args) > i or argname in kwargs:
                continue
            elif argname in config:
                kwargs[argname] = config[argname]
        # Validate the final call signature up front so a still-missing
        # argument raises a TypeError annotated with configuration help.
        try:
            getcallargs(func, *args, **kwargs)
        except TypeError as exc:
            msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
            exc.args = (msg,)
            raise exc
        return func(*args, **kwargs)

    wrapper.__wrapped__ = func
    return wrapper
def get_unread_topics(context, topics, user):
    """Return the list of unread topics for the given user from a given set
    of topics.

    Usage::

        {% get_unread_topics topics request.user as unread_topics %}
    """
    handler = TrackingHandler(request=context.get('request', None))
    return handler.get_unread_topics(topics, user)
def _parse_order_by(model, order_by): out = [] for key in order_by: key = key.strip() if key.startswith("+"): out.append(getattr(model, key[1:])) elif key.startswith("-"): out.append(getattr(model, key[1:]).desc()) else: out.append(getattr(model, key)) return out
This function figures out the list of orderings for the given model and argument. Args: model (nautilus.BaseModel): The model to compute ordering against order_by (list of str): the list of fields to order_by. If the field starts with a `+` then the order is acending, if `-` descending, if no character proceeds the field, the ordering is assumed to be ascending. Returns: (list of filters): the model filters to apply to the query
def backup_location(src, loc=None):
    """Write a backup of a location.

    :param src: The source file/folder to backup
    :param loc: The target folder to backup into

    The backup is named ``<basename(src)>_backup_<timestamp>``.

    * If `loc` is left as None (or is not an absolute path), the backup is
      written into the folder `src` resides in.
    * Otherwise the specified path is used.
    """
    from photon.util.system import get_timestamp

    src = _path.realpath(src)
    # Fall back to src's own directory when loc is unset or relative.
    if not loc or not loc.startswith(_sep):
        loc = _path.dirname(src)
    # NOTE(review): join(basename(src), realpath(loc)) — realpath(loc) is
    # absolute, so os.path.join discards the basename component entirely.
    # The argument order looks swapped; confirm the intended target dir.
    pth = _path.join(_path.basename(src), _path.realpath(loc))
    out = '%s_backup_%s' % (_path.basename(src), get_timestamp())
    change_location(src, search_location(out, create_in=pth))
def _find_intervals(bundles, duration, step): segments = [] for bund in bundles: beg, end = bund['times'][0][0], bund['times'][-1][1] if end - beg >= duration: new_begs = arange(beg, end - duration, step) for t in new_begs: seg = bund.copy() seg['times'] = [(t, t + duration)] segments.append(seg) return segments
Divide bundles into segments of a certain duration and a certain step, discarding any remainder.
def _maybe_warn_for_unseparable_batches(self, output_key: str):
    """Warn once per key when the model returns a dictionary value which we
    are unable to split back up into elements of the batch.

    ``self._warn_for_unseparable_batches`` records keys already warned
    about so the warning is not repeated.
    """
    if output_key in self._warn_for_unseparable_batches:
        return
    logger.warning(f"Encountered the {output_key} key in the model's return dictionary which "
                   "couldn't be split by the batch size. Key will be ignored.")
    self._warn_for_unseparable_batches.add(output_key)
def copy(self):
    """Copy the grammar.

    Dict-valued tables are shallow-copied, list attributes are copied,
    and the start symbol is carried over unchanged.
    """
    clone = self.__class__()
    for table in ("symbol2number", "number2symbol", "dfas",
                  "keywords", "tokens", "symbol2label"):
        setattr(clone, table, getattr(self, table).copy())
    clone.labels = list(self.labels)
    clone.states = list(self.states)
    clone.start = self.start
    return clone
def remove_trailing_white_spaces(self):
    """Remove the document's trailing white spaces.

    :return: Method success.
    :rtype: bool
    """
    cursor = self.textCursor()
    # Walk every block (line) of the document starting at the top.
    block = self.document().findBlockByLineNumber(0)
    while block.isValid():
        cursor.setPosition(block.position())
        if re.search(r"\s+$", block.text()):
            # Select the whole line and re-insert it right-stripped.
            cursor.movePosition(QTextCursor.EndOfBlock)
            cursor.movePosition(QTextCursor.StartOfBlock, QTextCursor.KeepAnchor)
            cursor.insertText(foundations.strings.to_string(block.text()).rstrip())
        block = block.next()
    # Ensure the document ends with a newline.
    cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
    # NOTE(review): .isEmpty() exists on QString (PyQt4 API 1 / PySide);
    # a plain Python str has no such method — confirm the bindings in use.
    if not cursor.block().text().isEmpty():
        cursor.insertText("\n")
    return True
def get_subsections(self, section_name):
    """Return a list of subsections for the given section name.

    Sections named ``<section_name>-<subsection>[-<tag>]`` are matched;
    the '-' is the delimiter.  A tagged section whose untagged parent is
    missing raises ValueError.
    """
    prefix = section_name + '-'
    subsections = [sec[len(prefix):] for sec in self.sections()
                   if sec.startswith(prefix)]
    for sub in subsections:
        parts = sub.split('-')
        # A dash inside the subsection implies a tag; the untagged parent
        # section must then exist.
        if (len(parts) > 1) and not self.has_section('%s-%s' % (section_name, parts[0])):
            raise ValueError(
                "Workflow uses the '-' as a delimiter so "
                "this is interpreted as section-subsection-tag. "
                "While checking section %s, no section with "
                "name %s-%s was found. "
                "If you did not intend to use tags in an "
                "'advanced user' manner, or do not understand what "
                "this means, don't use dashes in section "
                "names. So [injection-nsbhinj] is good. "
                "[injection-nsbh-inj] is not." % (sub, parts[0], parts[1]))
    if subsections:
        return [sub.split('-')[0] for sub in subsections]
    if self.has_section(section_name):
        return ['']
    return []
def create_profiles(self, prefix, weeks, ip_user=False):
    """Create the user profiles for the given weeks.

    :param prefix: storage key prefix for the weekly files
    :param weeks: iterable of (year, week) pairs.
        NOTE(review): iterated twice below — must be re-iterable
        (list/tuple), not a one-shot generator; confirm callers.
    :param ip_user: forwarded to ``_create_user_profiles`` (presumably
        keys profiles by IP-based identity — verify against that method)
    :return: defaultdict mapping user to a list of profile entries
    """
    # First pass: count records across all weeks.
    record_counter = {}
    for year, week in weeks:
        file = self.storage.get(prefix, year, week)
        self.count_records(record_counter, file)
    print("Records read all: {}".format(self.stat))
    # Keep only records that survive the frequency filter.
    records_valid = self.filter_counter(record_counter)
    # Second pass: build per-user profiles from the surviving records.
    profiles = defaultdict(list)
    for year, week in weeks:
        file = self.storage.get(prefix, year, week)
        self._create_user_profiles(profiles, file, records_valid, ip_user,
                                   year, week)
    return profiles
def _check_response_for_request_errors(self): if self.response.HighestSeverity == "ERROR": for notification in self.response.Notifications: if notification.Severity == "ERROR": raise FedexError(notification.Code, notification.Message)
Override this in each service module to check for errors that are specific to that module. For example, invalid tracking numbers in a Tracking request.
def interlink_translated_content(generator):
    """Make translations link to the native locations for generators that
    may contain translated content."""
    for content in GeneratorInspector(generator).all_contents():
        interlink_translations(content)
def _updateTargetFromNode(self):
    """Apply the configuration (x/y visibility and alpha) to the grid of
    the plot item, then refresh it."""
    self.plotItem.showGrid(x=self.xGridCti.configValue,
                           y=self.yGridCti.configValue,
                           alpha=self.alphaCti.configValue)
    self.plotItem.updateGrid()
def stop(self):
    """Stop the capture.

    Disables the camera's 'movie' action, waits for the event announcing
    the finished movie file, and restores the previous capture target if
    it was changed when recording started.
    """
    self.camera._get_config()['actions']['movie'].set(False)
    # The camera reports the finished movie via a FILE_ADDED event.
    self.videofile = self.camera._wait_for_event(
        event_type=lib.GP_EVENT_FILE_ADDED)
    if self._old_captarget != "Memory card":
        # Restore the capture target saved before recording.
        self.camera.config['settings']['capturetarget'].set(
            self._old_captarget)
def arduino_default_path():
    """Platform specific default root path.

    Returns None on Windows, the Arduino.app resources path on macOS, and
    the system-wide install path elsewhere.
    """
    if sys.platform == 'win32':
        return None
    if sys.platform == 'darwin':
        return path('/Applications/Arduino.app/Contents/Resources/Java')
    return path('/usr/share/arduino/')
def load_builtin_slots():
    """Load builtin slots from the data location.

    Returns:
        dict: line index -> {'name': ..., 'description': ...}, parsed from
        the tab-separated file at BUILTIN_SLOTS_LOCATION.
    """
    builtin_slots = {}
    # 'with' guarantees the file handle is closed (it previously leaked).
    with open(BUILTIN_SLOTS_LOCATION) as handle:
        for index, line in enumerate(handle):
            fields = line.strip().split('\t')
            builtin_slots[index] = {'name': fields[0],
                                    'description': fields[1]}
    return builtin_slots
def communicate(sock, command, settings=()):
    """Communicate with monitor.

    Sends *command* followed by any option packets in *settings* over
    *sock*, then returns the monitor's reply packet.  The whole exchange
    is serialized under COMMUNICATE_LOCK.

    Changes: the lock is now held via a 'with' block (equivalent to the
    old try/finally but cannot leak on a partial acquire), and the default
    for *settings* is an immutable tuple instead of a shared mutable list.
    """
    with COMMUNICATE_LOCK:
        write_packet(sock, command)
        for option in settings:
            write_packet(sock, option)
        return read_packet(sock)
def _load_values(self): path = self._config_path() if path is not None and os.path.isfile(path): self._load_file(path)
Load config.yaml from the run directory if available.
def get_new_document(self, cursor_pos=None):
    """Create a `Document` instance that contains the resulting text.

    Joins the text before the cursor, the selected history lines (in line
    order), and the text after the cursor with newlines; the cursor
    position is clamped to the text length.
    """
    before = self.original_document.text_before_cursor
    after = self.original_document.text_after_cursor
    lines = []
    if before:
        lines.append(before)
    lines.extend(self.history_lines[n] for n in sorted(self.selected_lines))
    if after:
        lines.append(after)
    text = '\n'.join(lines)
    # Clamp the cursor so it never points past the end of the new text.
    if cursor_pos is not None and cursor_pos > len(text):
        cursor_pos = len(text)
    return Document(text, cursor_pos)
def to_dict(self, properties=None):
    """Return a dictionary containing Substance data.

    If the properties parameter is not specified, everything except cids
    and aids is included (those each require an extra request to
    retrieve).

    :param properties: (optional) A list of the desired properties.
    """
    if not properties:
        skip = {'deposited_compound', 'standardized_compound', 'cids', 'aids'}
        properties = [
            name for name in dir(Substance)
            if name not in skip and isinstance(getattr(Substance, name), property)
        ]
    return {name: getattr(self, name) for name in properties}
def _prt_edge(dag_edge, attr): print("Edge {ATTR}: {VAL}".format(ATTR=attr, VAL=dag_edge.obj_dict[attr]))
Print edge attribute
def DbDeleteDeviceAlias(self, argin):
    """Delete a device alias.

    :param argin: device alias name
    :type: tango.DevString
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbDeleteDeviceAlias()")
    self.db.delete_device_alias(argin)
def copy_submission_to_destination(self, src_filename, dst_subdir,
                                   submission_id):
    """Copies submission to target directory.

    Args:
        src_filename: source filename of the submission
        dst_subdir: subdirectory of the target directory where submission
            should be copied to
        submission_id: ID of the submission, will be used as a new
            submission filename (before extension)
    """
    # Exactly one recognised extension must match, otherwise bail out.
    extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]
    if len(extension) != 1:
        logging.error('Invalid submission extension: %s', src_filename)
        return
    dst_filename = os.path.join(self.target_dir, dst_subdir,
                                submission_id + extension[0])
    # gsutil handles gs:// URLs; a non-zero exit code signals failure.
    cmd = ['gsutil', 'cp', src_filename, dst_filename]
    if subprocess.call(cmd) != 0:
        logging.error('Can\'t copy submission to destination')
    else:
        logging.info('Submission copied to: %s', dst_filename)
def get_asset_temporal_assignment_session(self):
    """Gets the session for assigning temporal coverage to an asset.

    return: (osid.repository.AssetTemporalAssignmentSession) - an
            AssetTemporalAssignmentSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_asset_temporal_assignment() is false
    compliance: optional - This method must be implemented if
                supports_asset_temporal_assignment() is true.
    """
    if not self.supports_asset_temporal_assignment():
        raise Unimplemented()
    # The old try/except ImportError|AttributeError blocks were bare
    # re-raises, i.e. no-ops; any failure still propagates unchanged.
    from . import sessions
    return sessions.AssetTemporalAssignmentSession(proxy=self._proxy,
                                                   runtime=self._runtime)
def build_complex_fault_geometry(fault_source):
    """Returns the complex fault source geometry as a Node.

    :param fault_source:
        Complex fault source model as an instance of the :class:
        `openquake.hazardlib.source.complex_fault.ComplexFaultSource`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    last = len(fault_source.edges) - 1
    edge_nodes = []
    for idx, edge in enumerate(fault_source.edges):
        # First edge is the top, last is the bottom, the rest intermediate.
        if idx == 0:
            tag = "faultTopEdge"
        elif idx == last:
            tag = "faultBottomEdge"
        else:
            tag = "intermediateEdge"
        edge_nodes.append(
            Node(tag, nodes=[build_linestring_node(edge, with_depth=True)]))
    return Node("complexFaultGeometry", nodes=edge_nodes)
def _get_component(self, string, initial_pos):
    """Given a string and a position, return an updated position together
    with either a parsed component object or the raw string value.

    :return: (new_position, [add_code, object_value])
    """
    # The additional-data code identifying which component follows.
    add_code = string[initial_pos:initial_pos + self.ADDR_CODE_LENGTH]
    if add_code == 'REM':
        raise ish_reportException("This is a remarks record")
    if add_code == 'EQD':
        raise ish_reportException("This is EQD record")
    initial_pos += self.ADDR_CODE_LENGTH
    # NOTE(review): bare 'except' swallows everything (not just a missing
    # key), and 'raise BaseException' escapes 'except Exception' handlers
    # in callers — both look unintended; confirm before tightening, since
    # changing the raised type is caller-visible.
    try:
        useable_map = self.MAP[add_code]
    except:
        raise BaseException("Cannot find code %s in string %s (%d)." %
                            (add_code, string, initial_pos))
    if useable_map[1] is False:
        # Variable-length component: the length field follows the code.
        # NOTE(review): the length digits are read at
        # [pos+CODE_LEN : pos+2*CODE_LEN] but the position then advances by
        # 2*CODE_LEN, apparently skipping an extra CODE_LEN of characters
        # before the value — confirm against the ISH format specification.
        chars_to_read = string[initial_pos + self.ADDR_CODE_LENGTH:
                               initial_pos + (self.ADDR_CODE_LENGTH * 2)]
        chars_to_read = int(chars_to_read)
        initial_pos += (self.ADDR_CODE_LENGTH * 2)
    else:
        # Fixed-length component: length comes from the MAP entry.
        chars_to_read = useable_map[1]
    new_position = initial_pos + chars_to_read
    string_value = string[initial_pos:new_position]
    try:
        # Try to parse with the component class from the MAP entry; fall
        # back to the raw string when no parser is present (IndexError).
        object_value = useable_map[2]()
        object_value.loads(string_value)
    except IndexError as err:
        object_value = string_value
    return (new_position, [add_code, object_value])
def is_reserved_ip(self, ip):
    """Check if the given ip address is in a reserved ipv4 address space.

    :param ip: ip address
    :return: boolean
    """
    addr = ipaddress(ip)
    return any(addr in ipnetwork(mask) for mask in self._reserved_netmasks)
def create_reader_of_type(type_name):
    """Create an instance of the reader with the given name.

    Args:
        type_name: The name of a reader.

    Returns:
        An instance of the reader with the given type.

    Raises:
        UnknownReaderException: if no reader is registered under the name.
    """
    readers = available_readers()
    if type_name in readers:
        return readers[type_name]()
    raise UnknownReaderException('Unknown reader: %s' % (type_name,))