code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def boltzmann_exploration(actions, utilities, temperature, action_counter):
    """Pick an action via Boltzmann (softmax) exploration.

    Each action is chosen with probability proportional to
    exp(normalized_utility / temperature); higher temperature means more
    exploration.

    Args:
        actions: sequence of action identifiers, usable as indices into
            ``utilities``.
        utilities: indexable collection mapping an action to its utility.
        temperature: softmax temperature; clamped to a minimum of 0.01.
        action_counter: unused; kept for interface compatibility.

    Returns:
        One element of ``actions``.
    """
    utilities = [utilities[a] for a in actions]
    temperature = max(temperature, 0.01)
    lo, hi = min(utilities), max(utilities)
    # All utilities equal: the softmax is uniform, so pick uniformly.
    if hi == lo:
        return random.choice(actions)
    weights = [math.exp(((u - lo) / (hi - lo)) / temperature) for u in utilities]
    total = sum(weights)  # hoisted: the original recomputed sum() per element
    probs = [w / total for w in weights]
    r = random.random()
    cumulative = 0.0
    for i, p in enumerate(probs):
        cumulative += p
        if r < cumulative:
            return actions[i]
    # Floating-point rounding can leave the cumulative sum slightly below
    # 1.0; fall back to the last action instead of indexing out of range
    # (the original could raise IndexError here).
    return actions[-1]
returns an action with a probability depending on utilities and temperature
def green_callback(fn, obj=None, green_mode=None):
    """Return a "green" version of the given callback.

    The wrapper submits the original callback to the executor resolved
    from *obj* / *green_mode* and returns the resulting future-like
    object instead of calling the function inline.
    """
    executor = get_object_executor(obj, green_mode)

    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Delegate execution to the executor.
        return executor.submit(fn, *args, **kwargs)

    return wrapper
Return a green version of the given callback.
def clear_subsystem_caches(subsys):
    """Clear a subsystem's repertoire and MICE caches.

    Falls back through progressively older cache layouts: caches that
    expose ``clear()``, caches that hold a ``cache`` dict attribute, and
    finally plain dict attributes (which also reset the hit/miss counter
    list).
    """
    try:
        subsys._repertoire_cache.clear()
        subsys._mice_cache.clear()
    except TypeError:
        # NOTE(review): presumably raised when the cache's `clear` is not
        # callable in this layout — TODO confirm against the cache classes.
        try:
            subsys._repertoire_cache.cache = {}
            subsys._mice_cache.cache = {}
        except AttributeError:
            # Oldest layout: the caches are plain dicts on the subsystem.
            subsys._repertoire_cache = {}
            subsys._repertoire_cache_info = [0, 0]
            subsys._mice_cache = {}
Clear subsystem caches
def dev_from_name(self, name):
    """Return the first pcap device whose name or description equals *name*.

    Raises:
        ValueError: if no interface matches.
    """
    try:
        # six.itervalues() was only needed for Python 2; dict .values()
        # is the direct equivalent on Python 3.
        return next(iface for iface in self.values()
                    if name in (iface.name, iface.description))
    except (StopIteration, RuntimeError):
        raise ValueError("Unknown network interface %r" % name)
Return the first pcap device name for a given Windows device name.
def get_evernote_client(self, token=None):
    """Build an EvernoteClient.

    Authenticates with *token* when one is given (truthy), otherwise with
    this instance's consumer key/secret. The sandbox flag always comes
    from the instance.
    """
    if token:
        return EvernoteClient(token=token, sandbox=self.sandbox)
    return EvernoteClient(consumer_key=self.consumer_key,
                          consumer_secret=self.consumer_secret,
                          sandbox=self.sandbox)
Get an EvernoteClient, authenticated with the given token when provided, otherwise with the stored consumer key/secret.
def new_revoke_public_key_transaction(self, ont_id: str, bytes_operator: bytes, revoked_pub_key: str or bytes,
                                      b58_payer_address: str, gas_limit: int, gas_price: int):
    """Build an unsigned ``removeKey`` transaction that revokes a public key from an OntId.

    :param ont_id: OntId the key is removed from.
    :param bytes_operator: operator argument, as bytes.
    :param revoked_pub_key: public key to remove, hex string or raw bytes.
    :param b58_payer_address: base58-encoded address paying for the transaction.
    :param gas_limit: gas limit for the transaction.
    :param gas_price: gas price for the transaction.
    :return: a Transaction object that removes the public key.
    :raises SDKException: if revoked_pub_key is neither str nor bytes.
    """
    # NOTE(review): the annotation `str or bytes` evaluates to just `str`;
    # typing.Union[str, bytes] is what was meant.
    if isinstance(revoked_pub_key, str):
        # Hex string form: decode to raw bytes.
        bytes_revoked_pub_key = bytes.fromhex(revoked_pub_key)
    elif isinstance(revoked_pub_key, bytes):
        bytes_revoked_pub_key = revoked_pub_key
    else:
        raise SDKException(ErrorCode.params_type_error('a bytes or str type of public key is required.'))
    bytes_ont_id = ont_id.encode('utf-8')
    args = dict(ontid=bytes_ont_id, pk=bytes_revoked_pub_key, operator=bytes_operator)
    tx = self.__generate_transaction('removeKey', args, b58_payer_address, gas_limit, gas_price)
    return tx
This interface is used to generate a Transaction object which is used to remove a public key. :param ont_id: OntId. :param bytes_operator: operator argument in the form of bytes. :param revoked_pub_key: a public key string which will be removed. :param b58_payer_address: a base58-encoded address which indicates who will pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: a Transaction object which is used to remove a public key.
def _generate_next_token_helper(self, past_states, transitions):
    """Draw the next token given the chain of *past_states*.

    Looks up the transition table keyed by the tuple of past states and
    samples a successor proportionally to the recorded weights.
    """
    state_key = tuple(past_states)
    assert state_key in transitions, "%s" % str(state_key)
    candidates = transitions[state_key].items()
    return utils.weighted_choice(candidates)
Generates the next token based on the previous states.
def parse_boolean_envvar(val):
    """Interpret an environment-variable string as a boolean.

    Empty/None, 'false' and '0' (case-insensitive) are False; 'true' and
    '1' are True; anything else raises ValueError.
    """
    if not val:
        return False
    lowered = val.lower()
    if lowered in {'false', '0'}:
        return False
    if lowered in {'true', '1'}:
        return True
    raise ValueError('Invalid boolean environment variable: %s' % val)
Parse a boolean environment variable.
def eval_valid(self, feval=None):
    """Evaluate every validation dataset.

    Args:
        feval: optional customized evaluation function accepting
            (preds, train_data) and returning one or more
            (eval_name, eval_result, is_higher_better) tuples.

    Returns:
        list: concatenated evaluation results of all validation sets.
    """
    results = []
    for data_idx in range_(1, self.__num_dataset):
        name = self.name_valid_sets[data_idx - 1]
        results.extend(self.__inner_eval(name, data_idx, feval))
    return results
Evaluate for validation data. Parameters ---------- feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results.
def add_fileformat(self, fileformat):
    """Record the fileformat header line on this VCF.

    Args:
        fileformat (str): id of the fileformat line to add.
    """
    self.fileformat = fileformat
    logger.info("Adding fileformat to vcf: {0}".format(fileformat))
Add fileformat line to the header. Arguments: fileformat (str): The id of the info line
def imwrite(file, data=None, shape=None, dtype=None, **kwargs):
    """Write a numpy array to a TIFF file.

    Constructor-only kwargs ('append', 'bigtiff', 'byteorder', 'imagej')
    are routed to TiffWriter; everything else goes to TiffWriter.save.
    BigTIFF is enabled automatically when the payload exceeds ~4 GB minus
    32 MB of metadata headroom and neither 'imagej' nor 'truncate' is set.

    Returns (offset, bytecount) of the image data when written
    contiguously, else None.
    """
    tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder', 'imagej')
    if data is None:
        # Empty-array mode: derive size and byte order from shape + dtype.
        dtype = numpy.dtype(dtype)
        size = product(shape) * dtype.itemsize
        byteorder = dtype.byteorder
    else:
        try:
            size = data.nbytes
            byteorder = data.dtype.byteorder
        except Exception:
            # data may be a plain sequence without nbytes/dtype.
            size = 0
            byteorder = None
    # Default BigTIFF threshold: 4 GB minus 32 MB for metadata.
    bigsize = kwargs.pop('bigsize', 2**32-2**25)
    if 'bigtiff' not in tifargs and size > bigsize and not (
            tifargs.get('imagej', False) or tifargs.get('truncate', False)):
        tifargs['bigtiff'] = True
    if 'byteorder' not in tifargs:
        tifargs['byteorder'] = byteorder
    with TiffWriter(file, **tifargs) as tif:
        return tif.save(data, shape, dtype, **kwargs)
Write numpy array to TIFF file. Refer to the TiffWriter class and its asarray function for documentation. A BigTIFF file is created if the data size in bytes is larger than 4 GB minus 32 MB (for metadata), and 'bigtiff' is not specified, and 'imagej' or 'truncate' are not enabled. Parameters ---------- file : str or binary stream File name or writable binary stream, such as an open file or BytesIO. data : array_like Input image. The last dimensions are assumed to be image depth, height, width, and samples. If None, an empty array of the specified shape and dtype is saved to file. Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order is determined from the data's dtype or the dtype argument. shape : tuple If 'data' is None, shape of an empty array to save to the file. dtype : numpy.dtype If 'data' is None, data-type of an empty array to save to the file. kwargs : dict Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed to the TiffWriter constructor. Other parameters are passed to the TiffWriter.save function. Returns ------- offset, bytecount : tuple or None If the image data are written contiguously, return offset and bytecount of image data in the file.
def createOverlay(self, pchOverlayKey, pchOverlayName):
    """Create a new named overlay (overlays start hidden, with defaults).

    Returns:
        (error_code, overlay_handle) tuple.
    """
    create_fn = self.function_table.createOverlay
    handle = VROverlayHandle_t()
    error = create_fn(pchOverlayKey, pchOverlayName, byref(handle))
    return error, handle
Creates a new named overlay. All overlays start hidden and with default settings.
def get_java_container(self, package_name=None, object_name=None, java_class_instance=None):
    """Resolve the JVM container whose methods we wish to call.

    Exactly one source is used, checked in order: Scala package object,
    named Scala object, or an already-instantiated Java class.

    Raises:
        RuntimeError: if none of the three sources is provided.
    """
    if package_name is not None:
        return self.import_scala_package_object(package_name)
    if object_name is not None:
        return self.import_scala_object(object_name)
    if java_class_instance is not None:
        return java_class_instance
    raise RuntimeError("Expected one of package_name, object_name or java_class_instance")
Convenience method to get the container that houses methods we wish to call a method on.
def update(self, data):
    """Feed more bytes into the digest.

    Tracks the running byte count; when RAR-bug emulation is enabled and
    the chunk is large enough, corrupts the data at block boundaries
    (offset from the current buffer position) before continuing.
    """
    self._md.update(data)
    buffer_pos = self._nbytes & 63  # position within the 64-byte block, pre-update
    self._nbytes += len(data)
    if self._rarbug and len(data) > 64:
        pos = self.block_size - buffer_pos
        while pos + self.block_size <= len(data):
            self._corrupt(data, pos)
            pos += self.block_size
Process more data.
def image_generator(images, labels):
    """Yield tf.Example-style dicts for (image, label) pairs.

    Images are PNG-encoded; every field value is a singleton list,
    matching the tf.Example convention.

    Raises:
        ValueError: if *images* is empty.
    """
    if not images:
        raise ValueError("Must provide some images for the generator.")
    width, height, _ = images[0].shape
    for encoded, label in zip(encode_images_as_png(images), labels):
        yield {
            "image/encoded": [encoded],
            "image/format": ["png"],
            "image/class/label": [int(label)],
            "image/height": [height],
            "image/width": [width],
        }
Generator for images that takes image and labels lists and creates pngs. Args: images: list of images given as [width x height x channels] numpy arrays. labels: list of ints, same length as images. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as PNG, * image/format: the string "png" representing image format, * image/class/label: an integer representing the label, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a singleton list of the corresponding type. Raises: ValueError: if images is an empty list.
def create_back_links(env):
    """Populate ``links_back`` on all needs (and need parts), once per build.

    Each link of the form "<need_id>" or "<need_id>.<part_id>" produces a
    reverse entry on the target need (deduplicated) and, when a part id is
    given, on that part (appended as in the original implementation).

    :param env: sphinx environment
    :return: None
    """
    if env.needs_workflow['backlink_creation']:
        return
    needs = env.needs_all_needs
    for key, need in needs.items():
        for link in need["links"]:
            # Split once instead of twice per link (the original called
            # link.split('.') for both the main id and the part id).
            pieces = link.split('.')
            link_main = pieces[0]
            link_part = pieces[1] if len(pieces) > 1 else None
            if link_main not in needs:
                continue
            target = needs[link_main]
            if key not in target["links_back"]:
                target["links_back"].append(key)
            if link_part is not None and link_part in target['parts']:
                target['parts'][link_part].setdefault('links_back', []).append(key)
    env.needs_workflow['backlink_creation'] = True
Create back-links in all found needs. But do this only once, as all needs are already collected and this sorting is for all needs and not only for the ones of the current document. :param env: sphinx environment :return: None
async def eval(self, text, opts=None, user=None):
    """Evaluate a storm query and yield Nodes only.

    Defaults to the root user when none is given; the running task is
    promoted to a 'storm' boss task carrying the query text before a
    snapshot is opened for evaluation.
    """
    if user is None:
        user = self.auth.getUserByName('root')
    await self.boss.promote('storm', user=user, info={'query': text})
    # The snapshot is scoped to this evaluation and closed afterwards.
    async with await self.snap(user=user) as snap:
        async for node in snap.eval(text, opts=opts, user=user):
            yield node
Evaluate a storm query and yield Nodes only.
def send_message(self, message):
    """Send a message to this connection.

    :param message: (Message), message to be sent to this connection.
    :raises IOError: if the connection is not live.
    """
    if not self.live():
        raise IOError("Connection is not live.")
    # Frame the payload before writing it out.
    message.add_flag(BEGIN_END_FLAG)
    self.write(message.buffer)
Sends a message to this connection. :param message: (Message), message to be sent to this connection.
def _ParseLogonApplications(self, parser_mediator, registry_key):
    """Parses the registered logon applications.

    Produces one Windows Registry event per known logon application value
    found under *registry_key*, timestamped with the key's last-written
    time.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    for application in self._LOGON_APPLICATIONS:
        command_value = registry_key.GetValueByName(application)
        if not command_value:
            # Application not configured on this system; skip it.
            continue
        values_dict = {
            'Application': application,
            'Command': command_value.GetDataAsObject(),
            'Trigger': 'Logon'}
        event_data = windows_events.WindowsRegistryEventData()
        event_data.key_path = registry_key.path
        event_data.offset = registry_key.offset
        event_data.regvalue = values_dict
        event_data.source_append = ': Winlogon'
        event = time_events.DateTimeValuesEvent(
            registry_key.last_written_time,
            definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the registered logon applications. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def set_orders(self, object_pks):
    """Mass-update sort_order across the queryset to match *object_pks*.

    The objects keep the set of sort_order values they already had; only
    their assignment is rearranged to follow the order of *object_pks*.

    Raises:
        TypeError: if object_pks contains pks not in this queryset.
    """
    objects_to_sort = self.filter(pk__in=object_pks)
    # Global maximum across ALL model rows, not just this queryset.
    max_value = self.model.objects.all().aggregate(
        models.Max('sort_order')
    )['sort_order__max']
    orders = list(objects_to_sort.values_list('sort_order', flat=True))
    if len(orders) != len(object_pks):
        pks = set(objects_to_sort.values_list('pk', flat=True))
        message = 'The following object_pks are not in this queryset: {}'.format(
            [pk for pk in object_pks if pk not in pks]
        )
        raise TypeError(message)
    with transaction.atomic():
        # Lift every row above the global max first, so the per-row
        # updates below cannot collide with unique_together constraints.
        objects_to_sort.update(sort_order=models.F('sort_order') + max_value)
        for pk, order in zip(object_pks, orders):
            self.filter(pk=pk).update(sort_order=order)
    return objects_to_sort
Perform a mass update of sort_orders across the full queryset. Accepts a list, object_pks, of the intended order for the objects. Works as follows: - Compile a list of all sort orders in the queryset. Leave out anything that isn't in the object_pks list - this deals with pagination and any inconsistencies. - Get the maximum among all model object sort orders. Update the queryset to add it to all the existing sort order values. This lifts them 'out of the way' of unique_together clashes when setting the intended sort orders. - Set the sort order on each object. Use only sort_order values that the objects had before calling this method, so they get rearranged in place. Performs O(n) queries.
def is_boolean(node):
    """Return True when the AST node is the literal True or False.

    Handles both the legacy ``ast.Name`` form (Python 2 parsed True/False
    as names) and ``ast.NameConstant`` / ``ast.Constant``.
    """
    if isinstance(node, ast.Name) and node.id in ('True', 'False'):
        return True
    name_constant = getattr(ast, 'NameConstant', None)
    if name_constant is not None and isinstance(node, name_constant):
        return str(node.value) in ('True', 'False')
    return False
Checks if node is True or False
def download_files(files):
    """Download each (url, filename) pair and save it to disk.

    Failed downloads (``download_url`` returning None) are skipped;
    save errors are reported but do not abort the remaining downloads.
    """
    for url, filename in files:
        print("Downloading %s as %s" % (url, filename))
        data = download_url(url)
        if data is None:
            continue
        try:
            # Use a context manager so the handle is closed even when the
            # write fails (the original leaked the file object).
            with open(filename, mode='wb') as fh:
                fh.write(data)
        except Exception as e:
            print("Failed to save to %s : %s" % (filename, e))
download an array of files
def sky_fraction(self):
    """Fraction of the sky covered by this MOC, in [0, 1].

    The denominator ``3 << 2*(max_order+1)`` equals 12 * 4**max_order,
    the total number of HEALPix cells at the MOC's deepest order.
    """
    filled_cells = self._best_res_pixels().size
    total_cells = 3 << (2 * (self.max_order + 1))
    return filled_cells / float(total_cells)
Sky fraction covered by the MOC
def get_jwt(self, request):
    """Extract the JWT from the request's Authorization header.

    Returns the token, or None when the header is absent, malformed, or
    uses a different auth scheme than ``self.auth_header_prefix``.

    :param request: request object.
    :type request: :class:`morepath.Request`
    """
    try:
        credentials = request.authorization
    except ValueError:
        # Malformed Authorization header.
        return None
    if credentials is None:
        return None
    scheme, token = credentials
    if scheme.lower() != self.auth_header_prefix.lower():
        return None
    return token
Extract the JWT token from the authorisation header of the request. Returns the JWT token or None, if the token cannot be extracted. :param request: request object. :type request: :class:`morepath.Request`
def load_yaml(data=None, path=None, name='NT'):
    """Map namedtuples with YAML data.

    Exactly one of *data* (a YAML string) or *path* (a file to read) may
    be provided; giving both raises ValueError.

    Args:
        data: YAML document as a string.
        path: path to a YAML file.
        name: name given to the generated namedtuple type.
    """
    if data and path:
        raise ValueError('expected one source and received two')
    if data:
        # safe_load avoids arbitrary Python object construction;
        # yaml.load without an explicit Loader is unsafe on untrusted input.
        return mapper(yaml.safe_load(data), _nt_name=name)
    if path:
        with open(path, 'r') as f:
            return mapper(yaml.safe_load(f), _nt_name=name)
Map namedtuples with yaml data.
def is_url(value, **kwargs):
    """Report whether *value* is a valid URL.

    A SyntaxError from the underlying validator (duplicate keyword
    parameters) is propagated; any other failure simply means "not a
    URL".
    """
    try:
        validators.url(value, **kwargs)
    except SyntaxError:
        raise
    except Exception:
        return False
    return True
Indicate whether ``value`` is a URL. .. note:: URL validation is...complicated. The methodology that we have adopted here is *generally* compliant with `RFC 1738 <https://tools.ietf.org/html/rfc1738>`_, `RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, `RFC 2181 <https://tools.ietf.org/html/rfc2181>`_ and uses a combination of string parsing and regular expressions, This approach ensures more complete coverage for unusual edge cases, while still letting us use regular expressions that perform quickly. :param value: The value to evaluate. :param allow_special_ips: If ``True``, will succeed when validating special IP addresses, such as loopback IPs like ``127.0.0.1`` or ``0.0.0.0``. If ``False``, will fail if ``value`` is a special IP address. Defaults to ``False``. :type allow_special_ips: :class:`bool <python:bool>` :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
def gammalnStirling(z):
    """Approximate log-gamma(z) with Stirling's series (large-z regime)."""
    half_log_term = 0.5 * (np.log(2.0 * np.pi) - np.log(z))
    # The 1/(12z - 1/(10z)) continued-fraction correction sharpens the
    # plain Stirling estimate.
    series_term = z * (np.log(z + (1.0 / ((12.0 * z) - (1.0 / (10.0 * z))))) - 1.0)
    return half_log_term + series_term
Uses Stirling's approximation for the log-gamma function suitable for large arguments.
async def send_script(self, conn_id, data):
    """Forward a script to the adapter handling *conn_id*.

    See :meth:`AbstractDeviceAdapter.send_script`.
    """
    adapter = self.adapters[self._get_property(conn_id, 'adapter')]
    return await adapter.send_script(conn_id, data)
Send a script to a device. See :meth:`AbstractDeviceAdapter.send_script`.
def term_to_binary(term, compressed=False):
    """Encode a Python value into Erlang external term format bytes.

    Args:
        term: value to encode.
        compressed: False for no compression, True for zlib level 6, or
            an explicit compression level in [0..9].

    Raises:
        InputException: if the compression level is out of range.
        OutputException: if the uncompressed payload exceeds uint32.
    """
    payload = _term_to_binary(term)
    if compressed is False:
        return b_chr(_TAG_VERSION) + payload
    level = 6 if compressed is True else compressed
    if level < 0 or level > 9:
        raise InputException('compressed in [0..9]')
    deflated = zlib.compress(payload, level)
    uncompressed_size = len(payload)
    if uncompressed_size > 4294967295:
        raise OutputException('uint32 overflow')
    return (
        b_chr(_TAG_VERSION) +
        b_chr(_TAG_COMPRESSED_ZLIB) +
        struct.pack(b'>I', uncompressed_size) +
        deflated
    )
Encode Python types into Erlang terms in binary data
def start(self):
    """Initiate the session by starting to execute the command with the peer.

    Sends an empty command IQ SET request and stores the reply for the
    rest of the session.

    :return: The :attr:`~.xso.Command.first_payload` of the response.
    :raises RuntimeError: if execution was already started.
    """
    if self._response is not None:
        raise RuntimeError("command execution already started")
    request = aioxmpp.IQ(
        type_=aioxmpp.IQType.SET,
        to=self._peer_jid,
        payload=adhoc_xso.Command(self._command_name),
    )
    # Old-style coroutine: this method is driven with `yield from`.
    self._response = yield from self._stream.send_iq_and_wait_for_reply(
        request,
    )
    return self._response.first_payload
Initiate the session by starting to execute the command with the peer. :return: The :attr:`~.xso.Command.first_payload` of the response This sends an empty command IQ request with the :attr:`~.ActionType.EXECUTE` action. The :attr:`status`, :attr:`response` and related attributes get updated with the newly received values.
def _call_one_middleware(self, middleware): args = {} for arg in middleware['args']: if hasattr(self, arg): args[arg] = reduce(getattr, arg.split('.'), self) self.logger.debug('calling middleware event {}' .format(middleware['name'])) middleware['call'](**args)
Evaluate arguments and execute the middleware function
def load_pickle(file, encoding=None):
    """Load and return the object stored in a pickle file.

    Args:
        file (str): path to the pickle file.
        encoding (str, optional): text encoding forwarded to pickle.load
            (useful for Python-2-era pickles).

    Returns:
        object: the unpickled object.
    """
    # Forward `encoding` only when it was actually supplied.
    load_kwargs = {'encoding': encoding} if encoding else {}
    with open(file, 'rb') as f:
        return pickle.load(f, **load_kwargs)
Load a pickle file. Args: file (str): Path to pickle file Returns: object: Loaded object from pickle file
def positions_to_contigs(positions):
    """Flatten *positions* and derive a contig-label array.

    A zero entry marks the start of a new contig; nonzero entries extend
    the current one. When the input has identical consecutive nonzero
    values and no zeros at all, it is assumed to already be a contigs
    array and is returned unchanged (with a warning).

    Args:
        positions: ndarray, flat iterable, or iterable of per-contig
            position iterables.

    Returns:
        numpy.ndarray of 1-based contig labels, or the input itself when
        it already looks like a contigs array.
    """
    if isinstance(positions, np.ndarray):
        flat = positions.flatten()
    else:
        try:
            flat = np.array([pos for contig in positions for pos in contig])
        except TypeError:
            # Already a flat iterable of scalars.
            flat = np.array(positions)
    # BUGFIX: the original mixed `positions` with the flattened copy below,
    # which crashed (IndexError/TypeError) on nested per-contig input;
    # operate on `flat` throughout.
    if (np.diff(flat) == 0).any() and 0 not in set(flat.tolist()):
        warnings.warn("I detected identical consecutive nonzero values.")
        return positions
    n = len(flat)
    contigs = np.ones(n)
    new_contig_count = 0
    for i in range(1, n):
        if flat[i] == 0:
            new_contig_count += 1
            contigs[i] += new_contig_count
        else:
            contigs[i] = contigs[i - 1]
    return contigs
Flattens and converts a positions array to a contigs array, if applicable.
def remove_widget(self, widget):
    """Remove a widget from this grid

    Drops every grid entry whose widget matches and flags the layout
    solver for recreation.

    Parameters
    ----------
    widget : Widget
        The Widget to remove
    """
    self._grid_widgets = {
        key: entry
        for key, entry in self._grid_widgets.items()
        if entry[-1] != widget
    }
    self._need_solver_recreate = True
Remove a widget from this grid Parameters ---------- widget : Widget The Widget to remove
def plot_noncontiguous(ax, data, ind, color='black', label='', offset=0,
                       linewidth=0.5, linestyle='-'):
    """Plot only the indexed slice of *data*, leaving gaps elsewhere.

    Values outside *ind* are replaced with NaN so the plotting backend
    breaks the line at non-contiguous regions.

    Args
    ----
    ax: axes object to draw on.
    data: ndarray with non-contiguous regions to plot.
    ind: ndarray of indices of data to be plotted.
    offset: number of index positions to reset the start of data to zero.

    Returns
    -------
    ax with the line glyph added.
    """
    def _with_nan_gaps(indices, values, shift):
        import copy
        import numpy
        x = numpy.zeros(len(values))
        x[:] = numpy.nan
        x[indices - shift] = copy.deepcopy(indices)
        y = copy.deepcopy(values)
        y[numpy.isnan(x)] = numpy.nan
        return x, y

    xs, ys = _with_nan_gaps(ind, data, offset)
    ax.plot(xs, ys, color=color, linewidth=linewidth,
            linestyle=linestyle, label=label)
    return ax
Plot non-contiguous slice of data Args ---- data: ndarray The data with non continguous regions to plot ind: ndarray indices of data to be plotted color: matplotlib color Color of plotted line label: str Name to be shown in legend offset: int The number of index positions to reset start of data to zero linewidth: float The width of the plotted line linstyle: str The char representation of the plotting style for the line Returns ------- ax: pyplot.ax Axes object with line glyph added for non-contiguous regions
def next_content(self, start, amount=1):
    """Return *amount* characters of code, starting at the first
    non-whitespace position at or after *start*."""
    pos = start
    while pos < len(self.code) and self.code[pos] in (' ', '\t', '\n'):
        pos += 1
    return self.code[pos: pos + amount]
Returns the next non-whitespace characters
def change_token(self, id):
    """Rotate a user's token.

    :param id: User ID as an int.
    :return: :class:`users.User <users.User>` object
    :rtype: users.User
    """
    schema = UserSchema(exclude=('password', 'password_confirm'))
    response = self.service.post(self.base + str(id) + '/token/')
    return self.service.decode(schema, response)
Change a user's token. :param id: User ID as an int. :return: :class:`users.User <users.User>` object :rtype: users.User
def transpose(vari):
    """Transpose a shapeable quantity.

    Poly instances are transposed coefficient-by-coefficient (recursively)
    with their shape reversed; anything else is delegated to numpy.

    Args:
        vari (chaospy.poly.base.Poly, numpy.ndarray): quantity of interest.

    Returns:
        Same type as ``vari``.
    """
    if not isinstance(vari, Poly):
        return numpy.transpose(vari)
    new_core = vari.A.copy()
    for key in vari.keys:
        new_core[key] = transpose(new_core[key])
    return Poly(new_core, vari.dim, vari.shape[::-1], vari.dtype)
Transpose a shapeable quantety. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Quantety of interest. Returns: (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari``. Examples: >>> P = chaospy.reshape(chaospy.prange(4), (2,2)) >>> print(P) [[1, q0], [q0^2, q0^3]] >>> print(chaospy.transpose(P)) [[1, q0^2], [q0, q0^3]]
def has_privs(self, user, lowest_mode='o'):
    """Return True if user has the given mode or higher.

    Accepts either a User object or a nick string. Users with no known
    prefixes never qualify.
    """
    if isinstance(user, User):
        user = user.nick
    user_prefixes = self.prefixes.get(user, None)
    if not user_prefixes:
        return False
    mode_dict = self.s.features.available['prefix']
    # `caught` flips on once iteration reaches lowest_mode; earlier modes
    # are skipped, later ones are checked. NOTE(review): this assumes the
    # PREFIX dict iterates so that modes implying the requested privilege
    # come at/after lowest_mode — TODO confirm the ordering guarantee.
    caught = False
    for mode, prefix in mode_dict.items():
        if mode in lowest_mode and not caught:
            caught = True
        elif mode not in lowest_mode and not caught:
            continue
        if prefix in user_prefixes:
            return True
    return False
Return True if user has the given mode or higher.
def wrap_query_in_nested_if_field_is_nested(query, field, nested_fields):
    """Wrap *query* in a nested query when *field* lives under a nested path.

    Args:
        query: the query dict to (possibly) wrap.
        field: dotted path of the field being queried.
        nested_fields: field prefixes that are mapped as nested.

    Returns:
        (dict): the nested query, or the original query unchanged.
    """
    for nested_field in nested_fields:
        # Plain prefix check. The original regex '^{}.' left the dot
        # unescaped, so e.g. field 'authorsX' wrongly matched prefix
        # 'authors'.
        if field.startswith(nested_field + '.'):
            return generate_nested_query(nested_field, query)
    return query
Helper for wrapping a query into a nested if the fields within the query are nested Args: query : The query to be wrapped. field : The field that is being queried. nested_fields : List of fields which are nested. Returns: (dict): The nested query
def get(self, index):
    """Constructs a SyncListItemContext

    :param index: The index

    :returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
    :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
    """
    # Service and list identifiers come from this list's resolved solution.
    return SyncListItemContext(
        self._version,
        service_sid=self._solution['service_sid'],
        list_sid=self._solution['list_sid'],
        index=index,
    )
Constructs a SyncListItemContext :param index: The index :returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
def convert_sequence_to_motor_units(cycles, unit_converter):
    """Return a deep copy of *cycles* with every move in motor units.

    The 'A', 'AD' and 'V' move parameters go through the converter's
    velocity/acceleration conversion; 'D' goes through the distance
    conversion and is truncated to int.

    Parameters
    ----------
    cycles : iterable of dicts
        Cycles of motion; see ``compile_sequence`` for the format.
    unit_converter : UnitConverter
        Converter used to translate the values.

    Returns
    -------
    list of dicts: deep copy of *cycles* in motor units.
    """
    converted = copy.deepcopy(cycles)
    to_motor_va = unit_converter.to_motor_velocity_acceleration
    for cycle in converted:
        for move in cycle['moves']:
            for key in ('A', 'AD', 'V'):
                move[key] = to_motor_va(move[key])
            move['D'] = int(unit_converter.to_motor_distance(move['D']))
    return converted
Converts a move sequence to motor units. Converts a move sequence to motor units using the provied converter. Parameters ---------- cycles : iterable of dicts The iterable of cycles of motion to do one after another. See ``compile_sequence`` for format. unit_converter : UnitConverter, optional ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert the units in `cycles` to motor units. Returns ------- motor_cycles : list of dicts A deep copy of `cycles` with all units converted to motor units. See Also -------- compile_sequence GeminiMotorDrive.utilities.UnitConverter
def process_remote_sources(raw_config, environment=None):
    """Stage remote package sources and merge in remote configs.

    Args:
        raw_config (str): the raw stacker configuration string.
        environment (dict, optional): environment values passed to the
            config renderer.

    Returns:
        str: the raw stacker configuration string (re-rendered when
        remote sources were merged in).
    """
    config = yaml.safe_load(raw_config)
    if config and config.get('package_sources'):
        processor = SourceProcessor(
            sources=config['package_sources'],
            stacker_cache_dir=config.get('stacker_cache_dir')
        )
        processor.get_package_sources()
        if processor.configs_to_merge:
            for config_path in processor.configs_to_merge:
                logger.debug("Merging in remote config \"%s\"", config_path)
                # Close the handle deterministically; the original passed
                # an unclosed open() straight into yaml.
                with open(config_path) as remote_file:
                    remote_config = yaml.safe_load(remote_file)
                config = merge_map(remote_config, config)
        if not environment:
            environment = {}
        return render(str(config), environment)
    return raw_config
Stage remote package sources and merge in remote configs. Args: raw_config (str): the raw stacker configuration string. environment (dict, optional): any environment values that should be passed to the config Returns: str: the raw stacker configuration string
def iter_documents(self, fileids=None, categories=None, _destroy=False):
    """Return an iterator over corpus documents.

    Documents are loaded lazily, one id at a time; when *_destroy* is
    true, each document is destroyed after being yielded.
    """
    for doc_id in self._filter_ids(fileids, categories):
        document = self.get_document(doc_id)
        yield document
        if _destroy:
            document.destroy()
Return an iterator over corpus documents.
def access_id(self, id_, lineno, scope=None, default_type=None, default_class=CLASS.unknown):
    """Access a symbol by its identifier and checks if it exists.

    If not, it's supposed to be an implicitly declared variable;
    *default_class* is the class assumed for such an
    undeclared-implicitly-accessed id.

    Args:
        id_: identifier to look up.
        lineno: source line of the access (for diagnostics).
        scope: optional scope to search in.
        default_type: TYPEREF (or bare BASICTYPE) assumed on implicit
            declaration; falls back to the configured implicit type.
        default_class: symbol class assumed on implicit declaration.

    Returns:
        The symbol entry, or None when explicit declaration is required
        but missing.
    """
    # Normalize a bare BASICTYPE into a TYPEREF wrapper.
    if isinstance(default_type, symbols.BASICTYPE):
        default_type = symbols.TYPEREF(default_type, lineno, implicit=False)
    assert default_type is None or isinstance(default_type, symbols.TYPEREF)
    # In "explicit declaration" mode, undeclared identifiers are an error.
    if not check_is_declared_explicit(lineno, id_):
        return None
    result = self.get_entry(id_, scope)
    if result is None:
        # Implicit declaration using the provided or configured default type.
        if default_type is None:
            default_type = symbols.TYPEREF(self.basic_types[global_.DEFAULT_IMPLICIT_TYPE], lineno, implicit=True)
        result = self.declare_variable(id_, lineno, default_type)
        result.declared = False
        result.class_ = default_class
        return result
    # Existing 'auto'-typed entries adopt the caller-provided default type.
    if default_type is not None and result.type_ == self.basic_types[TYPE.auto]:
        result.type_ = default_type
        warning_implicit_type(lineno, id_, default_type)
    return result
Access a symbol by its identifier and checks if it exists. If not, it's supposed to be an implicit declared variable. default_class is the class to use in case of an undeclared-implicit-accessed id
def set_custom_value(self, field_name, value):
    """Assign *value* to this object's custom field *field_name*.

    Creates the CustomFieldValue row on first use.

    field_name - Name of the custom field you want.
    value - Value to set it to.
    """
    field = self.get_custom_field(field_name)
    stored, _created = CustomFieldValue.objects.get_or_create(
        field=field, object_id=self.id)
    stored.value = value
    stored.save()
Set a value for a specified custom field field_name - Name of the custom field you want. value - Value to set it to
async def status(request: web.Request) -> web.Response:
    """GET /networking/status: report internet connectivity and interfaces.

    Response body is a JSON dict with 'status' (connectivity state
    string from nmcli) and 'interfaces' (per-device info dicts). Returns
    HTTP 500 when the underlying nmcli queries fail, with whatever
    partial data was collected.
    """
    connectivity = {'status': 'none', 'interfaces': {}}
    try:
        connectivity['status'] = await nmcli.is_connected()
        connectivity['interfaces'] = {
            i.value: await nmcli.iface_info(i) for i in nmcli.NETWORK_IFACES
        }
        log.debug("Connectivity: {}".format(connectivity['status']))
        log.debug("Interfaces: {}".format(connectivity['interfaces']))
        status = 200
    except subprocess.CalledProcessError as e:
        # nmcli invocation failed.
        log.error("CalledProcessError: {}".format(e.stdout))
        status = 500
    except FileNotFoundError as e:
        # nmcli binary not present on this system.
        log.error("FileNotFoundError: {}".format(e))
        status = 500
    return web.json_response(connectivity, status=status)
Get request will return the status of the machine's connection to the internet as well as the status of its network interfaces. The body of the response is a json dict containing 'status': internet connectivity status, where the options are: "none" - no connection to router or network "portal" - device behind a captive portal and cannot reach full internet "limited" - connection to router but not internet "full" - connection to router and internet "unknown" - an exception occured while trying to determine status 'interfaces': JSON object of networking interfaces, keyed by device name, where the value of each entry is another object with the keys: - 'type': "ethernet" or "wifi" - 'state': state string, e.g. "disconnected", "connecting", "connected" - 'ipAddress': the ip address, if it exists (null otherwise); this also contains the subnet mask in CIDR notation, e.g. 10.2.12.120/16 - 'macAddress': the MAC address of the interface device - 'gatewayAddress': the address of the current gateway, if it exists (null otherwise) Example request: ``` GET /networking/status ``` Example response: ``` 200 OK { "status": "full", "interfaces": { "wlan0": { "ipAddress": "192.168.43.97/24", "macAddress": "B8:27:EB:6C:95:CF", "gatewayAddress": "192.168.43.161", "state": "connected", "type": "wifi" }, "eth0": { "ipAddress": "169.254.229.173/16", "macAddress": "B8:27:EB:39:C0:9A", "gatewayAddress": null, "state": "connected", "type": "ethernet" } } } ```
def run(self, bundle, container_id=None, log_path=None, pid_file=None,
        log_format="kubernetes"):
    """Create, start, attach and delete a container in one call.

    Equivalent command line: singularity oci run -b ~/bundle mycontainer

    Parameters
    ==========
    bundle: full path to the bundle folder.
    container_id: optional id; defaults to the instance's container id.
    log_path: where to store the log.
    pid_file: pid file path to use.
    log_format: "kubernetes" (default), "basic" or "json".
    """
    return self._run(bundle,
                     container_id=container_id,
                     log_path=log_path,
                     pid_file=pid_file,
                     command="run",
                     log_format=log_format)
run is a wrapper to create, start, attach, and delete a container. Equivalent command line example: singularity oci run -b ~/bundle mycontainer Parameters ========== bundle: the full path to the bundle folder container_id: an optional container_id. If not provided, use same container_id used to generate OciImage instance log_path: the path to store the log. pid_file: specify the pid file path to use log_format: defaults to kubernetes. Can also be "basic" or "json"
def period_end_day(self, value=None):
    """Setter corresponding to IDD Field `period_end_day`.

    Args:
        value (str): value for IDD Field `period_end_day`. None is
            stored unchecked and treated as a missing value.

    Raises:
        ValueError: if `value` cannot be coerced to str or contains a
            comma.
    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `period_end_day`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `period_end_day`')
    self._period_end_day = value
Corresponds to IDD Field `period_end_day` Args: value (str): value for IDD Field `period_end_day` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def mk_dropdown_tree(cls, model, for_node=None):
    """Build a flat (value, label) choice list mirroring the node tree."""
    choices = [(0, _('-- root --'))]
    for root in model.get_root_nodes():
        cls.add_subtree(for_node, root, choices)
    return choices
Creates a tree-like list of choices
def is_iso8601(instance: str):
    """Validate ISO8601 format.

    Non-strings vacuously pass, following the jsonschema format-checker
    convention of only validating applicable types.
    """
    if not isinstance(instance, str):
        return True
    return bool(ISO8601.match(instance))
Validates ISO8601 format
def get_context_data(self, **kwargs):
    """Add the event and its non-cancelled registrations to the context.

    Registrations are ordered by customer first/last name; the return
    page is set so navigation can come back to the registrations view.
    """
    self.set_return_page('viewregistrations', _('View Registrations'), event_id=self.object.id)
    context = {
        'event': self.object,
        'registrations': EventRegistration.objects.filter(
            event=self.object, cancelled=False
        ).order_by('registration__customer__user__first_name',
                   'registration__customer__user__last_name'),
    }
    context.update(kwargs)
    return super(EventRegistrationSummaryView, self).get_context_data(**context)
Add the list of registrations for the given series
def extract_audioclip_samples(d) -> dict:
    """Extract all sample data from an AudioClip, decoding FSB5 banks.

    Returns a mapping of output filename -> raw sample bytes; samples
    that fail to rebuild are skipped with a printed warning.

    Raises:
        RuntimeError: if python-fsb5 is not installed.
    """
    if not d.data:
        return {}
    try:
        from fsb5 import FSB5
    except ImportError as e:
        raise RuntimeError("python-fsb5 is required to extract AudioClip")
    bank = FSB5(d.data)
    extracted = {}
    for index, sample in enumerate(bank.samples):
        # First sample keeps the clip's bare name; later ones get -<i>.
        if index > 0:
            filename = "%s-%i.%s" % (d.name, index, bank.get_sample_extension())
        else:
            filename = "%s.%s" % (d.name, bank.get_sample_extension())
        try:
            extracted[filename] = bank.rebuild_sample(sample)
        except ValueError as e:
            print("WARNING: Could not extract %r (%s)" % (d, e))
    return extracted
Extract all the sample data from an AudioClip and convert it from FSB5 if needed.
def join(self, inner_iterable, outer_key_selector=identity, inner_key_selector=identity, result_selector=lambda outer, inner: (outer, inner)):
    """Perform an inner join with a second sequence using selected keys.

    The order of elements from outer is maintained; within each, the
    order of elements from inner is also preserved. Uses deferred
    execution.

    Args:
        inner_iterable: the sequence to join with the outer sequence.
        outer_key_selector: optional unary key function for outer
            elements (identity by default).
        inner_key_selector: optional unary key function for inner
            elements (identity by default).
        result_selector: optional binary function combining matching
            outer and inner elements (2-tuple pair by default).

    Returns:
        A Queryable of the join results.

    Raises:
        ValueError: if the Queryable has been closed.
        TypeError: if inner_iterable is not iterable or any selector is
            not callable.
    """
    if self.closed():
        raise ValueError("Attempt to call join() on a closed Queryable.")
    if not is_iterable(inner_iterable):
        # str(type(...))[7:-1] strips "<class '" and "'>" for the message.
        raise TypeError("Cannot compute join() with inner_iterable of "
                        "non-iterable {0}".format(str(type(inner_iterable))[7: -1]))
    if not is_callable(outer_key_selector):
        raise TypeError("join() parameter outer_key_selector={0} is not "
                        "callable".format(repr(outer_key_selector)))
    if not is_callable(inner_key_selector):
        raise TypeError("join() parameter inner_key_selector={0} is not "
                        "callable".format(repr(inner_key_selector)))
    if not is_callable(result_selector):
        raise TypeError("join() parameter result_selector={0} is not "
                        "callable".format(repr(result_selector)))
    # Validation passed; build the deferred join generator.
    return self._create(self._generate_join_result(inner_iterable, outer_key_selector,
                                                   inner_key_selector, result_selector))
Perform an inner join with a second sequence using selected keys. The order of elements from outer is maintained. For each of these the order of elements from inner is also preserved. Note: This method uses deferred execution. Args: inner_iterable: The sequence to join with the outer sequence. outer_key_selector: An optional unary function to extract keys from elements of the outer (source) sequence. The first positional argument of the function should accept outer elements and the result value should be the key. If omitted, the identity function is used. inner_key_selector: An optional unary function to extract keys from elements of the inner_iterable. The first positional argument of the function should accept outer elements and the result value should be the key. If omitted, the identity function is used. result_selector: An optional binary function to create a result element from two matching elements of the outer and inner. If omitted the result elements will be a 2-tuple pair of the matching outer and inner elements. Returns: A Queryable whose elements are the result of performing an inner- join on two sequences. Raises: ValueError: If the Queryable has been closed. TypeError: If the inner_iterable is not in fact iterable. TypeError: If the outer_key_selector is not callable. TypeError: If the inner_key_selector is not callable. TypeError: If the result_selector is not callable.
def _fallback_to_available_output(self):
    """Fall back to the first available output when the active layout
    consisted of a single output only.

    This avoids getting stuck with a black screen: the module switches
    back to an available (e.g. integrated) screen automatically.
    """
    if len(self.active_comb) != 1:
        return
    self._choose_what_to_display(force_refresh=True)
    self._apply()
    self.py3.update()
Fall back to the first available output when the active layout was composed of only one output. This allows us to avoid cases where you get stuck with a black screen on your laptop, by switching back to the integrated screen automatically!
def check_ok_button(self):
    """Enable the OK button only when at least one layer is listed and
    the login, password and URL fields are all filled in."""
    login = self.login.text()
    password = self.password.text()
    url = self.url.text()
    all_filled = bool(login and password and url)
    self.ok_button.setEnabled(self.layers.count() >= 1 and all_filled)
Helper to enable or not the OK button.
def init(project_name):
    """Scaffold a minimal Flask project named *project_name* under the
    current working directory.

    Creates <cwd>/<project_name> with manage.py, requirement.txt and an
    app/ package (views, forms, __init__, templates and static files),
    printing progress messages along the way.

    Note: changes the process working directory as a side effect.
    """
    dst_path = os.path.join(os.getcwd(), project_name)
    start_init_info(dst_path)
    _mkdir_p(dst_path)
    os.chdir(dst_path)
    # project-level files
    init_code('manage.py', _manage_basic_code)
    init_code('requirement.txt', _requirement_code)
    # application package
    app_path = os.path.join(dst_path, 'app')
    _mkdir_p(app_path)
    os.chdir(app_path)
    init_code('views.py', _views_basic_code)
    init_code('forms.py', _forms_basic_code)
    init_code('__init__.py', _init_basic_code)
    create_templates_static_files(app_path)
    init_done_info()
build a minimal flask project
def genome_mutation(candidate):
    """Mutate *candidate* by inversion or insertion, chosen at random.

    With probability ~0.5 a random slice is reversed (inversion, which
    builds a new individual); otherwise one random element is popped and
    re-inserted at a random position (insertion, which mutates in place).

    Returns a one-element tuple containing the mutated individual.
    """
    size = len(candidate)
    prob = random.random()
    if prob > .5:
        # inversion: reverse the slice candidate[p:q]
        p = random.randint(0, size-1)
        q = random.randint(0, size-1)
        if p > q:
            p, q = q, p
        q += 1  # make the upper bound include the chosen index
        s = candidate[p:q]
        x = candidate[:p] + s[::-1] + candidate[q:]
        # wrap the new genome in the framework's Individual type
        return creator.Individual(x),
    else:
        # insertion: move the element at position q to position p
        p = random.randint(0, size-1)
        q = random.randint(0, size-1)
        cq = candidate.pop(q)
        candidate.insert(p, cq)
        return candidate,
Return the mutant created by inversion or insertion mutation on the candidate. For inversion, it randomly chooses two locations along the candidate and reverses the values within that slice. For insertion, it pops one random item and inserts it back at a random position.
def where(cond, a, b, use_numexpr=True):
    """Evaluate the where condition ``cond`` on ``a`` and ``b``.

    Parameters
    ----------
    cond : a boolean array
    a : return if cond is True
    b : return if cond is False
    use_numexpr : whether to try to use numexpr (default True)
    """
    if not use_numexpr:
        return _where_standard(cond, a, b)
    return _where(cond, a, b)
evaluate the where condition cond on a and b Parameters ---------- cond : a boolean array a : return if cond is True b : return if cond is False use_numexpr : whether to try to use numexpr (default True)
def get_offset(self):
    """Query the tus server for the current upload offset.

    Unlike the instance attribute 'offset', this performs an HTTP HEAD
    request against the upload URL and returns the server-side value.

    Raises:
        TusCommunicationError: If the server response carries no
            'upload-offset' header.
    """
    response = requests.head(self.url, headers=self.headers)
    server_offset = response.headers.get('upload-offset')
    if server_offset is None:
        raise TusCommunicationError(
            'Attempt to retrieve offset fails with status {}'.format(response.status_code),
            response.status_code, response.content)
    return int(server_offset)
Return offset from tus server. This is different from the instance attribute 'offset' because this makes an http request to the tus server to retrieve the offset.
def to_bytes(value):
    """Convert text, C char arrays, and bytes to bytes.

    Unicode strings are encoded to UTF-8; cffi char arrays are read out
    with ffi.string; bytes pass through unchanged.

    Raises:
        ValueError: For any other input type.
    """
    if isinstance(value, text_type):
        return value.encode('utf-8')
    if isinstance(value, ffi.CData):
        return ffi.string(value)
    if isinstance(value, binary_type):
        return value
    raise ValueError('Value must be text, bytes, or char[]')
Converts bytes, unicode, and C char arrays to bytes. Unicode strings are encoded to UTF-8.
def disconnect(self, event, cb):
    """Disconnect a previously connected callback.

    If a callback is connected multiple times, only one connection is
    removed.

    Args:
        event: Name of the cursor event the callback was registered for.
        cb: The callback to remove.

    Raises:
        ValueError: If *event* is not a valid cursor event, or if *cb*
            was never registered for it.
    """
    try:
        self._callbacks[event].remove(cb)
    except KeyError:
        raise ValueError("{!r} is not a valid cursor event".format(event))
    except ValueError:
        # Bug fix: the message previously interpolated the event name
        # instead of the callback that failed to be found.
        raise ValueError("Callback {} is not registered".format(cb))
Disconnect a previously connected callback. If a callback is connected multiple times, only one connection is removed.
def scipy_sparse_to_spmatrix(A):
    """Efficiently convert a scipy sparse matrix into a cvxopt spmatrix."""
    coo = A.tocoo()
    values = coo.data.tolist()
    rows = coo.row.tolist()
    cols = coo.col.tolist()
    return spmatrix(values, rows, cols, size=A.shape)
Efficient conversion from scipy sparse matrix to cvxopt sparse matrix
def userInvitations(self):
    """Get all user invitations as Invitation objects.

    (Re)loads portal data via self.__init(), then builds one
    eagerly-initialized Invitation per entry carrying an 'id'.
    """
    self.__init()
    items = []
    for n in self._userInvitations:
        # entries without an 'id' cannot be addressed by URL and are skipped
        if "id" in n:
            url = "%s/%s" % (self.root, n['id'])
            items.append(self.Invitation(url=url,
                                         securityHandler=self._securityHandler,
                                         proxy_url=self._proxy_url,
                                         proxy_port=self._proxy_port,
                                         initialize=True))
    return items
gets all user invitations
def palettize(self, colormap):
    """Palettize the current image in place using `colormap`.

    .. note:: Works only on "L" or "LA" images. The luminance band is
       mapped to palette indices ("P" mode); an alpha band, if present,
       is carried through unchanged ("PA" mode).

    Raises:
        ValueError: If the image mode is not "L" or "LA".
    """
    if self.mode not in ("L", "LA"):
        raise ValueError("Image should be grayscale to colorize")
    l_data = self.data.sel(bands=['L'])

    def _palettize(data):
        # colormap.palettize returns (indices, palette); keep the indices only
        return colormap.palettize(data)[0]

    # map lazily over dask blocks so the whole image is never materialized
    new_data = l_data.data.map_blocks(_palettize, dtype=l_data.dtype)
    self.palette = tuple(colormap.colors)
    if self.mode == "L":
        mode = "P"
    else:
        mode = "PA"
        # re-attach the untouched alpha band after the palettized band
        new_data = da.concatenate([new_data, self.data.sel(bands=['A'])], axis=0)
    self.data.data = new_data
    self.data.coords['bands'] = list(mode)
Palettize the current image using `colormap`. .. note:: Works only on "L" or "LA" images.
def verify_token(self, token):
    """Validate *token* via the resolver.

    Returns the resolver's result (the user name associated with the
    token, or False) on success.

    Raises:
        EauthAuthenticationError: If the resolver raises any exception.
    """
    try:
        return self.resolver.get_token(token)
    except Exception as ex:
        raise EauthAuthenticationError(
            "Token validation failed with {0}.".format(repr(ex)))
If token is valid Then returns user name associated with token Else False.
def applyFracdet(self, lon, lat):
    """Enforce a minimum fracdet for a satellite to be considered
    detectable.

    Returns a boolean array: True where both the core (r=0.1) and wide
    (r=0.5) mean fracdet values pass their configured thresholds.
    """
    self.loadFracdet()
    fracdet_core = meanFracdet(self.m_fracdet, lon, lat, np.tile(0.1, len(lon)))
    fracdet_wide = meanFracdet(self.m_fracdet, lon, lat, np.tile(0.5, len(lon)))
    # Bug fix: the second term previously re-tested fracdet_core against the
    # core threshold, leaving fracdet_wide computed but unused.
    # NOTE(review): assumes the config exposes 'fracdet_wide_threshold'
    # alongside 'fracdet_core_threshold' -- confirm against the config schema.
    return (fracdet_core >= self.config[self.algorithm]['fracdet_core_threshold']) \
        & (fracdet_wide >= self.config[self.algorithm]['fracdet_wide_threshold'])
We want to enforce a minimum fracdet for a satellite to be considered detectable. Returns True where the fracdet cut passes.
def main():
    """Driver for testing the DFA Brzozowski algebraic method.

    Parses a flex ruleset (argv[1], default 'target.y') into an automaton,
    minimizes it, then prints the regular expression recovered via the
    Brzozowski algebraic method.
    """
    argv = sys.argv
    if len(argv) < 2:
        targetfile = 'target.y'
    else:
        targetfile = argv[1]
    # NOTE: Python 2 print statements; a trailing comma suppresses the newline
    print 'Parsing ruleset: ' + targetfile,
    flex_a = Flexparser()
    mma = flex_a.yyparse(targetfile)
    print 'OK'
    print 'Perform minimization on initial automaton:',
    mma.minimize()
    print 'OK'
    print 'Perform Brzozowski on minimal automaton:',
    brzozowski_a = Brzozowski(mma)
    mma_regex = brzozowski_a.get_regex()
    print mma_regex
Testing function for the DFA Brzozowski algebraic method operation.
def read_bytes(self, count):
    """Read *count* bytes from the payload at the current position.

    Returns a (status, data) pair: (NC.ERR_SUCCESS, bytearray) on
    success, or (NC.ERR_PROTOCOL, None) when fewer than *count* bytes
    remain. Advances self.pos past the bytes consumed.
    """
    if self.pos + count > self.remaining_length:
        # not enough payload left -- treat as a protocol error
        return NC.ERR_PROTOCOL, None
    ba = bytearray(count)
    # copy byte-by-byte, advancing the read cursor (Python 2 xrange)
    for x in xrange(0, count):
        ba[x] = self.payload[self.pos]
        self.pos += 1
    return NC.ERR_SUCCESS, ba
Read count number of bytes.
def comp_pipe_handle(loc, tokens):
    """Process pipe-style function composition into a _coconut_base_compose call.

    *tokens* alternates function, operator, function, ...; each operator
    determines the composition direction and "star" behavior via
    comp_pipe_info. Mixing operators of different directions in one chain
    is a syntax error.
    """
    internal_assert(len(tokens) >= 3 and len(tokens) % 2 == 1, "invalid composition pipe tokens", tokens)
    funcs = [tokens[0]]
    stars = []
    direction = None
    for i in range(1, len(tokens), 2):
        op, fn = tokens[i], tokens[i + 1]
        new_direction, star = comp_pipe_info(op)
        if direction is None:
            # first operator fixes the direction for the whole chain
            direction = new_direction
        elif new_direction != direction:
            raise CoconutDeferredSyntaxError("cannot mix function composition pipe operators with different directions", loc)
        funcs.append(fn)
        stars.append(star)
    if direction == "backwards":
        # backwards composition applies right-to-left, so reverse the chain
        funcs.reverse()
        stars.reverse()
    func = funcs.pop(0)
    funcstars = zip(funcs, stars)
    return "_coconut_base_compose(" + func + ", " + ", ".join(
        "(%s, %s)" % (f, star) for f, star in funcstars
    ) + ")"
Process pipe function composition.
def str2float(text):
    """Remove uncertainty brackets (e.g. "3.14(5)") from a string and
    return the float.

    Also accepts a single-element list of such strings; a bare "." is
    treated as zero.
    """
    strip_uncertainty = lambda s: re.sub(r"\(.+\)*", "", s)
    try:
        return float(strip_uncertainty(text))
    except TypeError:
        if isinstance(text, list) and len(text) == 1:
            return float(strip_uncertainty(text[0]))
    except ValueError:
        if text.strip() == ".":
            return 0
        raise
Remove uncertainty brackets from strings and return the float.
def sanitize_filepath(file_path, replacement_text="", platform=None, max_len=None):
    """Make a valid file path from a string.

    Replaces characters that are invalid in a file path with
    ``replacement_text``; ``platform`` and ``max_len`` configure the
    platform rules and maximum path length.

    Returns the sanitized path (same type as the argument).

    Raises:
        ValueError: If ``file_path`` is an invalid file path.
    """
    sanitizer = FilePathSanitizer(platform=platform, max_len=max_len)
    return sanitizer.sanitize(file_path, replacement_text)
Make a valid file path from a string. Replace invalid characters for a file path within the ``file_path`` with the ``replacement_text``. Invalid characters are as follows: |invalid_file_path_chars|, |invalid_win_file_path_chars| (and non printable characters). Args: file_path (str or PathLike object): File path to sanitize. replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``. platform (str, optional): .. include:: platform.txt max_len (int, optional): The upper limit of the ``file_path`` length. Truncate the name if the ``file_path`` length exceeds this value. If the value is |None|, the default value is automatically determined by the execution platform: - ``Linux``: 4096 - ``macOS``: 1024 - ``Windows``: 260 Returns: Same type as the argument (str or PathLike object): Sanitized filepath. Raises: ValueError: If the ``file_path`` is an invalid file path. Example: :ref:`example-sanitize-file-path`
def contains_pts(self, pts):
    """Vectorized containment test: points inside the second object but
    outside the first."""
    obj_a, obj_b = self.objects
    inside_b = obj_b.contains_pts(pts)
    outside_a = np.logical_not(obj_a.contains_pts(pts))
    return np.logical_and(inside_b, outside_a)
Containment test on arrays.
def generate_docker_file(py_ver: PyVer):
    """Render the templated Dockerfile for the given Python version."""
    template_path = os.path.join(script_templates_root, 'Dockerfile')
    with open(template_path) as template:
        return template.read().format(py_ver=py_ver, author=author_file)
Templated docker files
def dequeue(ctx):
    """Send one tweet from the queue (or just print it in dry-run mode).

    Exits with status 1 when the queue is empty.
    """
    next_tweet = ctx.obj['TWEETLIST'].peek()
    if next_tweet is None:
        click.echo("Nothing to dequeue.")
        ctx.exit(1)
    if ctx.obj['DRYRUN']:
        click.echo(next_tweet)
        return
    ctx.obj['TWEEPY_API'].update_status(ctx.obj['TWEETLIST'].pop())
Sends a tweet from the queue
def _run_hooked_methods(self, hook: str):
    """Run every decorated method registered for *hook*.

    A method runs when its registration matches the hook and either has
    no 'when' condition or its condition check passes.
    """
    for method in self._potentially_hooked_methods:
        for callback_specs in method._hooked:
            if callback_specs['hook'] != hook:
                continue
            when = callback_specs.get('when')
            if not when or self._check_callback_conditions(callback_specs):
                method()
Iterate through decorated methods to find those that should be triggered by the current hook. If conditions exist, check them before running otherwise go ahead and run.
def csrf(request):
    """Context processor providing a masked CSRF token, or the string
    'NOTPROVIDED' if it has not been provided by either a view decorator
    or the middleware.

    The token is XOR-masked with a random same-length key and
    base64-encoded (presumably to randomize the token per render --
    confirm against the project's CSRF design). Evaluation is deferred
    via lazy() so the token is only generated if actually rendered.
    """
    def _get_val():
        token = get_token(request)
        if token is None:
            return 'NOTPROVIDED'
        else:
            token = force_bytes(token, encoding='latin-1')
            # random key of the same length as the token
            key = force_bytes(
                get_random_string(len(token)), encoding='latin-1'
            )
            value = b64_encode(xor(token, key))
            # '$' separates the key from the masked value
            return force_text(b'$'.join((key, value)), encoding='latin-1')
    _get_val = lazy(_get_val, text_type)
    return {'csrf_token': _get_val()}
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if it has not been provided by either a view decorator or the middleware
def _format_capability_report(self, data):
    """Private utility: print a formatted Firmata capability report.

    Does nothing when self.log_output is True (output goes to the log
    instead of the console).

    :param data: Capability report byte list; per pin, (mode, resolution)
        pairs terminated by the sentinel value 127.
    :returns: None
    """
    if self.log_output:
        return
    else:
        # mapping of Firmata mode codes to human-readable names
        pin_modes = {0: 'Digital_Input', 1: 'Digital_Output',
                     2: 'Analog', 3: 'PWM', 4: 'Servo',
                     5: 'Shift', 6: 'I2C', 7: 'One Wire',
                     8: 'Stepper', 9: 'Encoder'}
        x = 0      # cursor into the report data
        pin = 0    # current pin number
        print('\nCapability Report')
        print('-----------------\n')
        while x < len(data):
            print('{} {}{}'.format('Pin', str(pin), ':'))
            # each pin's entries are (mode, resolution) pairs until 127
            while data[x] != 127:
                mode_str = ""
                pin_mode = pin_modes.get(data[x])
                mode_str += str(pin_mode)
                x += 1
                bits = data[x]
                print('{:>5}{}{} {}'.format(' ', mode_str, ':', bits))
                x += 1
            x += 1  # skip the 127 terminator
            pin += 1
This is a private utility method. This method formats a capability report if the user wishes to send it to the console. If log_output = True, no output is generated :param data: Capability report :returns: None
def _add_column(self, type, name, **parameters):
    """Add a new column to the blueprint.

    :param type: The column type
    :type type: str
    :param name: The column name
    :type name: str
    :param parameters: Additional column parameters
    :type parameters: dict

    :rtype: Fluent
    """
    parameters['type'] = type
    parameters['name'] = name
    new_column = Fluent(**parameters)
    self._columns.append(new_column)
    return new_column
Add a new column to the blueprint. :param type: The column type :type type: str :param name: The column name :type name: str :param parameters: The column parameters :type parameters: dict :rtype: Fluent
def rowxcol(row, col):
    """Multiply one row by one column, XOR-accumulating bit expressions.

    *row* is reshaped to a 4x4 grid of flags and *col* to four 8-bit
    expressions; each set flag contributes one xtime term to the result.
    """
    row = row.reshape(4, 4)
    col = col.reshape(4, 8)
    ret = uint2exprs(0, 8)  # 8-bit zero accumulator expression
    for i in range(4):
        for j in range(4):
            if row[i, j]:
                # NOTE(review): xtime(col[i], j) presumably shifts/multiplies
                # col[i] by x**j in GF(2^8) -- confirm against xtime's definition
                ret ^= xtime(col[i], j)
    return ret
Multiply one row and one column.
def to_native(key):
    """Find the native name for the language specified by key.

    >>> to_native('br')
    u'brezhoneg'
    >>> to_native('sw')
    u'Kiswahili'

    Raises:
        NonExistentLanguageError: If no language matches *key*.
    """
    match = find(whatever=key)
    if match:
        return match[u'native']
    raise NonExistentLanguageError('Language does not exist.')
Find the native name for the language specified by key. >>> to_native('br') u'brezhoneg' >>> to_native('sw') u'Kiswahili'
def save_form(self, request, form, change):
    """Keep newly created links out of the sitemap by default (unless the
    form exposes the 'in_sitemap' field)."""
    obj = form.save(commit=False)
    is_new = not obj.id
    if is_new and "in_sitemap" not in form.fields:
        obj.in_sitemap = False
    return super(LinkAdmin, self).save_form(request, form, change)
Don't show links in the sitemap.
def state_destruction(self, model, prop_name, info):
    """Close the state editor page when the observed state is destructed.

    Looks up the states-editor controller via the GUI singletons and
    closes (deletes) the tab identified by this controller's state model.
    """
    # local import (presumably to avoid a circular import at load time -- confirm)
    import rafcon.gui.singleton as gui_singletons
    states_editor_ctrl = gui_singletons.main_window_controller.get_controller('states_editor_ctrl')
    state_identifier = states_editor_ctrl.get_state_identifier(self.model)
    states_editor_ctrl.close_page(state_identifier, delete=True)
Close state editor when state is being destructed
def remove_source(self, source):
    """Remove a geocoding service from this instance.

    *source* is a (name, kwargs) pair; an equivalent service instance is
    constructed and removed from the source list (relies on the service
    class's equality semantics).
    """
    service_cls = self._get_service_by_name(source[0])
    service = service_cls(**source[1])
    self._sources.remove(service)
Remove a geocoding service from this instance.
def grid_linspace(bounds, count):
    """Return a grid spaced inside a bounding box with edges spaced using
    np.linspace.

    Parameters
    ----------
    bounds: (2, dimension) list of [[min x, min y, ...], [max x, max y, ...]]
    count: int, or (dimension,) int, number of samples per side

    Returns
    -------
    grid: (n, dimension) float, points inside the specified bounds
    """
    bounds = np.asanyarray(bounds, dtype=np.float64)
    if len(bounds) != 2:
        # Bug fix: the message literal was malformed ('(2, dimension!')
        raise ValueError('bounds must be (2, dimension)!')
    # Bug fix: np.int was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype alias.
    count = np.asanyarray(count, dtype=int)
    if count.shape == ():
        # scalar count: use the same sample count along every dimension
        count = np.tile(count, bounds.shape[1])
    grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)]
    grid = np.vstack(np.meshgrid(*grid_elements)
                     ).reshape(bounds.shape[1], -1).T
    return grid
Return a grid spaced inside a bounding box with edges spaced using np.linspace. Parameters --------- bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]] count: int, or (dimension,) int, number of samples per side Returns ------- grid: (n, dimension) float, points in the specified bounds
def find_elements_by_name(self, name, update=False) -> Elements:
    """Find multiple elements by name.

    Args:
        name: The name of the elements to be found.
        update: Set True if the interface has changed since the last lookup.

    Returns:
        A list of matching elements (empty if none found).

    Usage:
        elements = driver.find_elements_by_name('foo')
    """
    return self.find_elements(update=update, value=name, by=By.NAME)
Finds multiple elements by name. Args: name: The name of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_name('foo')
def _toggle_monitoring(self, action, no_ssh=False):
    """Enable or disable monitoring on this machine.

    :param action: Either "enable" or "disable"
    :param no_ssh: Request that no SSH access is used (default False)
    """
    payload = {
        'action': action,
        'name': self.name,
        'no_ssh': no_ssh,
        'public_ips': self.info['public_ips'],
        # fall back to 'n/a' when the provider reports no DNS name
        'dns_name': self.info['extra'].get('dns_name', 'n/a')
    }
    data = json.dumps(payload)
    req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/monitoring", data=data)
    req.post()
Enable or disable monitoring on a machine :param action: Can be either "enable" or "disable"
def penUp(self):
    """Raise the pen; movement stops drawing lines until the pen is
    lowered again. Flushes the current polyline when the pen was down."""
    if self._penDown != True:
        return
    self._penDown = False
    self._addPolylineToElements()
Raises the pen. Any movement will not draw lines till pen is lowered again.
def get_config(cli_args=None, config_path=None):
    """Perform standard setup and return the merged configuration.

    :param cli_args dict: A dictionary of CLI arguments
    :param config_path string: Path to the config file to load
    :return dict: Config values merged from the different sources
    """
    merged = Config(app_name="MYAPP",
                    cli_args=cli_args,
                    config_path=config_path).get_config()
    return merged
Perform standard setup - get the merged config :param cli_args dict: A dictionary of CLI arguments :param config_path string: Path to the config file to load :return dict: A dictionary of config values drawn from different sources
def get_thread_block_dimensions(params, block_size_names=None):
    """Extract the (x, y, z) thread block size from tuning parameters.

    Uses the conventional block size parameter names unless an explicit
    list is given; missing entries default to (256, 1, 1).
    """
    names = block_size_names or default_block_size_names
    dims = (params.get(names[0], 256),
            params.get(names[1], 1),
            params.get(names[2], 1))
    return tuple(int(d) for d in dims)
thread block size from tuning params, currently using convention
def summarize_taxa(biom):
    """Given an abundance (BIOM) table, group the counts by every
    taxonomic level.

    Returns a (total_sequence_count, level_data) pair, where level_data
    maps each rank prefix ('k','p','c','o','f','g','s') to the output of
    levelData for that rank.
    """
    tamtcounts = defaultdict(int)
    tot_seqs = 0.0
    for row, col, amt in biom['data']:
        tot_seqs += amt
        rtax = biom['rows'][row]['metadata']['taxonomy']
        for i, t in enumerate(rtax):
            t = t.strip()
            # expand a bare species entry to 's__<genus>_<species>' using
            # the preceding (genus) level
            if i == len(rtax)-1 and len(t) > 3 and len(rtax[-1]) > 3:
                t = 's__'+rtax[i-1].strip().split('_')[-1]+'_'+t.split('_')[-1]
            tamtcounts[t] += amt
    lvlData = {lvl: levelData(tamtcounts, tot_seqs, lvl) for lvl in ['k', 'p', 'c', 'o', 'f', 'g', 's']}
    return tot_seqs, lvlData
Given an abundance table, group the counts by every taxonomic level.
def _CaptureRequestLogId(self):
    """Capture the request log id, if a collector is configured.

    When available, the id is stored inside the breakpoint labels under
    labels.Breakpoint.REQUEST_LOG_ID.
    """
    if callable(request_log_id_collector):
        request_log_id = request_log_id_collector()
        # only record a non-empty id
        if request_log_id:
            self.breakpoint['labels'][
                labels.Breakpoint.REQUEST_LOG_ID] = request_log_id
Captures the request log id if possible. The request log id is stored inside the breakpoint labels.
def filePath(self, index):
    """Get the file path of the item at the specified ``index``.

    :param index: item index - QModelIndex
    :return: str
    """
    source_index = self._fs_model_proxy.mapToSource(index)
    return self._fs_model_source.filePath(source_index)
Gets the file path of the item at the specified ``index``. :param index: item index - QModelIndex :return: str
def extend(self, content, zorder):
    """Extend the content list stored at the given z-order, creating the
    entry on first use."""
    self._content.setdefault(zorder, []).extend(content)
Extends with a list and a z-order
def _get_client(self):
    """S3 Boto3 client.

    Returns:
        boto3.session.Session.client: client
    """
    client_kwargs = self._storage_parameters.get('client', dict())
    if self._unsecure:
        # copy so the stored parameters are never mutated
        client_kwargs = dict(client_kwargs, use_ssl=False)
    return self._get_session().client("s3", **client_kwargs)
S3 Boto3 client Returns: boto3.session.Session.client: client
def from_file(cls, f, fname=None, readers=None):
    """Create a Document from a file.

    Usage::

        with open('paper.html', 'rb') as f:
            doc = Document.from_file(f)

    .. note:: Always open files in binary mode ('rb').

    :param file|string f: A file-like object or path to a file.
    :param string fname: (Optional) The filename, used to help determine
        file format.
    :param list readers: (Optional) List of readers to use.
    """
    if isinstance(f, six.string_types):
        # a path was given -- open it in binary mode
        f = io.open(f, 'rb')
    if not fname and hasattr(f, 'name'):
        fname = f.name
    content = f.read()
    return cls.from_string(content, fname=fname, readers=readers)
Create a Document from a file. Usage:: with open('paper.html', 'rb') as f: doc = Document.from_file(f) .. note:: Always open files in binary mode by using the 'rb' parameter. :param file|string f: A file-like object or path to a file. :param string fname: (Optional) The filename. Used to help determine file format. :param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use.
def _ensure_authed(self, ptype, message):
    """Check a message type against the current auth state.

    In server mode, when authentication has not yet succeeded and the
    message is of a post-auth type (channel open or global request),
    craft and return an appropriate failure-reply Message for the caller
    to send. Otherwise (client mode, already authed, or a pre-auth
    message type) return None.
    """
    if (
        not self.server_mode
        or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
        or self.is_authenticated()
    ):
        return None
    reply = Message()
    if ptype == MSG_GLOBAL_REQUEST:
        # global requests get a plain failure
        reply.add_byte(cMSG_REQUEST_FAILURE)
    elif ptype == MSG_CHANNEL_OPEN:
        # channel opens must echo the channel id back in the failure
        kind = message.get_text()
        chanid = message.get_int()
        reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
        reply.add_int(chanid)
        reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
        reply.add_string("")
        reply.add_string("en")
    return reply
Checks message type against current auth state. If server mode, and auth has not succeeded, and the message is of a post-auth type (channel open or global request) an appropriate error response Message is crafted and returned to caller for sending. Otherwise (client mode, authed, or pre-auth message) returns None.
def revoke_access_token(self):
    """Revoke the access token currently in use (no-op if none is set)."""
    if not self._access_token:
        return
    payload = {'token_type_hint': 'access_token',
               'token': self._access_token}
    self.execute_post('authentication/revoke', json=payload)
    self._access_token = None
Revoke the access token currently in use.
def get_crc(msg):
    """Return the 2-byte CRC of *msg* as a little-endian byte string.

    :param msg: A byte array.
    :return: Byte array of 2 bytes.
    """
    register = 0xFFFF  # initial CRC register value
    for byte_ in msg:
        try:
            # Python 2: iterating bytes yields 1-char strings -> unpack to int
            val = struct.unpack('<B', byte_)[0]
        except TypeError:
            # Python 3: iterating bytes already yields ints
            val = byte_
        # table-driven CRC update, one byte at a time
        register = \
            (register >> 8) ^ look_up_table[(register ^ val) & 0xFF]
    return struct.pack('<H', register)
Return CRC of 2 byte for message. >>> assert get_crc(b'\x02\x07') == struct.unpack('<H', b'\x41\x12') :param msg: A byte array. :return: Byte array of 2 bytes.
def sort(self, order="asc"):
    """Sort the prepared result list in place and return self.

    :@param order: "asc" (default) or "desc"
    :@type order: string
    :@return self
    """
    self.__prepare()
    if isinstance(self._json_data, list):
        self._json_data = sorted(self._json_data, reverse=(order != "asc"))
    return self
Return the sorted result of the given list. :@param order: "asc" (ascending, default) or "desc" (descending) :@type order: string :@return self