code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _set_dict_translations(instance, dict_translations):
    """Set translation attributes on ``instance`` from a dict holding all translations.

    Returns False (making no changes) when the model has no translatable
    fields or the site is monolingual.
    """
    if not hasattr(instance._meta, "translatable_fields"):
        return False
    if site_is_monolingual():
        return False
    for field in instance._meta.translatable_fields:
        for lang in settings.LANGUAGES:
            lang = lang[0]
            if lang == settings.LANGUAGE_CODE:
                continue
            trans_field = trans_attr(field, lang)
            # `dict.has_key` was removed in Python 3; use the `in` operator.
            if trans_field in dict_translations:
                setattr(instance, trans_field, dict_translations[trans_field])
            trans_isfuzzy = trans_is_fuzzy_attr(field, lang)
            if trans_isfuzzy in dict_translations:
                # Accept both the string "1" and the int 1 as truthy markers.
                fuzzy_raw = dict_translations[trans_isfuzzy]
                is_fuzzy_value = (fuzzy_raw == "1") or (fuzzy_raw == 1)
                setattr(instance, trans_isfuzzy, is_fuzzy_value)
Establece los atributos de traducciones a partir de una dict que contiene todas las traducciones.
def escape_string(string):
    """Escape a string for use in Gerrit commands.

    :arg str string: The string to escape.
    :returns: The string with backslashes and double quotes escaped, wrapped
        in double quotes so it can be passed to Gerrit commands that require
        double-quoted strings.
    """
    escaped = string.replace('\\', '\\\\').replace('"', '\\"')
    return '"' + escaped + '"'
Escape a string for use in Gerrit commands. :arg str string: The string to escape. :returns: The string with necessary escapes and surrounding double quotes so that it can be passed to any of the Gerrit commands that require double-quoted strings.
def _request_process_json_standard(self, response_data): data = response_data.get('data', {}).get(self.request_entity, []) status = response_data.get('status', 'Failure') return data, status
Handle JSON response This should be the most common response from the ThreatConnect API. Return: (string): The response data (string): The response status
def any_contains_any(strings, candidates):
    """Return True if any of *strings* contains any of *candidates*.

    The original fell through and implicitly returned None on no match;
    return an explicit bool instead (truthiness is unchanged for callers).
    """
    return any(c in string for string in strings for c in candidates)
Whether any of the strings contains any of the candidates.
def media_type_str(mediatype):
    """Convert internal API media type to string."""
    names = {
        const.MEDIA_TYPE_UNKNOWN: 'Unknown',
        const.MEDIA_TYPE_VIDEO: 'Video',
        const.MEDIA_TYPE_MUSIC: 'Music',
        const.MEDIA_TYPE_TV: 'TV',
    }
    return names.get(mediatype, 'Unsupported')
Convert internal API media type to string.
def _set_comment(self, section, comment, key=None):
    """Set a comment for a section or key.

    :param str section: Section to add the comment to
    :param str comment: Comment text
    :param str key: Optional key within the section to attach the comment to
    """
    # NOTE(review): replacing any multi-line comment with the literal string
    # '\n comment = ' looks like garbled/corrupted logic -- confirm the
    # intended multi-line handling against the original source history.
    if '\n' in comment:
        comment = '\n comment = '
    if key:
        self._comments[(section, key)] = comment
    else:
        self._comments[section] = comment
Set a comment for section or key :param str section: Section to add comment to :param str comment: Comment to add :param str key: Key to add comment to
def get_canonical_headers(headers):
    """Canonicalize headers for signing.

    Lower-cases header names, collapses internal whitespace runs in values,
    joins duplicate headers with commas and sorts by name.

    :type headers: Union[dict, List[Tuple[str, str]]]
    :param headers: (Optional) HTTP headers to canonicalize.
    :returns: Tuple of (list of "name:value" strings, list of (name, value)
        pairs), both sorted by header name.
    """
    if headers is None:
        header_items = []
    elif isinstance(headers, dict):
        header_items = list(headers.items())
    else:
        header_items = headers
    if not header_items:
        return [], []
    grouped = collections.defaultdict(list)
    for name, value in header_items:
        name = name.lower().strip()
        grouped[name].append(MULTIPLE_SPACES.sub(" ", value.strip()))
    ordered_headers = sorted(
        (name, ",".join(values)) for name, values in grouped.items())
    canonical_headers = ["{}:{}".format(*pair) for pair in ordered_headers]
    return canonical_headers, ordered_headers
Canonicalize headers for signing. See: https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers :type headers: Union[dict|List(Tuple(str,str))] :param headers: (Optional) Additional HTTP headers to be included as part of the signed URLs. See: https://cloud.google.com/storage/docs/xml-api/reference-headers Requests using the signed URL *must* pass the specified header (name and value) with each request for the URL. :rtype: tuple :returns: Tuple of the list of canonical "name:value" header strings and the list of ordered (name, value) pairs, normalized / sorted per the URL referenced above.
def write_bytes(out_data, encoding="ascii"):
    """Return *out_data* as a byte stream, Python 2 and 3 compatible.

    Text is encoded as UTF-8 when ``encoding == "utf-8"``, otherwise as
    ASCII with non-encodable characters dropped; byte strings pass through
    unchanged.

    Raises:
        ValueError: if *out_data* is neither text nor bytes.
    """
    if sys.version_info[0] >= 3:
        if isinstance(out_data, str):
            if encoding == "utf-8":
                return out_data.encode("utf-8")
            return out_data.encode("ascii", "ignore")
        if isinstance(out_data, bytes):
            return out_data
    else:
        # Python 2: type("") is str, type(str("")) is also str.
        if isinstance(out_data, type("")):
            if encoding == "utf-8":
                return out_data.encode("utf-8")
            return out_data.encode("ascii", "ignore")
        if isinstance(out_data, type(str(""))):
            return out_data
    raise ValueError(
        "Invalid value for out_data neither unicode nor byte string: {}".format(
            out_data
        )
    )
Write Python2 and Python3 compatible byte stream.
def materialize(datasets):
    """Perform multiple materializations at once.

    If *datasets* is a dict, each dataset is materialized to its output
    path; if a list, each dataset is materialized without an output path.

    :param datasets: a list of GMQLDataset, or a dict
        {'output_path': GMQLDataset}
    :return: a list of GDataframe, or a dict {'output_path': GDataframe}
    :raises TypeError: if *datasets* (or any contained value) has the wrong
        type
    """
    from .. import GMQLDataset
    if isinstance(datasets, dict):
        result = dict()
        for output_path, dataset in datasets.items():
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the dictionary must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            result[output_path] = dataset.materialize(output_path)
    elif isinstance(datasets, list):
        result = []
        for dataset in datasets:
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the list must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            result.append(dataset.materialize())
    else:
        # Fixed typo in the original message ("dictionary of a list").
        raise TypeError("The input must be a dictionary or a list. "
                        "{} was given".format(type(datasets)))
    return result
Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized. The engine will perform all the materializations at the same time, if an output path is provided, while will perform each operation separately if the output_path is not specified. :param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset} :return: a list of GDataframe or a dictionary {'output_path' : GDataframe}
def _has_method(arg, method): return hasattr(arg, method) and callable(getattr(arg, method))
Returns true if the given object has a method with the given name. :param arg: the object :param method: the method name :type method: string :rtype: bool
def covariance(self, param_1, param_2):
    """Return the covariance between param_1 and param_2.

    :param param_1: ``Parameter`` instance (must be in ``self.model.params``).
    :param param_2: ``Parameter`` instance.
    :return: Covariance of the two params from ``self.covariance_matrix``.
    """
    index_1 = self.model.params.index(param_1)
    index_2 = self.model.params.index(param_2)
    return self.covariance_matrix[index_1, index_2]
Return the covariance between param_1 and param_2. :param param_1: ``Parameter`` Instance. :param param_2: ``Parameter`` Instance. :return: Covariance of the two params.
def non_existing_path(path_, dpath=None, offset=0, suffix=None, force_fmt=False):
    r"""Search for and return a path guaranteed to not exist.

    The original body began with a stray ``r`` token (an orphaned raw-string
    docstring prefix); it is restored here as a proper raw docstring.

    Args:
        path_ (str): path string; may include a "%" format specifier.
        dpath (str): directory to search in (default: dirname of *path_*).
        offset (int): starting offset for the conflict counter (default = 0).
        suffix (None): optional suffix for the base name (default = None).
        force_fmt (bool): always append a "%d" counter, even when the plain
            path does not exist yet.

    Returns:
        str: a path inside *dpath* that does not currently exist.
    """
    import utool as ut
    from os.path import basename, dirname
    if dpath is None:
        dpath = dirname(path_)
    base_fmtstr = basename(path_)
    if suffix is not None:
        base_fmtstr = ut.augpath(base_fmtstr, suffix)
    if '%' not in base_fmtstr:
        if not force_fmt:
            # Fast path: the unmodified name may already be free.
            first_choice = join(dpath, base_fmtstr)
            if not exists(first_choice):
                return first_choice
        base_fmtstr = ut.augpath(base_fmtstr, '%d')
    dname_list = ut.glob(dpath, pattern='*', recursive=False,
                         with_files=True, with_dirs=True)
    conflict_set = set(basename(dname) for dname in dname_list)
    newname = ut.get_nonconflicting_string(base_fmtstr, conflict_set,
                                           offset=offset)
    newpath = join(dpath, newname)
    return newpath
r""" Searches for and finds a path garuenteed to not exist. Args: path_ (str): path string. If may include a "%" formatstr. dpath (str): directory path(default = None) offset (int): (default = 0) suffix (None): (default = None) Returns: str: path_ - path string CommandLine: python -m utool.util_path non_existing_path Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> base = ut.ensure_app_resource_dir('utool', 'tmp') >>> ut.touch(base + '/tmp.txt') >>> ut.touch(base + '/tmp0.txt') >>> ut.delete(base + '/tmp1.txt') >>> path_ = base + '/tmp.txt' >>> newpath = ut.non_existing_path(path_) >>> assert basename(newpath) == 'tmp1.txt' Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> base = ut.ensure_app_resource_dir('utool', 'tmp') >>> ut.ensurepath(base + '/dir_old') >>> ut.ensurepath(base + '/dir_old0') >>> ut.ensurepath(base + '/dir_old1') >>> ut.delete(base + '/dir_old2') >>> path_ = base + '/dir' >>> suffix = '_old' >>> newpath = ut.non_existing_path(path_, suffix=suffix) >>> ut.assert_eq(basename(newpath), 'dir_old2')
def libvlc_media_list_player_event_manager(p_mlp):
    """Return the event manager of this media_list_player.

    @param p_mlp: media list player instance.
    @return: the event manager.
    """
    # Reuse the cached ctypes binding, or build it on first use.
    f = _Cfunctions.get('libvlc_media_list_player_event_manager', None) or \
        _Cfunction('libvlc_media_list_player_event_manager', ((1,),),
                   class_result(EventManager), ctypes.c_void_p, MediaListPlayer)
    return f(p_mlp)
Return the event manager of this media_list_player. @param p_mlp: media list player instance. @return: the event manager.
def _expand_place_ids(self, terms):
    """Look up place identifiers (gvids) for the given search terms.

    Args:
        terms (str or unicode): terms to look up.

    Returns:
        str or list: the given *terms* if no identifiers were found,
        otherwise a list of identifier strings, expanded with all values
        of each parsed GVid.
    """
    place_vids = []
    first_type = None
    for result in self.backend.identifier_index.search(terms):
        if not first_type:
            first_type = result.type
        # Only keep results with the same type as the first hit.
        if result.type != first_type:
            continue
        place_vids.append(result.vid)
    if not place_vids:
        return terms
    expanded = set(itertools.chain.from_iterable(
        iallval(GVid.parse(vid)) for vid in place_vids))
    place_vids.extend(str(val) for val in expanded)
    return place_vids
Lookups all of the place identifiers to get gvids Args: terms (str or unicode): terms to lookup Returns: str or list: given terms if no identifiers found, otherwise list of identifiers.
def extract_arguments(args, defaults):
    """Extract a set of arguments from a large dictionary.

    Parameters
    ----------
    args : dict
        Dictionary with the argument values to use.
    defaults : dict
        Dictionary with all the arguments to extract, and default values
        for each.

    Returns
    -------
    out_dict : dict
        A dictionary with only the extracted arguments.
    """
    out_dict = convert_option_dict_to_dict(defaults)
    for key in defaults:
        mapped_val = args.get(key)
        # Only override the default when the caller supplied a real value
        # (the original used a dead `if ...: pass / else:` branch).
        if mapped_val is not None:
            out_dict[key] = mapped_val
    return out_dict
Extract a set of arguments from a large dictionary Parameters ---------- args : dict Dictionary with the arguments values to use defaults : dict Dictionary with all the argument to extract, and default values for each Returns ------- out_dict : dict A dictionary with only the extracted arguments
def restore_image_options(cli, image, options):
    """Restore the CMD and ENTRYPOINT values of *image*.

    This is needed because ENTRYPOINT and CMD are force-overwritten in the
    `run_code_in_container` function (to run code through /bin/bash); this
    rebuilds the image with the original values.
    """
    parts = [u'FROM {image}\nCMD {cmd}'.format(
        image=image, cmd=json.dumps(options['cmd']))]
    if options['entrypoint']:
        parts.append('\nENTRYPOINT {}'.format(json.dumps(options['entrypoint'])))
    dockerfile = io.StringIO()
    dockerfile.write(''.join(parts))
    cli.build(tag=image, fileobj=dockerfile)
Restores CMD and ENTRYPOINT values of the image This is needed because we force the overwrite of ENTRYPOINT and CMD in the `run_code_in_container` function, to be able to run the code in the container, through /bin/bash.
def fetch_defense_data(self):
    """Lazy initialization of data necessary to execute defenses.

    No-op when the data is already initialized; otherwise loads
    submissions, dataset batches and adversarial batches from the
    datastore, then reads the dataset metadata.
    """
    if self.defenses_data_initialized:
        return
    logging.info('Fetching defense data from datastore')
    self.submissions.init_from_datastore()
    self.dataset_batches.init_from_datastore()
    self.adv_batches.init_from_datastore()
    self.read_dataset_metadata()
    # Mark as done so subsequent calls return immediately.
    self.defenses_data_initialized = True
Lazy initialization of data necessary to execute defenses.
def _pad(self, text): top_bottom = ("\n" * self._padding) + " " right_left = " " * self._padding * self.PAD_WIDTH return top_bottom + right_left + text + right_left + top_bottom
Pad the text.
def rule_match(component, cmd):
    """See if one rule component matches *cmd*.

    A component matches on exact equality, or when *cmd* appears in the
    component's expansion (see ``rule_expand``).
    """
    return component == cmd or cmd in rule_expand(component, cmd)
see if one rule component matches
def generic_path_not_found(*args):
    """Create a Lambda Service generic PathNotFound response.

    Parameters
    ----------
    args list
        List of arguments Flask passes to the method

    Returns
    -------
    Flask.Response
        A response object representing the GenericPathNotFound error
    """
    # (error_code, http_status_code) pair for this exception type.
    exception_tuple = LambdaErrorResponses.PathNotFoundException
    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(
            LambdaErrorResponses.LOCAL_SERVICE_ERROR,
            "PathNotFoundException"),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
Creates a Lambda Service Generic PathNotFound Response Parameters ---------- args list List of arguments Flask passes to the method Returns ------- Flask.Response A response object representing the GenericPathNotFound Error
def build(self):
    """Get the build name.

    :return: build name, or None if not found.
    """
    if len(self.dutinformation) > 0:
        first_build = self.dutinformation.get(0).build
        if first_build is not None:
            return first_build.name
    return None
get build name. :return: build name. None if not found
def epoch_cb(self):
    """Callback function run after each epoch.

    Records the elapsed time and the per-epoch wall-clock duration and
    appends them to the epoch metrics.
    """
    metrics = {'elapsed': self.elapsed()}
    now = datetime.datetime.now()
    metrics['epoch_time'] = now - self.last_epoch_time
    self.append_metrics(metrics, 'epoch')
    self.last_epoch_time = now
Callback function after each epoch. Now it records each epoch time and append it to epoch dataframe.
def get_js(self):
    """Fetch and return the javascript file path and contents.

    Falls back to the default theme's ``slides.js`` when the current theme
    does not provide one. The dict holds both the path url (for standalone
    presentations) and the raw contents (for embedded use).
    """
    js_file = os.path.join(self.theme_dir, 'js', 'slides.js')
    if not os.path.exists(js_file):
        # Fall back to the default theme's copy.
        js_file = os.path.join(THEMES_DIR, 'default', 'js', 'slides.js')
        if not os.path.exists(js_file):
            raise IOError(u"Cannot find slides.js in default theme")
    with codecs.open(js_file, encoding=self.encoding) as js_file_obj:
        return {
            'path_url': utils.get_path_url(js_file, self.relative),
            'contents': js_file_obj.read(),
        }
Fetches and returns javascript file path or contents, depending if we want a standalone presentation or not.
def _get_index_class_name(self, index_cls): cls_name = index_cls.__name__ aliases = self.Meta.index_aliases return aliases.get(cls_name, cls_name.split('.')[-1])
Converts in index model class to a name suitable for use as a field name prefix. A user may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
def _urlnorm(cls, uri):
    """Normalize the URL to create a safe key for the cache.

    Lower-cases scheme and authority, defaults the path to "/", keeps the
    query, and drops any fragment. Raises on non-absolute URIs.
    """
    scheme, authority, path, query, fragment = parse_uri(uri)
    if not scheme or not authority:
        raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
    request_uri = path or "/"
    if query:
        request_uri = "?".join([request_uri, query])
    return scheme.lower() + "://" + authority.lower() + request_uri
Normalize the URL to create a safe key for the cache
def get_options(self):
    """Return the current query options for the Impala session.

    Builds a dict from the first two columns (option name -> value) of each
    row returned by a ``SET`` query.
    """
    query = 'SET'
    # NOTE(review): assumes self.con.fetchall(query) executes the statement
    # and returns the rows -- confirm against the connection wrapper's API.
    return dict(row[:2] for row in self.con.fetchall(query))
Return current query options for the Impala session
def cyclic(self):
    """Returns True if the options cycle, otherwise False."""
    for val in self.kwargs.values():
        if isinstance(val, Cycle):
            return True
    return False
Returns True if the options cycle, otherwise False
def set(self, name, msg):
    """Fills in the error name and message."""
    # Pass msg through a "%s" format string so libdbus does not interpret
    # any '%' characters inside the message itself.
    dbus.dbus_set_error(self._dbobj, name.encode(), b"%s", msg.encode())
fills in the error name and message.
def logo_element():
    """Create a sanitised local url to the logo for insertion into html.

    :returns: A sanitised local url to the logo prefixed with file://.
    :rtype: str

    .. note:: We are not using QUrl here because on Windows 10 it returns
        an empty path when using QUrl.toLocalPath.
    """
    logo_path = os.path.join(
        resources_path(), 'img', 'logos', 'inasafe-logo.png')
    return urllib.parse.urljoin(
        'file:', urllib.request.pathname2url(logo_path))
Create a sanitised local url to the logo for insertion into html. :returns: A sanitised local url to the logo prefixed with file://. :rtype: str ..note:: We are not using QUrl here because on Windows 10 it returns an empty path if using QUrl.toLocalPath
def download_dataset(self, dataset_name, local_path, how="stream"):
    """Download the specified dataset from the repository into a local folder.

    :param dataset_name: the name the dataset has in the repository
    :param local_path: where you want to save the dataset (must not exist)
    :param how: 'zip' downloads the whole dataset as a zip file and
        decompresses it; 'stream' downloads the dataset sample by sample
    :return: None
    :raises ValueError: if *local_path* already exists or *how* is invalid
    """
    # Validate `how` up front so an invalid mode does not leave
    # freshly-created directories behind (the original created them first).
    if how not in ('zip', 'stream'):
        raise ValueError("how must be {'zip', 'stream'}")
    if os.path.isdir(local_path):
        raise ValueError("Path {} already exists!".format(local_path))
    os.makedirs(local_path)
    local_path = os.path.join(local_path, FILES_FOLDER)
    os.makedirs(local_path)
    if how == 'zip':
        return self.download_as_zip(dataset_name, local_path)
    return self.download_as_stream(dataset_name, local_path)
It downloads from the repository the specified dataset and puts it in the specified local folder :param dataset_name: the name the dataset has in the repository :param local_path: where you want to save the dataset :param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream' downloads the dataset sample by sample :return: None
async def volume(dev: Device, volume, output):
    """Get and set the volume settings.

    Passing 'mute' as new volume will mute the volume, 'unmute' removes it;
    any other truthy value is set as the new volume level. With no volume
    given, the current controller(s) are printed.
    """
    vol = None
    vol_controls = await dev.get_volume_information()
    if output is not None:
        click.echo("Using output: %s" % output)
        output_uri = (await dev.get_zone(output)).uri
        # Pick the controller attached to the requested output.
        for v in vol_controls:
            if v.output == output_uri:
                vol = v
                break
    else:
        # No output specified: default to the first controller.
        vol = vol_controls[0]
    if vol is None:
        err("Unable to find volume controller: %s" % output)
        return
    if volume and volume == "mute":
        click.echo("Muting")
        await vol.set_mute(True)
    elif volume and volume == "unmute":
        click.echo("Unmuting")
        await vol.set_mute(False)
    elif volume:
        click.echo("Setting volume to %s" % volume)
        await vol.set_volume(volume)
    if output is not None:
        click.echo(vol)
    else:
        [click.echo(x) for x in vol_controls]
Get and set the volume settings. Passing 'mute' as new volume will mute the volume, 'unmute' removes it.
def query_band(self, value):
    """Set the connection's query_band property.

    Args:
        value: New query_band value (string), or None to remove the
            attribute from the connection XML.

    Returns:
        Nothing.
    """
    self._query_band = value
    if value is not None:
        self._connectionXML.set('query-band-spec', value)
    else:
        # Remove the attribute entirely; ignore if it was never set.
        self._connectionXML.attrib.pop('query-band-spec', None)
Set the connection's query_band property. Args: value: New query_band value. String. Returns: Nothing.
def signup(remote_app):
    """Extra signup step.

    Aborts with 404 when the remote app has no signup handler or the
    handler's view returns None.
    """
    handlers = current_oauthclient.signup_handlers
    if remote_app not in handlers:
        return abort(404)
    res = handlers[remote_app]['view']()
    if res is None:
        return abort(404)
    return res
Extra signup step.
def CreateFromDOM(node, default_namespace=None):
    """Create a Python instance from the given DOM node.

    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use
    L{CreateFromDocument}.
    """
    if default_namespace is None:
        # Fall back to the module's default namespace.
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
Create a Python instance from the given DOM node. The node tag must correspond to an element declaration in this module. @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}.
def expect_column_values_to_be_in_set(self,
                                      column,
                                      value_set,
                                      mostly=None,
                                      parse_strings_as_datetimes=None,
                                      result_format=None,
                                      include_config=False,
                                      catch_exceptions=None,
                                      meta=None
                                      ):
    """Expect each value in *column* to be a member of *value_set*.

    This is an abstract column-map expectation: concrete Dataset backends
    must provide the implementation.

    Args:
        column (str): the column name.
        value_set (set-like): a set of objects used for comparison.

    Keyword Args:
        mostly (None or float 0..1): success threshold fraction.
        parse_strings_as_datetimes (boolean or None): if True, value_set
            strings are parsed as datetimes before comparison.

    Other Parameters:
        result_format, include_config, catch_exceptions, meta: standard
            expectation output controls.

    Raises:
        NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError
Expect each column value to be in a given set. For example: :: # my_df.my_col = [1,2,2,3,3,3] >>> my_df.expect_column_values_to_be_in_set( "my_col", [2,3] ) { "success": false "result": { "unexpected_count": 1 "unexpected_percent": 0.16666666666666666, "unexpected_percent_nonmissing": 0.16666666666666666, "partial_unexpected_list": [ 1 ], }, } expect_column_values_to_be_in_set is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): \ The column name. value_set (set-like): \ A set of objects used for comparison. Keyword Args: mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. parse_strings_as_datetimes (boolean or None) : If True values provided in value_set will be parsed as \ datetimes before making comparisons. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_column_values_to_not_be_in_set
def add_line(self, line='', *, empty=False):
    """Adds a line to the current page.

    If the line exceeds the :attr:`max_size` then an exception is raised.

    Parameters
    -----------
    line: :class:`str`
        The line to add.
    empty: :class:`bool`
        Indicates if another empty line should be added.

    Raises
    ------
    RuntimeError
        The line was too big for the current :attr:`max_size`.
    """
    limit = self.max_size - self._prefix_len - 2
    if len(line) > limit:
        raise RuntimeError('Line exceeds maximum page size %s' % (limit))
    # Start a new page when this line (plus its newline) would overflow.
    if self._count + len(line) + 1 > self.max_size:
        self.close_page()
    self._count += len(line) + 1
    self._current_page.append(line)
    if empty:
        self._current_page.append('')
        self._count += 1
Adds a line to the current page. If the line exceeds the :attr:`max_size` then an exception is raised. Parameters ----------- line: :class:`str` The line to add. empty: :class:`bool` Indicates if another empty line should be added. Raises ------ RuntimeError The line was too big for the current :attr:`max_size`.
def render(data, saltenv='base', sls='', argline='', **kwargs):
    """Decrypt the data to be rendered that was encrypted using AWS KMS
    envelope encryption."""
    return _decrypt_object(
        data, translate_newlines=kwargs.get('translate_newlines', False))
Decrypt the data to be rendered that was encrypted using AWS KMS envelope encryption.
def toggle_rules(self, *args):
    """Display or hide the view for constructing rules out of cards."""
    # When not already on the rules screen and a regular (non-stat) entity
    # is selected, point the rules view at the current selection first.
    if self.app.manager.current != 'rules' and not isinstance(
            self.app.selected_proxy, CharStatProxy):
        self.app.rules.entity = self.app.selected_proxy
        self.app.rules.rulebook = self.app.selected_proxy.rulebook
    if isinstance(self.app.selected_proxy, CharStatProxy):
        # Character stats have a dedicated rules view.
        self.app.charrules.character = self.app.selected_proxy
        self.app.charrules.toggle()
    else:
        self.app.rules.toggle()
Display or hide the view for constructing rules out of cards.
def _summary_matching_gos(prt, pattern, matching_gos, all_gos): msg = 'Found {N} GO(s) out of {M} matching pattern("{P}")\n' num_gos = len(matching_gos) num_all = len(all_gos) prt.write(msg.format(N=num_gos, M=num_all, P=pattern))
Print summary for get_matching_gos.
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10,
                        address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
    """Sets the LE scan parameters.

    Args:
        scan_type: ScanType.(PASSIVE|ACTIVE)
        interval_ms: ms (as float) between scans (valid range 2.5ms - 10240ms)
            ..note:: when interval and window are equal, the scan runs
            continuously
        window_ms: ms (as float) scan duration (valid range 2.5ms - 10240ms)
        address_type: BluetoothAddressType.(PUBLIC|RANDOM)
        filter_type: ScanFilter.(ALL|WHITELIST_ONLY); only ALL is supported

    Raises:
        ValueError: A value had an unexpected format or was not in range
    """
    # Convert ms to HCI time units; 0x0004..0x4000 corresponds to
    # 2.5ms..10240ms (presumably 0.625 ms units per the BLE spec --
    # MS_FRACTION_DIVIDER's value is not visible here, confirm).
    interval_fractions = interval_ms / MS_FRACTION_DIVIDER
    if interval_fractions < 0x0004 or interval_fractions > 0x4000:
        raise ValueError(
            "Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format(
                interval_fractions))
    window_fractions = window_ms / MS_FRACTION_DIVIDER
    if window_fractions < 0x0004 or window_fractions > 0x4000:
        raise ValueError(
            "Invalid window given {}, must be in range of 2.5ms to 10240ms!".format(
                window_fractions))
    interval_fractions, window_fractions = int(interval_fractions), int(window_fractions)
    # >BHHBB: scan type, interval, window, own address type, filter policy.
    scan_parameter_pkg = struct.pack(
        ">BHHBB",
        scan_type,
        interval_fractions,
        window_fractions,
        address_type,
        filter_type)
    self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS,
                            scan_parameter_pkg)
sets the le scan parameters Args: scan_type: ScanType.(PASSIVE|ACTIVE) interval: ms (as float) between scans (valid range 2.5ms - 10240ms) ..note:: when interval and window are equal, the scan runs continuously window: ms (as float) scan duration (valid range 2.5ms - 10240ms) address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM) * PUBLIC = use device MAC address * RANDOM = generate a random MAC address and use that filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will return all fetched bluetooth packets (WHITELIST_ONLY is not supported, because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented) Raises: ValueError: A value had an unexpected format or was not in range
def _extend_data(self, datapoint, new_data): if new_data: try: self.data[datapoint].extend(new_data) except KeyError: self.data[datapoint] = new_data
extend or assign new data to datapoint
def apply_conditions(self, value):
    """Applies any boundary conditions (cyclic wrap and/or reflection) to
    the given value.

    Parameters
    ----------
    value : float
        The value to apply the conditions to.

    Returns
    -------
    float
        The value after the conditions are applied.
    """
    retval = value
    if self._cyclic:
        # Wrap the value into the [min, max) interval.
        retval = apply_cyclic(value, self)
    retval = self._reflect(retval)
    # Collapse single-element arrays back down to a scalar.
    if isinstance(retval, numpy.ndarray) and retval.size == 1:
        try:
            retval = retval[0]
        except IndexError:
            # 0-d arrays cannot be indexed; convert directly instead.
            retval = float(retval)
    return retval
Applies any boundary conditions to the given value. The value is manipulated according based on the following conditions: * If `self.cyclic` is True then `value` is wrapped around to the minimum (maximum) bound if `value` is `>= self.max` (`< self.min`) bound. For example, if the minimum and maximum bounds are `0, 2*pi` and `value = 5*pi`, then the returned value will be `pi`. * If `self.min` is a reflected boundary then `value` will be reflected to the right if it is `< self.min`. For example, if `self.min = 10` and `value = 3`, then the returned value will be 17. * If `self.max` is a reflected boundary then `value` will be reflected to the left if it is `> self.max`. For example, if `self.max = 20` and `value = 27`, then the returned value will be 13. * If `self.min` and `self.max` are both reflected boundaries, then `value` will be reflected between the two boundaries until it falls within the bounds. The first reflection occurs off of the maximum boundary. For example, if `self.min = 10`, `self.max = 20`, and `value = 42`, the returned value will be 18 ( the first reflection yields -2, the second 22, and the last 18). * If neither bounds are reflected and cyclic is False, then the value is just returned as-is. Parameters ---------- value : float The value to apply the conditions to. Returns ------- float The value after the conditions are applied; see above for details.
def get_or_create(self, db_name):
    """Create the database named *db_name* if it doesn't already exist,
    and return it.

    :raises RuntimeError: if the server refuses to create the database.
    """
    if db_name not in self:
        res = self.resource.put(db_name)
        # 201 Created is the only acceptable outcome.
        if res[0] != 201:
            raise RuntimeError(
                'Failed to create database "{}"'.format(db_name)
            )
    return self[db_name]
Creates the database named `db_name` if it doesn't already exist and return it
def clear(self):
    """Clear the terminal screen.

    Only acts when stdout is a tty, or when the terminal is mintty (which
    does not report as one).
    """
    # NOTE(review): precedence here is (hasattr and isatty) OR mintty; also
    # the mintty command string r'echo -en "\ec' contains an unbalanced
    # double quote -- confirm both against the original intent.
    if hasattr(self.stdout, 'isatty') and self.stdout.isatty() or self.term_type == 'mintty':
        # Map terminal type to (command, use-shell).
        cmd, shell = {
            'posix': ('clear', False),
            'nt': ('cls', True),
            'cygwin': (['echo', '-en', r'\ec'], False),
            'mintty': (r'echo -en "\ec', False),
        }[self.term_type]
        subprocess.call(cmd, shell=shell, stdin=self.stdin,
                        stdout=self.stdout, stderr=self.stderr)
Clear the terminal screen.
def getlist(self, event):
    """Processes configuration list requests.

    Collects name/uuid/class/active info for every stored component, sorts
    it by name, and sends the list back to the requesting client.

    :param event: client request event
    """
    try:
        componentlist = model_factory(Schema).find({})
        data = []
        for comp in componentlist:
            try:
                data.append({
                    'name': comp.name,
                    'uuid': comp.uuid,
                    'class': comp.componentclass,
                    'active': comp.active
                })
            except AttributeError:
                # Malformed component documents are logged and skipped.
                self.log('Bad component without component class encountered:',
                         lvl=warn)
                self.log(comp.serializablefields(), pretty=True, lvl=warn)
        data = sorted(data, key=lambda x: x['name'])
        response = {
            'component': 'hfos.ui.configurator',
            'action': 'getlist',
            'data': data
        }
        self.fireEvent(send(event.client.uuid, response))
        return
    except Exception as e:
        # Top-level guard: log, never crash the event handler.
        self.log("List error: ", e, type(e), lvl=error, exc=True)
Processes configuration list requests :param event:
def prt_goids(self, prt):
    """Print all GO IDs in the plot, plus their color."""
    fmt = self.gosubdag.prt_attr['fmta']
    go2color = self.pydotnodego.go2color
    ordered = sorted(self.gosubdag.go2nt.values(),
                     key=lambda nt: [nt.NS, nt.depth, nt.alt])
    for ntgo in ordered:
        prt.write("{COLOR:7} {GO}\n".format(
            COLOR=go2color.get(ntgo.GO, ""),
            GO=fmt.format(**ntgo._asdict())))
Print all GO IDs in the plot, plus their color.
def _get_preferred_cipher_suite(
        cls,
        server_connectivity_info: ServerConnectivityInfo,
        ssl_version: OpenSslVersionEnum,
        accepted_cipher_list: List['AcceptedCipherSuite']
) -> Optional['AcceptedCipherSuite']:
    """Try to detect the server's preferred cipher suite among all cipher
    suites supported by SSLyze.

    Sends the accepted ciphers in two different orders; if the server
    selects the same cipher both times it has a preference, otherwise None.
    """
    if len(accepted_cipher_list) < 2:
        return None
    accepted_cipher_names = [cipher.openssl_name for cipher in accepted_cipher_list]
    should_use_legacy_openssl = None
    if ssl_version == OpenSslVersionEnum.TLSV1_2:
        # Only fall back to the legacy OpenSSL when at most one accepted
        # cipher is supported by the modern one. BUGFIX: the counter used
        # to be reset to 0 inside the loop, so it could never exceed 1 and
        # the legacy flag was never cleared.
        should_use_legacy_openssl = True
        modern_supported_cipher_count = 0
        for cipher_name in accepted_cipher_names:
            if not WorkaroundForTls12ForCipherSuites.requires_legacy_openssl(cipher_name):
                modern_supported_cipher_count += 1
                if modern_supported_cipher_count > 1:
                    should_use_legacy_openssl = False
                    break
    first_cipher_str = ', '.join(accepted_cipher_names)
    # Same cipher list with the first two entries swapped.
    second_cipher_str = ', '.join(
        [accepted_cipher_names[1], accepted_cipher_names[0]] + accepted_cipher_names[2:])
    try:
        first_cipher = cls._get_selected_cipher_suite(
            server_connectivity_info, ssl_version, first_cipher_str, should_use_legacy_openssl
        )
        second_cipher = cls._get_selected_cipher_suite(
            server_connectivity_info, ssl_version, second_cipher_str, should_use_legacy_openssl
        )
    except (SslHandshakeRejected, ConnectionError):
        # Could not complete both handshakes: no preference information.
        return None
    if first_cipher.name == second_cipher.name:
        # The server picked the same cipher twice: it has a preference.
        return first_cipher
    return None
Try to detect the server's preferred cipher suite among all cipher suites supported by SSLyze.
def add_ihex(self, records, overwrite=False):
    """Add given Intel HEX records string.

    Set `overwrite` to ``True`` to allow already added data to be
    overwritten.
    """
    extended_segment_address = 0
    extended_linear_address = 0
    for record in StringIO(records):
        type_, address, size, data = unpack_ihex(record.strip())
        if type_ == IHEX_DATA:
            # Apply the active segment/linear offset, then scale the
            # address to byte units.
            address = (address + extended_segment_address + extended_linear_address)
            address *= self.word_size_bytes
            self._segments.add(_Segment(address, address + size,
                                        bytearray(data),
                                        self.word_size_bytes),
                               overwrite)
        elif type_ == IHEX_END_OF_FILE:
            pass
        elif type_ == IHEX_EXTENDED_SEGMENT_ADDRESS:
            # Segment base is shifted left by 4 bits (x16).
            extended_segment_address = int(binascii.hexlify(data), 16)
            extended_segment_address *= 16
        elif type_ == IHEX_EXTENDED_LINEAR_ADDRESS:
            # Linear base supplies the upper 16 address bits.
            extended_linear_address = int(binascii.hexlify(data), 16)
            extended_linear_address <<= 16
        elif type_ in [IHEX_START_SEGMENT_ADDRESS, IHEX_START_LINEAR_ADDRESS]:
            self.execution_start_address = int(binascii.hexlify(data), 16)
        else:
            raise Error("expected type 1..5 in record {}, but got {}".format(
                record, type_))
Add given Intel HEX records string. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def _url_params(size: str = '>400*300', format: str = 'jpg') -> str:
    """Build Google Images Search Url params and return them as a string.

    Raises RuntimeError for unsupported *size* or *format* values.
    """
    _fmts = {'jpg': 'ift:jpg', 'gif': 'ift:gif', 'png': 'ift:png', 'bmp': 'ift:bmp',
             'svg': 'ift:svg', 'webp': 'webp', 'ico': 'ift:ico'}
    if size not in _img_sizes:
        # Reconstructed message: the original f-string literal was lost
        # ("raise RuntimeError(f )" in the corrupted source).
        raise RuntimeError(
            f"Unexpected size argument value: {size}."
            f" See `_img_sizes` for supported sizes.")
    if format not in _fmts:
        raise RuntimeError(f"Unexpected image file format: {format}. Use jpg, gif, png, bmp, svg, webp, or ico.")
    return "&tbs=" + _img_sizes[size] + "," + _fmts[format]
Build Google Images Search Url params and return them as a string.
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):
    """Create the Signature TI object.

    Args:
        name: signature name.
        file_name: name of the signature file.
        file_type: type of the signature file.
        file_content: content of the signature file.
        owner: optional owner.
        **kwargs: forwarded to ``Signature``.

    Return:
        Signature: the constructed TI object.
    """
    return Signature(self.tcex, name, file_name, file_type, file_content,
                     owner=owner, **kwargs)
Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return:
def get_value(self, class_name, attr, default_value=None, state='normal', base_name='View'):
    """Get a single style attribute value for the given class.

    Falls back to *default_value* when the attribute is not present in the
    resolved style dict.
    """
    styles = self.get_dict_for_class(class_name, state, base_name)
    return styles.get(attr, default_value)
Get a single style attribute value for the given class.
def update(self, dt=None):
    """Simulate the model for a given time interval.

    Parameters
    ----------
    dt : Optional[float]
        The time step to simulate; if None (or non-positive), the default
        built-in time step is used.
    """
    dt = dt if (dt is not None and dt > 0) else self.dt
    tspan = [0, dt]
    # Continue from the current state rather than the initial conditions.
    res = self.sim.run(tspan=tspan, initials=self.state)
    self.state = res.species[-1]
    self.time += dt
    if self.time > self.stop_time:
        self.DONE = True
    print((self.time, self.state))
    # NOTE(review): .copy() implies self.time/self.state are numpy values;
    # a plain float would raise AttributeError here -- confirm.
    self.time_course.append((self.time.copy(), self.state.copy()))
Simulate the model for a given time interval. Parameters ---------- dt : Optional[float] The time step to simulate, if None, the default built-in time step is used.
def task_add_user(self, *args, **kwargs):
    """Add users to the current task via a dialog.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_task:
        return
    dialog = UserAdderDialog(task=self.cur_task)
    dialog.exec_()  # modal; blocks until the dialog is closed
    users = dialog.users
    for user in users:
        # Insert each selected user into the task's user tree model.
        userdata = djitemdata.UserItemData(user)
        treemodel.TreeItem(userdata, self.task_user_model.root)
Add users to the current task :returns: None :rtype: None :raises: None
def run_sphinx():
    """Run Sphinx via its `make html` command.

    Exits with an error message if the documentation build fails.
    """
    old_dir = here_directory()
    os.chdir(str(doc_directory()))
    try:
        # BUGFIX: shell=True combined with list args drops 'html' on POSIX;
        # run make directly with an argument list instead.
        doc_status = subprocess.check_call(['make', 'html'])
    finally:
        # Always restore the working directory, even if the build raises.
        os.chdir(str(old_dir))
    # BUGFIX: `is not 0` compared identity, not value.
    if doc_status != 0:
        exit(Fore.RED + 'Something broke generating your documentation...')
Runs Sphinx via it's `make html` command
def add_text(self, tag, text, global_step=None):
    """Add text data to the event file.

    Parameters
    ----------
    tag : str
        Name for the `text`.
    text : str
        Text to be saved to the event file.
    global_step : int
        Global step value to record.
    """
    self._file_writer.add_summary(text_summary(tag, text), global_step)
    if tag in self._text_tags:
        return
    self._text_tags.append(tag)
    plugin_dir = self.get_logdir() + '/plugins/tensorboard_text/'
    if not os.path.exists(plugin_dir):
        os.makedirs(plugin_dir)
    with open(plugin_dir + 'tensors.json', 'w') as fp:
        json.dump(self._text_tags, fp)
Add text data to the event file. Parameters ---------- tag : str Name for the `text`. text : str Text to be saved to the event file. global_step : int Global step value to record.
def profile_update(request, template="accounts/account_profile_update.html",
                   extra_context=None):
    """Profile update form view.

    Renders the profile form on GET, saves it on valid POST and redirects
    to the user's profile (or back to the update page when the profile URL
    cannot be reversed).
    """
    profile_form = get_profile_form()
    form = profile_form(request.POST or None, request.FILES or None,
                        instance=request.user)
    if request.method == "POST" and form.is_valid():
        user = form.save()
        info(request, _("Profile updated"))
        try:
            return redirect("profile", username=user.username)
        except NoReverseMatch:
            # No profile URL configured: stay on the update page.
            return redirect("profile_update")
    context = {"form": form, "title": _("Update Profile")}
    context.update(extra_context or {})
    return TemplateResponse(request, template, context)
Profile update form.
def _build_processor(cls, session: AppSession):
    """Create the Processor.

    Registers the web processor for http/https and the FTP processor for
    ftp on a delegating processor.

    Returns:
        Processor: An instance of :class:`.processor.BaseProcessor`.
    """
    web_processor = cls._build_web_processor(session)
    ftp_processor = cls._build_ftp_processor(session)
    delegate_processor = session.factory.new('Processor')
    delegate_processor.register('http', web_processor)
    delegate_processor.register('https', web_processor)
    delegate_processor.register('ftp', ftp_processor)
    # BUGFIX: the docstring promised a Processor, but the original never
    # returned the constructed delegate.
    return delegate_processor
Create the Processor Returns: Processor: An instance of :class:`.processor.BaseProcessor`.
def analyze_async(output_dir, dataset, cloud=False, project_id=None):
    """Analyze data locally or in the cloud with BigQuery, asynchronously.

    Produces analysis used by training. This can take a while, even for
    small datasets.

    Args:
      output_dir: The output directory to use.
      dataset: only CsvDataSet is supported currently.
      cloud: If False, runs analysis locally with Pandas. If True, runs
          analysis in the cloud with BigQuery.
      project_id: Uses BigQuery with this project id. Default is datalab's
          default project id.

    Returns:
      A google.datalab.utils.Job object that can be used to query state
      from or wait.
    """
    import google.datalab.utils as du

    def fn():
        # BUG FIX: the warnings filter previously wrapped only the
        # *creation* of the lambda, so warnings were never actually
        # suppressed while the analysis ran. Run the analysis inside
        # the catch_warnings context instead.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return _analyze(output_dir, dataset, cloud, project_id)

    return du.LambdaJob(fn, job_id=None)
Analyze data locally or in the cloud with BigQuery. Produce analysis used by training. This can take a while, even for small datasets. For small datasets, it may be faster to use local_analysis. Args: output_dir: The output directory to use. dataset: only CsvDataSet is supported currently. cloud: If False, runs analysis locally with Pandas. If Ture, runs analysis in the cloud with BigQuery. project_id: Uses BigQuery with this project id. Default is datalab's default project id. Returns: A google.datalab.utils.Job object that can be used to query state from or wait.
def create_router(self, context, router):
    """Create a router on the Arista switch(es).

    Builds a unique EOS router name from the Neutron router id/name,
    derives a route-distinguisher modifier from its hash, and pushes the
    router to every configured EOS server. In an MLAG pair a single peer
    failure is tolerated; a second consecutive failure (or a failure
    outside MLAG) is logged and re-raised as a plugin RPC error.
    """
    if router:
        router_name = self._arista_router_name(router['id'], router['name'])

        # Deterministic 16-bit value from the router name, used as the
        # route distinguisher modifier on EOS.
        hashed = hashlib.sha256(router_name.encode('utf-8'))
        rdm = str(int(hashed.hexdigest(), 16) % 65536)

        mlag_peer_failed = False
        for s in self._servers:
            try:
                self.create_router_on_eos(router_name, rdm, s)
                mlag_peer_failed = False
            except Exception:
                if self._mlag_configured and not mlag_peer_failed:
                    # First failure in an MLAG pair: the peer may still
                    # succeed, so remember the failure and keep going.
                    mlag_peer_failed = True
                else:
                    msg = (_('Failed to create router %s on EOS') %
                           router_name)
                    LOG.exception(msg)
                    raise arista_exc.AristaServicePluginRpcError(msg=msg)
Creates a router on Arista Switch. Deals with multiple configurations - such as Router per VRF, a router in default VRF, Virtual Router in MLAG configurations
def ls(ctx, name, list_formatted):
    """List AutoScaling groups (all groups when ``name`` is ``"*"``)."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    client = session.client('autoscaling')
    if name == "*":
        groups = client.describe_auto_scaling_groups()
    else:
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[name])
    lines = format_output(groups, list_formatted)
    click.echo('\n'.join(lines))
List AutoScaling groups
def create_tc_pool(self, zone_name, owner_name, ttl, pool_info, rdata_info, backup_record):
    """Create a new TC Pool.

    Arguments:
        zone_name -- The zone that contains the RRSet. Trailing dot optional.
        owner_name -- The owner name for the RRSet (relative without a
            trailing dot, absolute with one).
        ttl -- The TTL value for the RRSet.
        pool_info -- dict of information about the pool.
        rdata_info -- dict mapping the pool's A/CNAME records to their
            rdataInfo.
        backup_record -- dict describing the all-fail backup record
            (keys: rdata, failoverDelay).
    """
    rrset = self._build_tc_rrset(backup_record, pool_info, rdata_info, ttl)
    endpoint = "/v1/zones/" + zone_name + "/rrsets/A/" + owner_name
    return self.rest_api_connection.post(endpoint, json.dumps(rrset))
Creates a new TC Pool. Arguments: zone_name -- The zone that contains the RRSet. The trailing dot is optional. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The updated TTL value for the RRSet. pool_info -- dict of information about the pool rdata_info -- dict of information about the records in the pool. The keys in the dict are the A and CNAME records that make up the pool. The values are the rdataInfo for each of the records backup_record -- dict of information about the backup (all-fail) records in the pool. There are two key/value in the dict: rdata - the A or CNAME for the backup record failoverDelay - the time to wait to fail over (optional, defaults to 0)
def exception_to_unicode(e, traceback=False):
    """Convert an `Exception` to a unicode string.

    The representation contains the class name and message; with
    ``traceback=True`` the last traceback (minus its final two lines)
    is prepended.
    """
    message = '%s: %s' % (e.__class__.__name__, to_unicode(e))
    if not traceback:
        return message
    from docido_sdk.toolbox import get_last_traceback
    traceback_only = get_last_traceback().split('\n')[:-2]
    return '\n%s\n%s' % (to_unicode('\n'.join(traceback_only)), message)
Convert an `Exception` to an `unicode` object. In addition to `to_unicode`, this representation of the exception also contains the class name and optionally the traceback.
def from_settings(settings):
    """Factory returning a declared RabbitMQ channel built from settings.

    :param settings: settings object providing RABBITMQ_CONNECTION_TYPE
        (one of `blocking`, `libev`, `select`, `tornado`, `twisted`),
        RABBITMQ_QUEUE_NAME and RABBITMQ_CONNECTION_PARAMETERS (a dict
        passed to ``pika.ConnectionParameters``).
    :return: Channel object with the queue declared durable.
    """
    connection_type = settings.get('RABBITMQ_CONNECTION_TYPE',
                                   RABBITMQ_CONNECTION_TYPE)
    queue_name = settings.get('RABBITMQ_QUEUE_NAME', RABBITMQ_QUEUE_NAME)
    connection_parameters = settings.get('RABBITMQ_CONNECTION_PARAMETERS',
                                         RABBITMQ_CONNECTION_PARAMETERS)

    connection_classes = {
        'blocking': pika.BlockingConnection,
        'libev': pika.LibevConnection,
        'select': pika.SelectConnection,
        'tornado': pika.TornadoConnection,
        'twisted': pika.TwistedConnection,
    }
    connection_class = connection_classes[connection_type]
    connection = connection_class(
        pika.ConnectionParameters(**connection_parameters))

    channel = connection.channel()
    channel.queue_declare(queue=queue_name, durable=True)
    return channel
Factory method that returns an instance of channel :param str connection_type: This field can be `blocking` `asyncore`, `libev`, `select`, `tornado`, or `twisted` See pika documentation for more details: TODO: put pika url regarding connection type Parameters is a dictionary that can include the following values: :param str host: Hostname or IP Address to connect to :param int port: TCP port to connect to :param str virtual_host: RabbitMQ virtual host to use :param pika.credentials.Credentials credentials: auth credentials :param int channel_max: Maximum number of channels to allow :param int frame_max: The maximum byte size for an AMQP frame :param int heartbeat_interval: How often to send heartbeats :param bool ssl: Enable SSL :param dict ssl_options: Arguments passed to ssl.wrap_socket as :param int connection_attempts: Maximum number of retry attempts :param int|float retry_delay: Time to wait in seconds, before the next :param int|float socket_timeout: Use for high latency networks :param str locale: Set the locale value :param bool backpressure_detection: Toggle backpressure detection :return: Channel object
def state(self, states=None):
    """Filter the contained nodes by state (case-insensitive).

    :param states: States to keep; when empty or ``None``, no filtering
        is applied.
    :type states: ``list``
    :return: ``self``, with ``self.nodes`` reduced to matching nodes.
    """
    if not states:
        return self
    wanted = [s.lower() for s in states]
    self.nodes = [node for node in self.nodes
                  if node.state.lower() in wanted]
    return self
Filter by state.

:param states: States to filter by (case-insensitive).
:type states: ``list``

:return: This collection, with ``self.nodes`` reduced to the matching
    :class:`Node` objects.
:rtype: ``self``
def equal(list1, list2):
    """Elementwise equality of two broadcast-zipped sequences.

    Returns a list of booleans, one per zipped pair, marking which pairs
    compare equal.
    """
    result = []
    for left, right in broadcast_zip(list1, list2):
        result.append(left == right)
    return result
Compare two sequences elementwise (zipped with broadcasting), returning
a list of booleans marking which pairs are equal.
def _newline_tokenize(self, data):
    """Split tag-free text into DATA and NEWLINE tokens.

    Given a string with no tags, returns a list of NEWLINE and DATA
    tokens such that concatenating their data reproduces the original
    string.
    """
    tokens = []
    chunks = data.split('\n')
    last_index = len(chunks) - 1
    for index, chunk in enumerate(chunks):
        if chunk:
            tokens.append((self.TOKEN_DATA, None, None, chunk))
        # A newline separates every chunk except after the last one.
        if index < last_index:
            tokens.append((self.TOKEN_NEWLINE, None, None, '\n'))
    return tokens
Given a string that does not contain any tags, this function will return a list of NEWLINE and DATA tokens such that if you concatenate their data, you will have the original string.
def set_dwelling_current(self, settings):
    """Update the per-axis dwelling (idle/holding) current.

    settings: dict mapping axis name ('X', 'Y', 'Z', 'A', 'B' or 'C') to
    amperage (float). Values are merged into the 'now' settings; only
    axes that are known to be inactive AND whose active current differs
    from the requested value are re-sent to the hardware.
    """
    self._dwelling_current_settings['now'].update(settings)

    # Only push currents for axes that are (a) explicitly inactive
    # (`is False` distinguishes from a missing/unknown axis) and
    # (b) not already at the requested amperage.
    dwelling_axes_to_update = {
        axis: amps
        for axis, amps in self._dwelling_current_settings['now'].items()
        if self._active_axes.get(axis) is False
        if self.current[axis] != amps
    }

    if dwelling_axes_to_update:
        self._save_current(dwelling_axes_to_update, axes_active=False)
Sets the amperage of each motor for when it is dwelling. Values are initialized from the `robot_config.log_current` values, and can then be changed through this method by other parts of the API. For example, `Pipette` setting the dwelling-current of it's pipette, depending on what model pipette it is. settings Dict with axes as valies (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C') and floating point number for current (generally between 0.1 and 2)
def create_delete_model(record):
    """Create an S3 model for a bucket-deletion CloudWatch event record."""
    arn = f"arn:aws:s3:::{cloudwatch.filter_request_parameters('bucketName', record)}"
    LOG.debug(f'[-] Deleting Dynamodb Records. Hash Key: {arn}')

    model_kwargs = dict(
        arn=arn,
        principalId=cloudwatch.get_principal(record),
        userIdentity=cloudwatch.get_user_identity(record),
        accountId=record['account'],
        eventTime=record['detail']['eventTime'],
        BucketName=cloudwatch.filter_request_parameters('bucketName', record),
        Region=cloudwatch.get_region(record),
        Tags={},
        configuration={},
        eventSource=record['detail']['eventSource'],
        version=VERSION,
    )
    return CurrentS3Model(**model_kwargs)
Create an S3 model from a record.
def get_total_ram():
    """The total amount of system RAM in bytes.

    This is what is reported by the OS via /proc/meminfo, and may be
    overcommitted when there are multiple containers hosted on the same
    machine.

    Raises:
        ValueError: if the MemTotal line has an unexpected format.
        NotImplementedError: if no MemTotal line is found.
    """
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            # BUG FIX: the original unpacked every non-empty line into
            # exactly three fields, which crashes on meminfo lines with
            # two fields (e.g. "HugePages_Total: 0"); only parse the
            # MemTotal line.
            if not line.startswith('MemTotal:'):
                continue
            fields = line.split()
            # Explicit check instead of `assert`, which is stripped
            # when Python runs with -O. /proc/meminfo reports kB.
            if len(fields) != 3 or fields[2] != 'kB':
                raise ValueError('Unexpected MemTotal line: %r' % line)
            return int(fields[1]) * 1024
    raise NotImplementedError()
The total amount of system RAM in bytes. This is what is reported by the OS, and may be overcommitted when there are multiple containers hosted on the same machine.
def read_byte_data(self, i2c_addr, register, force=None):
    """Read a single byte from a designated register.

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Register to read
    :type register: int
    :param force: passed through to the address-selection logic
    :type force: Boolean
    :return: Read byte value
    :rtype: int
    """
    self._set_address(i2c_addr, force=force)
    # Build an SMBus ioctl request: read one data byte from `register`.
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
    )
    ioctl(self.fd, I2C_SMBUS, msg)
    return msg.data.contents.byte
Read a single byte from a designated register. :param i2c_addr: i2c address :type i2c_addr: int :param register: Register to read :type register: int :param force: :type force: Boolean :return: Read byte value :rtype: int
def update_nodes(nodes, **kwargs):
    """Update multiple nodes in a single flush.

    New attributes present on a node are added; absent attributes are
    not removed.

    returns: a list of updated nodes
    """
    user_id = kwargs.get('user_id')
    updated_nodes = [update_node(n, flush=False, user_id=user_id)
                     for n in nodes]
    # One flush for the whole batch rather than per node.
    db.DBSession.flush()
    return updated_nodes
Update multiple nodes. If new attributes are present, they will be added to the node. The non-presence of attributes does not remove them. %TODO:merge this with the 'update_nodes' functionality in the 'update_netework' function, so we're not duplicating functionality. D.R.Y! returns: a list of updated nodes
def fit_transform(self, *args, **kwargs):
    """Fit on the given inputs, then transform them.

    Simply chains ``fit`` and ``transform`` with identical arguments.

    Args:
        args: positional arguments (can be anything)
        kwargs: keyword arguments (can be anything)

    Returns:
        dict: output of ``transform``
    """
    self.fit(*args, **kwargs)
    transformed = self.transform(*args, **kwargs)
    return transformed
Performs fit followed by transform. This method simply combines fit and transform. Args: args: positional arguments (can be anything) kwargs: keyword arguments (can be anything) Returns: dict: output
def _has_fr_route(self):
    """Encapsulate the rules for whether the request targets a
    Flask-RESTful endpoint."""
    if self._should_use_fr_error_handler():
        return True
    rule = request.url_rule
    if rule is None:
        # 404s have no matched rule.
        return False
    return self.owns_endpoint(rule.endpoint)
Encapsulating the rules for whether the request was to a Flask endpoint
def dirty_fields(self):
    """Return the names of fields that are dirty.

    A field is dirty when it is absent from the hydrated snapshot or its
    current value differs from the snapshot. For new models all fields
    are considered dirty.

    :return: list
    """
    snapshot = self._original
    changed = []
    for name in self.all_fields:
        if name not in snapshot or snapshot[name] != getattr(self, name):
            changed.append(name)
    return changed
Return an array of field names that are dirty Dirty means if a model was hydrated first from the store & then had field values changed they are now considered dirty. For new models all fields are considered dirty. :return: list
def get_known_topics(self, lang):
    """Return the titles of the topics the user has learned in ``lang``."""
    skills = self.user_data.language_data[lang]['skills']
    return [skill['title'] for skill in skills if skill['learned']]
Return the topics learned by a user in a language.
def set_duration(self, duration):
    """Sets the assessment duration.

    arg:    duration (osid.calendaring.Duration): assessment duration
    raise:  InvalidArgument - ``duration`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.get_duration_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_duration(
            duration, self.get_duration_metadata()):
        raise errors.InvalidArgument()
    # Persist the duration as a plain dict of its components.
    # NOTE(review): the local name `map` shadows the builtin; kept
    # unchanged for byte-compatibility.
    map = dict()
    map['days'] = duration.days
    map['seconds'] = duration.seconds
    map['microseconds'] = duration.microseconds
    self._my_map['duration'] = map
Sets the assessment duration. arg: duration (osid.calendaring.Duration): assessment duration raise: InvalidArgument - ``duration`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def authGenders(self, countsOnly = False, fractionsMode = False, _countsTuple = False):
    """Map author genders to names, counts or fractions.

    With no flags set, returns the raw dict from ``recordGenders``
    mapping author names to 'Male'/'Female'/'Unknown'. With
    ``countsOnly`` the counts per gender are returned instead; with
    ``fractionsMode`` (which supersedes ``countsOnly``) each count is
    divided by the total number of authors. ``_countsTuple`` returns the
    three values as a (Male, Female, Unknown) tuple instead of a dict.

    NOTE(review): in ``fractionsMode`` a record with zero authors would
    divide by zero -- presumably records always carry authors; confirm.
    """
    authDict = recordGenders(self)
    if _countsTuple or countsOnly or fractionsMode:
        rawList = list(authDict.values())
        countsList = []
        for k in ('Male','Female','Unknown'):
            countsList.append(rawList.count(k))
        if fractionsMode:
            # Replace each count with count/total while preserving order:
            # pop from the front, append the fraction at the back; after
            # three iterations the list holds the three fractions.
            tot = sum(countsList)
            for i in range(3):
                countsList.append(countsList.pop(0) / tot)
        if _countsTuple:
            return tuple(countsList)
        else:
            return {'Male' : countsList[0], 'Female' : countsList[1], 'Unknown' : countsList[2]}
    else:
        return authDict
Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors. # Parameters _countsOnly_ : `optional bool` > Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names _fractionsMode_ : `optional bool` > Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_ # Returns `dict[str:str or int]` > The mapping of genders to author's names or counts
def translate_gitlab_exception(func):
    """Decorator that catches GitLab-specific exceptions and re-raises
    them as GitClientError exceptions (mapped by HTTP status code)."""
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except gitlab.GitlabError as err:
            code = err.response_code
            if code == 401:
                exc_class = UnauthorizedError
            elif code == 404:
                exc_class = NotFoundError
            else:
                exc_class = GitClientError
            raise exc_class(str(err), status_code=code)
    return _wrapper
Decorator to catch GitLab-specific exceptions and raise them as GitClientError exceptions.
def _get_color_from_config(config, option):
    """Get ``option`` from the COLOR_SECTION of the config.

    Returns None when the value is absent; otherwise parses it as a raw
    string literal, allowing escape sequences in the egrc.
    """
    if config.has_option(COLOR_SECTION, option):
        raw_value = config.get(COLOR_SECTION, option)
        return ast.literal_eval(raw_value)
    return None
Helper method to uet an option from the COLOR_SECTION of the config. Returns None if the value is not present. If the value is present, it tries to parse the value as a raw string literal, allowing escape sequences in the egrc.
def append(self, clause):
    """Add one more clause to the CNF formula.

    Additionally updates ``self.nv``, the number of variables used in
    the formula.

    :param clause: a new clause to add.
    :type clause: list(int)
    """
    highest = self.nv
    for literal in clause:
        magnitude = abs(literal)
        if magnitude > highest:
            highest = magnitude
    self.nv = highest
    self.clauses.append(clause)
Add one more clause to CNF formula. This method additionally updates the number of variables, i.e. variable ``self.nv``, used in the formula. :param clause: a new clause to add. :type clause: list(int) .. code-block:: python >>> from pysat.formula import CNF >>> cnf = CNF(from_clauses=[[-1, 2], [3]]) >>> cnf.append([-3, 4]) >>> print cnf.clauses [[-1, 2], [3], [-3, 4]]
def update_unit(self, unit_id, unit_dict):
    """Updates a unit via a PUT request.

    :param unit_id: the unit id
    :param unit_dict: dict
    :return: dict
    """
    return self._create_put_request(
        resource=UNITS,
        billomat_id=unit_id,
        send_data=unit_dict,
    )
Updates a unit

:param unit_id: the unit id
:param unit_dict: dict
:return: dict
def save_vip_request(self, vip_request):
    """POST a single vip_request to the v3 vip-request endpoint.

    param vip_request: vip_request object
    """
    uri = 'api/v3/vip-request/'
    payload = {'vips': [vip_request]}
    return super(ApiVipRequest, self).post(uri, payload)
Method to save vip request param vip_request: vip_request object
def hardware_info(self, mask=0xFFFFFFFF):
    """Return the J-Link's 32 hardware-information words.

    The words describe target power consumption and only have
    significance if the J-Link is powering the target (word 0: powered
    via J-Link; word 1: overcurrent bitfield; words 2-4: consumption
    and peaks in mA).

    Args:
      self (JLink): the ``JLink`` instance
      mask (int): bit mask to decide which hardware information words
        are returned (defaults to all the words).

    Returns:
      List of 32 integer bitfields.

    Raises:
      JLinkException: on hardware error.
    """
    # 32 output words, allocated as a C array and filled in by the DLL.
    buf = (ctypes.c_uint32 * 32)()
    res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))
    if res != 0:
        raise errors.JLinkException(res)
    return list(buf)
Returns a list of 32 integer values corresponding to the bitfields specifying the power consumption of the target. The values returned by this function only have significance if the J-Link is powering the target. The words, indexed, have the following significance: 0. If ``1``, target is powered via J-Link. 1. Overcurrent bitfield: 0: No overcurrent. 1: Overcurrent happened. 2ms @ 3000mA 2: Overcurrent happened. 10ms @ 1000mA 3: Overcurrent happened. 40ms @ 400mA 2. Power consumption of target (mA). 3. Peak of target power consumption (mA). 4. Peak of target power consumption during J-Link operation (mA). Args: self (JLink): the ``JLink`` instance mask (int): bit mask to decide which hardware information words are returned (defaults to all the words). Returns: List of bitfields specifying different states based on their index within the list and their value. Raises: JLinkException: on hardware error.
def unregister(self, id):
    """Remove the service with id `id` from the service registry.

    Logs a warning when the delete result does not report exactly one
    deleted row.
    """
    result = self.rr.table(self.table).get(id).delete().run()
    expected = {
        'deleted': 1, 'errors': 0, 'inserted': 0,
        'replaced': 0, 'skipped': 0, 'unchanged': 0}
    if result != expected:
        # FIX: `Logger.warn` is a deprecated alias; use `warning`.
        self.logger.warning(
            'unexpected result attempting to delete id=%s from '
            'rethinkdb services table: %s', id, result)
Remove the service with id `id` from the service registry.
def common_update_sys(self):
    """Update the system package index via apt-get.

    Best-effort: prints the error and carries on if the update fails,
    but only reports success when the command actually succeeded.
    """
    try:
        sudo('apt-get update -y --fix-missing')
    except Exception as e:
        print(e)
    else:
        # BUG FIX: the success message used to print even when the
        # update raised; report it only on success.
        print(green('System package is up to date.'))
    print()
update system package
def _stamp_and_update_hook(method, dependencies, stampfile, func, *args, **kwargs):
    """Write the stamp, then call ``update_stampfile_hook`` on ``method``.

    Returns whatever the stamped call produced.
    """
    outcome = _stamp(stampfile, func, *args, **kwargs)
    method.update_stampfile_hook(dependencies)
    return outcome
Write stamp and call update_stampfile_hook on method.
def install(self, opener):
    """Install an opener.

    Arguments:
        opener (`Opener`): an `Opener` instance, or a callable that
            returns an opener instance.

    Note:
        May be used as a class decorator. For example::

            registry = Registry()
            @registry.install
            class ArchiveOpener(Opener):
                protocols = ['zip', 'tar']
    """
    _opener = opener if isinstance(opener, Opener) else opener()
    # FIX: validate explicitly instead of with `assert`, which is
    # stripped when Python runs with -O.
    if not isinstance(_opener, Opener):
        raise TypeError("Opener instance required")
    if not _opener.protocols:
        raise ValueError("must list one or more protocols")
    for protocol in _opener.protocols:
        self._protocols[protocol] = _opener
    # Return the original argument so decorator usage keeps the class.
    return opener
Install an opener. Arguments: opener (`Opener`): an `Opener` instance, or a callable that returns an opener instance. Note: May be used as a class decorator. For example:: registry = Registry() @registry.install class ArchiveOpener(Opener): protocols = ['zip', 'tar']
def install_from_pypi(context):
    """Attempts to install your package from pypi (or a custom index)."""
    tmp_dir = venv.create_venv()
    install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)

    package_index = 'pypi'
    if context.pypi:
        # BUG FIX: the original appended '-i <url>' with no leading
        # space, fusing the flag onto the module name and breaking the
        # pip command line.
        install_cmd += ' -i %s' % context.pypi
        package_index = context.pypi

    try:
        result = shell.dry_run(install_cmd, context.dry_run)
        if not context.dry_run and not result:
            log.error(
                'Failed to install %s from %s',
                context.module_name,
                package_index
            )
        else:
            log.info(
                'Successfully installed %s from %s',
                context.module_name,
                package_index
            )
    except Exception as e:
        error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
        log.exception(error_msg)
        raise Exception(error_msg, e)
Attempts to install your package from pypi.
def run_git_shell(cls, cmd, cwd=None):
    """Run a git shell command and return its decoded output.

    @param cmd: Command to be executed.
    @type cmd: str
    @param cwd: Working directory.
    @type cwd: str
    @rtype: str
    @return: Output of the command.
    @raise CalledProcessError: if the command exits non-zero.
    """
    process = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
    raw_output, _ = process.communicate()
    output = cls.decode_git_output(raw_output)
    if process.returncode:
        # Python <= 2.6's CalledProcessError has no `output` argument.
        if sys.version_info > (2, 6):
            raise CalledProcessError(returncode=process.returncode,
                                     cmd=cmd, output=output)
        raise CalledProcessError(returncode=process.returncode, cmd=cmd)
    return output
Runs git shell command, reads output and decodes it into unicode string. @param cmd: Command to be executed. @type cmd: str @type cwd: str @param cwd: Working directory. @rtype: str @return: Output of the command. @raise CalledProcessError: Raises exception if return code of the command is non-zero.
def write_sample(binary, payload, path, filename):
    """Write a sample to the file system.

    Args:
        binary (bool): True if it's a binary file
        payload: payload of sample, base64-encoded when binary
        path (string): directory to write into (created if missing)
        filename (string): name of file
    """
    if not os.path.exists(path):
        os.makedirs(path)
    sample_path = os.path.join(path, filename)
    if binary:
        mode, data = "wb", base64.b64decode(payload)
    else:
        mode, data = "w", payload
    with open(sample_path, mode) as f:
        f.write(data)
This function writes a sample on file system. Args: binary (bool): True if it's a binary file payload: payload of sample, in base64 if it's a binary path (string): path of file filename (string): name of file hash_ (string): file hash
def canonical_uri_path(self):
    """The canonicalized URI path from the request (computed lazily and
    cached on the instance)."""
    cached = getattr(self, "_canonical_uri_path", None)
    if cached is None:
        cached = get_canonical_uri_path(self.uri_path)
        self._canonical_uri_path = cached
    return cached
The canonicalized URI path from the request.
def u2handlers(self):
    """Get a collection of urllib handlers.

    @return: A list of handlers to be installed in the opener.
    @rtype: [Handler,...]
    """
    return [u2.ProxyHandler(self.proxy)]
Get a collection of urllib handlers. @return: A list of handlers to be installed in the opener. @rtype: [Handler,...]
def _calc_adu(self):
    """Compute the ADU, then apply any Demand Adjustment Factors (DAFs).

    DAFs that apply directly to this buffer multiply its ADU; DAFs
    flagged for explosion instead propagate the demand *increase* down
    to component buffers.
    """
    res = super()._calc_adu()
    self.ensure_one()
    # DAFs applying directly to this buffer: multiply them together and
    # scale the ADU by the product.
    dafs_to_apply = self.env['ddmrp.adjustment'].search(
        self._daf_to_apply_domain())
    if dafs_to_apply:
        daf = 1
        values = dafs_to_apply.mapped('value')
        for val in values:
            daf *= val
        prev = self.adu
        self.adu *= daf
        _logger.debug(
            "DAF=%s applied to %s. ADU: %s -> %s" % (daf, self.name, prev, self.adu))
    # DAFs to explode: push only the increased portion of the demand
    # (prev * value - prev) to the components.
    dafs_to_explode = self.env['ddmrp.adjustment'].search(
        self._daf_to_apply_domain(False))
    for daf in dafs_to_explode:
        prev = self.adu
        increased_demand = prev * daf.value - prev
        self.explode_demand_to_components(
            daf, increased_demand, self.product_uom)
    return res
Apply DAFs if existing for the buffer.
def human_time(seconds):
    """Return a human-friendly time string that is always exactly 6
    characters long.

    Depending on the number of seconds given, can be one of::

        1w 3d
        2d 4h
        1h 5m
        1m 4s
          15s

    Parameters
    ----------
    seconds : int
        The number of seconds to represent

    Returns
    -------
    time : str
        A 6-character human-friendly representation.
    """
    seconds = int(seconds)
    if seconds < 60:
        return ' {0:2d}s'.format(seconds)

    units = [
        ('y', 60 * 60 * 24 * 7 * 52),
        ('w', 60 * 60 * 24 * 7),
        ('d', 60 * 60 * 24),
        ('h', 60 * 60),
        ('m', 60),
        ('s', 1),
    ]
    # Walk adjacent unit pairs; render the first big unit that fits plus
    # the remainder in the next-smaller unit.
    for (big_suffix, big), (small_suffix, small) in zip(units, units[1:]):
        if seconds >= big:
            return '{0:2d}{1}{2:2d}{3}'.format(
                seconds // big, big_suffix,
                (seconds % big) // small, small_suffix)
    return ' ~inf'
Returns a human-friendly time string that is always exactly 6 characters long. Depending on the number of seconds given, can be one of:: 1w 3d 2d 4h 1h 5m 1m 4s 15s Will be in color if console coloring is turned on. Parameters ---------- seconds : int The number of seconds to represent Returns ------- time : str A human-friendly representation of the given number of seconds that is always exactly 6 characters.
def save_id_to_path_mapping(self):
    """Save the submission-id -> original-filename mapping.

    The mapping is written as a local CSV file and then copied into the
    target directory with gsutil. Does nothing when the mapping is empty.
    """
    if not self.id_to_path_mapping:
        return
    with open(self.local_id_to_path_mapping_file, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'path'])
        for sub_id, sub_path in sorted(iteritems(self.id_to_path_mapping)):
            writer.writerow([sub_id, sub_path])
    cmd = ['gsutil', 'cp', self.local_id_to_path_mapping_file,
           os.path.join(self.target_dir, 'id_to_path_mapping.csv')]
    if subprocess.call(cmd) != 0:
        logging.error('Can\'t copy id_to_path_mapping.csv to target directory')
Saves mapping from submission IDs to original filenames. This mapping is saved as CSV file into target directory.
def send_raw_email(self, raw_message, source=None, destinations=None):
    """Send an email whose header and content are fully client-specified.

    Useful for multipart MIME emails with attachments or inline content;
    the raw text must comply with Internet email standards.

    :type raw_message: string
    :param raw_message: The raw text of the message (base64-encoded here
        before sending).
    :type source: string
    :param source: The sender's email address (optional; takes precedence
        over any Return-Path header in the raw message).
    :type destinations: list of strings or string
    :param destinations: A list of destinations for the message (optional).
    """
    params = {'RawMessage.Data': base64.b64encode(raw_message)}
    if source:
        params['Source'] = source
    if destinations:
        self._build_list_params(params, destinations, 'Destinations.member')
    return self._make_request('SendRawEmail', params)
Sends an email message, with header and content specified by the client. The SendRawEmail action is useful for sending multipart MIME emails, with attachments or inline content. The raw text of the message must comply with Internet email standards; otherwise, the message cannot be sent. :type source: string :param source: The sender's email address. Amazon's docs say: If you specify the Source parameter, then bounce notifications and complaints will be sent to this email address. This takes precedence over any Return-Path header that you might include in the raw text of the message. :type raw_message: string :param raw_message: The raw text of the message. The client is responsible for ensuring the following: - Message must contain a header and a body, separated by a blank line. - All required header fields must be present. - Each part of a multipart MIME message must be formatted properly. - MIME content types must be among those supported by Amazon SES. Refer to the Amazon SES Developer Guide for more details. - Content must be base64-encoded, if MIME requires it. :type destinations: list of strings or string :param destinations: A list of destinations for the message.
def resolve_alias(self, path_str):
    """Return the actual command name for ``path_str`` using the alias
    mapping, or ``path_str`` itself when no alias is defined."""
    # dict.get avoids the double lookup of `in` followed by `[]`.
    return self.aliases.get(path_str, path_str)
Returns the actual command name. Uses the alias mapping.
def _bottom(self):
    """Index of the row following the last row of the range."""
    extents = self._extents
    # extents is (left, top, width, height)-style; bottom = top + height.
    return extents[1] + extents[3]
Index of row following last row of range
def decrypt(key, ciphertext, shift_function=shift_case_english):
    """Decrypt Shift-enciphered ``ciphertext`` using ``key``.

    Args:
        key (int): The shift to use
        ciphertext (iterable): The symbols to decrypt
        shift_function (function (shift, symbol)): Shift function to
            apply to each symbol in the ciphertext

    Returns:
        Decrypted ciphertext, list of plaintext symbols
    """
    plaintext = []
    for symbol in ciphertext:
        plaintext.append(shift_function(key, symbol))
    return plaintext
Decrypt Shift enciphered ``ciphertext`` using ``key``.

Examples:
    >>> ''.join(decrypt(3, "KHOOR"))
    'HELLO'

    >>> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)
    [0xde, 0xad, 0xbe, 0xef]

Args:
    key (int): The shift to use
    ciphertext (iterable): The symbols to decrypt
    shift_function (function (shift, symbol)): Shift function to apply to
        symbols in the ciphertext

Returns:
    Decrypted ciphertext, list of plaintext symbols
def search_indicators_page(self, search_term=None, enclave_ids=None, from_time=None, to_time=None, indicator_types=None, tags=None, excluded_tags=None, page_size=None, page_number=None):
    """Search for a single page of indicators containing a search term.

    :param str search_term: The term to search for (at least 3 chars,
        or empty for no term filter).
    :param list(str) enclave_ids: restrict to these enclaves (optional).
    :param int from_time: start of time window, ms since epoch (optional).
    :param int to_time: end of time window, ms since epoch (optional).
    :param list(str) indicator_types: indicator types to filter by.
    :param list(str) tags: only indicators with ALL of these tags.
    :param list(str) excluded_tags: exclude indicators with ANY of these.
    :param int page_size: size of the page to return.
    :param int page_number: page number to get.
    :return: a |Page| of |Indicator| objects.
    """
    request_body = {'searchTerm': search_term}
    query_params = {
        'enclaveIds': enclave_ids,
        'from': from_time,
        'to': to_time,
        'entityTypes': indicator_types,
        'tags': tags,
        'excludedTags': excluded_tags,
        'pageSize': page_size,
        'pageNumber': page_number,
    }
    response = self._client.post("indicators/search",
                                 params=query_params,
                                 data=json.dumps(request_body))
    return Page.from_dict(response.json(), content_type=Indicator)
Search for indicators containing a search term. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict to indicators found in reports in specific enclaves (optional - by default reports from all of the user's enclaves are used) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :param int page_number: the page number to get. :param int page_size: the size of the page to be returned. :return: a |Page| of |Indicator| objects.