code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def data_fetch(self, url, task): self.on_fetch('data', task) result = {} result['orig_url'] = url result['content'] = dataurl.decode(url) result['headers'] = {} result['status_code'] = 200 result['url'] = url result['cookies'] = {} result['time'] =...
A fake fetcher for dataurl
def _qnwcheb1(n, a, b): nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n)) t1 = np.arange(1, n+1) - 0.5 t2 = np.arange(0.0, n, 2) t3 = np.concatenate((np.array([1.0]), -2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2)))) weights = ((b-a)/n)*np.cos(np.p...
Compute univariate Gauss-Chebyshev quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes no...
def pickle_dump(self): if self.has_chrooted: warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted) return -1 protocol = self.pickle_protocol with FileLock(self.pickle_file): with AtomicFile(self.pickle_file, mode="wb") as fh: ...
Save the status of the object in pickle format. Returns 0 if success
def rpc_get_refactor_options(self, filename, start, end=None): try: from elpy import refactor except: raise ImportError("Rope not installed, refactorings unavailable") ref = refactor.Refactor(self.project_root, filename) return ref.get_refactor_options(start, end)
Return a list of possible refactoring options. This list will be filtered depending on whether it's applicable at the point START and possibly the region between START and END.
def claim_new(self) -> Iterable[str]: new_subdir = self._paths['new'] cur_subdir = self._paths['cur'] for name in os.listdir(new_subdir): new_path = os.path.join(new_subdir, name) cur_path = os.path.join(cur_subdir, name) try: os.rename(new_pat...
Checks for messages in the ``new`` subdirectory, moving them to ``cur`` and returning their keys.
def declare_var(self, key, val):
    """Declare an environment variable; when *val* is None the variable is unset."""
    if val is None:
        line = "unset " + key
    else:
        line = "export {}={}".format(key, str(val))
    self._add(line)
Declare a env variable. If val is None the variable is unset.
def clear(self):
    """Clear the samples.

    Resets all accumulated state by re-running ``__init__`` with the
    current configuration (size, alpha, clock) preserved.
    """
    self.__init__(size=self.size, alpha=self.alpha, clock=self.clock)
Clear the samples.
def check_for_period_error(data, period):
    """Check for Period Error.

    Validates that the requested *period* is not larger than the data set.

    Args:
        data: Sequence of data points.
        period: Window size; coerced to ``int``.

    Raises:
        ValueError: If ``len(data) < period``.
    """
    period = int(period)
    data_len = len(data)
    if data_len < period:
        # ValueError is more precise than the bare Exception previously
        # raised, and any caller catching Exception still catches it.
        raise ValueError("Error: data_len < period")
Check for Period Error. This method checks if the developer is trying to enter a period that is larger than the data set being entered. If that is the case an exception is raised with a custom message that informs the developer that their period is greater than the data set.
def _classification_fetch(self, skip_missing=None): skip_missing = skip_missing if skip_missing else self._kwargs["skip_missing"] new_classifications = [] for a in self._res_list: if a.__class__.__name__ == "Samples": c = a.primary_classification elif a.__...
Turns a list of objects associated with a classification result into a list of Classifications objects. Parameters ---------- skip_missing : `bool` If an analysis was not successful, exclude it, warn, and keep going Returns ------- None, but stores a...
def _init_idxs_strpat(self, usr_hdrs):
    """Record the column indexes whose values will be strings."""
    string_hdrs = self.strpat_hdrs.keys()
    self.idxs_strpat = [
        idx for hdr, idx in self.hdr2idx.items()
        if hdr in usr_hdrs and hdr in string_hdrs]
List of indexes whose values will be strings.
def _pop(self, key, default=None): path = self.__path_of(key) value = None try: raw_value, _ = self.connection.retry(self.connection.get, path) value = self.encoding.decode(raw_value) except self.no_node_error: if default: return defaul...
If ``key`` is present in Zookeeper, removes it from Zookeeper and returns the value. If key is not in Zookeeper and ``default`` argument is provided, ``default`` is returned. If ``default`` argument is not provided, ``KeyError`` is raised. :param key: Key to remove from Zookeeper ...
def remove_tmp_prefix_from_filename(filename):
    """Remove the tmp upload prefix from *filename*; raise if it is absent."""
    prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX
    if not filename.startswith(prefix):
        raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
    return filename[len(prefix):]
Remove tmp prefix from filename.
def _makeResponse(self, urllib2_response): resp = HTTPResponse() resp.body = urllib2_response.read(MAX_RESPONSE_KB * 1024) resp.final_url = urllib2_response.geturl() resp.headers = self._lowerCaseKeys( dict(list(urllib2_response.info().items()))) if hasattr(urllib2_re...
Construct an HTTPResponse from the the urllib response. Attempt to decode the response body from bytes to str if the necessary information is available.
def _get_regex_pattern(label): parts = _split_by_punctuation.split(label) for index, part in enumerate(parts): if index % 2 == 0: if not parts[index].isdigit() and len(parts[index]) > 1: parts[index] = _convert_word(parts[index]) else: if not parts[index +...
Return a regular expression of the label. This takes care of plural and different kinds of separators.
def _update_failure_type(self): note = JobNote.objects.filter(job=self.job).order_by('-created').first() if note: self.job.failure_classification_id = note.failure_classification.id else: self.job.failure_classification_id = FailureClassification.objects.get(name='not cla...
Updates the failure type of this Note's Job. Set the linked Job's failure type to that of the most recent JobNote or set to Not Classified if there are no JobNotes. This is called when JobNotes are created (via .save()) and deleted (via .delete()) and is used to resolve the FailureCla...
def extract_words(string):
    """Extract all alphabetic syllabified forms from *string*."""
    pattern = r'[%s]+[%s\.]*[%s]+' % (A, A, A)
    return re.findall(pattern, string, flags=FLAGS)
Extract all alphabetic syllabified forms from 'string'.
def set_level(logger, level):
    """Temporarily change the log level of *logger*, restoring it afterwards.

    Parameters
    ----------
    logger : str or logging.Logger
        Logger name or logger whose level to change.
    level : int
        Log level to set.

    NOTE(review): this function yields, so it is presumably decorated with
    ``@contextlib.contextmanager`` at its definition site -- confirm;
    undecorated it returns a plain generator, not a context manager.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    original = logger.level
    logger.setLevel(level)
    try:
        yield
    finally:
        # Restore the previous level even if the managed block raises.
        logger.setLevel(original)
Temporarily change log level of logger. Parameters ---------- logger : str or ~logging.Logger Logger name or logger whose log level to change. level : int Log level to set. Examples -------- >>> with set_level('sqlalchemy.engine', logging.INFO): ... pass # sqlalche...
def proc_check_guard(self, instance, sql): self.open_db_connections(instance, self.PROC_GUARD_DB_KEY) cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY) should_run = False try: cursor.execute(sql, ()) result = cursor.fetchone() should_run = res...
check to see if the guard SQL returns a single column containing 0 or 1 We return true if 1, else False
def get(self):
    """Get one item and prepare to return a lower-ranked item on the next call.

    Returns the literal fallback string "wow" when the container is empty.
    Presumably ``self`` is a deque-like sequence (it calls ``self.rotate``
    and indexes ``self[0]``) -- confirm against the class definition.
    """
    if len(self) < 1:
        return "wow"
    if self.index >= len(self):
        # Wrap the cursor once it has walked past the end.
        self.index = 0
    # Advance by a random step, bounded both by self.step and by the length.
    step = random.randint(1, min(self.step, len(self)))
    res = self[0]
    self.index += step
    self.rotate(step)
    return res
Get one item and prepare to get an item with lower rank on the next call.
def dump(cls, filename, objects, properties, bools, encoding): if encoding is None: encoding = cls.encoding source = cls.dumps(objects, properties, bools) if PY2: source = unicode(source) with io.open(filename, 'w', encoding=encoding) as fd: fd.write(s...
Write serialized objects, properties, bools to file.
def column_vectors(self):
    """The values of the transform as three 2D column vectors."""
    sa, sb, sc, sd, se, sf, _m20, _m21, _m22 = self
    col_x = (sa, sd)
    col_y = (sb, se)
    col_offset = (sc, sf)
    return col_x, col_y, col_offset
The values of the transform as three 2D column vectors
def get_pages(self):
    """Split ``self.get_rows`` into pages of ``self.objects_per_page`` rows.

    Returns:
        list[list]: The rows grouped into pages. Empty input now yields
        ``[]`` (the previous implementation returned ``[[]]``, a single
        empty page).
    """
    pages = []
    page = []
    # NOTE(review): get_rows is accessed without calling it, so it is
    # presumably a property or plain attribute -- confirm.
    for item in self.get_rows:
        page.append(item)
        if len(page) == self.objects_per_page:
            pages.append(page)
            page = []
    if page:
        pages.append(page)
    return pages
returns pages with rows
def is_valid_int(value):
    """Test if *value* can be rendered as a valid int parameter.

    Valid values are the sentinels ``UNKNOWN_VALUE`` and
    ``CURRENT_POSITION``, or anything in the range [0, Parameter.MAX].
    """
    if value in (Parameter.UNKNOWN_VALUE, Parameter.CURRENT_POSITION):
        return True
    return 0 <= value <= Parameter.MAX
Test if value can be rendered out of int.
def parsehttpdate(string_):
    """Parse an HTTP date into a datetime object, or None if unparseable.

    >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
    datetime.datetime(1970, 1, 1, 1, 1, 1)
    """
    try:
        parsed = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
    except ValueError:
        return None
    year, month, day, hour, minute, second = parsed[:6]
    return datetime.datetime(year, month, day, hour, minute, second)
Parses an HTTP date into a datetime object. >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT') datetime.datetime(1970, 1, 1, 1, 1, 1)
def _get_global_color_table(colors):
    """Return a GIF color table sorted in descending order of count.

    Pads the table with zero bytes up to the full power-of-two size
    implied by the color-table-size field.
    """
    table = b''.join(color for color, _count in colors.most_common())
    full_table_size = 2 ** (1 + int(get_color_table_size(len(colors)), 2))
    padding = struct.pack('<{}x'.format(3 * (full_table_size - len(colors))))
    return table + padding
Return a color table sorted in descending order of count.
def store_initial_k2k_session(auth_url, request, scoped_auth_ref, unscoped_auth_ref): keystone_provider_id = request.session.get('keystone_provider_id', None) if keystone_provider_id: return None providers = getattr(scoped_auth_ref, 'service_providers', None) if pro...
Stores session variables if there are k2k service providers This stores variables related to Keystone2Keystone federation. This function gets skipped if there are no Keystone service providers. An unscoped token to the identity provider keystone gets stored so that it can be used to do federated login ...
def authenticate_with_email_and_pwd(user_email, user_password): if user_email is None or user_password is None: raise ValueError( 'Could not authenticate user. Missing username or password') upload_token = uploader.get_upload_token(user_email, user_password) if not upload_token: ...
Authenticate the user by passing the email and password. This function avoids prompting the command line for user credentials and is useful for calling tools programmatically
def dumpfile(self, fd):
    """Dump a file through a Spin instance.

    Starts the instance and enqueues a DumpFile task wrapping *fd*.
    """
    self.start()
    dump = DumpFile(fd)
    self.queue.append(dump)
Dump a file through a Spin instance.
def is_valid(self):
    """Test whether this form is in a valid state for submission.

    A form is valid if all required data has been supplied compliant
    with any constraints.

    Returns:
        bool: False if any element's validity status is not VALID.
    """
    # all() short-circuits on the first invalid element, unlike the
    # previous loop which always scanned the entire map.
    return all(self._validity_map[element] is VALID
               for element in self._validity_map)
Tests if this form is in a valid state for submission. A form is valid if all required data has been supplied compliant with any constraints. return: (boolean) - false if there is a known error in this form, true otherwise raise: OperationFailed - attempt to perform val...
def from_record(self, record):
    """Construct and return a sequenced item object from the given ORM record."""
    field_kwargs = self.get_field_kwargs(record)
    item = self.sequenced_item_class(**field_kwargs)
    return item
Constructs and returns a sequenced item object, from given ORM object.
def get_accelerometer_raw(self):
    """Accelerometer x y z raw data in Gs.

    Returns a deep copy of the most recent valid reading; when the current
    sample is invalid (``_get_raw_data`` returns None) the previously
    cached value is returned instead.
    """
    raw = self._get_raw_data('accelValid', 'accel')
    if raw is not None:
        self._last_accel_raw = raw
    return deepcopy(self._last_accel_raw)
Accelerometer x y z raw data in Gs
def add_dockwidget(self, child): dockwidget, location = child.create_dockwidget() if CONF.get('main', 'vertical_dockwidget_titlebars'): dockwidget.setFeatures(dockwidget.features()| QDockWidget.DockWidgetVerticalTitleBar) self.addDockWidget(loc...
Add QDockWidget and toggleViewAction
def clear_recovery_range(working_dir):
    """Clear out our recovery hint.

    Removes ``<working_dir>/.recovery`` if present. Deleting inside
    try/except closes the TOCTOU race in the previous exists()-then-unlink
    sequence.
    """
    import errno  # local import: the file's top-level imports are outside this view
    recovery_range_path = os.path.join(working_dir, '.recovery')
    try:
        os.unlink(recovery_range_path)
    except OSError as exc:
        # Missing file is fine (another process may have cleared it).
        if exc.errno != errno.ENOENT:
            raise
Clear out our recovery hint
def nearest_neighbour_delta_E(self):
    """Nearest-neighbour interaction contribution to the change in system
    energy if this jump were accepted.

    Returns:
        float: delta E (nearest-neighbour).
    """
    occupation_change = (self.final_site.nn_occupation()
                         - self.initial_site.nn_occupation() - 1)
    return occupation_change * self.nearest_neighbour_energy
Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (nearest-neighbour)
def update_dois(csv_source, write_file=True): _dois_arr = [] _dois_raw = [] with open(csv_source, "r") as f: reader = csv.reader(f) for row in reader: _dois_arr.append(row[0]) for _doi in _dois_arr: _dois_raw.append(_update_doi(_doi)) if write_file: new_fi...
Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs listed in a single column. The output is LiPD-formatted publication data for each entry. :param str csv_source: Local path to CSV file :param bool write_file: Write output data to JSON file (Tr...
def has_previous_assessment_section(self, assessment_section_id):
    """Return True if a previous assessment section exists before the
    section identified by *assessment_section_id*."""
    try:
        self.get_previous_assessment_section(assessment_section_id)
    except errors.IllegalState:
        return False
    return True
Tests if there is a previous assessment section in the assessment following the given assessment section ``Id``. arg: assessment_section_id (osid.id.Id): ``Id`` of the ``AssessmentSection`` return: (boolean) - ``true`` if there is a previous assessment section, ``fals...
def _maybe_null_out(result, axis, mask, min_count=1): if hasattr(axis, '__len__'): raise ValueError('min_count is not available for reduction ' 'with more than one dimensions.') if axis is not None and getattr(result, 'ndim', False): null_mask = (mask.shape[axis] - mask....
xarray version of pandas.core.nanops._maybe_null_out
def rss_create(channel, articles): channel = channel.copy() articles = list(articles) rss = ET.Element('rss') rss.set('version', '2.0') channel_node = ET.SubElement(rss, 'channel') element_from_dict(channel_node, channel, 'title') element_from_dict(channel_node, channel, 'link') element_...
Create RSS xml feed. :param channel: channel info [title, link, description, language] :type channel: dict(str, str) :param articles: list of articles, an article is a dictionary with some \ required fields [title, description, link] and any optional, which will \ result to `<dict_key>dict_value</d...
def zeta(x, context=None):
    """Return the value of the Riemann zeta function on x.

    *x* is implicitly converted to a BigFloat and the mpfr zeta routine is
    applied in *context* (or the current context when None).
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_zeta,
        (BigFloat._implicit_convert(x),),
        context,
    )
Return the value of the Riemann zeta function on x.
def match_process(pid, name, cmdline, exe, cfg): if cfg['selfmon'] and pid == os.getpid(): return True for exe_re in cfg['exe']: if exe_re.search(exe): return True for name_re in cfg['name']: if name_re.search(name): return True for cmdline_re in cfg['cmdl...
Decides whether a process matches with a given process descriptor :param pid: process pid :param exe: process executable :param name: process name :param cmdline: process cmdline :param cfg: the dictionary from processes that describes with the process group we're testing for :return: T...
def _createtoken(self, type_, value, flags=None):
    """Create a token carrying position information.

    Pops the two most recent position marks (end first, then start) off
    the position stack and attaches them as ``[start, end]``.
    """
    assert len(self._positions) >= 2, (type_, value)
    end = self._positions.pop()
    start = self._positions.pop()
    # The previous `pos = None` initializer was dead code and is removed.
    pos = [start, end]
    return token(type_, value, pos, flags)
create a token with position information
def create_reward_encoder():
    """Create TF ops to track and increment the recent cumulative reward.

    Returns:
        tuple: (last_reward, new_reward, update_reward) -- a non-trainable
        float32 variable holding the last reward, a scalar placeholder for
        the incoming value, and the assign op that stores it.
    """
    last_reward = tf.Variable(0, name="last_reward", trainable=False,
                              dtype=tf.float32)
    new_reward = tf.placeholder(shape=[], dtype=tf.float32,
                                name='new_reward')
    update_reward = tf.assign(last_reward, new_reward)
    return last_reward, new_reward, update_reward
Creates TF ops to track and increment recent average cumulative reward.
def hsl_to_rgb(self, h, s, l): h = h % 1.0 s = min(max(s, 0.0), 1.0) l = min(max(l, 0.0), 1.0) if l <= 0.5: m2 = l*(s + 1.0) else: m2 = l + s - l*s m1 = l*2.0 - m2 r = self._hue_to_rgb(m1, m2, h + 1.0/3.0) g = self._hue_to_rgb(m1, m...
Convert a color from HSL color-model to RGB. See also: - http://www.w3.org/TR/css3-color/#hsl-color
def get(self, timeout=None, block=True): _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block) self._lock.acquire() try: if self.closed: raise LatchError() i = len(self._sleeping) if len(self._que...
Return the next enqueued object, or sleep waiting for one. :param float timeout: If not :data:`None`, specifies a timeout in seconds. :param bool block: If :data:`False`, immediately raise :class:`mitogen.core.TimeoutError` if the latch is empty. :raises mi...
def append_field(self, field_name, list_value):
    """Return a copy of this object with `list_value` appended to the
    field named `field_name`."""
    updated = self._single_list_field_operation(
        field_name, list_value, prepend=False)
    return updated
Return a copy of this object with `list_value` appended to the field named `field_name`.
def build_css(minimize=True):
    """Build CSS from SASS.

    Args:
        minimize: When True, emit compressed output; otherwise nested.
    """
    print('Build CSS')
    args = {}
    # The SASS output style maps directly onto the minimize flag.
    args['style'] = 'compressed' if minimize else 'nested'
    cmd = CMD_SASS.format(**args)
    run(cmd)
Builds CSS from SASS.
def format_ubuntu_dialog(df):
    """Format statement/reply pairs from an Ubuntu dialog DataFrame for review.

    For each row, the last turn of ``Context`` becomes the statement and
    the last turn of ``Utterance`` becomes the reply.

    Returns:
        str: blocks of "Statement: ...\\nReply: ...\\n\\n".
    """
    chunks = []
    for _, record in df.iterrows():
        statement = list(split_turns(record.Context))[-1]
        reply = list(split_turns(record.Utterance))[-1]
        chunks.append('Statement: {}\n'.format(statement))
        chunks.append('Reply: {}\n\n'.format(reply))
    # join() replaces the previous quadratic `s +=` string accumulation.
    return ''.join(chunks)
Print statements paired with replies, formatted for easy review
def marquee(text="", width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    Args:
        text (str): Input string.
        width (int): Width of the final output string.
        mark (str): Character(s) used to fill the string.

    >>> marquee('A test', width=40)
    '**************** A test ****************'
    """
    if not text:
        return (mark * width)[:width]
    fill_count = max((width - len(text) - 2) // len(mark) // 2, 0)
    filler = mark * fill_count
    return '{0} {1} {0}'.format(filler, text)
Return the input string centered in a 'marquee'. Args: text (str): Input string width (int): Width of final output string. mark (str): Character used to fill string. :Examples: >>> marquee('A test', width=40) '**************** A test ****************' >>> marquee('A test'...
def dt2str(dt, flagSeconds=True):
    """Convert a datetime object to str; strings are passed through unchanged."""
    if isinstance(dt, str):
        return dt
    fmt = _FMTS if flagSeconds else _FMT
    return dt.strftime(fmt)
Converts datetime object to str if not yet an str.
def get_protein_id_list(df, level=0):
    """Return a complete list of unique shortform protein IDs from a DataFrame.

    Extracts protein IDs from every value of the index at the given
    *level* and deduplicates them.

    Returns:
        list: Unique protein IDs (order unspecified, as before).
    """
    # Accumulate into a set directly instead of building a list and
    # converting afterwards; the result is identical.
    protein_ids = set()
    for value in df.index.get_level_values(level):
        protein_ids.update(get_protein_ids(value))
    return list(protein_ids)
Return a complete list of shortform IDs from a DataFrame Extract all protein IDs from a dataframe from multiple rows containing protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268 Long names (containing species information) are eliminated (split on ' ') and isoforms are removed (sp...
def readInstance(self, key, makeGlyphs=True, makeKerning=True, makeInfo=True): attrib, value = key for instanceElement in self.root.findall('.instances/instance'): if instanceElement.attrib.get(attrib) == value: self._readSingleInstanceElement(instanceElement, makeGlyphs=make...
Read a single instance element. key: an (attribute, value) tuple used to find the requested instance. :: <instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular">
def check_socket(host, port):
    """Return True if *port* is open on *host*.

    Used to check whether the Xenon-GRPC server is running.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(sock):
        return sock.connect_ex((host, port)) == 0
Checks if port is open on host. This is used to check if the Xenon-GRPC server is running.
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """Yield the absolute path of every file within *dir_pathname*.

    :param dir_pathname: The directory to traverse.
    :param recursive: True to walk the directory tree recursively.
    :param topdown: Walk the tree top-down when True.
    :param followlinks: Follow symbolic links when True.

    NOTE(review): ``walk`` here takes (path, recursive, topdown,
    followlinks), so it is presumably a project wrapper rather than
    ``os.walk`` -- confirm; ``absolute_path`` is likewise project-local.
    """
    for root, dirnames, filenames\
        in walk(dir_pathname, recursive, topdown, followlinks):
        for filename in filenames:
            yield absolute_path(os.path.join(root, filename))
Enlists all the files using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Pl...
def convert_time(time): split_time = time.split() try: am_pm = split_time[1].replace('.', '') time_str = '{0} {1}'.format(split_time[0], am_pm) except IndexError: return time try: time_obj = datetime.strptime(time_str, '%I:%M %p') except ValueError: time_obj =...
Convert a time string into 24-hour time.
def _get_elem_names(self):
    """Populate ``self.z_of_element_name`` from ``self.elements_names``.

    NOTE(review): despite the original docstring ("returns ... an element
    name dictionary"), this method returns None and stores the result as
    an attribute instead. ``u.give_zip_element_z_and_names`` apparently
    populates ``u.index_z_for_elements`` as a side effect -- confirm.
    """
    import nuutils as u
    element_name = self.elements_names
    u.give_zip_element_z_and_names(element_name)
    self.z_of_element_name = u.index_z_for_elements
returns for one cycle an element name dictionary.
def convert_ipynbs(directory):
    """Recursively convert every ipynb file under *directory* into an rst
    file in the same directory, skipping checkpoint folders."""
    for root, _subfolders, files in os.walk(os.path.abspath(directory)):
        if ".ipynb_checkpoints" in root:
            continue
        for fname in files:
            if fname.endswith("ipynb"):
                ipynb_to_rst(root, fname)
Recursively converts all ipynb files in a directory into rst files in the same directory.
def extract_sponsor(bill): logger.debug("Extracting Sponsor") sponsor_map = [] sponsor = bill.get('sponsor', None) if sponsor: sponsor_map.append(sponsor.get('type')) sponsor_map.append(sponsor.get('thomas_id')) sponsor_map.append(bill.get('bill_id')) sponsor_map.append(s...
Return a list of the fields we need to map a sponsor to a bill
def print_name_version(self):
    """Print program name and version and exit.

    Output is emitted only when ``self.use_sys`` is set.

    :rtype: int
    """
    if self.use_sys:
        self.print_generic(u"%s v%s" % (self.NAME, aeneas_version))
    return self.exit(self.HELP_EXIT_CODE)
Print program name and version and exit. :rtype: int
def to_ufos( font, include_instances=False, family_name=None, propagate_anchors=True, ufo_module=defcon, minimize_glyphs_diffs=False, generate_GDEF=True, store_editor_state=True, ): builder = UFOBuilder( font, ufo_module=ufo_module, family_name=family_name, ...
Take a GSFont object and convert it into one UFO per master. Takes in data as Glyphs.app-compatible classes, as documented at https://docu.glyphsapp.com/ If include_instances is True, also returns the parsed instance data. If family_name is provided, the master UFOs will be given this name and on...
def get_s3_buckets(api_client, s3_info, s3_params): manage_dictionary(s3_info, 'buckets', {}) buckets = api_client[get_s3_list_region(s3_params['selected_regions'])].list_buckets()['Buckets'] targets = [] for b in buckets: if (b['Name'] in s3_params['skipped_buckets']) or (len(s3_params['checked...
List all available buckets :param api_client: :param s3_info: :param s3_params: :return:
def load_commands_from_entry_point(self, specifier):
    """Load commands defined within a pkg_resources entry point.

    Each entry is a module that is searched for functions decorated with
    the ``subparse.command`` decorator. This operation is not recursive.
    """
    for ep in pkg_resources.iter_entry_points(specifier):
        module = ep.load()
        command.discover_and_call(module, self.command)
Load commands defined within a pkg_resources entry point. Each entry will be a module that should be searched for functions decorated with the :func:`subparse.command` decorator. This operation is not recursive.
def convert_from_sliced_object(data): if data.base is not None and isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray): if not data.flags.c_contiguous: warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended " "due to it will double the...
Fix the memory of multi-dimensional sliced object.
def real_main(start_url=None, ignore_prefixes=None, upload_build_id=None, upload_release_name=None): coordinator = workers.get_coordinator() fetch_worker.register(coordinator) coordinator.start() item = SiteDiff( start_url=start_url, ignore_prefi...
Runs the site_diff.
def out_of_bag_mae(self): if not self._out_of_bag_mae_clean: try: self._out_of_bag_mae = self.test(self.out_of_bag_samples) self._out_of_bag_mae_clean = True except NodeNotReadyToPredict: return return self._out_of_bag_mae.copy()
Returns the mean absolute error for predictions on the out-of-bag samples.
def _update_eof(self):
    """Update the EOF flag.

    Runs the aftermath processing hook, closes the input file, and marks
    end-of-file.
    """
    self._aftermathmp()
    self._ifile.close()
    self._flag_e = True
Update EOF flag.
def get_statements(self):
    """Return all Statements flattened into a single list.

    Returns
    -------
    stmts : list
        All the INDRA Statements in the model, concatenated across the
        per-key lists in ``self.stmts``.
    """
    # Iterate values directly (the keys were unused) and extend instead
    # of building an intermediate list of lists.
    stmts = []
    for stmt_list in self.stmts.values():
        stmts.extend(stmt_list)
    return stmts
Return a list of all Statements in a single list. Returns ------- stmts : list[indra.statements.Statement] A list of all the INDRA Statements in the model.
def order_snapshot_space(self, volume_id, capacity, tier, upgrade, **kwargs): block_mask = 'id,billingItem[location,hourlyFlag],'\ 'storageType[keyName],storageTierLevel,provisionedIops,'\ 'staasVersion,hasEncryptionAtRest' block_volume = self.get_blo...
Orders snapshot space for the given block volume. :param integer volume_id: The id of the volume :param integer capacity: The capacity to order, in GB :param float tier: The tier level of the block volume, in IOPS per GB :param boolean upgrade: Flag to indicate if this order is an upgra...
def loadIntoTextureD3D11_Async(self, textureId, pDstTexture):
    """Helper to copy the bits into an existing D3D11 texture."""
    return self.function_table.loadIntoTextureD3D11_Async(
        textureId, pDstTexture)
Helper function to copy the bits into an existing texture.
def ensure_connected(self):
    """Ensure the database connection is still open, reconnecting if needed.

    Raises:
        DBALConnectionError: if the connection is closed and auto-connect
        is disabled.
    """
    if not self.is_connected():
        if not self._auto_connect:
            raise DBALConnectionError.connection_closed()
        self.connect()
Ensures database connection is still open.
def write(self, s):
    """Forward each non-empty line of *s* to the underlying logger."""
    for line in re.split(r'\n+', s):
        if line:
            self._logger.log(self._level, line)
Write message to logger.
def load_cli(subparsers): for command_name in available_commands(): module = '{}.{}'.format(__package__, command_name) loader, description = _import_loader(module) parser = subparsers.add_parser(command_name, description=description) command = l...
Given a parser, load the CLI subcommands
def _cleandoc(doc):
    """Remove uniform indents from ``doc`` lines that are not empty.

    :returns: Cleaned ``doc``. A doc with no non-empty lines is returned
        unchanged (the previous implementation raised ValueError from
        ``min()`` on an empty sequence).
    """
    lines = doc.split("\n")
    indents = [len(s) - len(s.lstrip(" ")) for s in lines if s != ""]
    indent = min(indents) if indents else 0
    return "\n".join(s[indent:] for s in lines)
Remove uniform indents from ``doc`` lines that are not empty :returns: Cleaned ``doc``
def _getPOS( self, token, onlyFirst = True ):
    """Return the POS tag(s) of the current token.

    With *onlyFirst* (default), returns the POS of the first analysis;
    otherwise returns a list of POS tags over all analyses.
    ANALYSIS and POSTAG are presumably module-level key constants for
    Estnltk-style token dicts -- confirm.
    """
    if onlyFirst:
        return token[ANALYSIS][0][POSTAG]
    else:
        return [ a[POSTAG] for a in token[ANALYSIS] ]
Returns POS of the current token.
def _parse(res, params, n, api, **kwds): cursor = "cursor" in params if not cursor: start = params["start"] if n == 0: return "" _json = res.get('search-results', {}).get('entry', []) while n > 0: n -= params["count"] if cursor: pointer = res['search-resul...
Auxiliary function to download results and parse json.
def get_connection_by_node(self, node): self._checkpid() self.nodes.set_node_name(node) try: connection = self._available_connections.get(node["name"], []).pop() except IndexError: connection = self.make_connection(node) self._in_use_connections.setdefault...
get a connection by node
def rescale_variables( df, variables_include = [], variables_exclude = [] ): variables_not_rescale = variables_exclude variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) ...
Rescale variables in a DataFrame, excluding variables with NaNs and strings, excluding specified variables, and including specified variables.
def helper(self, name, *args):
    """Call a helper in this module.

    Builds an AST call node for ``@dessert_ar._<name>(*args)``, where
    ``@dessert_ar`` is the module alias injected into instrumented code.
    """
    py_name = ast.Name("@dessert_ar", ast.Load())
    attr = ast.Attribute(py_name, "_" + name, ast.Load())
    return ast_Call(attr, list(args), [])
Call a helper in this module.
def belongs_to_module(t, module_name):
    "Check if `t` belongs to `module_name`."
    if hasattr(t, '__func__'):
        return belongs_to_module(t.__func__, module_name)
    module = inspect.getmodule(t)
    if module is None:
        return False
    return module.__name__.startswith(module_name)
Check if `t` belongs to `module_name`.
def update_configuration(self, timeout=-1):
    """Asynchronously applies or re-applies the logical interconnect
    configuration to all managed interconnects.

    Args:
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        The result of the helper update call.
    """
    uri = "{}/configuration".format(self.data["uri"])
    return self._helper.update(None, uri=uri, timeout=timeout)
Asynchronously applies or re-applies the logical interconnect configuration to all managed interconnects. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. R...
def get_structure_by_formula(self, formula, **kwargs): structures = [] sql = 'select file, sg from data where formula="- %s -"' % \ Composition(formula).hill_formula text = self.query(sql).split("\n") text.pop(0) for l in text: if l.strip(): ...
Queries the COD for structures by formula. Requires mysql executable to be in the path. Args: cod_id (int): COD id. kwargs: All kwargs supported by :func:`pymatgen.core.structure.Structure.from_str`. Returns: A list of dict of the format ...
def url2fs(url):
    """Encode a URL to be safe as a filename.

    The extension is preserved; the remainder is encoded with the
    project's safe64 directory encoding.
    """
    uri, extension = posixpath.splitext(url)
    return safe64.dir(uri) + extension
encode a URL to be safe as a filename
def _split_generators(self, dl_manager):
    """Return the test split of Cifar10.

    Args:
        dl_manager: download manager used to fetch and extract the data.

    Returns:
        A single-element list holding the TEST SplitGenerator.
    """
    path = dl_manager.download_and_extract(_DOWNLOAD_URL)
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            num_shards=1,
            gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
    ]
Return the test split of Cifar10. Args: dl_manager: download manager object. Returns: test split.
def remove_service_listener(self, listener):
    """Remove *listener* from the set that is currently listening.

    Cancels every browser registered for ``listener`` and removes it from
    ``self.browsers``. The previous ``del browser`` only unbound the loop
    variable and never actually removed anything from the collection.
    """
    # Iterate over a copy so removal is safe while traversing.
    for browser in list(self.browsers):
        if browser.listener == listener:
            browser.cancel()
            self.browsers.remove(browser)
Removes a listener from the set that is currently listening.
def paste(self, key, data):
    """Paste *data* into the grid starting at the cell *key*.

    Parameters
    ----------
    key: tuple of int
        Top left cell (only the first two elements are used).
    data: str or wx.Bitmap
        Tab separated string of paste data, or a paste data image.
    """
    data_gen = self._get_paste_data_gen(key, data)
    self.grid.actions.paste(key[:2], data_gen, freq=1000)
    self.main_window.grid.ForceRefresh()
Pastes data into grid Parameters ---------- key: 2-Tuple of Integer \tTop left cell data: String or wx.Bitmap \tTab separated string of paste data \tor paste data image
def document_path_path(cls, project, database, document_path):
    """Return a fully-qualified document_path string.

    Expands the Firestore resource template
    ``projects/{project}/databases/{database}/documents/{document_path=**}``
    with the given values.
    """
    return google.api_core.path_template.expand(
        "projects/{project}/databases/{database}/documents/{document_path=**}",
        project=project,
        database=database,
        document_path=document_path,
    )
Return a fully-qualified document_path string.
def _BinsToQuery(self, bins, column_name): result = [] for prev_b, next_b in zip([0] + bins[:-1], bins[:-1] + [None]): query = "COUNT(CASE WHEN %s >= %f" % (column_name, prev_b) if next_b is not None: query += " AND %s < %f" % (column_name, next_b) query += " THEN 1 END)" result....
Builds an SQL query part to fetch counts corresponding to given bins.
def _minimum_one_is_missing(self, **kwargs): rqset = self._meta_data['minimum_additional_parameters'] if rqset: kwarg_set = set(iterkeys(kwargs)) if kwarg_set.isdisjoint(rqset): args = sorted(rqset) error_message = 'This resource requires at least ...
Helper function to do operation on sets Verify if at least one of the elements is present in **kwargs. If no items of rqset are contained in **kwargs the function raises exception. This check will only trigger if rqset is not empty. Raises: MissingRequire...
def prepare_request(self, request): try: request_id = local.request_id except AttributeError: request_id = NO_REQUEST_ID if self.request_id_header and request_id != NO_REQUEST_ID: request.headers[self.request_id_header] = request_id return super(Sessio...
Include the request ID, if available, in the outgoing request
def fetch(self, invoice_id, data=None, **kwargs):
    """Fetch the Invoice for the given id.

    Args:
        invoice_id: Id for which the invoice object is retrieved.
        data: Optional extra request data; defaults to an empty dict.

    Returns:
        Invoice dict for the given invoice id.
    """
    # A None sentinel replaces the mutable default argument `data={}`,
    # which was shared across calls; behavior for callers is unchanged.
    data = {} if data is None else data
    return super(Invoice, self).fetch(invoice_id, data, **kwargs)
Fetch Invoice for given Id Args: invoice_id : Id for which invoice object has to be retrieved Returns: Invoice dict for given invoice Id
def get_neg_one_task_agent(generators, market, nOffer, maxSteps):
    """Return a (task, agent) tuple whose agent's action is always minus one.

    Builds a discrete market environment, a profit task over it, and a
    NegOneAgent sized to the environment's dimensions.
    """
    env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
    task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
    agent = pyreto.util.NegOneAgent(env.outdim, env.indim)
    return task, agent
Returns a task-agent tuple whose action is always minus one.
def get(cls, rkey): if rkey in cls._cached: logger.info('Resource %s is in cache.' % rkey) return cls._cached[rkey] if rkey in cls._stock: img = cls._load_image(rkey) return img else: raise StockImageException('StockImage: %s not regist...
Get image previously registered with key rkey. If key not exist, raise StockImageException
def away_abbreviation(self):
    """Return a ``string`` of the away team's abbreviation, such as 'NWE'."""
    name = str(self._away_name)
    without_prefix = re.sub(r'.*/teams/', '', name)
    return re.sub(r'/.*', '', without_prefix)
Returns a ``string`` of the away team's abbreviation, such as 'NWE'.
def get_assessments_offered_by_ids(self, assessment_offered_ids): collection = JSONClientValidated('assessment', collection='AssessmentOffered', runtime=self._runtime) object_id_list = [] for i in assessment_offere...
Gets an ``AssessmentOfferedList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the assessments specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or...
def parse_slab_stats(slab_stats): stats_dict = {'slabs': defaultdict(lambda: {})} for line in slab_stats.splitlines(): if line == 'END': break cmd, key, value = line.split(' ') if cmd != 'STAT': continue if ":" not in key: stats_dict[key] = int...
Convert output from memcached's `stats slabs` into a Python dict. Newlines are returned by memcached along with carriage returns (i.e. '\r\n'). >>> parse_slab_stats( "STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\nSTAT " "active_slabs 1\r\nSTAT total_malloced 1048512\r\nEN...
def get_value(self, instance):
    """Look up this field's value on *instance*, falling back to the default.

    :param instance: Object whose ``values`` mapping is consulted.
    :return: The stored value for this field's alias, or the default.
    """
    values = instance.values
    return values.get(self.alias, self.default)
Get value for the current object instance :param instance: :return:
def CopyFromDateTimeString(self, time_string): date_time_values = self._CopyDateTimeFromString(time_string) year = date_time_values.get('year', 0) month = date_time_values.get('month', 0) day_of_month = date_time_values.get('day_of_month', 0) hours = date_time_values.get('hours', 0) minutes = da...
Copies a fake timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds ...
def parse(self): try: self.parsed_data = json.loads(self.data) except UnicodeError as e: self.parsed_data = json.loads(self.data.decode('latin1')) except Exception as e: raise Exception('Error while converting response from JSON to python. %s' % e) if ...
Parse the GeoJSON response and ensure it is a feature collection.
def _convert_to_unicode(string): codepoints = [] for character in string.split('-'): if character in BLACKLIST_UNICODE: next codepoints.append( '\U{0:0>8}'.format(character).decode('unicode-escape') ) return codepoints
This method should work with both Python 2 and 3 with the caveat that they need to be compiled with wide unicode character support. If there isn't wide unicode character support it'll blow up with a warning.
def printed_out(self, name): opt = self.variables().optional_namestring() req = self.variables().required_namestring() out = '' out += '| |\n' out += '| |---{}({}{})\n'.format(name, req, opt) if self.description: out += '| | {}\n'.format(self.descr...
Create a string representation of the action
def create_as(access_token, subscription_id, resource_group, as_name, update_domains, fault_domains, location): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/provi...
Create availability set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. as_name (str): Name of the new availability set. update_domains (int): Number of update domai...