code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def data_fetch(self, url, task):
    """Fake fetcher for ``data:`` URLs: decodes the payload inline instead
    of performing a network request, and returns a dict shaped like a real
    fetch result (status 200, empty headers/cookies, zero elapsed time).
    """
    self.on_fetch('data', task)
    result = {}
    result['orig_url'] = url
    result['content'] = dataurl.decode(url)
    result['headers'] = {}
    result['status_code'] = 200
    result['url'] = url
    result['cookies'] = {}
    result['time'] = 0
    result['save'] = task.get('fetch', {}).get('save')
    if len(result['content']) < 70:
        logger.info("[200] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
    else:
        # Long payloads: log a truncated preview plus the total length.
        logger.info(
            "[200] %s:%s data:,%s...[content:%d] 0s",
            task.get('project'), task.get('taskid'),
            result['content'][:70],
            len(result['content'])
        )
    return result
A fake fetcher for dataurl
def _qnwcheb1(n, a, b): nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n)) t1 = np.arange(1, n+1) - 0.5 t2 = np.arange(0.0, n, 2) t3 = np.concatenate((np.array([1.0]), -2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2)))) weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3 return nodes, weights
Compute univariate Gauss-Chebyshev quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes weights : np.ndarray(dtype=float) An n element array of weights Notes ----- Based on original function ``qnwcheb1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002.
def pickle_dump(self):
    """Save the status of the object in pickle format.

    Returns 0 on success, or -1 (with a warning) if the object has
    chrooted, since state recorded before the chroot would be stale.
    """
    if self.has_chrooted:
        warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted)
        return -1
    protocol = self.pickle_protocol
    # File lock + atomic write so concurrent dumps cannot corrupt the file.
    with FileLock(self.pickle_file):
        with AtomicFile(self.pickle_file, mode="wb") as fh:
            pmg_pickle_dump(self, fh, protocol=protocol)
    return 0
Save the status of the object in pickle format. Returns 0 if success
def rpc_get_refactor_options(self, filename, start, end=None):
    """Return a list of possible refactoring options.

    The list is filtered depending on whether each refactoring is
    applicable at the point START (and possibly the region between
    START and END).

    :raises ImportError: if the refactoring backend is not installed.
    """
    try:
        from elpy import refactor
    except ImportError:
        # Only translate a genuinely missing dependency.  The previous
        # bare ``except:`` also masked unrelated failures (even
        # KeyboardInterrupt) as "Rope not installed".
        raise ImportError("Rope not installed, refactorings unavailable")
    ref = refactor.Refactor(self.project_root, filename)
    return ref.get_refactor_options(start, end)
Return a list of possible refactoring options. This list will be filtered depending on whether it's applicable at the point START and possibly the region between START and END.
def claim_new(self) -> Iterable[str]:
    """Check for messages in the ``new`` subdirectory, moving each one to
    ``cur`` and yielding its key (the filename up to the last colon
    marker).  Entries that vanish mid-move — claimed by another process —
    are silently skipped.
    """
    src_dir = self._paths['new']
    dst_dir = self._paths['cur']
    for entry in os.listdir(src_dir):
        try:
            os.rename(os.path.join(src_dir, entry),
                      os.path.join(dst_dir, entry))
        except FileNotFoundError:
            continue
        yield entry.rsplit(self.colon, 1)[0]
Checks for messages in the ``new`` subdirectory, moving them to ``cur`` and returning their keys.
def declare_var(self, key, val):
    """Declare an environment variable: emit an ``export`` line when *val*
    is given, or an ``unset`` line when *val* is None.
    """
    if val is None:
        self._add("unset " + key)
    else:
        self._add("export " + key + '=' + str(val))
Declare a env variable. If val is None the variable is unset.
def clear(self):
    """Clear the samples by re-running ``__init__`` with the current
    configuration (size, alpha, clock), resetting all accumulated state.
    """
    self.__init__(size=self.size, alpha=self.alpha, clock=self.clock)
Clear the samples.
def check_for_period_error(data, period):
    """Check for Period Error.

    Validates that the requested *period* is not larger than the data
    set being analyzed.

    :param data: sequence of observations
    :param period: window length (coerced to int)
    :raises ValueError: if ``len(data) < period``
    """
    period = int(period)
    data_len = len(data)
    if data_len < period:
        # ValueError is more precise than the bare Exception previously
        # raised, and remains catchable by existing ``except Exception``
        # callers.
        raise ValueError("Error: data_len < period")
Check for Period Error. This method checks if the developer is trying to enter a period that is larger than the data set being entered. If that is the case an exception is raised with a custom message that informs the developer that their period is greater than the data set.
def _classification_fetch(self, skip_missing=None):
    """Turn the objects in ``self._res_list`` (Samples or Classifications)
    into a list of Classifications objects, stored in
    ``self._cached["classifications"]``.

    Parameters
    ----------
    skip_missing : `bool`
        If an analysis was not successful, exclude it, warn, and keep
        going.  Falls back to ``self._kwargs["skip_missing"]`` when
        falsy/None.

    Returns
    -------
    None, but stores a result in self._cached.
    """
    skip_missing = skip_missing if skip_missing else self._kwargs["skip_missing"]
    new_classifications = []
    for a in self._res_list:
        if a.__class__.__name__ == "Samples":
            c = a.primary_classification
        elif a.__class__.__name__ == "Classifications":
            c = a
        else:
            raise OneCodexException(
                "Objects in SampleCollection must be one of: Classifications, Samples"
            )
        if skip_missing and not c.success:
            warnings.warn("Classification {} not successful. Skipping.".format(c.id))
            continue
        new_classifications.append(c)
    self._cached["classifications"] = new_classifications
Turns a list of objects associated with a classification result into a list of Classifications objects. Parameters ---------- skip_missing : `bool` If an analysis was not successful, exclude it, warn, and keep going Returns ------- None, but stores a result in self._cached.
def _init_idxs_strpat(self, usr_hdrs): strpat = self.strpat_hdrs.keys() self.idxs_strpat = [ Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]
List of indexes whose values will be strings.
def _pop(self, key, default=None): path = self.__path_of(key) value = None try: raw_value, _ = self.connection.retry(self.connection.get, path) value = self.encoding.decode(raw_value) except self.no_node_error: if default: return default else: raise KeyError try: self.connection.retry(self.connection.delete, path) self.__increment_last_updated() except self.no_node_error: pass return value
If ``key`` is present in Zookeeper, removes it from Zookeeper and returns the value. If key is not in Zookeeper and ``default`` argument is provided, ``default`` is returned. If ``default`` argument is not provided, ``KeyError`` is raised. :param key: Key to remove from Zookeeper :type key: string :param default: Default object to return if ``key`` is not present. :type default: object
def remove_tmp_prefix_from_filename(filename):
    """Strip the temporary-upload prefix from *filename*.

    :raises RuntimeError: if the filename does not carry the tmp prefix.
    """
    prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX
    if not filename.startswith(prefix):
        raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
    return filename[len(prefix):]
Remove tmp prefix from filename.
def _makeResponse(self, urllib2_response):
    """Construct an HTTPResponse from the urllib response object.

    Reads at most MAX_RESPONSE_KB kilobytes of body, lower-cases header
    names, and attempts to decode the body from bytes to str using the
    charset advertised in the Content-Type header (default latin1).
    """
    resp = HTTPResponse()
    resp.body = urllib2_response.read(MAX_RESPONSE_KB * 1024)
    resp.final_url = urllib2_response.geturl()
    resp.headers = self._lowerCaseKeys(
        dict(list(urllib2_response.info().items())))
    if hasattr(urllib2_response, 'code'):
        resp.status = urllib2_response.code
    else:
        # Non-HTTP handlers (e.g. file://) carry no status; assume success.
        resp.status = 200
    _, extra_dict = self._parseHeaderValue(
        resp.headers.get("content-type", ""))
    charset = extra_dict.get('charset', 'latin1')
    try:
        resp.body = resp.body.decode(charset)
    except Exception:
        # Best-effort decode: on failure the body is left as bytes.
        pass
    return resp
Construct an HTTPResponse from the the urllib response. Attempt to decode the response body from bytes to str if the necessary information is available.
def _get_regex_pattern(label):
    """Return a regular expression for *label*, taking care of plurals
    and the different kinds of separators.

    The label is split on punctuation: even-indexed parts are words,
    odd-indexed parts are the punctuation between them.
    """
    parts = _split_by_punctuation.split(label)
    for index, part in enumerate(parts):
        if index % 2 == 0:
            # Word position: convert multi-char, non-numeric words.
            if not parts[index].isdigit() and len(parts[index]) > 1:
                parts[index] = _convert_word(parts[index])
        else:
            # Separator position: trailing punctuation (next word empty)
            # is treated as a symbol, otherwise as a separator.
            if not parts[index + 1]:
                parts[index] = _convert_punctuation(
                    parts[index], current_app.config["CLASSIFIER_SYMBOLS"]
                )
            else:
                parts[index] = _convert_punctuation(
                    parts[index], current_app.config["CLASSIFIER_SEPARATORS"]
                )
    return "".join(parts)
Return a regular expression of the label. This takes care of plural and different kinds of separators.
def _update_failure_type(self):
    """Update the failure type of this Note's Job.

    Sets the linked Job's failure classification to that of the most
    recent JobNote, or to 'not classified' if no JobNotes remain.  Called
    when JobNotes are created (via .save()) and deleted (via .delete());
    keeps the denormalised FailureClassification on Job in sync.
    """
    note = JobNote.objects.filter(job=self.job).order_by('-created').first()
    if note:
        self.job.failure_classification_id = note.failure_classification.id
    else:
        self.job.failure_classification_id = FailureClassification.objects.get(name='not classified').id
    self.job.save()
Updates the failure type of this Note's Job. Set the linked Job's failure type to that of the most recent JobNote or set to Not Classified if there are no JobNotes. This is called when JobNotes are created (via .save()) and deleted (via .delete()) and is used to resolved the FailureClassification which has been denormalised onto Job.
def extract_words(string):
    """Extract all alphabetic syllabified forms from *string*."""
    pattern = r'[{0}]+[{0}\.]*[{0}]+'.format(A)
    return re.findall(pattern, string, flags=FLAGS)
Extract all alphabetic syllabified forms from 'string'.
def set_level(logger, level):
    """Temporarily change the log level of *logger*.

    Parameters
    ----------
    logger : str or ~logging.Logger
        Logger name or logger whose log level to change.
    level : int
        Log level to set for the duration of the ``with`` block.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    previous = logger.level
    logger.setLevel(level)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        logger.setLevel(previous)
Temporarily change log level of logger. Parameters ---------- logger : str or ~logging.Logger Logger name or logger whose log level to change. level : int Log level to set. Examples -------- >>> with set_level('sqlalchemy.engine', logging.INFO): ... pass # sqlalchemy log level is set to INFO in this block
def proc_check_guard(self, instance, sql):
    """Check whether the guard SQL allows the stored procedure to run.

    The guard query is expected to return a single row whose first column
    is 0 or 1; returns True iff it is 1 (False on any query error).
    """
    self.open_db_connections(instance, self.PROC_GUARD_DB_KEY)
    cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY)
    should_run = False
    try:
        cursor.execute(sql, ())
        result = cursor.fetchone()
        should_run = result[0] == 1
    except Exception as e:
        # Errors are logged but deliberately non-fatal: default to False.
        self.log.error("Failed to run proc_only_if sql {} : {}".format(sql, e))
    self.close_cursor(cursor)
    self.close_db_connections(instance, self.PROC_GUARD_DB_KEY)
    return should_run
check to see if the guard SQL returns a single column containing 0 or 1 We return true if 1, else False
def get(self):
    """Get one item and prepare to get an item with lower rank on the
    next call.

    Returns the sentinel string "wow" when the collection is empty.
    Advances by a random step bounded by ``self.step`` and the current
    length, rotating the underlying sequence so repeated calls walk
    down the ranking.
    """
    if len(self) < 1:
        return "wow"
    if self.index >= len(self):
        # Wrap around once we've stepped past the end.
        self.index = 0
    step = random.randint(1, min(self.step, len(self)))
    res = self[0]
    self.index += step
    self.rotate(step)
    return res
Get one item and prepare to get an item with lower rank on the next call.
def dump(cls, filename, objects, properties, bools, encoding):
    """Write serialized objects, properties, bools to *filename*.

    Uses ``cls.encoding`` when *encoding* is None.  Under Python 2 the
    serialized source is coerced to unicode first, because ``io.open``
    in text mode requires unicode.
    """
    if encoding is None:
        encoding = cls.encoding
    source = cls.dumps(objects, properties, bools)
    if PY2:
        source = unicode(source)
    with io.open(filename, 'w', encoding=encoding) as fd:
        fd.write(source)
Write serialized objects, properties, bools to file.
def column_vectors(self):
    """The values of the transform as three 2D column vectors.

    The transform iterates as nine values in row-major order; the last
    row (the affine constants) is discarded.
    """
    m00, m01, m02, m10, m11, m12, _r0, _r1, _r2 = tuple(self)
    return (m00, m10), (m01, m11), (m02, m12)
The values of the transform as three 2D column vectors
def get_pages(self):
    """Return the rows grouped into pages of ``self.objects_per_page``
    items each (the final page may be shorter).
    """
    pages = []
    current = []
    for position, row in enumerate(self.get_rows):
        if position > 0 and position % self.objects_per_page == 0:
            pages.append(current)
            current = []
        current.append(row)
    pages.append(current)
    return pages
returns pages with rows
def is_valid_int(value):
    """Test if *value* can be rendered out of int: within
    ``[0, Parameter.MAX]`` or one of the special sentinel values.
    """
    return (0 <= value <= Parameter.MAX
            or value == Parameter.UNKNOWN_VALUE
            or value == Parameter.CURRENT_POSITION)
Test if value can be rendered out of int.
def parsehttpdate(string_):
    """Parse an HTTP date into a datetime object, or None when unparsable.

    >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
    datetime.datetime(1970, 1, 1, 1, 1, 1)
    """
    try:
        parsed = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
    except ValueError:
        return None
    return datetime.datetime(*parsed[:6])
Parses an HTTP date into a datetime object. >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT') datetime.datetime(1970, 1, 1, 1, 1, 1)
def _get_global_color_table(colors):
    """Return a color table sorted in descending order of count, padded
    with zero bytes up to the full power-of-two GIF table size.
    """
    table = b''.join(entry[0] for entry in colors.most_common())
    full_table_size = 2 ** (1 + int(get_color_table_size(len(colors)), 2))
    # Each missing slot takes three zero bytes (one RGB triple).
    padding = b'\x00' * (3 * (full_table_size - len(colors)))
    return table + padding
Return a color table sorted in descending order of count.
def store_initial_k2k_session(auth_url, request, scoped_auth_ref, unscoped_auth_ref):
    """Store session variables if there are K2K service providers.

    Stores variables related to Keystone2Keystone federation.  Skipped if
    a keystone provider is already recorded in the session or there are
    no Keystone service providers.  An unscoped token to the identity
    provider keystone is stored so that it can be used for federated
    login into the service providers when switching keystone providers.

    KEYSTONE_PROVIDER_IDP_NAME configures the display name of the local
    (identity provider) keystone; KEYSTONE_PROVIDER_IDP_ID is compared
    against the service provider ids and must not collide with them.

    :param auth_url: base token auth url
    :param request: Django http request object
    :param scoped_auth_ref: Scoped Keystone access info object
    :param unscoped_auth_ref: Unscoped Keystone access info object
    """
    keystone_provider_id = request.session.get('keystone_provider_id', None)
    if keystone_provider_id:
        # Already initialized for this session; nothing to do.
        return None
    providers = getattr(scoped_auth_ref, 'service_providers', None)
    if providers:
        providers = getattr(providers, '_service_providers', None)
    if providers:
        keystone_idp_name = getattr(settings, 'KEYSTONE_PROVIDER_IDP_NAME',
                                    'Local Keystone')
        keystone_idp_id = getattr(
            settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone')
        keystone_identity_provider = {'name': keystone_idp_name,
                                      'id': keystone_idp_id}
        keystone_providers = [
            {'name': provider_id, 'id': provider_id}
            for provider_id in providers]
        # The local keystone is listed alongside the service providers.
        keystone_providers.append(keystone_identity_provider)
        request.session['keystone_provider_id'] = keystone_idp_id
        request.session['keystone_providers'] = keystone_providers
        request.session['k2k_base_unscoped_token'] =\
            unscoped_auth_ref.auth_token
        request.session['k2k_auth_url'] = auth_url
Stores session variables if there are k2k service providers This stores variables related to Keystone2Keystone federation. This function gets skipped if there are no Keystone service providers. An unscoped token to the identity provider keystone gets stored so that it can be used to do federated login into the service providers when switching keystone providers. The settings file can be configured to set the display name of the local (identity provider) keystone by setting KEYSTONE_PROVIDER_IDP_NAME. The KEYSTONE_PROVIDER_IDP_ID settings variable is used for comparison against the service providers. It should not conflict with any of the service provider ids. :param auth_url: base token auth url :param request: Django http request object :param scoped_auth_ref: Scoped Keystone access info object :param unscoped_auth_ref: Unscoped Keystone access info object
def authenticate_with_email_and_pwd(user_email, user_password):
    """Authenticate the user by passing the email and password.

    Avoids prompting the command line for user credentials; useful for
    calling tools programmatically.

    Exits the process (status 1) on failed authentication or unknown
    user; raises ValueError when either credential is missing.
    """
    if user_email is None or user_password is None:
        raise ValueError(
            'Could not authenticate user. Missing username or password')
    upload_token = uploader.get_upload_token(user_email, user_password)
    if not upload_token:
        # Fixed: previously referenced an undefined ``user_name``
        # variable, raising NameError instead of reporting the failure.
        print("Authentication failed for user name " +
              user_email + ", please try again.")
        sys.exit(1)
    user_key = get_user_key(user_email)
    if not user_key:
        print("User name {} does not exist, please try again or contact Mapillary user support.".format(
            user_email))
        sys.exit(1)
    user_permission_hash, user_signature_hash = get_user_hashes(
        user_key, upload_token)
    # Fixed: ``user_items`` was never initialized and ``section`` was
    # undefined; build the result dict locally, keyed by the email
    # (assumed equivalent to the former config section name).
    user_items = {}
    user_items["MAPSettingsUsername"] = user_email
    user_items["MAPSettingsUserKey"] = user_key
    user_items["user_upload_token"] = upload_token
    user_items["user_permission_hash"] = user_permission_hash
    user_items["user_signature_hash"] = user_signature_hash
    return user_items
Authenticate the user by passing the email and password. This function avoids prompting the command line for user credentials and is useful for calling tools programmatically
def dumpfile(self, fd):
    """Dump a file through a Spin instance: ensure the instance is
    started, then queue *fd* wrapped in a DumpFile job.
    """
    self.start()
    dump = DumpFile(fd)
    self.queue.append(dump)
Dump a file through a Spin instance.
def is_valid(self):
    """Tests if this form is in a valid state for submission.

    A form is valid if all required data has been supplied compliant
    with any constraints.

    return: (boolean) - false if there is a known error in this form,
            true otherwise
    compliance: mandatory - This method must be implemented.
    """
    # all() short-circuits on the first invalid element, unlike the
    # previous loop which always scanned the entire map.
    return all(
        self._validity_map[element] is VALID for element in self._validity_map
    )
Tests if ths form is in a valid state for submission. A form is valid if all required data has been supplied compliant with any constraints. return: (boolean) - false if there is a known error in this form, true otherwise raise: OperationFailed - attempt to perform validation failed compliance: mandatory - This method must be implemented.
def from_record(self, record):
    """Construct and return a sequenced item object from the given ORM
    object *record*, mapping its fields to constructor kwargs.
    """
    kwargs = self.get_field_kwargs(record)
    return self.sequenced_item_class(**kwargs)
Constructs and returns a sequenced item object, from given ORM object.
def get_accelerometer_raw(self):
    """Accelerometer x y z raw data in Gs.

    Returns a deep copy of the most recent valid reading; when no fresh
    reading is available, the previously cached one is returned.
    """
    reading = self._get_raw_data('accelValid', 'accel')
    if reading is not None:
        self._last_accel_raw = reading
    return deepcopy(self._last_accel_raw)
Accelerometer x y z raw data in Gs
def add_dockwidget(self, child):
    """Add the *child* plugin's QDockWidget (and toggleViewAction) to
    the main window, at the location the plugin requests.
    """
    dockwidget, location = child.create_dockwidget()
    # Optionally rotate the titlebar to vertical, per user preference.
    if CONF.get('main', 'vertical_dockwidget_titlebars'):
        dockwidget.setFeatures(dockwidget.features()|
                               QDockWidget.DockWidgetVerticalTitleBar)
    self.addDockWidget(location, dockwidget)
    self.widgetlist.append(child)
Add QDockWidget and toggleViewAction
def clear_recovery_range(working_dir):
    """Clear out our recovery hint file (``.recovery``) if present."""
    hint_path = os.path.join(working_dir, '.recovery')
    if os.path.exists(hint_path):
        os.unlink(hint_path)
Clear out our recovery hint
def nearest_neighbour_delta_E(self):
    """Nearest-neighbour interaction contribution to the change in
    system energy if this jump were accepted.

    Returns:
        (Float): delta E (nearest-neighbour)
    """
    # The -1 accounts for the jumping particle no longer being its own
    # neighbour after the move.
    occupation_change = (self.final_site.nn_occupation()
                         - self.initial_site.nn_occupation()
                         - 1)
    return occupation_change * self.nearest_neighbour_energy
Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (nearest-neighbour)
def update_dois(csv_source, write_file=True):
    """Get DOI publication info for a batch of DOIs.

    LiPD-independent; requires only a CSV file with all DOIs listed in a
    single column.  The output is LiPD-formatted publication data for
    each entry.

    :param str csv_source: Local path to CSV file
    :param bool write_file: Write output data to JSON file (True), OR
        pretty print output to console (False)
    :return none:
    """
    _dois_arr = []
    _dois_raw = []
    with open(csv_source, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            # DOIs are expected in the first column.
            _dois_arr.append(row[0])
    for _doi in _dois_arr:
        _dois_raw.append(_update_doi(_doi))
    if write_file:
        new_filename = os.path.splitext(csv_source)[0]
        write_json_to_file(_dois_raw, new_filename)
    else:
        print(json.dumps(_dois_raw, indent=2))
    return
Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs listed in a single column. The output is LiPD-formatted publication data for each entry. :param str csv_source: Local path to CSV file :param bool write_file: Write output data to JSON file (True), OR pretty print output to console (False) :return none:
def has_previous_assessment_section(self, assessment_section_id):
    """Tests if there is a previous assessment section in the assessment
    before the given assessment section ``Id``.

    arg: assessment_section_id (osid.id.Id): ``Id`` of the
         ``AssessmentSection``
    return: (boolean) - ``true`` if there is a previous assessment
            section, ``false`` otherwise
    *compliance: mandatory -- This method must be implemented.*
    """
    try:
        self.get_previous_assessment_section(assessment_section_id)
    except errors.IllegalState:
        return False
    return True
Tests if there is a previous assessment section in the assessment following the given assessment section ``Id``. arg: assessment_section_id (osid.id.Id): ``Id`` of the ``AssessmentSection`` return: (boolean) - ``true`` if there is a previous assessment section, ``false`` otherwise raise: IllegalState - ``has_assessment_begun()`` is ``false`` raise: NotFound - ``assessment_section_id`` is not found raise: NullArgument - ``assessment_section_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
def _maybe_null_out(result, axis, mask, min_count=1):
    """xarray version of pandas.core.nanops._maybe_null_out.

    Replaces reduction results with a fill value (e.g. NaN) wherever
    fewer than *min_count* non-missing values contributed.
    """
    if hasattr(axis, '__len__'):
        # Multi-axis reductions cannot attribute counts to a single axis.
        raise ValueError('min_count is not available for reduction '
                         'with more than one dimensions.')
    if axis is not None and getattr(result, 'ndim', False):
        null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
        if null_mask.any():
            # Promote dtype so the fill value is representable.
            dtype, fill_value = dtypes.maybe_promote(result.dtype)
            result = result.astype(dtype)
            result[null_mask] = fill_value
    elif getattr(result, 'dtype', None) not in dtypes.NAT_TYPES:
        # Scalar reduction over all axes.
        null_mask = mask.size - mask.sum()
        if null_mask < min_count:
            result = np.nan
    return result
xarray version of pandas.core.nanops._maybe_null_out
def rss_create(channel, articles):
    """Create RSS xml feed.

    :param channel: channel info [title, link, description, language]
    :type channel: dict(str, str)
    :param articles: list of articles; each article is a dictionary with
        required fields [title, description, link] and any optional key,
        which results in ``<dict_key>dict_value</dict_key>``
    :type articles: list(dict(str, str))
    :return: element tree rooted at the ``rss`` element
    :rtype: ElementTree.ElementTree
    """
    channel = channel.copy()
    articles = list(articles)
    rss = ET.Element('rss')
    rss.set('version', '2.0')
    channel_node = ET.SubElement(rss, 'channel')
    element_from_dict(channel_node, channel, 'title')
    element_from_dict(channel_node, channel, 'link')
    element_from_dict(channel_node, channel, 'description')
    element_from_dict(channel_node, channel, 'language')
    for article in articles:
        item = ET.SubElement(channel_node, 'item')
        element_from_dict(item, article, 'title')
        element_from_dict(item, article, 'description')
        element_from_dict(item, article, 'link')
        for key in article:
            # NOTE(review): this loops over ALL keys, including the
            # title/description/link already emitted above — presumably
            # complex_el_from_dict skips simple keys; confirm no
            # duplicate elements are produced.
            complex_el_from_dict(item, article, key)
    return ET.ElementTree(rss)
Create RSS xml feed. :param channel: channel info [title, link, description, language] :type channel: dict(str, str) :param articles: list of articles, an article is a dictionary with some \ required fields [title, description, link] and any optional, which will \ result to `<dict_key>dict_value</dict_key>` :type articles: list(dict(str,str)) :return: root element :rtype: ElementTree.Element
def zeta(x, context=None):
    """Return the value of the Riemann zeta function at *x*.

    *x* is implicitly converted to BigFloat; *context* overrides the
    current context when given.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_zeta,
        (BigFloat._implicit_convert(x),),
        context,
    )
Return the value of the Riemann zeta function on x.
def match_process(pid, name, cmdline, exe, cfg):
    """Decide whether a process matches a process-group descriptor.

    :param pid: process pid
    :param name: process name
    :param cmdline: process cmdline (list of arguments)
    :param exe: process executable
    :param cfg: descriptor dict with 'selfmon', 'exe', 'name', 'cmdline'
        regex lists describing the process group we're testing for
    :return: True if it matches
    :rtype: bool
    """
    if cfg['selfmon'] and pid == os.getpid():
        return True
    if any(pattern.search(exe) for pattern in cfg['exe']):
        return True
    if any(pattern.search(name) for pattern in cfg['name']):
        return True
    joined_cmdline = ' '.join(cmdline)
    if any(pattern.search(joined_cmdline) for pattern in cfg['cmdline']):
        return True
    return False
Decides whether a process matches with a given process descriptor :param pid: process pid :param exe: process executable :param name: process name :param cmdline: process cmdline :param cfg: the dictionary from processes that describes with the process group we're testing for :return: True if it matches :rtype: bool
def _createtoken(self, type_, value, flags=None):
    """Create a token carrying position information, popping the two
    most recent entries from the position stack (start, end).
    """
    assert len(self._positions) >= 2, (type_, value)
    end = self._positions.pop()
    start = self._positions.pop()
    # Removed the dead ``pos = None`` pre-assignment; pos was always
    # rebound before use.
    pos = [start, end]
    return token(type_, value, pos, flags)
create a token with position information
def create_reward_encoder():
    """Create TF ops to track and update the most recent cumulative
    reward.

    Returns:
        last_reward: non-trainable variable holding the last reward.
        new_reward: scalar placeholder for the value to record.
        update_reward: op assigning new_reward into last_reward.
    """
    last_reward = tf.Variable(0, name="last_reward", trainable=False, dtype=tf.float32)
    new_reward = tf.placeholder(shape=[], dtype=tf.float32, name='new_reward')
    update_reward = tf.assign(last_reward, new_reward)
    return last_reward, new_reward, update_reward
Creates TF ops to track and increment recent average cumulative reward.
def hsl_to_rgb(self, h, s, l):
    """Convert a color from the HSL color model to gamma-corrected RGB.

    See also: http://www.w3.org/TR/css3-color/#hsl-color
    """
    h %= 1.0
    s = min(max(s, 0.0), 1.0)
    l = min(max(l, 0.0), 1.0)
    if l <= 0.5:
        m2 = l * (s + 1.0)
    else:
        m2 = l + s - l * s
    m1 = l * 2.0 - m2
    channels = (
        self._hue_to_rgb(m1, m2, h + 1.0 / 3.0),
        self._hue_to_rgb(m1, m2, h),
        self._hue_to_rgb(m1, m2, h - 1.0 / 3.0),
    )
    return tuple(channel ** self.gamma for channel in channels)
Convert a color from HSL color-model to RGB. See also: - http://www.w3.org/TR/css3-color/#hsl-color
def get(self, timeout=None, block=True):
    """Return the next enqueued object, or sleep waiting for one.

    :param float timeout:
        If not None, specifies a timeout in seconds.
    :param bool block:
        If False, immediately raise TimeoutError if the latch is empty.
    :raises LatchError:
        :meth:`close` has been called; the object is no longer valid.
    :raises TimeoutError:
        Timeout was reached.
    :returns:
        The de-queued object.
    """
    _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
    self._lock.acquire()
    try:
        if self.closed:
            raise LatchError()
        # Items ahead of index i are owed to threads already sleeping.
        i = len(self._sleeping)
        if len(self._queue) > i:
            _vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
            return self._queue.pop(i)
        if not block:
            raise TimeoutError()
        # Register a wake-up socket before releasing the lock so a
        # producer can signal us without a race.
        rsock, wsock = self._get_socketpair()
        cookie = self._make_cookie()
        self._sleeping.append((wsock, cookie))
    finally:
        self._lock.release()
    poller = self.poller_class()
    poller.start_receive(rsock.fileno())
    try:
        return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
    finally:
        poller.close()
Return the next enqueued object, or sleep waiting for one. :param float timeout: If not :data:`None`, specifies a timeout in seconds. :param bool block: If :data:`False`, immediately raise :class:`mitogen.core.TimeoutError` if the latch is empty. :raises mitogen.core.LatchError: :meth:`close` has been called, and the object is no longer valid. :raises mitogen.core.TimeoutError: Timeout was reached. :returns: The de-queued object.
def append_field(self, field_name, list_value):
    """Return a copy of this object with *list_value* appended to the
    field named *field_name*.
    """
    return self._single_list_field_operation(field_name, list_value, prepend=False)
Return a copy of this object with `list_value` appended to the field named `field_name`.
def build_css(minimize=True):
    """Build CSS from SASS, compressed when *minimize* is true."""
    print('Build CSS')
    style = 'compressed' if minimize else 'nested'
    run(CMD_SASS.format(style=style))
Builds CSS from SASS.
def format_ubuntu_dialog(df):
    """Format statement/reply pairs from an Ubuntu dialog DataFrame for
    easy review: the last turn of each Context paired with the last turn
    of the corresponding Utterance.
    """
    # Accumulate chunks and join once, instead of quadratic ``s +=``.
    chunks = []
    for _, record in df.iterrows():
        statement = list(split_turns(record.Context))[-1]
        reply = list(split_turns(record.Utterance))[-1]
        chunks.append('Statement: {}\n'.format(statement))
        chunks.append('Reply: {}\n\n'.format(reply))
    return ''.join(chunks)
Print statements paired with replies, formatted for easy review
def marquee(text="", width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    Args:
        text (str): Input string.
        width (int): Width of final output string.
        mark (str): Character(s) used to fill the string.

    :Examples:
        >>> marquee('A test', width=40)
        '**************** A test ****************'
        >>> marquee('A test', width=40, mark='-')
        '---------------- A test ----------------'
    """
    if not text:
        return (mark * width)[:width]
    fill_count = max((width - len(text) - 2) // len(mark) // 2, 0)
    side = mark * fill_count
    return '{0} {1} {2}'.format(side, text, side)
Return the input string centered in a 'marquee'. Args: text (str): Input string width (int): Width of final output string. mark (str): Character used to fill string. :Examples: >>> marquee('A test', width=40) '**************** A test ****************' >>> marquee('A test', width=40, mark='-') '---------------- A test ----------------' marquee('A test',40, ' ') ' A test '
def dt2str(dt, flagSeconds=True):
    """Convert a datetime object to str (strings pass through unchanged).

    *flagSeconds* selects the format with seconds (_FMTS) over the one
    without (_FMT).
    """
    if isinstance(dt, str):
        return dt
    fmt = _FMTS if flagSeconds else _FMT
    return dt.strftime(fmt)
Converts datetime object to str if not yet an str.
def get_protein_id_list(df, level=0):
    """Return a complete list of shortform protein IDs from a DataFrame.

    Extracts all protein IDs from rows whose index values hold IDs in
    MaxQuant output format, e.g. ``P07830;P63267;Q54A44;P63268`` (the
    per-entry splitting/cleanup is done by ``get_protein_ids``).

    :param df: DataFrame
    :type df: pandas.DataFrame
    :param level: Level of DataFrame index to extract IDs from
    :type level: int or str
    :return: deduplicated list of string ids (order not guaranteed)
    """
    protein_list = []
    for s in df.index.get_level_values(level):
        protein_list.extend( get_protein_ids(s) )
    return list(set(protein_list))
Return a complete list of shortform IDs from a DataFrame Extract all protein IDs from a dataframe from multiple rows containing protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268 Long names (containing species information) are eliminated (split on ' ') and isoforms are removed (split on '_'). :param df: DataFrame :type df: pandas.DataFrame :param level: Level of DataFrame index to extract IDs from :type level: int or str :return: list of string ids
def readInstance(self, key, makeGlyphs=True, makeKerning=True, makeInfo=True):
    """Read a single instance element matching *key*.

    key: an (attribute, value) tuple used to find the requested
    instance, e.g. ``('stylename', 'Regular')`` against elements like::

        <instance familyname="SuperFamily" filename="OutputNameInstance1.ufo"
                  location="location-token-aaa" stylename="Regular">

    :raises MutatorError: if no instance matches.
    """
    attrib, value = key
    for instanceElement in self.root.findall('.instances/instance'):
        if instanceElement.attrib.get(attrib) == value:
            # Stop at the first matching instance.
            self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)
            return
    raise MutatorError("No instance found with key: (%s, %s)." % key)
Read a single instance element. key: an (attribute, value) tuple used to find the requested instance. :: <instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular">
def check_socket(host, port):
    """Check if *port* is open on *host* (used to detect whether the
    Xenon-GRPC server is running).

    Returns True when a TCP connection succeeds.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex((host, port)) == 0
Checks if port is open on host. This is used to check if the Xenon-GRPC server is running.
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """Enlist all the files, by absolute path, within the specified
    directory, optionally recursively.

    :param dir_pathname: The directory to traverse.
    :param recursive: ``True`` for walking recursively through the
        directory tree; ``False`` otherwise.
    :param topdown: Please see the documentation for :func:`os.walk`.
    :param followlinks: Please see the documentation for :func:`os.walk`.
    """
    # NOTE: ``walk`` here is a project helper that accepts a
    # ``recursive`` flag — it is not os.walk.
    for root, dirnames, filenames\
        in walk(dir_pathname, recursive, topdown, followlinks):
        for filename in filenames:
            yield absolute_path(os.path.join(root, filename))
Enlists all the files using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
def convert_time(time):
    """Convert a time string such as ``'1:30 p.m.'`` into 24-hour
    ``'13:30'``.

    Strings without an am/pm component are returned unchanged (assumed
    to already be 24-hour).
    """
    split_time = time.split()
    try:
        am_pm = split_time[1].replace('.', '')
        time_str = '{0} {1}'.format(split_time[0], am_pm)
    except IndexError:
        # No am/pm component present.
        return time
    try:
        time_obj = datetime.strptime(time_str, '%I:%M %p')
    except ValueError:
        # Hour-only inputs like "9 am".
        time_obj = datetime.strptime(time_str, '%I %p')
    # Fixed: the old output format '%H:%M %p' produced contradictory
    # strings such as '13:30 PM'; 24-hour time needs no am/pm suffix.
    return time_obj.strftime('%H:%M')
Convert a time string into 24-hour time.
def _get_elem_names(self):
    """Build, for one cycle, the element-name index dictionary.

    Populates ``self.z_of_element_name`` from the lookup that
    ``u.give_zip_element_z_and_names`` stores on the nuutils module.
    """
    import nuutils as u
    element_name = self.elements_names
    # Side effect: populates u.index_z_for_elements for these names.
    u.give_zip_element_z_and_names(element_name)
    self.z_of_element_name = u.index_z_for_elements
returns for one cycle an element name dictionary.
def convert_ipynbs(directory):
    """Recursively convert all ipynb files under *directory* into rst
    files in the same directory, skipping checkpoint copies.
    """
    for root, _subfolders, files in os.walk(os.path.abspath(directory)):
        if ".ipynb_checkpoints" in root:
            continue
        for name in files:
            if name.endswith("ipynb"):
                ipynb_to_rst(root, name)
Recursively converts all ipynb files in a directory into rst files in the same directory.
def extract_sponsor(bill):
    """Return the list of fields mapping a sponsor to a bill, or None
    when the bill carries no sponsor.
    """
    logger.debug("Extracting Sponsor")
    sponsor = bill.get('sponsor', None)
    if sponsor:
        sponsor_map = [
            sponsor.get('type'),
            sponsor.get('thomas_id'),
            bill.get('bill_id'),
            sponsor.get('district'),
            sponsor.get('state'),
        ]
    else:
        sponsor_map = []
    logger.debug("END Extracting Sponsor")
    return sponsor_map if sponsor_map else None
Return a list of the fields we need to map a sponsor to a bill
def print_name_version(self):
    """Print program name and version and exit.

    :rtype: int
    """
    if self.use_sys:
        self.print_generic(u"%s v%s" % (self.NAME, aeneas_version))
    return self.exit(self.HELP_EXIT_CODE)
Print program name and version and exit. :rtype: int
def to_ufos(
    font,
    include_instances=False,
    family_name=None,
    propagate_anchors=True,
    ufo_module=defcon,
    minimize_glyphs_diffs=False,
    generate_GDEF=True,
    store_editor_state=True,
):
    """Take a GSFont object and convert it into one UFO per master.

    Takes in data as Glyphs.app-compatible classes, as documented at
    https://docu.glyphsapp.com/

    If include_instances is True, also returns the parsed instance data.
    If family_name is provided, the master UFOs will be given this name
    and only instances with this name will be returned.  If generate_GDEF
    is True, a ``table GDEF {...}`` statement (containing GlyphClassDef
    and LigatureCaretByPos) is written into each UFO's features.fea.
    """
    builder = UFOBuilder(
        font,
        ufo_module=ufo_module,
        family_name=family_name,
        propagate_anchors=propagate_anchors,
        minimize_glyphs_diffs=minimize_glyphs_diffs,
        generate_GDEF=generate_GDEF,
        store_editor_state=store_editor_state,
    )
    result = list(builder.masters)
    if include_instances:
        return result, builder.instance_data
    return result
Take a GSFont object and convert it into one UFO per master. Takes in data as Glyphs.app-compatible classes, as documented at https://docu.glyphsapp.com/ If include_instances is True, also returns the parsed instance data. If family_name is provided, the master UFOs will be given this name and only instances with this name will be returned. If generate_GDEF is True, write a `table GDEF {...}` statement in the UFO's features.fea, containing GlyphClassDef and LigatureCaretByPos.
def get_s3_buckets(api_client, s3_info, s3_params):
    """List all available S3 buckets and fan out per-bucket checks.

    :param api_client: map of region -> S3 client
    :param s3_info: result dict, updated in place (buckets, counts)
    :param s3_params: config dict (selected/skipped/checked buckets)
    :return: the updated s3_info
    """
    manage_dictionary(s3_info, 'buckets', {})
    buckets = api_client[get_s3_list_region(s3_params['selected_regions'])].list_buckets()['Buckets']
    targets = []
    for b in buckets:
        # Honor the skip-list, and the check-list when one was given.
        if (b['Name'] in s3_params['skipped_buckets']) or (len(s3_params['checked_buckets']) and b['Name'] not in s3_params['checked_buckets']):
            continue
        targets.append(b)
    s3_info['buckets_count'] = len(targets)
    s3_params['api_clients'] = api_client
    s3_params['s3_info'] = s3_info
    thread_work(targets, get_s3_bucket, params = s3_params, num_threads = 30)
    show_status(s3_info)
    # Recount after the workers run: some buckets may not have been
    # recorded (e.g. access errors).
    s3_info['buckets_count'] = len(s3_info['buckets'])
    return s3_info
List all available buckets :param api_client: :param s3_info: :param s3_params: :return:
def load_commands_from_entry_point(self, specifier):
    """Load commands defined within a pkg_resources entry point.

    Each entry is a module that is searched for functions decorated with
    the :func:`subparse.command` decorator.  This operation is not
    recursive.
    """
    for ep in pkg_resources.iter_entry_points(specifier):
        module = ep.load()
        command.discover_and_call(module, self.command)
Load commands defined within a pkg_resources entry point. Each entry will be a module that should be searched for functions decorated with the :func:`subparse.command` decorator. This operation is not recursive.
def convert_from_sliced_object(data):
    """Fix the memory layout of a multi-dimensional sliced ndarray.

    Returns a contiguous copy (with a warning about the extra memory)
    when *data* is a non-C-contiguous view of another ndarray; otherwise
    returns *data* unchanged.
    """
    is_ndarray_view = (data.base is not None
                       and isinstance(data, np.ndarray)
                       and isinstance(data.base, np.ndarray))
    if is_ndarray_view and not data.flags.c_contiguous:
        warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended "
                      "due to it will double the peak memory cost in LightGBM.")
        return np.copy(data)
    return data
Fix the memory of multi-dimensional sliced object.
def real_main(start_url=None, ignore_prefixes=None, upload_build_id=None, upload_release_name=None):
    """Run the site_diff workflow to completion.

    Starts the worker coordinator, enqueues a single root SiteDiff item,
    waits for it to finish, then shuts the coordinator down.
    """
    coordinator = workers.get_coordinator()
    fetch_worker.register(coordinator)
    coordinator.start()
    item = SiteDiff(
        start_url=start_url,
        ignore_prefixes=ignore_prefixes,
        upload_build_id=upload_build_id,
        upload_release_name=upload_release_name,
        heartbeat=workers.PrintWorkflow)
    # Mark as the root workflow item so completion ends the run.
    item.root = True
    coordinator.input_queue.put(item)
    coordinator.wait_one()
    coordinator.stop()
    coordinator.join()
Runs the site_diff.
def out_of_bag_mae(self):
    """Return the mean absolute error for predictions on the out-of-bag
    samples, recomputing lazily and caching until invalidated.

    Returns None while the underlying node is not yet ready to predict.
    """
    if not self._out_of_bag_mae_clean:
        try:
            self._out_of_bag_mae = self.test(self.out_of_bag_samples)
            self._out_of_bag_mae_clean = True
        except NodeNotReadyToPredict:
            return
    # Copy so callers cannot mutate the cached value.
    return self._out_of_bag_mae.copy()
Returns the mean absolute error for predictions on the out-of-bag samples.
def _update_eof(self):
    """Update the EOF flag: finish aftermath processing, close the input
    file and mark the stream exhausted.
    """
    self._aftermathmp()
    self._ifile.close()
    self._flag_e = True
Update EOF flag.
def get_statements(self):
    """Return all Statements from the model in a single flat list.

    Returns
    -------
    stmts : list[indra.statements.Statement]
        A list of all the INDRA Statements in the model.
    """
    # Flatten the per-key statement lists in one comprehension instead
    # of building an intermediate list of lists and concatenating.
    return [stmt for stmt_list in self.stmts.values() for stmt in stmt_list]
Return a list of all Statements in a single list. Returns ------- stmts : list[indra.statements.Statement] A list of all the INDRA Statements in the model.
def order_snapshot_space(self, volume_id, capacity, tier, upgrade, **kwargs):
    """Order snapshot space for the given block volume.

    :param integer volume_id: The id of the volume
    :param integer capacity: The capacity to order, in GB
    :param float tier: The tier level of the block volume, in IOPS per GB
    :param boolean upgrade: Flag to indicate if this order is an upgrade
    :return: Returns a SoftLayer_Container_Product_Order_Receipt
    """
    # Fields needed by prepare_snapshot_order_object.
    block_mask = 'id,billingItem[location,hourlyFlag],'\
        'storageType[keyName],storageTierLevel,provisionedIops,'\
        'staasVersion,hasEncryptionAtRest'
    block_volume = self.get_block_volume_details(volume_id, mask=block_mask, **kwargs)
    order = storage_utils.prepare_snapshot_order_object(
        self, block_volume, capacity, tier, upgrade)
    return self.client.call('Product_Order', 'placeOrder', order)
Orders snapshot space for the given block volume. :param integer volume_id: The id of the volume :param integer capacity: The capacity to order, in GB :param float tier: The tier level of the block volume, in IOPS per GB :param boolean upgrade: Flag to indicate if this order is an upgrade :return: Returns a SoftLayer_Container_Product_Order_Receipt
def loadIntoTextureD3D11_Async(self, textureId, pDstTexture):
    """Helper function to copy the texture bits into an existing D3D11
    texture, delegating to the native function table.
    """
    return self.function_table.loadIntoTextureD3D11_Async(textureId, pDstTexture)
Helper function to copy the bits into an existing texture.
def ensure_connected(self):
    """Ensure the database connection is still open, reconnecting when
    auto-connect is enabled.

    :raises DBALConnectionError: if the connection is closed and
        auto-connect is disabled.
    """
    if self.is_connected():
        return
    if not self._auto_connect:
        raise DBALConnectionError.connection_closed()
    self.connect()
Ensures database connection is still open.
def write(self, s):
    """Write *s* to the underlying logger, one log record per non-empty
    line (runs of newlines are collapsed).
    """
    for chunk in re.split(r'\n+', s):
        if chunk:
            self._logger.log(self._level, chunk)
Write message to logger.
def load_cli(subparsers):
    """Given a parser, load the CLI subcommands.

    Each available command gets its own subparser; the command callable is
    stored on the subparser defaults under ``cmmd`` for later dispatch.
    """
    for command_name in available_commands():
        # Resolve the sibling module that implements this command.
        module = '{}.{}'.format(__package__, command_name)
        loader, description = _import_loader(module)
        parser = subparsers.add_parser(command_name, description=description)
        command = loader(parser)
        if command is None:
            raise RuntimeError('Failed to load "{}".'.format(command_name))
        # Stash the command callable so dispatch can find it later.
        parser.set_defaults(cmmd=command)
def _cleandoc(doc): indent_length = lambda s: len(s) - len(s.lstrip(" ")) not_empty = lambda s: s != "" lines = doc.split("\n") indent = min(map(indent_length, filter(not_empty, lines))) return "\n".join(s[indent:] for s in lines)
Remove uniform indents from ``doc`` lines that are not empty :returns: Cleaned ``doc``
def _getPOS(self, token, onlyFirst=True):
    """Return the POS tag of the current token.

    :param token: a token dict whose ANALYSIS entry is a list of analyses,
        each carrying a POSTAG field
    :param onlyFirst: when True, return only the first analysis' tag;
        otherwise return the tags of all analyses as a list
    """
    if onlyFirst:
        return token[ANALYSIS][0][POSTAG]
    else:
        return [a[POSTAG] for a in token[ANALYSIS]]
def _parse(res, params, n, api, **kwds):
    """Auxiliary function to download results and parse json.

    Pages through the Scopus-style search API either by cursor (when
    ``params`` carries a ``cursor`` key) or by start offset, collecting
    the ``entry`` lists until ``n`` results have been fetched.
    """
    cursor = "cursor" in params
    if not cursor:
        start = params["start"]
    if n == 0:
        return ""
    _json = res.get('search-results', {}).get('entry', [])
    # Each iteration fetches one more page of size params["count"].
    while n > 0:
        n -= params["count"]
        if cursor:
            # Cursor paging: the server hands back the next pointer.
            pointer = res['search-results']['cursor'].get('@next')
            params.update({'cursor': pointer})
        else:
            # Offset paging: advance the start index manually.
            start += params["count"]
            params.update({'start': start})
        res = download(url=URL[api], params=params, accept="json", **kwds).json()
        _json.extend(res.get('search-results', {}).get('entry', []))
    return _json
def get_connection_by_node(self, node):
    """Get a pooled connection for the given node, creating one if needed.

    The connection is moved from the per-node available list to the
    in-use set before being returned.
    """
    self._checkpid()
    self.nodes.set_node_name(node)
    try:
        # EAFP: pop an idle connection; IndexError means the pool is empty.
        connection = self._available_connections.get(node["name"], []).pop()
    except IndexError:
        connection = self.make_connection(node)
    self._in_use_connections.setdefault(node["name"], set()).add(connection)
    return connection
def rescale_variables(df, variables_include=None, variables_exclude=None):
    """Rescale numeric DataFrame columns to [0, 1] with ``MinMaxScaler``.

    Columns containing NaNs and columns of dtype object/datetime/timedelta
    are excluded automatically. ``variables_exclude`` names further columns
    to skip; ``variables_include`` names columns to force-include.

    :param df: DataFrame rescaled in place (and also returned)
    :param variables_include: optional list of column names to include
    :param variables_exclude: optional list of column names to exclude
    :returns: the (mutated) DataFrame

    Fixes over the original: mutable default arguments (``[]``) and the
    in-place mutation of the caller's ``variables_exclude`` list.
    """
    include = list(variables_include) if variables_include is not None else []
    # Work on a copy so the caller's list is never mutated.
    exclude = list(variables_exclude) if variables_exclude is not None else []
    # Never rescale columns with missing values or non-numeric dtypes.
    exclude.extend(df.columns[df.isna().any()].tolist())
    exclude.extend(
        df.select_dtypes(include=["object", "datetime", "timedelta"]).columns)
    to_rescale = list(set(df.columns) - set(exclude))
    to_rescale.extend(include)
    scaler = MinMaxScaler()
    df[to_rescale] = scaler.fit_transform(df[to_rescale])
    return df
def helper(self, name, *args):
    """Build an AST call expression invoking a helper in this module.

    The call targets ``@dessert_ar._<name>(*args)`` — the ``@dessert_ar``
    name is an injected module alias resolved at runtime.
    """
    py_name = ast.Name("@dessert_ar", ast.Load())
    # Helpers are named with a leading underscore by convention.
    attr = ast.Attribute(py_name, "_" + name, ast.Load())
    return ast_Call(attr, list(args), [])
def belongs_to_module(t, module_name):
    "Check if `t` belongs to `module_name`."
    # Bound/unbound methods carry the real function on __func__.
    target = getattr(t, '__func__', t)
    module = inspect.getmodule(target)
    if module is None:
        return False
    return module.__name__.startswith(module_name)
def update_configuration(self, timeout=-1):
    """Asynchronously applies or re-applies the logical interconnect
    configuration to all managed interconnects.

    Args:
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        dict: Logical Interconnect.
    """
    config_uri = "{}/configuration".format(self.data["uri"])
    return self._helper.update(None, uri=config_uri, timeout=timeout)
def get_structure_by_formula(self, formula, **kwargs):
    """Queries the COD for structures by formula.

    Args:
        formula (str): Chemical formula; normalized to Hill notation for
            the COD query.
        kwargs: All kwargs supported by
            :func:`pymatgen.core.structure.Structure.from_str`.

    Returns:
        A list of dict of the format
        [{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}]
    """
    structures = []
    # COD stores formulas wrapped in "- ... -" markers.
    sql = 'select file, sg from data where formula="- %s -"' % \
        Composition(formula).hill_formula
    text = self.query(sql).split("\n")
    text.pop(0)  # drop the header row of the query output
    for l in text:
        if l.strip():
            cod_id, sg = l.split("\t")
            # Fetch the CIF for each matching COD entry.
            r = requests.get("http://www.crystallography.net/cod/%s.cif"
                             % cod_id.strip())
            try:
                s = Structure.from_str(r.text, fmt="cif", **kwargs)
                structures.append(
                    {"structure": s, "cod_id": int(cod_id), "sg": sg})
            except Exception:
                # Warn with the offending CIF, then re-raise: a parse
                # failure here is treated as fatal, not skipped.
                import warnings
                warnings.warn(
                    "\nStructure.from_str failed while parsing CIF file:\n%s"
                    % r.text)
                raise
    return structures
def url2fs(url):
    """Encode a URL to be safe as a filename.

    The extension is kept as-is; only the base of the URL is encoded.
    """
    base, extension = posixpath.splitext(url)
    return safe64.dir(base) + extension
def _split_generators(self, dl_manager):
    """Return the test split of Cifar10.

    Args:
        dl_manager: download manager object.

    Returns:
        test split.
    """
    path = dl_manager.download_and_extract(_DOWNLOAD_URL)
    # Only a TEST split is produced; the data lives under _DIRNAME
    # inside the extracted archive.
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            num_shards=1,
            gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
    ]
def remove_service_listener(self, listener):
    """Removes a listener from the set that is currently listening."""
    for browser in self.browsers:
        if browser.listener == listener:
            browser.cancel()
            # NOTE(review): `del(browser)` only unbinds the local loop
            # variable; the browser object is NOT removed from
            # self.browsers here — confirm that cancel() triggers the
            # actual cleanup elsewhere.
            del(browser)
def paste(self, key, data):
    """Pastes data into grid.

    Parameters
    ----------
    key: 2-Tuple of Integer
        Top left cell.
    data: String or wx.Bitmap
        Tab separated string of paste data or paste data image.
    """
    data_gen = self._get_paste_data_gen(key, data)
    # Only the row/col part of the key is used as the paste anchor.
    self.grid.actions.paste(key[:2], data_gen, freq=1000)
    self.main_window.grid.ForceRefresh()
def document_path_path(cls, project, database, document_path):
    """Return a fully-qualified document_path string."""
    template = ("projects/{project}/databases/{database}"
                "/documents/{document_path=**}")
    return google.api_core.path_template.expand(
        template,
        project=project,
        database=database,
        document_path=document_path,
    )
def _BinsToQuery(self, bins, column_name): result = [] for prev_b, next_b in zip([0] + bins[:-1], bins[:-1] + [None]): query = "COUNT(CASE WHEN %s >= %f" % (column_name, prev_b) if next_b is not None: query += " AND %s < %f" % (column_name, next_b) query += " THEN 1 END)" result.append(query) return ", ".join(result)
Builds an SQL query part to fetch counts corresponding to given bins.
def _minimum_one_is_missing(self, **kwargs):
    """Verify at least one of the required-set parameters is present.

    If the resource declares a non-empty ``minimum_additional_parameters``
    set and none of its members appear in ``kwargs``, raise.

    Raises:
        MissingRequiredCreationParameter
    """
    rqset = self._meta_data['minimum_additional_parameters']
    if rqset:
        kwarg_set = set(iterkeys(kwargs))
        # Disjoint means not a single required parameter was supplied.
        if kwarg_set.isdisjoint(rqset):
            args = sorted(rqset)
            error_message = 'This resource requires at least one of the ' \
                'mandatory additional ' \
                'parameters to be provided: %s' % ', '.join(args)
            raise MissingRequiredCreationParameter(error_message)
def prepare_request(self, request):
    """Include the request ID, if available, in the outgoing request."""
    try:
        request_id = local.request_id
    except AttributeError:
        # No thread/request-local ID set; fall back to the sentinel.
        request_id = NO_REQUEST_ID
    # Only attach the header when one is configured and an ID exists.
    if self.request_id_header and request_id != NO_REQUEST_ID:
        request.headers[self.request_id_header] = request_id
    return super(Session, self).prepare_request(request)
def fetch(self, invoice_id, data=None, **kwargs):
    """Fetch Invoice for given Id.

    Args:
        invoice_id: Id for which invoice object has to be retrieved.
        data: Optional dict of query parameters; defaults to an empty
            dict. (The original used a mutable default argument, which
            is shared across calls — fixed here.)

    Returns:
        Invoice dict for given invoice Id.
    """
    if data is None:
        data = {}
    return super(Invoice, self).fetch(invoice_id, data, **kwargs)
def get_neg_one_task_agent(generators, market, nOffer, maxSteps):
    """Return a task-agent tuple whose action is always minus one."""
    env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
    task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
    # NegOneAgent ignores observations and always emits -1.
    agent = pyreto.util.NegOneAgent(env.outdim, env.indim)
    return task, agent
def get(cls, rkey):
    """Get image previously registered with key rkey.

    If key does not exist, raise StockImageException.
    """
    # Fast path: already loaded and cached.
    if rkey in cls._cached:
        logger.info('Resource %s is in cache.' % rkey)
        return cls._cached[rkey]
    # Registered but not yet loaded: load it now.
    if rkey in cls._stock:
        img = cls._load_image(rkey)
        return img
    else:
        raise StockImageException('StockImage: %s not registered.' % rkey)
def away_abbreviation(self):
    """Return a ``string`` of the away team's abbreviation, such as 'NWE'.

    Extracted from the team link by stripping everything up to and
    including '/teams/' and everything after the next '/'.
    """
    link = str(self._away_name)
    without_prefix = re.sub(r'.*/teams/', '', link)
    return re.sub(r'/.*', '', without_prefix)
def get_assessments_offered_by_ids(self, assessment_offered_ids):
    """Gets an ``AssessmentOfferedList`` corresponding to the given ``IdList``.

    arg:    assessment_offered_ids (osid.id.IdList): the list of
            ``Ids`` to retrieve
    return: (osid.assessment.AssessmentOfferedList) - the returned
            ``AssessmentOffered`` list
    raise:  NotFound - an ``Id`` was not found
    raise:  NullArgument - ``assessment_offered_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - assessment failure

    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentOffered',
                                     runtime=self._runtime)
    object_id_list = []
    for i in assessment_offered_ids:
        object_id_list.append(
            ObjectId(self._get_id(i, 'assessment').get_identifier()))
    # Single query for all requested ids, filtered by the current view.
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    # Re-order the results to match the order of the requested id list
    # (Mongo's $in does not preserve order); duplicates are preserved.
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.AssessmentOfferedList(sorted_result,
                                         runtime=self._runtime,
                                         proxy=self._proxy)
def parse_slab_stats(slab_stats):
    """Convert output from memcached's `stats slabs` into a Python dict.

    Per-slab stats (keys of the form ``<slab>:<name>``) are grouped under
    the ``'slabs'`` key by integer slab id; global stats are stored at the
    top level. Parsing stops at the ``END`` sentinel line.
    """
    parsed = {'slabs': defaultdict(dict)}
    for raw_line in slab_stats.splitlines():
        if raw_line == 'END':
            break
        cmd, key, value = raw_line.split(' ')
        if cmd != 'STAT':
            continue
        if ':' in key:
            slab_id, stat_name = key.split(':')
            parsed['slabs'][int(slab_id)][stat_name] = int(value)
        else:
            parsed[key] = int(value)
    return parsed
def get_value(self, instance):
    """Return the value stored on *instance* under this field's alias.

    Falls back to this field's default when the alias is absent.
    """
    stored = instance.values
    return stored.get(self.alias, self.default)
def CopyFromDateTimeString(self, time_string):
    """Copies a fake timestamp from a date and time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##

            Where # are numeric digits ranging from 0 to 9 and the
            seconds fraction can be either 3 or 6 digits. The time of
            day, seconds fraction and time zone offset are optional.
            The default time zone is UTC.
    """
    date_time_values = self._CopyDateTimeFromString(time_string)

    # Missing components default to 0.
    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)

    # Invalidate the cached normalized timestamp before recomputing.
    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)
    self._microseconds = date_time_values.get('microseconds', None)

    self.is_local_time = False
def parse(self):
    """Parse geojson payload and ensure it is a FeatureCollection.

    On success, ``self.parsed_data`` is replaced with the list of
    features. Any parse failure is converted into a generic Exception
    with the original error message attached.
    """
    try:
        self.parsed_data = json.loads(self.data)
    except UnicodeError as e:
        # Some servers send latin-1 payloads; retry with that codec.
        self.parsed_data = json.loads(self.data.decode('latin1'))
    except Exception as e:
        # Deliberately broad: any decode error is surfaced uniformly.
        raise Exception(
            'Error while converting response from JSON to python. %s' % e)
    if self.parsed_data.get('type', '') != 'FeatureCollection':
        raise Exception(
            'GeoJson synchronizer expects a FeatureCollection object at root level')
    self.parsed_data = self.parsed_data['features']
def _convert_to_unicode(string):
    """Convert a dash-separated string of hex codepoints to characters.

    :param string: e.g. ``"1f600-fe0f"``; each dash-separated token is a
        hexadecimal Unicode codepoint
    :returns: list of single-character strings, skipping any codepoint
        listed in BLACKLIST_UNICODE

    Fixes over the original: the bare ``next`` statement was a no-op
    (blacklisted codepoints were never skipped) — replaced with
    ``continue``; and ``str.decode('unicode-escape')`` is Python-2-only —
    replaced with ``chr(int(..., 16))``, which also works on narrow
    builds since Python 3.3.
    """
    codepoints = []
    for character in string.split('-'):
        if character in BLACKLIST_UNICODE:
            continue
        codepoints.append(chr(int(character, 16)))
    return codepoints
def printed_out(self, name):
    """Create a string representation of the action.

    Renders the action name with its required and optional variable
    namestrings, plus the description (when set) on a following line.
    """
    opt = self.variables().optional_namestring()
    req = self.variables().required_namestring()
    out = ''
    out += '| |\n'
    out += '| |---{}({}{})\n'.format(name, req, opt)
    if self.description:
        out += '| | {}\n'.format(self.description)
    return out
def create_as(access_token, subscription_id, resource_group, as_name,
              update_domains, fault_domains, location):
    """Create availability set.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        as_name (str): Name of the new availability set.
        update_domains (int): Number of update domains.
        fault_domains (int): Number of fault domains.
        location (str): Azure data center location. E.g. westus.

    Returns:
        HTTP response. JSON body of the availability set properties.
    """
    resource_path = ('/subscriptions/' + subscription_id +
                     '/resourceGroups/' + resource_group +
                     '/providers/Microsoft.Compute/availabilitySets/' +
                     as_name)
    endpoint = get_rm_endpoint() + resource_path + '?api-version=' + COMP_API
    payload = {
        'location': location,
        'properties': {
            'platformUpdateDomainCount': update_domains,
            'platformFaultDomainCount': fault_domains,
        },
    }
    return do_put(endpoint, json.dumps(payload), access_token)