def unauthorized_view(self):
    url = request.script_root + request.path
    flash(_("You do not have permission to access '%(url)s'.", url=url), 'error')
    return redirect(self._endpoint_url(self.USER_UNAUTHORIZED_ENDPOINT))
Prepare a Flash message and redirect to USER_UNAUTHORIZED_ENDPOINT
def createReference(self, fromnode, tonode, edge_data=None):
    if fromnode is None:
        fromnode = self
    fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)
    if fromident is None or toident is None:
        return
    self.msg(4, "createReference", fromnode, tonode, edge_data)
    self.graph.add_edge(fromident, toident, edge_data=edge_data)
Create a reference from fromnode to tonode
def mutate(self, node, index):
    assert index == 0
    assert isinstance(node, ForStmt)
    empty_list = parso.parse(' []')
    node.children[3] = empty_list
    return node
Modify the For loop to evaluate to None
def ensure_file(path):
    try:
        exists = isfile(path)
        if not exists:
            with open(path, 'w+') as fname:
                fname.write('initialized')
            return (True, path)
        return (True, 'exists')
    except OSError as e:
        return (False, e)
Check whether the file exists; if it does not, try to create it.
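A minimal usage sketch, assuming `isfile` is `os.path.isfile` as the body suggests (the path here is hypothetical):

ok, detail = ensure_file('/tmp/example.txt')
if ok:
    print('file ready:', detail)   # the path on creation, 'exists' otherwise
else:
    print('could not create file:', detail)  # detail is the OSError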
def _get_url_doc(self):
    resolver = get_resolver(None)
    possibilities = resolver.reverse_dict.getlist(self)
    urls = [possibility[0] for possibility in possibilities]
    return urls
Return a list of URLs that map to this resource.
def parse_band_status_update(self, message):
    self.log.debug("Status update: " + str(message))
    self.update_status(status_code=message["statuscode"])
Process incoming status updates from the service.
def process_request(self, request, client_address):
    self.collect_children()
    pid = os.fork()
    if pid:
        # Parent process: record the child and close the request handle.
        if self.active_children is None:
            self.active_children = []
        self.active_children.append(pid)
        self.close_request(request)
        return
    else:
        # Child process: handle the request, then exit without normal cleanup.
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
            os._exit(0)
        except:
            try:
                self.handle_error(request, client_address)
                self.shutdown_request(request)
            finally:
                os._exit(1)
Fork a new subprocess to process the request.
def sort_common_members():
    filename = PREFIX + '/common_members.json'
    sorted_json_data = {}
    json_data = read_json(filename)
    all_keys = []
    for key, value in json_data.items():
        all_keys.append(key)
    sorted_keys = sorted(all_keys)
    for key in sorted_keys:
        if len(json_data[key]) > 0:
            sorted_json_data[key] = sorted(json_data[key])
    print('--> Sorted/cleaned ' + os.path.basename(filename))
    write_json(sorted_json_data, filename)
Sort the keys and their member lists, dropping empty entries.
def to_dict(self):
    return {
        "total": self.total,
        "subtotal": self.subtotal,
        "items": self.items,
        "extra_amount": self.extra_amount
    }
Return the attribute values as a dict.
def diff(file_, imports):
    modules_not_imported = compare_modules(file_, imports)
    logging.info("The following modules are in {} but do not seem to be imported: "
                 "{}".format(file_, ", ".join(x for x in modules_not_imported)))
Display the difference between modules in a file and imported modules.
def add_role(role, **kwargs):
    role_i = Role(name=role.name, code=role.code)
    db.DBSession.add(role_i)
    db.DBSession.flush()
    return role_i
Add a new role
def _create_save_scenario_action(self):
    icon = resources_path('img', 'icons', 'save-as-scenario.svg')
    self.action_save_scenario = QAction(
        QIcon(icon),
        self.tr('Save Current Scenario'),
        self.iface.mainWindow())
    message = self.tr('Save current scenario to text file')
    self.action_save_scenario.setStatusTip(message)
    self.action_save_scenario.setWhatsThis(message)
    self.action_save_scenario.triggered.connect(self.save_scenario)
    self.add_action(
        self.action_save_scenario, add_to_toolbar=self.full_toolbar)
Create action for save scenario dialog.
def text_has_changed(self, text):
    text = to_text_string(text)
    if text:
        self.lineno = int(text)
    else:
        self.lineno = None
Line edit's text has changed
def start(self):
    self.storage.update({
        'term': self.storage.term + 1,
        'voted_for': self.id
    })
    self.vote_count = 1
    self.request_vote()
    self.election_timer.start()
Increment the current term, vote for itself, and send vote requests.
def cycle_list(self, *args):
    noargs = len(args) == 0
    return np.unique(self.cycle) if noargs else np.unique(self.cycle.compress(args[0]))
Return the list of cycles contained in the dataset.
def _guess_seqtype_from_file(handle):
    "Guess the sequence type from the file's contents"
    if isinstance(handle, basestring):
        handle = StringIO(handle)
    for line in handle:
        if not line.strip():
            continue
        if line.lstrip().split()[0] in ('LOCUS', 'FEATURES', 'source', 'CDS', 'gene'):
            return 'genbank'
        if len(line) > 2 and line[:3] in ('ID ', 'FT '):
            return 'embl'
        if line.startswith('>'):
            return 'fasta'
    handle.seek(0)
    import string
    from Bio.Data import IUPACData as iupac
    all_input_letters = set(handle.read().lower())
    all_valid = set(string.digits)
    all_valid.update(set(iupac.protein_letters.lower()))
    all_valid.update(set(iupac.unambiguous_dna_letters.lower()))
    all_valid.update(set('- \n'))
    if all_valid.issuperset(all_input_letters):
        return 'fasta'
    raise ValueError("Failed to guess format for input")
Guess the sequence type from the file's contents
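A quick sketch of the intended use; this is Python 2-era code (`basestring`, `StringIO`), so a plain string is wrapped in a file-like handle automatically:

print(_guess_seqtype_from_file('>seq1\nACGTACGT\n'))     # 'fasta'
print(_guess_seqtype_from_file('ID   X56734; SV 1;\n'))  # 'embl'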
def print_events(events):
    columns = ['Date', 'Type', 'IP Address', 'label', 'username']
    table = formatting.Table(columns)
    for event in events:
        table.add_row([event.get('eventCreateDate'),
                       event.get('eventName'),
                       event.get('ipAddress'),
                       event.get('label'),
                       event.get('username')])
    return table
Format the event log for a user as a table.
def add_required_resources(resources):
    required = [["variation", "cosmic"], ["variation", "clinvar"], ["variation", "dbsnp"],
                ["variation", "lcr"], ["variation", "polyx"], ["variation", "encode_blacklist"],
                ["variation", "gc_profile"], ["variation", "germline_het_pon"],
                ["variation", "train_hapmap"], ["variation", "train_indels"],
                ["variation", "editing"], ["variation", "exac"], ["variation", "esp"],
                ["variation", "gnomad_exome"], ["variation", "1000g"],
                ["aliases", "human"]]
    for key in required:
        if not tz.get_in(key, resources):
            resources = tz.update_in(resources, key, lambda x: None)
    return resources
Add default or empty values for required resources referenced in CWL
def train_model(model_folder):
    os.chdir(model_folder)
    training = generate_training_command(model_folder)
    if training is None:
        return -1
    logging.info(training)
    os.chdir(model_folder)
    os.system(training)
Train the model in ``model_folder``.
def _crates_cache() -> str:
    return os.environ.get(
        'XDG_CACHE_HOME',
        os.path.join(os.path.expanduser('~'), '.cache', 'cr8', 'crates'))
Return the path to the crates cache folder
def connectTo(self, remoteRouteName):
    self.remoteRouteName = remoteRouteName
    if self.router._sender is not None:
        self.start()
Set the name of the route which will be added to outgoing boxes.
def _crop_image(image, left=0, top=0, right=0, bottom=0, **kwargs):
    return image.crop((left, top, right, bottom))
Crop the border from the layout
def publish(self):
    try:
        for collection in self.settings.get("scheduler").get("collections"):
            yield self.publish_for_collection(collection)
    except Exception as ex:
        self.logger.error(ex)
Iterate over the scheduler collections and apply any actions found
def query(self, **options):
    if not self._query_path:
        raise AttributeError('query is not available for %s' % self._item_name)
    last_item = 0
    offset = 0
    current_item = None
    limit = options.get('limit', 25)
    options['limit'] = limit
    target = self._query_path
    while True:
        options['offset'] = offset
        json_data = self._redmine.get(target, options)
        try:
            data = json.loads(json_data)
        except:
            raise RedmineError(json_data)
        data_container = data[self._query_container]
        for item_data in data_container:
            yield(self._objectify(data=item_data))
        if not data_container:
            break
        try:
            if int(data['total_count']) > (offset + len(data_container)):
                offset += limit
            else:
                break
        except:
            break
Return an iterator for the given items.
def run(self, order=None):
    for event in self.runner.run(order=order):
        self.receive(event)
Run the runner and receive each event; ``self.runner`` must be present.
def json(self, dict=None, indent=None):
    if not dict:
        dict = self.dict()
    for key, value in dict.iteritems():
        if type(value) == datetime.datetime:
            dict[key] = value.strftime(conf.GOSCALE_ATOM_DATETIME_FORMAT)
    return simplejson.dumps(dict, indent=indent)
Return the post's JSON representation.
def clear_caches(self):
    self._single_node_repertoire_cache.clear()
    self._repertoire_cache.clear()
    self._mice_cache.clear()
Clear the mice and repertoire caches.
def _verbs_with_subjects(doc):
    verb_subj = []
    for possible_subject in doc:
        if (possible_subject.dep_ == 'nsubj' and
                possible_subject.head.pos_ == 'VERB'):
            verb_subj.append([possible_subject.head, possible_subject])
    return verb_subj
Given a spaCy document, return the verbs that have subjects.
def usb_info(self):
    return UsbInformation(self._abilities.usb_vendor,
                          self._abilities.usb_product,
                          self._abilities.usb_class,
                          self._abilities.usb_subclass,
                          self._abilities.usb_protocol)
The camera's USB information.
def _domain_event_pmsuspend_cb(conn, domain, reason, opaque):
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
        'reason': 'unknown'
    })
Domain suspend events handler
def download(self, download_key, raise_exception_on_failure=False):
    query = {"output": "json", "user_credentials": self.api_key}
    resp = requests.get(
        "%sdownload/%s" % (self._url, download_key),
        params=query,
        timeout=self._timeout,
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentDownloadFailure(resp.content, resp.status_code)
    return resp
Download the file represented by the download_key.
def extern_store_bool(self, context_handle, b):
    c = self._ffi.from_handle(context_handle)
    return c.to_value(b)
Given a context and _Bool, return a new Handle to represent the _Bool.
def full_path(self):
    if Path(self.path).is_absolute():
        return self.path
    else:
        return str(self.app_root / self.path)
Return the full path to the file.
def from_fs_path(path):
    scheme = 'file'
    params, query, fragment = '', '', ''
    path, netloc = _normalize_win_path(path)
    return urlunparse((scheme, netloc, path, params, query, fragment))
Returns a URI for the given filesystem path.
def _inference_tip_cached(func, instance, args, kwargs, _cache={}):
    node = args[0]
    try:
        return iter(_cache[func, node])
    except KeyError:
        result = func(*args, **kwargs)
        original, copy = itertools.tee(result)
        _cache[func, node] = list(copy)
        return original
Cache decorator used for inference tips
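The same generator-caching trick in isolation, as a minimal sketch: `itertools.tee` splits the result so the first caller can consume one branch while the other is materialized for replay.

import itertools

_cache = {}

def cached_gen(func, *args):
    key = (func, args)
    try:
        return iter(_cache[key])      # replay stored results on a cache hit
    except KeyError:
        original, copy = itertools.tee(func(*args))
        _cache[key] = list(copy)      # materialize one branch for the cache
        return original               # hand the other branch to the caller

def squares(n):
    return (i * i for i in range(n))

print(list(cached_gen(squares, 4)))  # [0, 1, 4, 9] - computed
print(list(cached_gen(squares, 4)))  # [0, 1, 4, 9] - replayed from the cache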
def write(self, path):
    with open(path, 'w') as fd:
        fd.write(json.dumps(
            {
                "created_at": self._created_at,
                "version": self._version,
                "mtllibs": self._mtllibs,
                "vertex_buffers": self._vertex_buffers,
            },
            indent=2,
        ))
Save the metadata as JSON.
def xgroup_destroy(self, stream, group_name):
    fut = self.execute(b'XGROUP', b'DESTROY', stream, group_name)
    return wait_ok(fut)
Delete a consumer group
def raise_errors(self, response):
    verb = self.debug_verbs
    method = response.request.method
    data = None
    is_json = 'json' in response.headers.get('Content-Type', '') and response.text
    if is_json:
        data = json.loads(response.text)
    if not (isinstance(data, list) and data and 'errorCode' in data[0]):
        messages = [response.text] if is_json else []
        raise OperationalError(
            ['HTTP error "%d %s":' % (response.status_code, response.reason)]
            + messages,
            response, ['method+url'])
    err_msg = data[0]['message']
    err_code = data[0]['errorCode']
    if response.status_code == 404:
        if method == 'DELETE' and err_code in ('ENTITY_IS_DELETED',
                                               'INVALID_CROSS_REFERENCE_KEY'):
            warn_sf([err_msg, "Object is deleted before delete or update"],
                    response, ['method+url'])
            return None
        if err_code in ('NOT_FOUND', 'METHOD_NOT_ALLOWED',):
            raise SalesforceError([err_msg], response, ['method+url'])
    raise SalesforceError([err_msg], response)
The innermost error handler: report errors by raising exceptions.
def check(self, action, page=None, lang=None, method=None):
    if self.user.is_superuser:
        return True
    if action == 'change':
        return self.has_change_permission(page, lang, method)
    if action == 'delete':
        if not self.delete_page():
            return False
        return True
    if action == 'add':
        if not self.add_page():
            return False
        return True
    if action == 'freeze':
        perm = self.user.has_perm('pages.can_freeze')
        if perm:
            return True
        return False
    if action == 'publish':
        perm = self.user.has_perm('pages.can_publish')
        if perm:
            return True
        return False
    return False
Return ``True`` if the current user has permission on the page.
def _get_1f_sum_scans(self, d, freq):
    unique_freq = np.unique(freq)
    sum_scans = [[] for i in range(len(d))]
    for f in unique_freq:
        tag = freq == f
        for i in range(len(d)):
            sum_scans[i].append(np.sum(d[i][tag]))
    return (np.array(unique_freq), np.array(sum_scans))
Sum counts in each frequency bin over 1f scans.
def deserialize_subject_info(subject_info_xml_path):
    try:
        with open(subject_info_xml_path) as f:
            return d1_common.xml.deserialize(f.read())
    except ValueError as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(
                subject_info_xml_path, str(e)
            ),
        )
Deserialize a SubjectInfo XML file to a PyXB object.
def remove_reaction(self, reaction):
    if reaction not in self._reaction_set:
        return
    self._reaction_set.remove(reaction)
    self._limits_lower.pop(reaction, None)
    self._limits_upper.pop(reaction, None)
    for compound, value in self._database.get_reaction_values(reaction):
        reactions = frozenset(
            self._database.get_compound_reactions(compound))
        if all(other_reaction not in self._reaction_set
               for other_reaction in reactions):
            self._compound_set.remove(compound)
Remove reaction from model
def _contains_blinded_text(stats_xml):
    tree = ET.parse(stats_xml)
    root = tree.getroot()
    total_tokens = int(root.find('size/total/tokens').text)
    unique_lemmas = int(root.find('lemmas').get('unique'))
    return (unique_lemmas / total_tokens) < 0.01
Heuristic to determine whether the treebank has blinded texts or not
def window_size(self, value):
    if (value > 4 and
            value < self.parameter_maxima["window_size"] and
            value % 2):
        self._window_size = value
    else:
        raise InvalidWindowSizeError("Window size must be an odd number "
                                     "between 0 and {}.".format(
                                         self.parameter_maxima["window_size"] + 1))
    self._replace_bm()
Set private ``_window_size`` and reset ``_block_matcher``.
def _create_plot(tumor, in_glob, out_ext, page=1):
    out_dir = utils.safe_makedir("images")
    out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext))
    in_file = glob.glob(in_glob)[0]
    cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"]
    if not os.path.exists(out_name + ".png"):
        subprocess.check_call([str(x) for x in cmd])
    return out_name + ".png"
Create an output plot for the given PDF in the images directory.
def make_ns(self, ns):
    if self.namespace:
        val = {}
        val.update(self.namespace)
        val.update(ns)
        return val
    else:
        return ns
Returns the `lazily` created template namespace.
def init_tag_processors(self):
    register = self.register_tag_processor
    register('class', classTagProcessor(**self.opts))
    register('file', fileTagProcessor(**self.opts))
    register('namespace', namespaceTagProcessor(**self.opts))
    register('struct', structTagProcessor(**self.opts))
    register('union', unionTagProcessor(**self.opts))
    register('function', functionTagProcessor(**self.opts))
    register('define', defineTagProcessor(**self.opts))
    register('enumeration', enumerationTagProcessor(**self.opts))
    register('enumvalue', enumvalueTagProcessor(**self.opts))
    register('typedef', typedefTagProcessor(**self.opts))
    register('variable', variableTagProcessor(**self.opts))
Register the TagProcessors that are bundled with doxytag2zealdb.
def copyFeatures(self, featureSource):
    if featureSource in self.sources:
        src, loc = self.sources[featureSource]
        if isinstance(src.features.text, str):
            self.font.features.text = u"" + src.features.text
        elif isinstance(src.features.text, unicode):
            self.font.features.text = src.features.text
Copy the features from this source
def visit_GpxModel(self, gpx_model, *args, **kwargs):
    result = OrderedDict()
    put_scalar = lambda name, json_name=None: self.optional_attribute_scalar(
        result, gpx_model, name, json_name)
    put_list = lambda name, json_name=None: self.optional_attribute_list(
        result, gpx_model, name, json_name)
    put_scalar('creator')
    put_scalar('metadata')
    put_list('waypoints')
    put_list('routes')
    put_list('tracks')
    put_list('extensions')
    return result
Render a GPXModel as a single JSON structure.
def _get_cache_plus_key(self):
    key = getattr(self, '_cache_key', self.key_from_query())
    return self._cache.cache, key
Return a cache region plus key.
def rexponweib(alpha, k, loc=0, scale=1, size=None):
    q = np.random.uniform(size=size)
    r = flib.exponweib_ppf(q, alpha, k)
    return loc + r * scale
Random exponentiated Weibull variates.
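`flib` is a compiled Fortran backend, so as a rough pure-NumPy sketch of the same inverse-CDF step, assuming the standard exponentiated-Weibull CDF F(x) = (1 - exp(-x**k))**alpha at unit scale:

import numpy as np

def exponweib_ppf(q, alpha, k):
    # invert F(x) = (1 - exp(-x**k))**alpha for q in (0, 1)
    return (-np.log(1.0 - q ** (1.0 / alpha))) ** (1.0 / k)

q = np.random.uniform(size=5)
print(1.0 + exponweib_ppf(q, alpha=2.5, k=1.5) * 2.0)  # loc=1, scale=2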
def export_to_dict(session, recursive, back_references, include_defaults):
    logging.info('Starting export')
    dbs = session.query(Database)
    databases = [database.export_to_dict(recursive=recursive,
                                         include_parent_ref=back_references,
                                         include_defaults=include_defaults)
                 for database in dbs]
    logging.info('Exported %d %s', len(databases), DATABASES_KEY)
    cls = session.query(DruidCluster)
    clusters = [cluster.export_to_dict(recursive=recursive,
                                       include_parent_ref=back_references,
                                       include_defaults=include_defaults)
                for cluster in cls]
    logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)
    data = dict()
    if databases:
        data[DATABASES_KEY] = databases
    if clusters:
        data[DRUID_CLUSTERS_KEY] = clusters
    return data
Exports databases and druid clusters to a dictionary
def someoneKnownSeen(self, home=None, camera=None):
    try:
        cam_id = self.cameraByName(camera=camera, home=home)['id']
    except TypeError:
        logger.warning("someoneKnownSeen: Camera name or home is unknown")
        return False
    if self.lastEvent[cam_id]['type'] == 'person':
        if self.lastEvent[cam_id]['person_id'] in self._knownPersons():
            return True
    return False
Return True if someone known has been seen
def saveTopicPageDefinitionToFile(self, fname):
    # Use a context manager so the file handle is closed deterministically.
    with open(fname, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.topicPage, indent=4, sort_keys=True))
Save the topic page definition to a file.
def remove_child(self, child):
    assert isinstance(child, Term)
    self.children.remove(child)
    self.doc.remove_term(child)
Remove the term from this term's children.
def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):
    return super(CiscoXrSSH, self).send_config_set(
        config_commands=config_commands, exit_config_mode=False, **kwargs
    )
IOS-XR requires that you not exit from configuration mode.
def _CheckDatabaseEncoding(cursor):
    cur_character_set = _ReadVariable("character_set_database", cursor)
    if cur_character_set != CHARACTER_SET:
        raise EncodingEnforcementError(
            "Require MySQL character_set_database of {}, got {}."
            " To create your database, use: {}".format(
                CHARACTER_SET, cur_character_set, CREATE_DATABASE_QUERY))
Enforces a sane UTF-8 encoding for the database.
def _get_keys(self, read, input_records):
    for i in range(read.value):
        ir = input_records[i]
        if ir.EventType in EventTypes:
            ev = getattr(ir.Event, EventTypes[ir.EventType])
            if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
                for key_press in self._event_to_key_presses(ev):
                    yield key_press
            elif type(ev) == MOUSE_EVENT_RECORD:
                for key_press in self._handle_mouse(ev):
                    yield key_press
Generator that yields `KeyPress` objects from the input records.
def _choose_float_dtype(dtype, has_offset):
    if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):
        return np.float32
    if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):
        if not has_offset:
            return np.float32
    return np.float64
Return a float dtype that can losslessly represent `dtype` values.
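A few concrete cases showing how the rules play out:

import numpy as np

print(_choose_float_dtype(np.dtype('float32'), has_offset=False))  # float32: 4-byte float
print(_choose_float_dtype(np.dtype('int16'), has_offset=False))    # float32: small int, no offset
print(_choose_float_dtype(np.dtype('int16'), has_offset=True))     # float64: offset needs headroom
print(_choose_float_dtype(np.dtype('int64'), has_offset=False))    # float64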
def generate_random_string(length=8):
    char_set = string.ascii_uppercase + string.digits
    return ''.join(random.sample(char_set * (length - 1), length))
Generate a random string
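Note the design choice: `random.sample` draws without replacement, so the pool is multiplied by `length - 1` to let characters repeat (each symbol can appear at most `length - 1` times). For anything security-sensitive, a sketch using the standard `secrets` module would be the usual substitute:

import secrets
import string

def generate_token(length=8):
    char_set = string.ascii_uppercase + string.digits
    # secrets.choice draws with replacement from a CSPRNG
    return ''.join(secrets.choice(char_set) for _ in range(length))

print(generate_token())  # e.g. 'Q7R2ZK9M'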
def phrase_replace(self, replace_dict):
    def r(tokens):
        text = ' ' + ' '.join(tokens)
        for k, v in replace_dict.items():
            text = text.replace(" " + k + " ", " " + v + " ")
        return text.split()
    self.stems = list(map(r, self.stems))
Replace phrases with a single token, using the mapping defined in replace_dict.
def parse_udiff(diff, patterns=None, parent='.'):
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            hunk_match = HUNK_REGEX.match(line)
            (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            path = line[4:].split('\t', 1)[0]
            if path[:2] == 'b/':
                path = path[2:]
            rv[path] = set()
    return dict([(os.path.join(parent, path), rows)
                 for (path, rows) in rv.items()
                 if rows and filename_match(path, patterns)])
Return a dictionary of matching lines.
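A sketch of the shapes involved, assuming pycodestyle's HUNK_REGEX and filename_match helpers are in scope:

diff = '''--- a/pkg/mod.py
+++ b/pkg/mod.py
@@ -10,2 +12,3 @@
 context
+added line
 context
'''
# parse_udiff(diff, patterns=['*.py'], parent='.')
# -> {'./pkg/mod.py': {12, 13, 14}}   (new-file rows 12..14 of the hunk)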
def parse_string(self):
    word = ''
    if self.prior_delim:
        delim = self.prior_delim
        self.prior_delim = None
    else:
        delim = self.char
        word += self.char
        self.update_chars()
    while True:
        if self.char == delim:
            # Check for an escaped (doubled) delimiter.
            self.update_chars()
            if self.char == delim:
                word += 2 * delim
                self.update_chars()
            else:
                word += delim
                break
        elif self.char == '\n':
            self.prior_delim = delim
            break
        else:
            word += self.char
            self.update_chars()
    return word
Tokenize a Fortran string.
def exit_if_missing_graphviz(self):
    (out, err) = utils.capture_shell("which dot")
    if "dot" not in out:
        ui.error(c.MESSAGES["dot_missing"])
Detect the presence of the dot utility to make a png graph.
def hessian(self):
    result = np.zeros((self.numc, 3, self.numc, 3), float)
    for index1 in range(self.numc):
        for index2 in range(self.numc):
            result[index1, :, index2, :] = self.hessian_component(index1, index2)
    return result
Compute the hessian of the energy
def extract_gzip(archive, compression, cmd, verbosity, interactive, outdir):
    targetname = util.get_single_outfile(outdir, archive)
    try:
        with gzip.GzipFile(archive) as gzipfile:
            with open(targetname, 'wb') as targetfile:
                data = gzipfile.read(READ_SIZE_BYTES)
                while data:
                    targetfile.write(data)
                    data = gzipfile.read(READ_SIZE_BYTES)
    except Exception as err:
        msg = "error extracting %s to %s: %s" % (archive, targetname, err)
        raise util.PatoolError(msg)
    return None
Extract a GZIP archive with the gzip Python module.
def match_all(d_SMEFT, parameters=None):
    p = default_parameters.copy()
    if parameters is not None:
        p.update(parameters)
    C = wilson.util.smeftutil.wcxf2arrays_symmetrized(d_SMEFT)
    C['vT'] = 246.22
    C_WET = match_all_array(C, p)
    C_WET = wilson.translate.wet.rotate_down(C_WET, p)
    C_WET = wetutil.unscale_dict_wet(C_WET)
    d_WET = wilson.util.smeftutil.arrays2wcxf(C_WET)
    basis = wcxf.Basis['WET', 'JMS']
    keys = set(d_WET.keys()) & set(basis.all_wcs)
    d_WET = {k: d_WET[k] for k in keys}
    return d_WET
Match the SMEFT Warsaw basis onto the WET JMS basis.
def file_to_attachment(filename, filehandler=None):
    if filehandler:
        return {'_name': filename,
                'content': base64.b64encode(filehandler.read())}
    with open(filename, 'rb') as _file:
        return {'_name': filename,
                'content': base64.b64encode(_file.read())}
Convert a file to an attachment.
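A quick usage sketch; the filename is hypothetical, and an in-memory handle works because only .read() is called:

import base64
import io

att = file_to_attachment('report.txt', filehandler=io.BytesIO(b'hello'))
print(att['_name'])                      # 'report.txt'
print(base64.b64decode(att['content']))  # b'hello'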
def print_statements(self):
    logger.info('--- Direct INDRA statements ----------')
    for i, stmt in enumerate(self.statements):
        logger.info("%s: %s" % (i, stmt))
    logger.info('--- Indirect INDRA statements ----------')
    for i, stmt in enumerate(self.indirect_stmts):
        logger.info("%s: %s" % (i, stmt))
Print all extracted INDRA Statements.
def unhandled_exception(self, exception: Exception):
    log.critical(
        'Unhandled exception when processing endpoint request',
        exc_info=True,
        node=pex(self.rest_api.raiden_api.address),
    )
    self.greenlet.kill(exception)
    return api_error([str(exception)], HTTPStatus.INTERNAL_SERVER_ERROR)
Flask.errorhandler when an exception wasn't correctly handled
def countfiles(path, recurse=False):
    if not op.isdir(path):
        return 0
    count = 0
    for r, ds, fs in os.walk(path):
        count += len(fs)
        if not recurse:
            break
    return count
Returns the number of files under the given directory path.
def _str_replace(txt):
    txt = txt.replace(",", "")
    txt = txt.replace(" ", "_")
    txt = txt.replace(":", "")
    txt = txt.replace(".", "")
    txt = txt.replace("/", "")
    # NOTE: this call is a no-op as written; the target character
    # appears to have been lost from the source.
    txt = txt.replace("", "")
    return txt
Makes a small text amenable to being used in a filename.
def _make_version(major, minor, micro, releaselevel, serial):
    assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
    version = "%d.%d" % (major, minor)
    if micro:
        version += ".%d" % (micro,)
    if releaselevel != 'final':
        short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
        version += "%s%d" % (short, serial)
    return version
Create a readable version string from version_info tuple components.
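A few worked examples:

print(_make_version(4, 0, 2, 'alpha', 5))      # '4.0.2a5'
print(_make_version(5, 1, 0, 'final', 0))      # '5.1'   (zero micro is dropped)
print(_make_version(2, 3, 1, 'candidate', 1))  # '2.3.1rc1'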
def items2file(items, filename, encoding='utf-8', modifier='w'):
    with codecs.open(filename, modifier, encoding=encoding) as f:
        for item in items:
            f.write(u"{}\n".format(json.dumps(
                item, ensure_ascii=False, sort_keys=True)))
Write a JSON array to a file, one item per line, in canonical JSON form.
def calc_core_bytes(self):
    if len(self.sample) == 1:
        sampdiff = np.array([self.sample[0]])
    else:
        sampdiff = np.concatenate(([self.sample[0]], np.diff(self.sample)))
    compact_annotation = copy.deepcopy(self)
    compact_annotation.compact_fields()
    extra_write_fields = []
    for field in ['num', 'subtype', 'chan', 'aux_note']:
        if not isblank(getattr(compact_annotation, field)):
            extra_write_fields.append(field)
    data_bytes = []
    for i in range(len(sampdiff)):
        data_bytes.append(field2bytes('samptype', [sampdiff[i], self.symbol[i]]))
        for field in extra_write_fields:
            value = getattr(compact_annotation, field)[i]
            if value is not None:
                data_bytes.append(field2bytes(field, value))
    data_bytes = np.array([item for sublist in data_bytes
                           for item in sublist]).astype('u1')
    return data_bytes
Convert all used annotation fields into bytes to write
def _tokenize_chinese_chars(self, text):
    output = []
    for char in text:
        cp = ord(char)
        if self._is_chinese_char(cp):
            output.append(" ")
            output.append(char)
            output.append(" ")
        else:
            output.append(char)
    return "".join(output)
Adds whitespace around any CJK character.
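On mixed input the effect looks like this (a space goes on both sides of every CJK character, so consecutive CJK characters end up double-spaced until later whitespace splitting collapses the runs):

# tokenizer._tokenize_chinese_chars('BERT是模型')
# -> 'BERT 是  模  型 '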
def _has_y(self, kwargs):
    return (('y' in kwargs) or
            (self._element_y in kwargs) or
            (self._type == 3 and self._element_1my in kwargs))
Returns True if y is explicitly defined in kwargs
def check_rotation(rotation):
    if rotation not in ALLOWED_ROTATION:
        allowed_rotation = ', '.join(ALLOWED_ROTATION)
        raise UnsupportedRotation('Rotation %s is not allowed. Allowed are %s'
                                  % (rotation, allowed_rotation))
Check the rotation parameter; raise an exception on an illegal value.
def prune_hashes(self, hashes, list_type):
    discarded = []
    for hash in hashes:
        if (hash in self.hashes):
            self.hashes.discard(hash)
            discarded.append(hash)
    self.logger.info("Not calculating %s hash(es) on destination as not present "
                     "in source %s list" % (', '.join(sorted(discarded)), list_type))
Prune any hashes not in source resource or change list.
def add_task(self, pid):
    _register_process_with_cgrulesengd(pid)
    for cgroup in self.paths:
        with open(os.path.join(cgroup, 'tasks'), 'w') as tasksFile:
            tasksFile.write(str(pid))
Add a process to the cgroups represented by this instance.
def answered_by(self, rec):
    return self.clazz == rec.clazz and \
           (self.type == rec.type or self.type == _TYPE_ANY) and \
           self.name == rec.name
Returns true if the question is answered by the record
def visit_BoolOp(self, node: ast.BoolOp) -> Any:
    values = [self.visit(value_node) for value_node in node.values]
    if isinstance(node.op, ast.And):
        result = functools.reduce(lambda left, right: left and right, values, True)
    elif isinstance(node.op, ast.Or):
        # The identity element for `or` is False; starting the fold from
        # True would make every disjunction evaluate to True.
        result = functools.reduce(lambda left, right: left or right, values, False)
    else:
        raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))
    self.recomputed_values[node] = result
    return result
Recursively visit the operands and apply the operation on them.
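The folding step in isolation, a minimal sketch (note the identity elements: True for `and`, False for `or`):

import functools

values = [False, True, False]
print(functools.reduce(lambda l, r: l and r, values, True))   # False (and-fold)
print(functools.reduce(lambda l, r: l or r, values, False))   # True  (or-fold)
print(functools.reduce(lambda l, r: l or r, [], False))       # False (empty or)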
def completer_tokenize(cls, value, min_length=3):
    tokens = list(itertools.chain(*[
        [m for m in n.split("'") if len(m) > min_length]
        for n in value.split(' ')
    ]))
    return list(set([value] + tokens + [' '.join(tokens)]))
Quick and dirty tokenizer for completion suggester
def __generate_string(length):
    return ''.join(
        SystemRandom().choice(string.ascii_letters + string.digits)
        for x in range(length)).encode()
Generate a string for password creation.
def validate_headers(spec, data):
    validated_data = {
        spec.VERSION: data[spec.VERSION],
        spec.KIND: data[spec.KIND],
    }
    if data.get(spec.LOGGING):
        validated_data[spec.LOGGING] = LoggingConfig.from_dict(
            data[spec.LOGGING])
    if data.get(spec.TAGS):
        validated_data[spec.TAGS] = data[spec.TAGS]
    if data.get(spec.HP_TUNING):
        validated_data[spec.HP_TUNING] = HPTuningConfig.from_dict(
            data[spec.HP_TUNING])
    return validated_data
Validate header data and create the config objects.
def capture_heroku_database(self):
    self.print_message("Capturing database backup for app '%s'" %
                       self.args.source_app)
    args = [
        "heroku",
        "pg:backups:capture",
        "--app=%s" % self.args.source_app,
    ]
    if self.args.use_pgbackups:
        args = [
            "heroku",
            "pgbackups:capture",
            "--app=%s" % self.args.source_app,
            "--expire",
        ]
    subprocess.check_call(args)
Capture Heroku database backup.
def _get_bottom_line_color(self):
    color = self.cell_attributes[self.key]["bordercolor_bottom"]
    return tuple(c / 255.0 for c in color_pack2rgb(color))
Returns color rgb tuple of bottom line
def apply(self, matrix):
    view = matrix[self.indices()]
    return self.transform(view) if self.transform != None else view
Slices the supplied matrix and applies any transform bound to this window
def average(var, key, N):
    global average_data
    if key not in average_data:
        average_data[key] = [var] * N
        return var
    average_data[key].pop(0)
    average_data[key].append(var)
    return sum(average_data[key]) / N
Moving average over the last N points.
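A quick sketch of the warm-up behavior: the buffer is seeded with the first sample, so early outputs are biased toward it.

average_data = {}

print(average(10, 'alt', 3))  # 10   - buffer seeded as [10, 10, 10]
print(average(13, 'alt', 3))  # 11.0 - buffer now [10, 10, 13]
print(average(16, 'alt', 3))  # 13.0 - buffer now [10, 13, 16]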
def _load_root_directory(self):
    kwargs = self._req_directory(0)
    self._root_directory = Directory(api=self, **kwargs)
Load root directory, which has a cid of 0
def removeRnaQuantificationSet(self):
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    rnaQuantSet = dataset.getRnaQuantificationSetByName(
        self._args.rnaQuantificationSetName)

    def func():
        self._updateRepo(self._repo.removeRnaQuantificationSet, rnaQuantSet)

    self._confirmDelete(
        "RnaQuantificationSet", rnaQuantSet.getLocalId(), func)
Removes an rnaQuantificationSet from this repo
def image_data(verbose=False):
    global _IMAGE_DATA
    if _IMAGE_DATA is None:
        if verbose:
            logger.info("--- Downloading image.")
        with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile:
            _IMAGE_DATA = infile.read()
    return _IMAGE_DATA
Get the raw encoded image data, downloading it if necessary.
def StoreStat(self, responses):
    index = responses.request_data["index"]
    if not responses.success:
        self.Log("Failed to stat file: %s", responses.status)
        self._FileFetchFailed(index, responses.request_data["request_name"])
        return
    tracker = self.state.pending_hashes[index]
    tracker["stat_entry"] = responses.First()
Stores stat entry in the flow's state.
def _get_sqs_conn(profile, region=None, key=None, keyid=None):
    if profile:
        if isinstance(profile, six.string_types):
            _profile = __opts__[profile]
        elif isinstance(profile, dict):
            _profile = profile
        key = _profile.get('key', None)
        keyid = _profile.get('keyid', None)
        region = _profile.get('region', None)
    if not region:
        region = __opts__.get('sqs.region', 'us-east-1')
    if not key:
        key = __opts__.get('sqs.key', None)
    if not keyid:
        keyid = __opts__.get('sqs.keyid', None)
    try:
        conn = boto.sqs.connect_to_region(region, aws_access_key_id=keyid,
                                          aws_secret_access_key=key)
    except boto.exception.NoAuthHandlerFound:
        log.error('No authentication credentials found when attempting to'
                  ' make sqs_event engine connection to AWS.')
        return None
    return conn
Get a boto connection to SQS.
def end_logging(filename=None):
    if logutil.global_logging_started:
        if filename:
            print('Trailer file written to: ', filename)
        else:
            print('No trailer file saved...')
        logutil.teardown_global_logging()
    else:
        print('No trailer file saved...')
Close log file and restore system defaults.
def append(self, text):
    cursor = QTextCursor(self._doc)
    cursor.movePosition(QTextCursor.End)
    cursor.insertBlock()
    cursor.insertText(text)
Append a line to the end of the document.
def run(self):
    states = open(self.states, 'r').read().splitlines()
    for state in states:
        url = self.build_url(state)
        log = "Downloading State < {0} > from < {1} >"
        logging.info(log.format(state, url))
        tmp = self.download(self.output, url, self.overwrite)
        self.s3.store(self.extract(tmp, self.tmp2poi(tmp)))
For each state in the states file, build the URL and download the file.
def _remove_single_line_import_comments(r):
    logging.info('Removing single line import comments')
    import_r, remaining_r = split_by_last_import(r)
    new_import_r = redbaron.NodeList()
    for i, v in enumerate(import_r):
        if 1 < i < len(import_r) - 2:
            if not (
                import_r[i - 2].type != 'comment'
                and v.type == 'comment'
                and import_r[i + 2].type != 'comment'
            ) or _is_keep_comment(v):
                new_import_r.append(v)
        else:
            new_import_r.append(v)
    return new_import_r + remaining_r
Remove single-line comments from the import block (previously more groups were used for the import statements, each group named).
def field_adaptors(self):
    with exception_logging(logger, 'Exception in `field_adaptors` property'):
        conjunction_globs = self.get_sources()
        if conjunction_globs is None:
            return tuple()
        sources = conjunction_globs.non_path_globs
        conjunction = conjunction_globs.conjunction
        if not sources:
            return tuple()
        base_globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
        path_globs = base_globs.to_path_globs(self.address.spec_path, conjunction)
        return (SourcesField(
            self.address,
            'sources',
            base_globs.filespecs,
            base_globs,
            path_globs,
            self.validate_sources,
        ),)
Returns a tuple of Fields for captured fields which need additional treatment.
def pubmed_url(args=sys.argv[1:], resolve_doi=True, out=sys.stdout):
    parser = argparse.ArgumentParser(
        description='Get a publication URL using a PubMed ID or PubMed URL')
    parser.add_argument('query', help='PubMed ID or PubMed URL')
    parser.add_argument(
        '-d', '--doi', action='store_false', help='get DOI URL')
    parser.add_argument(
        '-e', '--email', action='store', help='set user email', default='')
    args = parser.parse_args(args=args)
    lookup = PubMedLookup(args.query, args.email)
    publication = Publication(lookup, resolve_doi=args.doi)
    out.write(publication.url + '\n')
Get a publication URL via the command line using a PubMed ID or PubMed URL