code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def run(self):
    """Run the master service: build salt loaders and invoke the engine.

    On Windows the loaders must be rebuilt in-process because function
    references cannot be pickled across to the new process.
    """
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    if salt.utils.platform.is_windows():
        # Runners only exist on the master side.
        if self.opts['__role'] == 'master':
            self.runners = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.runners = []
        self.funcs = salt.loader.minion_mods(self.opts, utils=self.utils, proxy=self.proxy)
        self.engine = salt.loader.engines(self.opts,
                                          self.funcs,
                                          self.runners,
                                          self.utils,
                                          proxy=self.proxy)
    kwargs = self.config or {}
    try:
        self.engine[self.fun](**kwargs)
    except Exception as exc:
        # Log with traceback instead of crashing the whole process.
        log.critical(
            'Engine \'%s\' could not be started!',
            self.fun.split('.')[0],
            exc_info=True
        )
Run the master service!
def downvote(self):
    """Downvote the currently selected item.

    Toggles: a downvote is applied when the item is upvoted or unvoted,
    otherwise (already downvoted) the vote is cleared.
    """
    data = self.get_selected_item()
    if 'likes' not in data:
        # Item is not votable at all.
        self.term.flash()
    elif getattr(data['object'], 'archived'):
        self.term.show_notification("Voting disabled for archived post", style='Error')
    elif data['likes'] or data['likes'] is None:
        # Currently upvoted (True) or unvoted (None) -> downvote.
        with self.term.loader('Voting'):
            data['object'].downvote()
        if not self.term.loader.exception:
            data['likes'] = False
    else:
        # Currently downvoted (False) -> clear the vote.
        with self.term.loader('Clearing vote'):
            data['object'].clear_vote()
        if not self.term.loader.exception:
            data['likes'] = None
Downvote the currently selected item.
def highlight_block(self, text):
    """Implement highlighting specific to fixed-form Fortran77.

    Lines starting with 'c'/'C' are whole-line comments; otherwise the
    label field (cols 1-5) and anything past column 72 are comments.
    """
    text = to_text_string(text)
    if text.startswith(("c", "C")):
        self.setFormat(0, len(text), self.formats["comment"])
        self.highlight_spaces(text)
    else:
        FortranSH.highlight_block(self, text)
        self.setFormat(0, 5, self.formats["comment"])
        # NOTE(review): QSyntaxHighlighter.setFormat takes (start, count);
        # passing max([73, len(text)]) as a count looks like it should be
        # len(text) - 73 — confirm against the Qt API.
        self.setFormat(73, max([73, len(text)]), self.formats["comment"])
Implement highlight specific for Fortran77.
def font_size_to_pixels(size):
    """Convert a CSS font-size string (px/em/pt) to an integer pixel value.

    Returns None for non-string input or for unrecognized units.
    """
    if size is None or not isinstance(size, basestring):
        return
    # Rough conversions: 1em ~ 16px, 1pt ~ 16/12 px.
    conversions = {'em': 16, 'pt': 16 / 12.}
    # Fix: regex patterns must be raw strings — '\d' in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    val = re.findall(r'\d+', size)
    unit = re.findall(r'[a-z]+', size)
    if (val and not unit) or (val and unit[0] == 'px'):
        # Bare number or explicit pixels.
        return int(val[0])
    elif val and unit[0] in conversions:
        return int(int(val[0]) * conversions[unit[0]])
Convert a fontsize to a pixel value
def __init(self):
    """Load the service's JSON description and mirror each key as an
    attribute on this instance."""
    res = self._get(url=self._url,
                    param_dict={"f": "json"},
                    securityHandler=self._securityHandler,
                    proxy_url=self._proxy_url,
                    proxy_port=self._proxy_port)
    self._json_dict = res
    self._json_string = json.dumps(self._json_dict)
    # Expose every JSON key as an instance attribute.
    for k, v in self._json_dict.items():
        setattr(self, k, v)
loads the json values
def diag(A, k=0):
    """Extract or construct a diagonal polynomial array.

    Plain arrays are passed straight to numpy.diag; Poly instances get
    numpy.diag applied to every coefficient core.
    """
    if not isinstance(A, Poly):
        return numpy.diag(A, k)
    new_core = {key: numpy.diag(A.A[key], k) for key in A.keys}
    return Poly(new_core, A.dim, None, A.dtype)
Extract or construct a diagonal polynomial array.
def _get_controller_agent(self, arg):
    """Return a single or a complex controller agent.

    For a 'complex' argument, the first member becomes the controller
    and the remaining members are attached as bound conditions.
    """
    controller_agent = None
    controller = arg.get('arg')
    if controller is not None:
        controller_agent, coords = self._get_agent_from_entity(controller)
    elif arg['argument-type'] == 'complex':
        controllers = list(arg.get('args').values())
        # First member of the complex is the controller itself.
        controller_agent, coords = \
            self._get_agent_from_entity(controllers[0])
        bound_agents = [self._get_agent_from_entity(c)[0]
                        for c in controllers[1:]]
        bound_conditions = [BoundCondition(ba, True) for ba in bound_agents]
        controller_agent.bound_conditions = bound_conditions
    # NOTE(review): 'coords' is unbound when neither branch runs —
    # presumably one always does; confirm with callers.
    return controller_agent, coords
Return a single or a complex controller agent.
def require_session(handler):
    """Decorator ensuring a valid session is attached to the request.

    The wrapped handler receives (request, session); a 404 JSON error is
    returned when the URL's session token does not match a live session.
    """
    @functools.wraps(handler)
    async def decorated(request: web.Request) -> web.Response:
        request_session_token = request.match_info['session']
        session = session_from_request(request)
        if not session or request_session_token != session.token:
            LOG.warning(f"request for invalid session {request_session_token}")
            return web.json_response(
                data={'error': 'bad-token',
                      'message': f'No such session {request_session_token}'},
                status=404)
        return await handler(request, session)
    return decorated
Decorator to ensure a session is properly in the request
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
    """Download a given Wikimedia Commons file to output_path.

    Falls back to the full-size file when the requested thumbnail width
    exceeds the source width. Returns the written file path.

    Raises CouldNotWriteFileOnDiskException / DownloadException on errors.
    """
    image_name = clean_up_filename(image_name)
    logging.info("Downloading %s with width %s", image_name, width)
    try:
        contents, output_file_name = get_thumbnail_of_file(image_name, width)
    except RequestedWidthBiggerThanSourceException:
        logging.warning("Requested width is bigger than source - downloading full size")
        contents, output_file_name = get_full_size_file(image_name)
    output_file_path = os.path.join(output_path, output_file_name)
    try:
        with open(output_file_path, 'wb') as f:
            logging.debug("Writing as %s", output_file_path)
            f.write(contents)
        return output_file_path
    # Fix: 'except X, e' is Python 2-only syntax and 'e.message' was removed
    # in Python 3 — use 'except X as e' and str(e) instead.
    except IOError as e:
        msg = 'Could not write file %s on disk to %s: %s' % \
            (image_name, output_path, e)
        logging.error(msg)
        raise CouldNotWriteFileOnDiskException(msg)
    except Exception as e:
        logging.critical(e)
        msg = 'An unexpected error occured when downloading %s to %s: %s' % \
            (image_name, output_path, e)
        raise DownloadException(msg)
Download a given Wikimedia Commons file.
def ask_yes_no(*question: Token, default: bool = False) -> bool:
    """Ask the user to answer by yes or no; loops until a valid answer.

    Pressing Enter without input returns `default`.
    """
    while True:
        tokens = [green, "::", reset] + list(question) + [reset]
        # Show which answer bare-Enter maps to.
        if default:
            tokens.append("(Y/n)")
        else:
            tokens.append("(y/N)")
        info(*tokens)
        answer = read_input()
        if answer.lower() in ["y", "yes"]:
            return True
        if answer.lower() in ["n", "no"]:
            return False
        if not answer:
            return default
        warning("Please answer by 'y' (yes) or 'n' (no) ")
Ask the user to answer by yes or no
def validate_on_submit(self):
    """Extend validate_on_submit to allow validation with a schema.

    Falls back to plain WTForms validation when no schema is configured
    or the form was not submitted.
    """
    valid = FlaskWtf.validate_on_submit(self)
    if not self._schema or not self.is_submitted():
        return valid
    data = dict()
    for field in self._fields:
        data[field] = self._fields[field].data
    # NOTE(review): the guard checks self._schema but this calls
    # self.schema — confirm both names refer to the same object.
    result = self.schema.process(data, context=self._force_context)
    self.set_errors(result)
    # Write back possibly-coerced values from the schema run.
    for field in data:
        self._fields[field].data = data[field]
    return valid and not bool(self.errors)
Extend validate on submit to allow validation with schema
def convert_field(self, value, conversion):
    """Apply the custom conversions registered in CONV_FUNCS.

    Unknown conversions other than 'R' are delegated to the base
    formatter; 'h', 'H' and 'R' additionally strip separator characters.
    """
    func = self.CONV_FUNCS.get(conversion)
    if func is not None:
        # Conversion maps to a no-argument method on the value.
        value = getattr(value, func)()
    elif conversion not in ['R']:
        return super(StringFormatter, self).convert_field(value, conversion)
    if conversion in ['h', 'H', 'R']:
        value = value.replace('-', '').replace('_', '').replace(':', '').replace(' ', '')
    return value
Apply conversions mentioned above.
def to_datetime(dt, tzinfo=None, format=None):
    """Convert a date/time or string to a tz-aware datetime.

    Returns None when a string cannot be parsed with any known format.
    """
    if not dt:
        return dt
    tz = pick_timezone(tzinfo, __timezone__)
    if isinstance(dt, (str, unicode)):
        if not format:
            formats = DEFAULT_DATETIME_INPUT_FORMATS
        else:
            # NOTE(review): list(format) splits a plain string into single
            # characters — presumably `format` is expected to be a sequence
            # of format strings; confirm callers.
            formats = list(format)
        d = None
        for fmt in formats:
            try:
                d = datetime.strptime(dt, fmt)
            except ValueError:
                continue
        # NOTE(review): no break on success, so the LAST matching format
        # wins — confirm this is intended.
        if not d:
            return None
        d = d.replace(tzinfo=tz)
    else:
        # Copy fields so date-like objects also become datetimes.
        d = datetime(getattr(dt, 'year', 1970), getattr(dt, 'month', 1),
                     getattr(dt, 'day', 1), getattr(dt, 'hour', 0),
                     getattr(dt, 'minute', 0), getattr(dt, 'second', 0),
                     getattr(dt, 'microsecond', 0))
        if not getattr(dt, 'tzinfo', None):
            d = d.replace(tzinfo=tz)
        else:
            d = d.replace(tzinfo=dt.tzinfo)
    return to_timezone(d, tzinfo)
Convert a date or time to datetime with tzinfo
def attempt_reauthorization(blink):
    """Attempt to refresh the auth token and links for a Blink system."""
    _LOGGER.info("Auth token expired, attempting reauthorization.")
    return blink.get_auth_token(is_retry=True)
Attempt to refresh auth token and links.
def load_global_catalog():
    """Return a catalog for the environment-specific Intake directory.

    An empty Catalog is returned when the directory does not exist yet.
    """
    cat_dir = global_data_dir()
    if os.path.isdir(cat_dir):
        return YAMLFilesCatalog(cat_dir)
    return Catalog()
Return a catalog for the environment-specific Intake directory
def use_plenary_hierarchy_view(self):
    """Pass through to provider HierarchyLookupSession.use_plenary_hierarchy_view."""
    self._hierarchy_view = PLENARY
    # Propagate to every provider session that supports the call.
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_hierarchy_view()
        except AttributeError:
            pass
Pass through to provider HierarchyLookupSession.use_plenary_hierarchy_view
def _decode_received(self, msg):
    """Return either bytes or str, depending on message type.

    Binary payloads whose leading byte is >= ord('0') are treated as text
    and decoded as UTF-8; everything else is returned unchanged.
    """
    if isinstance(msg, six.binary_type):
        first_byte = six.byte2int(msg[0:1])
        if first_byte >= 48:
            return msg.decode('utf-8')
    return msg
Returns either bytes or str, depending on message type.
def css(self, css):
    """Finds other nodes by a CSS selector relative to the current node."""
    nodes = []
    for node_id in self._get_css_ids(css).split(","):
        if node_id:
            nodes.append(self.get_node_factory().create(node_id))
    return nodes
Finds another node by a CSS selector relative to the current node.
def _varname_inj(self):
    """Customize varname for bus injections.

    Registers P (first n slots after m) and Q (next n slots) under both
    the 'unamey' and 'fnamey' name lists. No-op when the element count
    self.n is zero.
    """
    if not self.n:
        return
    m = self.system.dae.m
    # The same (listname, xy_idx, var_name) triple was previously written
    # out four times; collapse the duplication into two small loops while
    # preserving the original append order: P/unamey, P/fnamey, Q/unamey,
    # Q/fnamey.
    for offset, var_name in ((0, 'P'), (self.n, 'Q')):
        xy_idx = range(m + offset, m + offset + self.n)
        for listname in ('unamey', 'fnamey'):
            self.system.varname.append(
                listname=listname,
                xy_idx=xy_idx,
                var_name=var_name,
                element_name=self.name)
Customize varname for bus injections
def put(path, obj):
    """Write an object to `path` as a pickle.

    Parameters: path - destination file path; obj - any picklable object.
    """
    try:
        import cPickle as pickle
    # Fix: a bare 'except:' swallowed every exception; only ImportError is
    # expected here (Python 3 merged cPickle into pickle).
    except ImportError:
        import pickle
    # 'fh' avoids shadowing the (Python 2) builtin 'file'.
    with open(path, 'wb') as fh:
        return pickle.dump(obj, fh)
Write an object to file
def build_kal_scan_channel_string(kal_bin, channel, args):
    """Return the CLI invocation string for kal to scan one channel."""
    option_mapping = {"gain": "-g", "device": "-d", "error": "-e"}
    invocation = "%s -v -c %s" % (kal_bin, channel)
    invocation += options_string_builder(option_mapping, args)
    return invocation
Return string for CLI invocation of kal, for channel scan.
def unapostrophe(text):
    """Strip a trailing apostrophe (and optional possessive 's') from text."""
    pattern = r'[%s]s?$' % ''.join(APOSTROPHES)
    return re.sub(pattern, '', text)
Strip apostrophe and 's' from the end of a string.
def nonpresent_module_filename():
    """Return a ``.py`` filename for a module name that doesn't already exist."""
    while True:
        candidate = get_random_name()
        if pkgutil.find_loader(candidate) is not None:
            # Name collides with an importable module; try again.
            continue
        importlib.invalidate_caches()
        return "{}.py".format(candidate)
Return module name that doesn't already exist
def append (self, cmd, delay=0.000, attrs=None):
    """Adds a new command with a relative time delay to this sequence."""
    self.lines.append( SeqCmd(cmd, delay, attrs) )
Adds a new command with a relative time delay to this sequence.
def fw_retry_failures_create(self):
    """Retry the firewall create cases for tenants whose driver create
    previously failed."""
    for tenant_id in self.fwid_attr:
        try:
            # Serialize against concurrent updates of this tenant's state.
            with self.fwid_attr[tenant_id].mutex_lock:
                if self.fwid_attr[tenant_id].is_fw_drvr_create_needed():
                    fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
                    if fw_dict:
                        fw_obj, fw_data = self.get_fw(fw_dict.get('fw_id'))
                        self.retry_failure_fab_dev_create(tenant_id, fw_data,
                                                          fw_dict)
                    else:
                        LOG.error("FW data not found for tenant %s", tenant_id)
        except Exception as exc:
            # Keep iterating remaining tenants even if one fails.
            LOG.error("Exception in retry failure create %s", str(exc))
This method is called to retry failed firewall create cases.
def add(self, source_id, profile_data, training_metadata=None,
        profile_reference=None, timestamp_reception=None):
    """Use the api to add a new profile using profile_data.

    Returns the decoded JSON response of the POST call.
    """
    # Fix: the default was a mutable list ([]), which is shared across
    # calls; normalize None to a fresh list instead (backward compatible).
    if training_metadata is None:
        training_metadata = []
    data = {
        "source_id": _validate_source_id(source_id),
        "profile_json": _validate_dict(profile_data, "profile_data"),
        "training_metadata": _validate_training_metadata(training_metadata),
        "profile_reference": profile_reference
    }
    if timestamp_reception is not None:
        data['timestamp_reception'] = _validate_timestamp(timestamp_reception, 'timestamp_reception')
    response = self.client.post("profile/json", data=data)
    return response.json()
Use the api to add a new profile using profile_data.
def opened(filename, mode):
    """Open filename, or do nothing if filename is already an open file object."""
    if not isinstance(filename, str):
        # Already a file-like object; hand it back untouched.
        yield filename
        return
    handle = open(filename, mode)
    try:
        yield handle
    finally:
        if not handle.closed:
            handle.close()
Open filename, or do nothing if filename is already an open file object
def to_dict(self): "returns self as a dictionary with _underscore subdicts corrected." ndict = {} for key, val in self.__dict__.items(): if key[0] == "_": ndict[key[1:]] = val else: ndict[key] = val return ndict
returns self as a dictionary with _underscore subdicts corrected.
def expand_includes(text, path='.'):
    """Recursively expand ``.. include::`` directives in the given text.

    Each included file is read relative to `path`, and its own includes
    are expanded relative to its directory.
    """
    def _substitute(match):
        name = join(path, match.group('filename'))
        included = read(name)
        return expand_includes(included, path=join(path, dirname(name)))
    return re.sub(r'^\.\. include:: (?P<filename>.*)$',
                  _substitute, text, flags=re.MULTILINE)
Recursively expands includes in given text.
def insert_after(self, key, new_item, instance=0):
    """Insert an item after the given occurrence (`instance`) of `key`."""
    self._insert_item(key, new_item, instance, True)
Insert an item after a key
def add(self, scene):
    """Add a scene, replacing any existing scene with the same scene_id."""
    if not isinstance(scene, Scene):
        raise TypeError()
    for index, existing in enumerate(self.__scenes):
        if existing.scene_id == scene.scene_id:
            self.__scenes[index] = scene
            return
    self.__scenes.append(scene)
Add scene, replace existing scene if scene with scene_id is present.
def sync_one(self, aws_syncr, amazon, function):
    """Make sure this function exists and has only attributes we want it to have.

    Creates the Lambda function when absent, otherwise modifies it in place.
    """
    # NOTE(review): aws_syncr is unused here — kept for interface parity
    # with sibling sync methods; confirm.
    function_info = amazon.lambdas.function_info(function.name, function.location)
    if not function_info:
        amazon.lambdas.create_function(function.name, function.description,
            function.location, function.runtime, function.role,
            function.handler, function.timeout, function.memory_size,
            function.code)
    else:
        amazon.lambdas.modify_function(function_info, function.name,
            function.description, function.location, function.runtime,
            function.role, function.handler, function.timeout,
            function.memory_size, function.code)
Make sure this function exists and has only attributes we want it to have
def _int64_feature(value):
    """Wrap a scalar or list of ints as an int64 Feature for an Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
Wrapper for inserting int64 features into Example proto.
def show_quota(self, project_id, **_params):
    """Fetch information of a certain project's quotas."""
    return self.get(self.quota_path % (project_id), params=_params)
Fetch information of a certain project's quotas.
def _get_property(name):
    """Build a read-only property delegating `name` to self.loop,
    copying TrainLoop's docstring when available (Python 3 only)."""
    ret = property(
        lambda self: getattr(self.loop, name))
    if six.PY3:
        try:
            ret.__doc__ = getattr(TrainLoop, name).__doc__
        except AttributeError:
            pass
    return ret
Delegate property to self.loop
def _BinsToQuery(self, bins, column_name):
    """Build an SQL fragment with one COUNT(...) per histogram bin.

    The last bin is open-ended (no upper bound).
    """
    lower_bounds = [0] + bins[:-1]
    upper_bounds = bins[:-1] + [None]
    pieces = []
    for lo, hi in zip(lower_bounds, upper_bounds):
        clause = "COUNT(CASE WHEN %s >= %f" % (column_name, lo)
        if hi is not None:
            clause += " AND %s < %f" % (column_name, hi)
        clause += " THEN 1 END)"
        pieces.append(clause)
    return ", ".join(pieces)
Builds an SQL query part to fetch counts corresponding to given bins.
def get(self, value, default=None):
    """Return the subtree at `value` (a path-like), or `default` if any
    component along the path is missing."""
    path = value if isinstance(value, Path) else Path(str(value))
    node = self
    for part in path.parts:
        try:
            node = node[part]
        except KeyError:
            return default
    return node
Return a subtree if exists.
def emit(self):
    """We are finished processing one element. Emit it.

    Dispatches to the handler method named after the context subcategory,
    falling back to handler.default when present.
    """
    self.count += 1
    event_name = self.context.subcategory
    if hasattr(self.handler, event_name):
        getattr(self.handler, event_name)(self.context)
    elif hasattr(self.handler, 'default'):
        self.handler.default(self.context)
We are finished processing one element. Emit it
def read(self, num_bytes=None):
    """Read and return the specified bytes from the buffer, consuming them."""
    chunk = self.get_next(num_bytes)
    self.skip(len(chunk))
    return chunk
Read and return the specified bytes from the buffer.
def _build_implicit_prefetches(
        self,
        model,
        prefetches,
        requirements
):
    """Build a prefetch dictionary based on internal requirements.

    Scalar/string requirements are leaves and skipped; nested requirements
    recurse into an implicit queryset for the related model.
    """
    for source, remainder in six.iteritems(requirements):
        if not remainder or isinstance(remainder, six.string_types):
            # Leaf requirement — nothing to prefetch.
            continue
        related_field = get_model_field(model, source)
        related_model = get_related_model(related_field)
        queryset = self._build_implicit_queryset(
            related_model,
            remainder
        ) if related_model else None
        prefetches[source] = self._create_prefetch(
            source,
            queryset
        )
    return prefetches
Build a prefetch dictionary based on internal requirements.
def bytes_to_file(input_data, output_file):
    """Save bytes to a file, creating parent directories as needed."""
    parent = pathlib.Path(output_file.parent)
    parent.mkdir(parents=True, exist_ok=True)
    with open(output_file, "wb") as handle:
        handle.write(input_data)
Save bytes to a file.
def _handle_progress(self, total, progress_callback):
    """Coroutine: accumulate sent increments and report progress.

    Each value sent in is added to the running count, after which
    progress_callback(current, total) is invoked; callback errors are
    logged and swallowed so the transfer continues.
    """
    current = 0
    while True:
        current += yield
        try:
            progress_callback(current, total)
        except Exception:
            _LOG.exception('Progress callback raised an exception. %s',
                           progress_callback)
            continue
Calls the callback with the current progress and total .
def _join_chars(chars, length):
    """Return `length` characters sampled (without replacement) from a
    pool built by repeating `chars` enough times. Used by the random
    character functions."""
    repeats = int(length / len(chars)) + 1
    pool = chars * repeats
    return "".join(random.sample(pool, length))
Used by the random character functions.
def update(self, data):
    """Update the chain's devices with the given per-device info.

    `data` is an iterable parallel to self.devices; None clears all
    device info instead.
    """
    if data is None:
        for device in self.devices:
            device.clear_info()
    else:
        for device, device_info in zip(self.devices, data):
            device.device_info = device_info
            # NOTE(review): logged once per device inside the loop —
            # confirm this is intended rather than one summary line.
            self.connection.log("Device information updated -> [{}]".format(device))
Update the chain object with the predefined data.
def _push_textbuffer(self):
    """Push the textbuffer onto the stack as a Text node and clear it.

    No-op when the buffer is empty.
    """
    if not self._textbuffer:
        return
    joined = "".join(self._textbuffer)
    self._stack.append(tokens.Text(text=joined))
    self._textbuffer = []
Push the textbuffer onto the stack as a Text node and clear it.
def logBranch(self, indent=0, level=logging.DEBUG):
    """Logs the item and all descendants, one line per child.

    indent: indentation depth; level: logging level for every line.
    """
    # Fix: removed the dead 'if 0: print(...)' debug toggle — the print
    # branch could never execute.
    logger.log(level, indent * " " + str(self))
    for child_item in self.childItems:
        child_item.logBranch(indent + 1, level=level)
Logs the item and all descendants, one line per child
def make_library(**kwargs):
    """Build and return a ModelManager object and fill the associated model
    library.

    Keyword options: library, comp, basedir, ModelManager.
    """
    library_yaml = kwargs.pop('library', 'models/library.yaml')
    comp_yaml = kwargs.pop('comp', 'config/binning.yaml')
    basedir = kwargs.pop('basedir', os.path.abspath('.'))
    model_man = kwargs.get('ModelManager', ModelManager(basedir=basedir))
    # NOTE(review): library_yaml is passed twice here — confirm against the
    # ModelManager.make_library signature.
    model_comp_dict = model_man.make_library(library_yaml, library_yaml,
                                             comp_yaml)
    return dict(model_comp_dict=model_comp_dict,
                ModelManager=model_man)
Build and return a ModelManager object and fill the associated model library
def reset(self):
    """Reset the attempt counter and the back-off delay/stop-time state."""
    self._attempts = 0
    self._cur_delay = self.delay
    self._cur_stoptime = None
Reset the attempt counter
def delete(self, wg_uuid, uuid):
    """Delete one thread member identified by (workgroup uuid, member uuid)."""
    url = "%(base)s/%(wg_uuid)s/members/%(uuid)s" % {
        'base': self.local_base_url,
        'wg_uuid': wg_uuid,
        'uuid': uuid
    }
    return self.core.delete(url)
Delete one thread member.
def __wrap_accepted_val(self, value):
    """Wrap an accepted value in a list if it is not already wrapped.

    Tuples are converted to lists; lists pass through unchanged.
    """
    if isinstance(value, tuple):
        return list(value)
    if isinstance(value, list):
        return value
    return [value]
Wrap accepted value in the list if yet not wrapped.
def _reconnect(self):
    """Reconnect to JLigier and re-subscribe to the tags."""
    log.debug("Reconnecting to JLigier...")
    self._disconnect()
    self._connect()
    self._update_subscriptions()
Reconnect to JLigier and subscribe to the tags.
async def init():
    """Create a connection to the Redis server and store it globally.

    Host/port/db come from SETTINGS['FLOW_EXECUTOR']['REDIS_CONNECTION'],
    defaulting to localhost:56379 db 1.
    """
    global redis_conn
    conn = await aioredis.create_connection(
        'redis://{}:{}'.format(
            SETTINGS.get('FLOW_EXECUTOR', {}).get('REDIS_CONNECTION', {}).get('host', 'localhost'),
            SETTINGS.get('FLOW_EXECUTOR', {}).get('REDIS_CONNECTION', {}).get('port', 56379)
        ),
        db=int(SETTINGS.get('FLOW_EXECUTOR', {}).get('REDIS_CONNECTION', {}).get('db', 1))
    )
    redis_conn = aioredis.Redis(conn)
Create a connection to the Redis server.
def write_json(data, filename, gzip_mode=False):
    """Write the python data structure as a JSON object to filename.

    gzip_mode: compress the output with gzip when True.
    """
    open_file = gzip.open if gzip_mode else open
    try:
        with open_file(filename, 'wt') as fh:
            json.dump(obj=data, fp=fh, sort_keys=True)
    except AttributeError:
        # Older gzip file objects lack the context-manager protocol; fall
        # back to a manual open, with the close guaranteed by finally
        # (fix: previously the handle leaked if json.dump raised).
        fh = open_file(filename, 'wt')
        try:
            json.dump(obj=data, fp=fh, sort_keys=True)
        finally:
            fh.close()
Write the python data structure as a json-Object to filename.
def cub200_iterator(data_path, batch_k, batch_size, data_shape):
    """Return (train, test) iterators for the CUB200-2011 dataset."""
    return (CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=True),
            CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=False))
Return training and testing iterator for the CUB200-2011 dataset.
def _search(mapping, filename):
    """Search a Loader data structure for a filename.

    An exact filename entry wins; otherwise the extension's list of
    (pattern, loader) pairs is scanned with fnmatch. Returns None when
    nothing matches.
    """
    direct = mapping.get(filename)
    if direct is not None:
        return direct
    _, ext = os.path.splitext(filename)
    candidates = mapping.get(ext)
    if candidates is not None:
        for pattern, loader in candidates:
            if fnmatch(filename, pattern):
                return loader
    return None
Search a Loader data structure for a filename.
def jacobian_singular(self):
    """Return True if the Jacobian is singular, else False.

    Singularity is detected by a non-empty nullspace of the CSE'd Jacobian.
    """
    cses, (jac_in_cses,) = self.be.cse(self.get_jac())
    return bool(jac_in_cses.nullspace())
Returns True if Jacobian is singular, else False.
def time_restarts(data_path):
    """Track restarts via a marker file's mtime and print the elapsed time
    since the previous restart."""
    path = os.path.join(data_path, 'last_restarted')
    if not os.path.isfile(path):
        # First run: create the marker file.
        with open(path, 'a'):
            os.utime(path, None)
    last_modified = os.stat(path).st_mtime
    # Touch again so the next call measures from now.
    with open(path, 'a'):
        os.utime(path, None)
    now = os.stat(path).st_mtime
    dif = round(now - last_modified, 2)
    last_restart = datetime.fromtimestamp(now).strftime('%H:%M:%S')
    result = 'LAST RESTART WAS {} SECONDS AGO at {}'.format(dif, last_restart)
    print(style(fg='green', bg='red', text=result))
When called will create a file and measure its mtime on restarts
def read(self, size=None):
    """Read `size` bytes (or everything when size is None), draining the
    internal pushback buffer before the underlying file."""
    if size is None:
        return self.buf.read() + self.open_file.read()
    data = self.buf.read(size)
    shortfall = size - len(data)
    if shortfall > 0:
        data += self.open_file.read(shortfall)
    return data
Read `size` of bytes.
def isdicom(fn):
    """True if `fn` points to a DICOM image (by extension or the 'DICM'
    magic number at offset 0x80)."""
    fn = str(fn)
    if fn.endswith('.dcm'):
        return True
    with open(fn, 'rb') as fh:
        fh.seek(0x80)
        return fh.read(4) == b'DICM'
True if the fn points to a DICOM image
def _cover2exprs(inputs, noutputs, cover):
    """Convert a two-level cover to a tuple of Expression instances.

    Input codes: 1 -> complemented literal, 2 -> true literal, anything
    else -> don't-care (literal omitted). One sum-of-products expression
    is built per output column.
    """
    fs = list()
    for i in range(noutputs):
        terms = list()
        for invec, outvec in cover:
            if outvec[i]:
                term = list()
                for j, v in enumerate(inputs):
                    if invec[j] == 1:
                        term.append(~v)
                    elif invec[j] == 2:
                        term.append(v)
                terms.append(term)
        fs.append(Or(*[And(*term) for term in terms]))
    return tuple(fs)
Convert a cover to a tuple of Expression instances.
def open(self):
    """Open the hardware manager and potentially connect to a device.

    Connects either directly via connection string or by connect id; the
    manager is closed again before re-raising if connecting fails.
    """
    self.hwman = HardwareManager(port=self._port)
    self.opened = True
    if self._connection_string is not None:
        try:
            self.hwman.connect_direct(self._connection_string)
        except HardwareError:
            self.hwman.close()
            raise
    elif self._connect_id is not None:
        try:
            self.hwman.connect(self._connect_id)
        except HardwareError:
            self.hwman.close()
            raise
Open and potentially connect to a device.
def search_associations_go(
        subject_category=None,
        object_category=None,
        relation=None,
        subject=None,
        **kwargs):
    """Perform an association search against the GO golr instance."""
    go_golr_url = "http://golr.geneontology.org/solr/"
    go_solr = pysolr.Solr(go_golr_url, timeout=5)
    # Identify ourselves to the remote service.
    go_solr.get_session().headers['User-Agent'] = get_user_agent(caller_name=__name__)
    return search_associations(subject_category,
                               object_category,
                               relation,
                               subject,
                               solr=go_solr,
                               field_mapping=goassoc_fieldmap(),
                               **kwargs)
Perform association search using Monarch golr
def _assign_zones(self):
    """Assign all RainCloudyFaucetZone objects (zones 1-4) managed by this
    faucet, skipping zones already present."""
    for zone_id in range(1, 5):
        zone = \
            RainCloudyFaucetZone(
                parent=self._parent,
                controller=self._controller,
                faucet=self,
                zone_id=zone_id)
        if zone not in self.zones:
            self.zones.append(zone)
Assign all RainCloudyFaucetZone managed by faucet.
def check_no_overlapping_paths(paths):
    """Ensure all paths are unique and none is contained in another.

    Raises ValueError on a duplicate or when one path occurs inside
    another (substring containment).
    """
    for path in paths:
        others = list(paths)
        others.remove(path)
        if path in others:
            raise ValueError('{} appeared more than once. All paths must be unique.'.format(path))
        for other in others:
            if path in other:
                raise ValueError('{} and {} have the same prefix. All paths must be unique and cannot overlap.'.format(path, other))
Given a list of paths, ensure that all are unique and do not have the same prefix.
def _context_names():
    """Yield names of provenance classes that are JSON-LD serializable."""
    import inspect
    from renku.models import provenance
    from renku.models._jsonld import JSONLDMixin
    for name in dir(provenance):
        cls = getattr(provenance, name)
        if inspect.isclass(cls) and issubclass(cls, JSONLDMixin):
            yield name
Return list of valid context names.
def replace_placeholders(path: Path, properties: Dict[str, str]):
    """Replace $-style placeholders in a file with values from the mapping.

    Unknown placeholders are left untouched (safe_substitute).
    """
    with open(path, encoding='utf8') as source:
        template = Template(source.read())
    rendered = template.safe_substitute(properties)
    with open(path, 'w', encoding='utf8') as target:
        target.write(rendered)
Replace placeholders in a file with the values from the mapping.
async def _make_url(self, url: Text, request: 'Request') -> Text:
    """Sign the URL when webview signing is enabled; otherwise return it
    unchanged."""
    if self.sign_webview:
        return await request.sign_url(url)
    return url
Signs the URL if needed
def append_args(self, arg):
    """Append the provided argument(s) to this program's argument list.

    Numbers are stringified, strings appended as-is, and lists extended
    element-wise (with Python 2 unicode handling).
    """
    debug.log("Adding Arguments: %s"%(arg))
    if isinstance(arg, (int,float)):
        self.args.append(str(arg))
    if isinstance(arg, str):
        self.args.append(arg)
    if isinstance(arg, list):
        if sys.version_info < (3, 0):
            # Python 2: encode unicode elements, stringify the rest.
            self.args.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in arg])
        else:
            self.args.extend([str(x) for x in arg])
This function appends the provided arguments to the program object.
def parse_author(self, value):
    """Attempt to split an author name into (last, first-initial) parts.

    Comma-separated names are preferred; otherwise whitespace splitting
    is used. Both parts are upper-cased and stripped of punctuation.
    """
    parts = tuple(t.upper().strip() for t in value.split(','))
    if len(parts) == 1:
        # No comma; fall back to whitespace splitting.
        parts = value.split(' ')
    if parts:
        if len(parts) > 1:
            aulast, auinit = parts[0], parts[1]
        else:
            aulast, auinit = parts[0], ''
    else:
        aulast, auinit = parts[0], ''
    aulast = _strip_punctuation(aulast).upper()
    auinit = _strip_punctuation(auinit).upper()
    return aulast, auinit
Attempts to split an author name into last and first parts.
def delete_suspect(self, suspect_id):
    """De-link a suspect from a case and persist the change."""
    suspect_obj = self.suspect(suspect_id)
    logger.debug("Deleting suspect {0}".format(suspect_obj.name))
    self.session.delete(suspect_obj)
    self.save()
De-link a suspect from a case.
def distinct(self):
    """Return sets of column indices, one set per distinct haplotype,
    ordered from most to least frequent."""
    groups = collections.defaultdict(set)
    for col in range(self.shape[1]):
        key = hash(self.values[:, col].tobytes())
        groups[key].add(col)
    return sorted(groups.values(), key=len, reverse=True)
Return sets of indices for each distinct haplotype.
def salt_run():
    """Execute a salt convenience routine (the `salt-run` entry point)."""
    import salt.cli.run
    # Drop the CWD from sys.path to avoid importing untrusted local modules.
    if '' in sys.path:
        sys.path.remove('')
    client = salt.cli.run.SaltRun()
    _install_signal_handlers(client)
    client.run()
Execute a salt convenience routine.
def match_regexp(self, value, q, strict=False):
    """Check whether `value` matches the regexp `q`.

    NOTE(review): on a match this returns None silently; on a miss it
    calls self.shout('%r not matching...', strict, value, q) — presumably
    shout's signature handles the strict flag; confirm.
    """
    value = stringify(value)
    mr = re.compile(q)
    if value is not None:
        if mr.match(value):
            return
    self.shout('%r not matching the regexp %r', strict, value, q)
Check whether value matches the regexp q.
def find_sink_variables(self):
    """Find sink variables in this scope: variables no operator consumes."""
    consumed = set()
    for op in self.operators.values():
        for inp in op.inputs:
            consumed.add(inp.onnx_name)
    return [variable for name, variable in self.variables.items()
            if name not in consumed]
Find sink variables in this scope
def connectionLost(self, reason):
    """If a login has happened, perform a logout on connection loss."""
    AMP.connectionLost(self, reason)
    if self.logout is not None:
        self.logout()
        # Drop references so they cannot be used after disconnect.
        self.boxReceiver = self.logout = None
If a login has happened, perform a logout.
def all_columns(self):
    """Return a sorted list of all column names referenced by any part."""
    names = set()
    for part in self._parts:
        names.update(value.column_name for value in part._parts)
    return sorted(names)
Return list of all columns.
def aws_to_unix_id(aws_key_id):
    """Deterministically convert an AWS Key ID into a UNIX UID (>= 2000).

    Uses the last two bytes of the key's SHA-256 digest, halved, offset
    by 2000.
    """
    uid_bytes = hashlib.sha256(aws_key_id.encode()).digest()[-2:]
    if USING_PYTHON2:
        return 2000 + int(from_bytes(uid_bytes) // 2)
    else:
        # NOTE(review): byteorder=sys.byteorder makes the UID depend on the
        # host's endianness — confirm cross-platform stability is not needed.
        return 2000 + (int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2)
Converts a AWS Key ID into a UID
def _breakRemNewlines(tag):
    """Non-recursively collapse runs of spaces and remove newlines in the
    tag's direct NavigableString children."""
    for i,c in enumerate(tag.contents):
        if type(c) != bs4.element.NavigableString:
            continue
        c.replace_with(re.sub(r' {2,}', ' ', c).replace('\n',''))
non-recursively break spaces and remove newlines in the tag
def start_group(self, scol, typ):
    """Start a new child group at start column `scol` with the given type."""
    return Group(parent=self, level=scol, typ=typ)
Start a new group
def atlasdb_cache_zonefile_info( con=None, path=None ):
    """Load and cache our zonefile inventory from the database.

    Updates the module-level ZONEFILE_INV and NUM_ZONEFILES under lock
    and returns the inventory.
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
    inv = None
    with ZONEFILE_INV_LOCK:
        inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
        inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
        ZONEFILE_INV = inv
        NUM_ZONEFILES = inv_len
    return inv
Load up and cache our zonefile inventory from the database
def new_driver(browser_name, *args, **kwargs):
    """Instantiate a new WebDriver instance for the given browser name.

    Firefox and PhantomJS are created locally; any other name becomes a
    Remote driver.
    """
    if browser_name == FIREFOX:
        return webdriver.Firefox(*args, **kwargs)
    elif browser_name == PHANTOMJS:
        # Bundled 64-bit phantomjs binary shipped next to this module.
        executable_path = os.path.join(os.path.dirname(__file__),
                                       'phantomjs/executable/phantomjs_64bit')
        driver = webdriver.PhantomJS(executable_path=executable_path, **kwargs)
        driver.set_window_size(1280, 800)
        return driver
    else:
        driver = webdriver.Remote(*args, **kwargs)
        return driver
Instantiates a new WebDriver instance, determining class by environment variables
def combine_data(self, command2):
    """Merge another command's data into this command; no-op for None."""
    if command2 is not None:
        self._data = self._merge(command2._data, self._data)
Combines the data for this command with another.
def _create_and_add_parameters(params):
    """Parse the configuration and attach Parameter instances to the
    current option (module-global parser state)."""
    global _current_parameter
    if _is_simple_type(params):
        _current_parameter = SimpleParameter(params)
        _current_option.add_parameter(_current_parameter)
    else:
        for i in params:
            if _is_simple_type(i):
                _current_parameter = SimpleParameter(i)
            else:
                # Typed parameters are filled in by the sub-parser.
                _current_parameter = TypedParameter()
                _parse_typed_parameter(i)
            _current_option.add_parameter(_current_parameter)
Parses the configuration and creates Parameter instances.
def infos_on_basis_set(self):
    """Return a human-readable summary of the basis set, formatted as in
    the Fiesta log."""
    o = []
    o.append("=========================================")
    o.append("Reading basis set:")
    o.append("")
    o.append(" Basis set for {} atom ".format(str(self.filename)))
    o.append(" Maximum angular momentum = {}".format(self.data['lmax']))
    o.append(" Number of atomics orbitals = {}".format(self.data['n_nlo']))
    o.append(" Number of nlm orbitals = {}".format(self.data['n_nlmo']))
    o.append("=========================================")
    # Fix: previously returned str(0) ("0"), discarding the report built
    # above; return the joined lines instead.
    return "\n".join(o)
Return information on the basis set, formatted as in the Fiesta log.
def authenticate(self, provider):
    """Start the OAuth authorization flow; redirects to the 3rd-party site."""
    callback_url = url_for(".callback", provider=provider, _external=True)
    provider = self.get_provider(provider)
    # Remember where to send the user after the callback completes.
    session['next'] = request.args.get('next') or ''
    return provider.authorize(callback_url)
Starts OAuth authorization flow, will redirect to 3rd party site.
def setup_groups(portal):
    """Setup roles and groups for BECHEM.

    Creates each group listed in GROUPS if missing; otherwise re-grants
    its configured roles.
    """
    logger.info("*** Setup Roles and Groups ***")
    portal_groups = api.get_tool("portal_groups")
    for gdata in GROUPS:
        group_id = gdata["id"]
        if group_id not in portal_groups.listGroupIds():
            logger.info("+++ Adding group {title} ({id})".format(**gdata))
            portal_groups.addGroup(group_id,
                                   title=gdata["title"],
                                   roles=gdata["roles"])
        else:
            ploneapi.group.grant_roles(
                groupname=gdata["id"],
                roles=gdata["roles"],)
            logger.info("+++ Granted group {title} ({id}) the roles {roles}"
                        .format(**gdata))
Setup roles and groups for BECHEM
def delete_custom_field(self, custom_field_key):
    """Deletes a custom field associated with this list.

    Returns the API response of the DELETE call (previously computed but
    discarded).
    """
    # Empty safe-string so '/' etc. in the key are percent-encoded too.
    custom_field_key = quote(custom_field_key, '')
    return self._delete("/lists/%s/customfields/%s.json" % (self.list_id, custom_field_key))
Deletes a custom field associated with this list.
def descriptions(self):
    """The description (third spec element) of each keyword in the
    rcParams dictionary; entries without one are omitted."""
    return {key: spec[2]
            for key, spec in self.defaultParams.items()
            if len(spec) >= 3}
The description of each keyword in the rcParams dictionary
def _refresh_authentication_token(self):
    """POST to ArcGIS requesting a new token.

    Raises GeocoderAuthenticationFailure after too many retries or when
    the response contains no token.
    """
    if self.retry == self._MAX_RETRIES:
        raise GeocoderAuthenticationFailure(
            'Too many retries for auth: %s' % self.retry
        )
    token_request_arguments = {
        'username': self.username,
        'password': self.password,
        'referer': self.referer,
        'expiration': self.token_lifetime,
        'f': 'json'
    }
    url = "?".join((self.auth_api, urlencode(token_request_arguments)))
    logger.debug(
        "%s._refresh_authentication_token: %s",
        self.__class__.__name__, url
    )
    # Record expiry before the call so a slow request errs early.
    self.token_expiry = int(time()) + self.token_lifetime
    response = self._base_call_geocoder(url)
    if 'token' not in response:
        raise GeocoderAuthenticationFailure(
            'Missing token in auth request.'
            'Request URL: %s; response JSON: %s' %
            (url, json.dumps(response))
        )
    self.retry = 0
    self.token = response['token']
POST to ArcGIS requesting a new token.
def _change_sel_color(self, event):
    """Respond to motion of the color selection cross: sync the RGB/HSV
    sliders, hex entry, alpha bar and preview with the picked color."""
    (r, g, b), (h, s, v), color = self.square.get()
    self.red.set(r)
    self.green.set(g)
    self.blue.set(b)
    self.saturation.set(s)
    self.value.set(v)
    self.hexa.delete(0, "end")
    self.hexa.insert(0, color.upper())
    if self.alpha_channel:
        self.alphabar.set_color((r, g, b))
        # Re-append the alpha byte to the hex representation.
        self.hexa.insert('end', ("%2.2x" % self.alpha.get()).upper())
    self._update_preview()
Respond to motion of the color selection cross.
def _tc_below(self):
    """The tc element immediately below this one in its grid column, or
    None when there is no row below."""
    row_below = self._tr_below
    if row_below is None:
        return None
    return row_below.tc_at_grid_col(self._grid_col)
The tc element immediately below this one in its grid column.
def _get_vs30star(self, vs30, imt):
    """Compute equations 8 and 9 at page 1034: cap vs30 at the
    period-dependent limit v1(T)."""
    if imt.name == "SA":
        period = imt.period
        if period <= 0.50:
            v1 = 1500.0
        elif period < 3.0:
            v1 = np.exp(-0.35 * np.log(period / 0.5) + np.log(1500.))
        else:
            v1 = 800.0
    else:
        # PGA (and any other IMT) uses the PGA cap.
        v1 = 1500.0
    vs30_star = np.ones_like(vs30) * vs30
    vs30_star[vs30 >= v1] = v1
    return vs30_star
This computes equations 8 and 9 at page 1034
def on_trial_remove(self, trial_runner, trial):
    """Marks trial as completed if it is paused and has previously ran."""
    if trial.status is Trial.PAUSED and trial in self._results:
        self._completed_trials.add(trial)
Marks trial as completed if it is paused and has previously ran.
def reload(self):
    """Reload (reset) this VirtualBox VM."""
    result = yield from self._control_vm("reset")
    log.info("VirtualBox VM '{name}' [{id}] reloaded".format(name=self.name,
                                                             id=self.id))
    log.debug("Reload result: {}".format(result))
Reloads this VirtualBox VM.
def check_order(self, order):
    """Validate that `order` is a subset of self.order and return it.

    Raises ValueError for any unknown item.
    """
    known = self.order
    for item in order:
        if item not in known:
            raise ValueError(f'Order item {item} not found.')
    return order
order must be a subset of self.order
def isInRoom(self, _id):
    """Check whether this handler is registered in the given room."""
    key = self._gcls() + _id
    # Fix: dict.has_key() was removed in Python 3; the 'in' operator is
    # equivalent and works on both Python 2 and 3.
    if key in SockJSRoomHandler._room:
        if self in SockJSRoomHandler._room[key]:
            return True
    return False
Check a given user is in given room
def _get_raw_data(self, is_valid_key, data_key):
    """Internal. Return the specified raw x/y/z reading from the IMU when
    the matching validity flag is set, else None."""
    result = None
    if self._read_imu():
        data = self._imu.getIMUData()
        if data[is_valid_key]:
            raw = data[data_key]
            result = {
                'x': raw[0],
                'y': raw[1],
                'z': raw[2]
            }
    return result
Internal. Returns the specified raw data from the IMU when valid
def n_faces(self):
    """The number of faces in the mesh, or None when no face data exists."""
    if self._faces is not None:
        return self._faces.shape[0]
    if self._vertices_indexed_by_faces is not None:
        return self._vertices_indexed_by_faces.shape[0]
    return None
The number of faces in the mesh
def install(self, to, chmod=0o644):
    """Copy this resource's data to `to` and set its mode to `chmod`.

    Fix: 0644 is a Python 2-only octal literal (a SyntaxError on
    Python 3); 0o644 is the same value and is accepted by 2.6+ and 3.
    """
    self.copy(to)
    path(to).chmod(chmod)
Copy data and set mode to 'chmod'.
def add_command(self, command):
    """Add a command to the front of the history (removing any earlier
    duplicate) and reset the history index."""
    if command in self._history:
        self._history.remove(command)
    self._index = -1
    self._history.insert(0, command)
Adds a command to the history and reset history index.