code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def _execute_workflow(cls, process_args): workflow, input_args, log_path = process_args if log_path: logger = logging.getLogger() logger.setLevel(logging.DEBUG) handler = cls._get_log_handler(log_path) logger.addHandler(handler) stats = {'start_time': dt.datetime.now()} try: _ = workflow.execute(input_args, monitor=True) except BaseException: stats['error'] = traceback.format_exc() stats['end_time'] = dt.datetime.now() if log_path: handler.close() logger.removeHandler(handler) return stats
Handles a single execution of a workflow
def _read_next_timestep(self, ts=None): if self.ts.frame >= self.n_frames-1: raise IOError(errno.EIO, 'trying to go over trajectory limit') if ts is None: ts = self.ts ts.frame += 1 self.zdock_inst._set_pose_num(ts.frame+1) ts._pos = self.zdock_inst.static_mobile_copy_uni.trajectory.ts._pos return ts
copy next frame into timestep
def kernelDriverActive(self, interface): result = libusb1.libusb_kernel_driver_active(self.__handle, interface) if result == 0: return False elif result == 1: return True raiseUSBError(result)
Tell whether a kernel driver is active on given interface number.
def clean_built(outdir): print("Removing the built files!") build_dir = os.path.join(cwd, outdir) if os.path.exists(build_dir): shutil.rmtree(build_dir)
Removes all built files
def int32(name, description, default=None): args = common_args(name, default) return Anno(description, typ=int, **args)
Add an int32 parameter to be passed when instantiating this YAML file
def _translate_stm(self, oprnd1, oprnd2, oprnd3): assert oprnd1.size and oprnd3.size assert oprnd3.size == self._address_size op1_var = self._translate_src_oprnd(oprnd1) op3_var = self._translate_src_oprnd(oprnd3) for i in range(0, oprnd1.size, 8): self._mem_curr[op3_var + i//8] = smtfunction.extract(op1_var, i, 8) self._mem_instance += 1 mem_old = self._mem_curr mem_new = self.make_array(self._address_size, "MEM_{}".format(self._mem_instance)) self._mem_curr = mem_new return [mem_new == mem_old]
Return a formula representation of a STM instruction.
def _get_analysis_type(analysis_types: List[str]) -> str: types_set = set(analysis_types) return types_set.pop() if len(types_set) == 1 else 'wgs'
Determine the overall analysis type.
def fromDict(cls, data):
    """Fill this object's attributes from a dict for known properties.

    Builds a new ``cls`` instance, stores the raw dict on ``obj.raw`` and
    copies over every key declared in the class's SIMPLE_PROPS,
    COMPLEX_PROPS or LIST_PROPS mappings; unknown keys are ignored.

    :param data: dict of property name -> raw value.
    :returns: the populated instance.
    """
    obj = cls()
    obj.raw = data
    # BUGFIX: .items() replaces the Python-2-only .iteritems() so the
    # method also works on Python 3; semantics are unchanged.
    for name, value in data.items():
        if cls.SIMPLE_PROPS and name in cls.SIMPLE_PROPS:
            setattr(obj, name, value)
        elif cls.COMPLEX_PROPS and name in cls.COMPLEX_PROPS:
            # nested object: delegate to the declared type's fromDict
            setattr(obj, name, cls.COMPLEX_PROPS[name].fromDict(value))
        elif cls.LIST_PROPS and name in cls.LIST_PROPS:
            setattr(obj, name,
                    [cls.LIST_PROPS[name].fromDict(item) for item in value])
    return obj
Fill this object's attributes from a dict for the class's known properties; unknown keys are ignored.
def _update_url_map(self): if HAS_WEBSOCKETS: self.url_map.update({ 'ws': WebsocketEndpoint, }) self.url_map.update({ self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook, }) self.url_map.update({ self.apiopts.get('app_path', 'app').lstrip('/'): App, })
Assemble any dynamic or configurable URLs
def vgadata(): if os.path.isfile('/sbin/lspci'): lspci = '/sbin/lspci' else: lspci = '/usr/bin/lspci' f = os.popen (lspci + ' -m') pdata = {} for line in f.readlines(): p = line.split("\"") name = p[1].strip() if (name == "VGA compatible controller"): pdata["Graphics"] = p[3] + " " + p[5] f.close() return pdata
Get data about the graphics card.
def scan(stream, Loader=Loader): loader = Loader(stream) try: while loader.check_token(): yield loader.get_token() finally: loader.dispose()
Scan a YAML stream and produce scanning tokens.
def unaccentuate(s):
    """Replace accentuated chars in a string by their non accentuated equivalent."""
    # NFKD decomposition splits each accented character into its base
    # character plus combining marks, which are then filtered out.
    decomposed = unicodedata.normalize("NFKD", s)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return "".join(kept)
Replace accentuated chars in string by their non accentuated equivalent.
def inc(self, *args, **kwargs):
    """Atomically increment ``qfield`` by ``value``.

    Accepts either one positional argument (increment that field by 1),
    two positional arguments (field and amount), or keyword arguments
    mapping field names to increment amounts.

    :raises UpdateException: when called with no usable arguments.
    """
    pairs = []
    if len(args) == 1:
        # single field: default increment of 1
        pairs.append((args[0], 1))
    elif len(args) == 2:
        pairs.append(args)
    elif kwargs:
        pairs.extend(kwargs.items())
    else:
        # BUGFIX: the message previously said "set" instead of "inc"
        raise UpdateException(
            'Invalid arguments for inc. Requires either two positional '
            'arguments or at least one keyword argument')
    ret = self
    for qfield, value in pairs:
        # BUGFIX: chain on ``ret`` (previously ``self``) so every pair is
        # applied when more than one field is incremented; for a single
        # pair this is identical since ``ret`` starts as ``self``.
        ret = ret._atomic_op('$inc', qfield, value)
    return ret
Atomically increment ``qfield`` by ``value``
def manage_itstat(self): itst = self.iteration_stats() self.itstat.append(itst) self.display_status(self.fmtstr, itst)
Compute, record, and display iteration statistics.
def count_entries(self, table=None): if table is None: table = self.main_table self.own_cursor.execute('SELECT COUNT(1) FROM "%s";' % table) return int(self.own_cursor.fetchone()[0])
Return the number of rows in a table.
def print_most_common(counter, number=5, tab=1):
    """Print the most common elements of a counter.

    :param counter: a collections.Counter (or anything with most_common()).
    :param number: how many of the top entries to print.
    :param tab: indentation depth (number of tab characters per line).
    """
    for key, count in counter.most_common(number):
        # BUGFIX: print() call instead of the Python-2-only print
        # statement, which is a SyntaxError on Python 3; output is
        # byte-identical.
        print("{0}{1} - {2}".format('\t' * tab, key, count))
print the most common elements of a counter
def write_report(self, force=False): path = self.title + '.html' value = self._template.format( title=self.title, body=self.body, sidebar=self.sidebar) write_file(path, value, force=force) plt.ion()
Writes the report to a file.
def compute_node_sizes(self): if type(self.node_size) is str: nodes = self.graph.nodes self.node_sizes = [nodes[n][self.node_size] for n in self.nodes] else: self.node_sizes = self.node_size
Compute the node sizes.
def parsed_function_to_ast(parsed: Parsed, parsed_key): sub = parsed[parsed_key] subtree = { "type": "Function", "span": sub["span"], "function": { "name": sub["name"], "name_span": sub["name_span"], "parens_span": sub.get("parens_span", []), }, } args = [] for arg in parsed[parsed_key].get("args", []): if arg["type"] == "Function": args.append(parsed_function_to_ast(parsed, arg["span"])) elif arg["type"] == "NSArg": args.append( { "arg": arg["arg"], "type": arg["type"], "span": arg["span"], "nsarg": { "ns": arg["ns"], "ns_val": arg["ns_val"], "ns_span": arg["ns_span"], "ns_val_span": arg["ns_val_span"], }, } ) elif arg["type"] == "StrArg": args.append({"arg": arg["arg"], "type": arg["type"], "span": arg["span"]}) subtree["args"] = copy.deepcopy(args) return subtree
Create AST for top-level functions
def add_headers(self, app): @app.after_request def after_request(response): for header, value in self.config.get('headers'): response.headers[header] = value return response
Inject headers after request
def _to_dict(self): return { 'auto_watering': getattr(self, "auto_watering"), 'droplet': getattr(self, "droplet"), 'is_watering': getattr(self, "is_watering"), 'name': getattr(self, "name"), 'next_cycle': getattr(self, "next_cycle"), 'rain_delay': getattr(self, "rain_delay"), 'watering_time': getattr(self, "watering_time"), }
Method to build zone dict.
def priority_color(self, p_priority): def _str_to_dict(p_string): pri_colors_dict = dict() for pri_color in p_string.split(','): pri, color = pri_color.split(':') pri_colors_dict[pri] = Color(color) return pri_colors_dict try: pri_colors_str = self.cp.get('colorscheme', 'priority_colors') if pri_colors_str == '': pri_colors_dict = _str_to_dict('A:-1,B:-1,C:-1') else: pri_colors_dict = _str_to_dict(pri_colors_str) except ValueError: pri_colors_dict = _str_to_dict(self.defaults['colorscheme']['priority_colors']) return pri_colors_dict[p_priority] if p_priority in pri_colors_dict else Color('NEUTRAL')
Returns a dict with priorities as keys and color numbers as value.
def from_dict(d: dict) -> 'SimpleCompiler': cmd = d['command'] cmd_with_instrumentation = d.get('command_with_instrumentation', None) time_limit = d['time-limit'] context = d['context'] cmd_clean = d.get('command_clean', 'exit 0') return SimpleCompiler(command=cmd, command_clean=cmd_clean, command_with_instrumentation=cmd_with_instrumentation, context=context, time_limit=time_limit)
Loads a SimpleCompiler from its dictionary-based description.
def _get_py_loglevel(lvl):
    """Map a log level name to the corresponding Python logging level.

    Falls back to 'INFO' for an empty/None name, and to logging.DEBUG
    for names not present in LOG_LEVEL_MAP.
    """
    name = lvl or 'INFO'
    return LOG_LEVEL_MAP.get(name.upper(), logging.DEBUG)
Map a log level name to the corresponding Python logging level.
def transform(self, m): if not len(m) == 6: raise ValueError("bad sequ. length") self.x0, self.y0, self.x1, self.y1 = TOOLS._transform_rect(self, m) return self
Replace rectangle with its transformation by matrix m.
def guess_payload_class(self, payload): t = (orb(payload[0]) & 0xf0) >> 4 if t == 0: return ISOTP_SF elif t == 1: return ISOTP_FF elif t == 2: return ISOTP_CF else: return ISOTP_FC
ISOTP encodes the frame type in the first nibble of a frame.
def _create_dll(self): self._dir = tempfile.mkdtemp() with open(path.join(self._dir, 'pyrtlsim.c'), 'w') as f: self._create_code(lambda s: f.write(s+'\n')) if platform.system() == 'Darwin': shared = '-dynamiclib' else: shared = '-shared' subprocess.check_call([ 'gcc', '-O0', '-march=native', '-std=c99', '-m64', shared, '-fPIC', path.join(self._dir, 'pyrtlsim.c'), '-o', path.join(self._dir, 'pyrtlsim.so'), ], shell=(platform.system() == 'Windows')) self._dll = ctypes.CDLL(path.join(self._dir, 'pyrtlsim.so')) self._crun = self._dll.sim_run_all self._crun.restype = None
Create a dynamically-linked library implementing the simulation logic.
def build_duration(self):
    """Return the difference between the build and build_done states."""
    started = int(self.state.build)
    finished = int(self.state.build_done)
    return finished - started
Return the difference between build and build_done states
def rename(self, name): self.ec2.create_tags(Resources = [self.instance_id], Tags = [{'Key': 'Name', 'Value': name}]) self.refresh_info()
Set the name of the machine.
def _find_file(self, needle, candidates): for candidate in candidates: fullpath = os.path.join(candidate, needle) if os.path.isfile(fullpath): return fullpath raise PathError("Unable to locate file %s; tried %s" % (needle, candidates))
Return the full path to the file in the first candidate directory that contains it.
def add_kde_setting (key, value, data): if key == "ProxyType": mode = None int_value = int(value) if int_value == 1: mode = "manual" elif int_value == 2: mode = "pac" elif int_value == 3: mode = "wpad" elif int_value == 4: mode = "indirect" data["mode"] = mode elif key == "Proxy Config Script": data["autoconfig_url"] = value elif key == "httpProxy": add_kde_proxy("http_proxy", value, data) elif key == "httpsProxy": add_kde_proxy("https_proxy", value, data) elif key == "ftpProxy": add_kde_proxy("ftp_proxy", value, data) elif key == "ReversedException": data["reversed_bypass"] = bool(value == "true" or int(value)) elif key == "NoProxyFor": data["ignore_hosts"] = split_hosts(value) elif key == "AuthMode": mode = int(value)
Add a KDE proxy setting value to data dictionary.
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs): return conv_block_internal(conv, inputs, filters, dilation_rates_and_kernel_sizes, **kwargs)
A block of standard 2d convolutions.
def _optimize(self, period, tmpid, use_gradient=True): theta_0 = [self.y.min(), self.y.max() - self.y.min(), 0] result = minimize(self._chi2, theta_0, jac=bool(use_gradient), bounds=[(None, None), (0, None), (None, None)], args=(period, tmpid, use_gradient)) return result.x
Optimize the model for the given period & template
async def login_user(self, password, **kwds): user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data'] try: passwordEntry = self.model.select().where( self.model.user == user_data[root_query()][0]['pk'] )[0] except (KeyError, IndexError) as e: raise RuntimeError('Could not find matching registered user') if passwordEntry and passwordEntry.password == password: user = user_data[root_query()][0] return { 'user': user, 'sessionToken': self._user_session_token(user) } raise RuntimeError("Incorrect credentials")
Validate the given user credentials against the database and return the matching user with a session token.
def check_topics(client, req_topics):
    """Check for existence of provided topics in Kafka.

    :param client: Kafka client; its topic metadata is refreshed first.
    :param req_topics: iterable of topic names that must exist and have
        at least one partition.
    :raises TopicNotFound: when a requested topic does not exist.
    :raises TopicNoPartition: when a topic exists but has no partitions.
    """
    client.update_cluster()
    logger.debug('Found topics: %r', client.topics.keys())
    for req_topic in req_topics:
        # membership test directly on the mapping; ``in topics.keys()``
        # built a key view (a full list on Python 2) for no benefit
        if req_topic not in client.topics:
            err_topic_not_found = 'Topic not found: {}'.format(req_topic)
            logger.warning(err_topic_not_found)
            raise TopicNotFound(err_topic_not_found)
        topic = client.topics[req_topic]
        if not topic.partitions:
            err_topic_no_part = 'Topic has no partitions: {}'.format(req_topic)
            logger.warning(err_topic_no_part)
            raise TopicNoPartition(err_topic_no_part)
        logger.info('Topic is ready: %s', req_topic)
Check for existence of provided topics in Kafka.
def _view(self, filepath, format): methodnames = [ '_view_%s_%s' % (format, backend.PLATFORM), '_view_%s' % backend.PLATFORM, ] for name in methodnames: view_method = getattr(self, name, None) if view_method is not None: break else: raise RuntimeError('%r has no built-in viewer support for %r ' 'on %r platform' % (self.__class__, format, backend.PLATFORM)) view_method(filepath)
Start the right viewer based on file format and platform.
def validate(self, value, model_instance): if not self.multiple: return super(CountryField, self).validate(value, model_instance) if not self.editable: return if value: choices = [option_key for option_key, option_value in self.choices] for single_value in value: if single_value not in choices: raise exceptions.ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": single_value}, ) if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages["blank"], code="blank")
Use custom validation for when using a multiple countries field.
def cast(self, value, custom_formatters=None, strict=True): if value is None: if not self.nullable: raise InvalidSchemaValue("Null value for non-nullable schema", value, self.type) return self.default cast_mapping = self.get_cast_mapping( custom_formatters=custom_formatters, strict=strict) if self.type is not SchemaType.STRING and value == '': return None cast_callable = cast_mapping[self.type] try: return cast_callable(value) except ValueError: raise InvalidSchemaValue( "Failed to cast value {value} to type {type}", value, self.type)
Cast value to schema type
async def async_load_parameters(self): zones = await self._async_get_data('zone') areas = await self._async_get_data('area') if not zones or not areas: return False for spc_area in areas: area = Area(self, spc_area) area_zones = [Zone(area, z) for z in zones if z['area'] == spc_area['id']] area.zones = area_zones self._areas[area.id] = area self._zones.update({z.id: z for z in area_zones}) return True
Fetch area and zone info from SPC to initialize.
def loads(astring): try: return pickle.loads(zlib.decompress(astring)) except zlib.error as e: raise SerializerError( 'Cannot decompress object ("{}")'.format(str(e)) ) except pickle.UnpicklingError as e: raise SerializerError( 'Cannot restore object ("{}")'.format(str(e)) )
Decompress and deserialize string into Python object via pickle.
def _normalize_data_types(self, strategy): for k, v in strategy.iteritems(): if not isinstance(v, str): continue if v == 'true': strategy[k] = True elif v == 'false' or v is None: strategy[k] = False else: try: if v.find('.') > 0: strategy[k] = float(v) else: strategy[k] = int(v) except ValueError: pass
Some contexts only retrieve strings; coerce the values back to their proper types.
def eval_ast(self, ast): new_ast = ast.replace_dict(self.replacements, leaf_operation=self._leaf_op) return backends.concrete.eval(new_ast, 1)[0]
Eval the ast, replacing symbols by their last value in the model.
def post(self, url, data): headers = { "Content-type": "application/x-www-form-urlencoded", "Accept": "text/json" } self.conn.request("POST", url, data, headers) return self._process_response()
Send a HTTP POST request to a URL and return the result.
def _is_interactive(self): return not ( self.realworld and (dt.date.today() > self.datetime.date()))
Prevent middlewares and orders to work outside live mode
def extract_deps(bundles, log=None): def _flatten(bundle): deps = [] if hasattr(bundle, 'npm'): deps.append(bundle.npm) for content in bundle.contents: if isinstance(content, BundleBase): deps.extend(_flatten(content)) return deps flatten_deps = [] for bundle in bundles: flatten_deps.extend(_flatten(bundle)) packages = defaultdict(list) for dep in flatten_deps: for pkg, version in dep.items(): packages[pkg].append(version) deps = {} for package, versions in packages.items(): deps[package] = semver.max_satisfying(versions, '*', True) if log and len(versions) > 1: log('Warn: {0} version {1} resolved to: {2}'.format( repr(package), versions, repr(deps[package]) )) return deps
Extract the dependencies from the bundle and its sub-bundles.
def parse_fields(cls, formatter):
    """Return the format fields used in ``formatter``.

    :param formatter: a ``str.format``-style template string, or None.
    :returns: [] when formatter is None, otherwise the set of field names.
    :raises SyntaxError: when the template cannot be parsed.
    """
    if formatter is None:
        return []
    try:
        # Formatter.parse raises ValueError on malformed templates; the
        # previous bare ``except`` also hid unrelated errors.
        parsed = list(string.Formatter().parse(formatter))
    except ValueError:
        raise SyntaxError("Could not parse formatter %r" % formatter)
    if not parsed:
        # BUGFIX: an empty template used to trip an IndexError inside the
        # old try block and was misreported as a parse failure.
        return set()
    return {field for field in list(zip(*parsed))[1] if field is not None}
Returns the format fields otherwise raise exception
def denormalize_volume(volume):
    """Convert volume metadata from archivant to the es format.

    :param volume: archivant volume dict with 'metadata' and 'attachments'.
    :returns: (volume_id, es_document) tuple; volume_id may be None.
    """
    # renamed from ``id`` so the builtin is no longer shadowed
    volume_id = volume.get('id', None)
    res = dict()
    res.update(volume['metadata'])
    res['_attachments'] = [
        Archivant.denormalize_attachment(attachment)
        for attachment in volume['attachments']
    ]
    return volume_id, res
convert volume metadata from archivant to es format
def count_rows_distinct(self, table, cols='*'): return self.fetch('SELECT COUNT(DISTINCT {0}) FROM {1}'.format(join_cols(cols), wrap(table)))
Get the number distinct of rows in a particular table.
def describe(self, element): if (element == 'tasks'): return self.tasks_df.describe() elif (element == 'task_runs'): return self.task_runs_df.describe() else: return "ERROR: %s not found" % element
Return tasks or task_runs Panda describe.
def initialize(): NST.running = True pg = Page("http://www.neopets.com/") curtime = pg.find("td", {'id': 'nst'}).text NST.curTime = datetime.datetime.strptime(curtime.replace(" NST", ""), "%I:%M:%S %p") + datetime.timedelta(0,2) NST.inst = NST() NST.daemon = True NST.inst.start()
Initializes the global NST instance with the current NST and begins tracking
def checkdiff(a,b,sp=True): 'take diff of a to b, apply to a, return the applied diff so external code can check it against b' if sp: a=splitpreserve(a); b=splitpreserve(b) res=applydiff(a,rediff(a,b)) if sp: res=''.join(res) return res
take diff of a to b, apply to a, return the applied diff so external code can check it against b
def load_contexts_and_renderers(events, mediums): sources = {event.source for event in events} rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style} default_rendering_style = get_default_rendering_style() if default_rendering_style: rendering_styles.add(default_rendering_style) context_renderers = ContextRenderer.objects.filter( Q(source__in=sources, rendering_style__in=rendering_styles) | Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related( 'source', 'rendering_style').prefetch_related('source_group__source_set') context_hints_per_source = get_context_hints_per_source(context_renderers) model_querysets = get_querysets_for_context_hints(context_hints_per_source) model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source) model_data = fetch_model_data(model_querysets, model_ids_to_fetch) load_fetched_objects_into_contexts(events, model_data, context_hints_per_source) load_renderers_into_events(events, mediums, context_renderers, default_rendering_style) return events
Given a list of events and mediums, load the context model data into the contexts of the events.
def within_n_sds(n, series):
    """Return true if all values in the sequence are within n SDs of the mean."""
    centered = series - series.mean()
    z_scores = centered / series.std()
    return (z_scores.abs() <= n).all()
Return true if all values in sequence are within n SDs
def validate_float(cls, value):
    """Validate that ``value`` is a number; note that int values are accepted.

    :raises TypeError: when value is neither int nor float.
    """
    if isinstance(value, (int, float)):
        return
    raise TypeError(
        "value must be a number, got %s" % type(value)
    )
Note that int values are accepted.
def save(self, incoming_stream, size_limit=None, size=None, chunk_size=None,
         progress_callback=None):
    """Save file in the file system.

    Streams ``incoming_stream`` into a freshly opened file. On any write
    failure the partially written file is closed and deleted before the
    exception is re-raised.

    :returns: (fileurl, bytes_written, checksum) tuple.
    """
    fp = self.open(mode='wb')
    try:
        bytes_written, checksum = self._write_stream(
            incoming_stream, fp, chunk_size=chunk_size,
            progress_callback=progress_callback,
            size_limit=size_limit, size=size)
    except Exception:
        # close before deleting so the partial file can be removed
        fp.close()
        self.delete()
        raise
    # BUGFIX: close exactly once on each path; the old ``finally`` block
    # re-closed the handle a second time after the except branch had
    # already closed it.
    fp.close()
    self._size = bytes_written
    return self.fileurl, bytes_written, checksum
Save file in the file system.
def parts(): parts = { 'Canon': [ _ for _ in range(1, 5) ], 'Apostle': [ 5 ], 'Paul': [ _ for _ in range(6, 19) ], 'General': [ _ for _ in range(19, 26) ], 'Apocalypse': [ 27 ] } return parts
Returns the dictionary with the part as key and the contained book as indices.
def _decimal_to_json(value): if isinstance(value, decimal.Decimal): value = str(value) return value
Coerce 'value' to a JSON-compatible representation.
def index(config): client = Client() client.prepare_connection() group_api = API(client) print(group_api.index())
Display group info in raw format.
def primary_key_attributes(self): if self.range_key is None: return (self.hash_key.name,) else: return (self.hash_key.name, self.range_key.name)
Get the names of the primary key attributes as a tuple
def pickColor( self ): color = QColorDialog.getColor( self.color(), self ) if ( color.isValid() ): self.setColor(color)
Prompts the user to select a color for this button.
def imagetransformer_sep_channels_8l_tpu(): hparams = imagetransformer_sep_channels_8l() update_hparams_for_tpu(hparams) hparams.batch_size = 4 hparams.num_heads = 4 hparams.shared_embedding_and_softmax_weights = False return hparams
Hparams for training imagetransformer on tpu.
def binary_size(self): return ( 1 + 2 + 1 + len(self.name.encode('utf-8')) + 1 + 1 + len(self.dimensions) + self.total_bytes + 1 + len(self.desc.encode('utf-8')) )
Return the number of bytes needed to store this parameter.
def responseInColor(request, status, headers, prefix='Response', opts=None):
    """Print the response info in color.

    2xx statuses print green, 3xx blue, everything else red.
    """
    code, message = status.split(None, 1)
    message = '%s [%s] => Request %s %s %s on pid %d' % (
        prefix, code, str(request.host), request.method, request.path,
        os.getpid()
    )
    # BUGFIX: floor division; on Python 3 ``int(code) / 100`` yields a
    # float (e.g. 2.04) that never equals 2 or 3, so every response was
    # printed red.
    signal = int(code) // 100
    if signal == 2:
        chalk.green(message, opts=opts)
    elif signal == 3:
        chalk.blue(message, opts=opts)
    else:
        chalk.red(message, opts=opts)
Prints the response info in color
def coerce(cls, key, value): self = MutationList((MutationObj.coerce(key, v) for v in value)) self._key = key return self
Convert plain list to MutationList
def mcast_sender(mcgroup=MC_GROUP): sock = socket(AF_INET, SOCK_DGRAM) sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) if _is_broadcast_group(mcgroup): group = '<broadcast>' sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1) elif((int(mcgroup.split(".")[0]) > 239) or (int(mcgroup.split(".")[0]) < 224)): raise IOError("Invalid multicast address.") else: group = mcgroup ttl = struct.pack('b', TTL_LOCALNET) sock.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, ttl) return sock, group
Non-object interface for sending multicast messages.
def get(orcid_id): resp = requests.get(ORCID_PUBLIC_BASE_URL + unicode(orcid_id), headers=BASE_HEADERS) json_body = resp.json() return Author(json_body)
Get an author based on an ORCID identifier.
def get(): LOG.debug('GET list of SBIs.') _url = get_root_url() response = dict(scheduling_blocks=[], links=dict(home='{}'.format(_url))) block_ids = DB.get_sched_block_instance_ids() for block in DB.get_block_details(block_ids): block_id = block['id'] LOG.debug('Adding SBI %s to list', block_id) LOG.debug(block) block['num_processing_blocks'] = len(block['processing_block_ids']) temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2 block['status'] = choice(temp) try: del block['processing_block_ids'] except KeyError: pass block['links'] = { 'detail': '{}/scheduling-block/{}' .format(_url, block_id) } response['scheduling_blocks'].append(block) return response, HTTPStatus.OK
Return list of Scheduling Blocks Instances known to SDP .
def scatter(slope, zero, x1, x2, x1err=[], x2err=[]): n = len(x1) x2pred = zero + slope * x1 s = sum((x2 - x2pred) ** 2) / (n - 1) if len(x2err) == n: s_obs = sum((x2err / x2) ** 2) / n s0 = s - s_obs print numpy.sqrt(s), numpy.sqrt(s_obs), numpy.sqrt(s0) return numpy.sqrt(s0)
Used mainly to measure scatter for the BCES best-fit
def left_to_right(self): self._entry_mode |= Command.MODE_INCREMENT self.command(self._entry_mode)
This is for text that flows Left to Right
def clean(self, value): if not value: raise ValidationError( 'Error found in Form Field: Nothing to validate') data = dict((bf.name, value[i]) for i, bf in enumerate(self.form)) self.form = form = self.form.__class__(data) if not form.is_valid(): error_dict = list(form.errors.items()) raise ValidationError([ ValidationError(mark_safe('{} {}'.format( k.title(), v)), code=k) for k, v in error_dict]) return super(FormField, self).clean(value)
Call the form is_valid to ensure every value supplied is valid
def validate_description(xml_data): try: root = ET.fromstring('<document>' + xml_data + '</document>') except StdlibParseError as e: raise ParseError(str(e)) return _parse_desc(root)
Validate the description for validity
def to_rst(cls) -> str: sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n' blank_line = '|' + 78 * ' ' + '|\n' table = '' for group in cls.groups: table += sep_line table += blank_line table += '|' + '**{}**'.format(group.name).center(78) + '|\n' table += blank_line for error in group.errors: table += sep_line table += ('|' + error.code.center(6) + '| ' + error.short_desc.ljust(70) + '|\n') table += sep_line return table
Output the registry as reStructuredText, for documentation.
def _build_strain_specific_model(self, strain_id, ref_functional_genes, orth_matrix, force_rerun=False): gp_noseqs_path = op.join(self.model_dir, '{}_gp.pckl'.format(strain_id)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=gp_noseqs_path): logging.disable(logging.WARNING) strain_gp = GEMPRO(gem_name=strain_id) strain_genes = [x for x in ref_functional_genes] strain_gp.add_gene_ids(strain_genes) logging.disable(logging.NOTSET) genes_to_remove = orth_matrix[pd.isnull(orth_matrix[strain_id])][strain_id].index.tolist() strain_genes = [x.id for x in strain_gp.genes] genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes))) if len(genes_to_remove) > 0: for g in genes_to_remove: strain_gp.genes.get_by_id(g).functional = False strain_gp.save_pickle(outfile=gp_noseqs_path) return strain_id, gp_noseqs_path
Create strain GEMPRO, set functional genes
def location(self): try: return self.data.get('identity').get('location') except (KeyError, AttributeError): return self.device_status_simple('')
Return the location of the printer.
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
    """Extend a given dict for API production, in place.

    Casts the values of ``int_keys`` to int, converts ``date_keys`` to
    ISO-8601 strings, and strips every None-valued entry.

    :param bool_keys: accepted for interface compatibility; currently
        unused, as in the original implementation.
    :returns: the same dict, normalized.
    """
    if int_keys:
        for in_key in int_keys:
            if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
                in_dict[in_key] = int(in_dict[in_key])
    if date_keys:
        for in_key in date_keys:
            if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
                _from = in_dict[in_key]
                # BUGFIX: ``str`` replaces the Python-2-only
                # ``basestring``, which raised NameError on Python 3
                if isinstance(_from, str):
                    dtime = parse_datetime(_from)
                elif isinstance(_from, datetime):
                    dtime = _from
                in_dict[in_key] = dtime.isoformat()
            elif (in_key in in_dict) and in_dict.get(in_key, None) is None:
                del in_dict[in_key]
    # BUGFIX: iterate over a snapshot -- deleting from a dict while
    # iterating its live view raises RuntimeError on Python 3
    for k, v in list(in_dict.items()):
        if v is None:
            del in_dict[k]
    return in_dict
Extends a given object for API Production.
def draw_graph( g, fmt='svg', prg='dot', options={} ): buf = StringIO() rdf2dot( g, buf, options ) gv_options = options.get('graphviz',[]) if fmt == 'png': gv_options += [ '-Gdpi=220', '-Gsize=25,10!' ] metadata = { "width": 5500, "height": 2200, "unconfined" : True } image = run_dot( buf.getvalue(), fmt=fmt, options=gv_options, prg=prg ) if fmt == 'png': return { 'image/png' : base64.b64encode(image).decode('ascii') }, \ { "image/png" : metadata } elif fmt == 'svg': return { 'image/svg+xml' : image.decode('utf-8').replace('<svg','<svg class="unconfined"',1) }, \ { "unconfined" : True }
Draw an RDF graph as an image
def build_props(self): props = {} if self.filters: props["filters"] = {} for grp in self.filters: props["filters"][grp] = [f.params for f in self.filters[grp]] if self.charts: props["charts"] = [c.params for c in self.charts] props["type"] = self.layout return props
Build the props dictionary.
def rmarkdown_draft(filename, template, package): if file_exists(filename): return filename draft_template = Template( 'rmarkdown::draft("$filename", template="$template", package="$package", edit=FALSE)' ) draft_string = draft_template.substitute( filename=filename, template=template, package=package) report_dir = os.path.dirname(filename) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", "-e", draft_string], "Creating bcbioRNASeq quality control template.") do.run(["sed", "-i", "s/YYYY-MM-DD\///g", filename], "Editing bcbioRNAseq quality control template.") return filename
create a draft rmarkdown file from an installed template
def create_data_types(self): return { 'Binary': self.create_binary, 'BinaryArray': self.create_binary_array, 'KeyValue': self.create_key_value, 'KeyValueArray': self.create_key_value_array, 'String': self.create_string, 'StringArray': self.create_string_array, 'TCEntity': self.create_tc_entity, 'TCEntityArray': self.create_tc_entity_array, }
Map of standard playbook variable types to create method.
def niterations(self):
    """The current number of iterations, including those before the last clear."""
    count = self._itercounter if self._itercounter is not None else 0
    cleared = self._lastclear if self._lastclear is not None else 0
    return count + cleared
The current number of iterations.
def refresh_data(self): j = self.data_request({'id': 'sdata'}).json() self.temperature_units = j.get('temperature', 'C') self.model = j.get('model') self.version = j.get('version') self.serial_number = j.get('serial_number') categories = {} cats = j.get('categories') for cat in cats: categories[cat.get('id')] = cat.get('name') device_id_map = {} devs = j.get('devices') for dev in devs: dev['categoryName'] = categories.get(dev.get('category')) device_id_map[dev.get('id')] = dev return device_id_map
Refresh data from Vera device.
def multi_keyword(tokens, keyword_parts): tokens = iter(tokens) matched_tokens = [] limit = len(keyword_parts) for idx in six.moves.range(limit): try: token = next(tokens) except StopIteration: return if (not token or token.name != "symbol" or token.value.lower() != keyword_parts[idx]): return matched_tokens.append(token) return TokenMatch(None, token.value, matched_tokens)
Match a case-insensitive keyword consisting of multiple tokens.
def _generate_corpus_table(self, labels, ngrams): html = [] for label in labels: html.append(self._render_corpus_row(label, ngrams)) return '\n'.join(html)
Returns an HTML table containing data on each corpus' n-grams.
def cancel(self) : "tells libdbus you no longer care about the pending incoming message." dbus.dbus_pending_call_cancel(self._dbobj) if self._awaiting != None : self._awaiting.cancel()
tells libdbus you no longer care about the pending incoming message.
def _validation_error(prop, prop_type, prop_value, expected): if prop_type is None: attrib = 'value' assigned = prop_value else: attrib = 'type' assigned = prop_type raise ValidationError( 'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}', attrib=attrib, prop=prop, assigned=assigned, expected=expected, invalid={prop: prop_value} if attrib == 'value' else {} )
Default validation for updated properties
def _dM(self, k, t, param, M, tips=None, gaps=None): if self._distributionmodel: return self.model.dM(k, t, param, M, tips, gaps) else: return self.model.dM(t, param, M, tips, gaps)
Returns derivative of matrix exponential.
def parse_band_log(self, message): if "payload" in message and hasattr(message["payload"], "name"): record = message["payload"] for k in dir(record): if k.startswith("workflows_exc_"): setattr(record, k[14:], getattr(record, k)) delattr(record, k) for k, v in self.get_status().items(): setattr(record, "workflows_" + k, v) logging.getLogger(record.name).handle(record) else: self.log.warning( "Received broken record on log band\n" + "Message: %s\nRecord: %s", str(message), str( hasattr(message.get("payload"), "__dict__") and message["payload"].__dict__ ), )
Process incoming logging messages from the service.
def sadd(self, key, member, *members): return self.execute(b'SADD', key, member, *members)
Add one or more members to a set.
def fasta_stats(self): for sample in self.metadata: contig_lengths = list() fasta_sequence = str() for contig, record in sample[self.analysistype].record_dict.items(): contig_lengths.append(len(record.seq)) fasta_sequence += record.seq sample[self.analysistype].contig_lengths = sorted(contig_lengths, reverse=True) try: sample[self.analysistype].gc = float('{:0.2f}'.format(GC(fasta_sequence))) except TypeError: sample[self.analysistype].gc = 'NA'
Parse the lengths of all contigs for each sample, as well as the total GC%
def to_topojson(self): topojson = self.topojson topojson["objects"]["points"] = { "type": "GeometryCollection", "geometries": [point.to_topojson() for point in self.points.all()], } return json.dumps(topojson)
Adds points and converts to topojson string.
def prt_data(self, name, vals, prt=sys.stdout): fld2val = self.get_fld2val(name, vals) prt.write(self.fmt.format(**fld2val)) return fld2val
Print stats data in markdown style.
def logger(self): if self._experiment: return logging.getLogger('.'.join([self.name, self.experiment])) elif self._projectname: return logging.getLogger('.'.join([self.name, self.projectname])) else: return logging.getLogger('.'.join([self.name]))
The logger of this organizer
def close_pages_for_specific_sm_id(self, sm_id): states_to_be_closed = [] for state_identifier in self.tabs: state_m = self.tabs[state_identifier]["state_m"] if state_m.state.get_state_machine().state_machine_id == sm_id: states_to_be_closed.append(state_identifier) for state_identifier in states_to_be_closed: self.close_page(state_identifier, delete=False)
Closes all tabs of the states editor for a specific sm_id
def hold_sync(self): if self._holding_sync is True: yield else: try: self._holding_sync = True yield finally: self._holding_sync = False self.send_state(self._states_to_send) self._states_to_send.clear()
Hold syncing any state until the outermost context manager exits
def build_unit_name(dimensions): name = '' for unit in dimensions: if unit['power'] < 0: name += 'per ' power = abs(unit['power']) if power == 1: name += unit['base'] elif power == 2: name += 'square ' + unit['base'] elif power == 3: name += 'cubic ' + unit['base'] elif power > 3: name += unit['base'] + ' to the %g' % power name += ' ' name = name.strip() logging.debug(u'\tUnit inferred name: %s', name) return name
Build the name of the unit from its dimensions.
def compute_intensity(self, _cost_func): self.evaluate(_cost_func) self.intensity = 1 / self.time
Evaluate cost function and compute intensity at this position
async def update_websub(self, config, hub): try: LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url) request = await utils.retry_post( config, hub, data={ 'hub.mode': 'publish', 'hub.url': self.url }) if request.success: LOGGER.info("%s: WebSub notification sent to %s", self.url, hub) else: LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub, request.status, request.text) except Exception as err: LOGGER.warning("WebSub %s: got %s: %s", hub, err.__class__.__name__, err)
Update WebSub hub to know about this feed
def clean(self, value): if isinstance(value, FileStorage): return self.storage.save(value) return value
Takes a Werkzeug FileStorage, returns the relative path.
def remove_on_change(self, attr, *callbacks): if len(callbacks) == 0: raise ValueError("remove_on_change takes an attribute name and one or more callbacks, got only one parameter") _callbacks = self._callbacks.setdefault(attr, []) for callback in callbacks: _callbacks.remove(callback)
Remove a callback from this object
def _get_nsymop(self): if self.centrosymmetric: return 2 * len(self._rotations) * len(self._subtrans) else: return len(self._rotations) * len(self._subtrans)
Returns total number of symmetry operations.