code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def render(self, context):
    """Render the node's children with an extra context layer.

    The tag argument must resolve to a dict; it is pushed onto the
    context only for the duration of rendering.
    """
    layer = self.context_expr.resolve(context)
    if not isinstance(layer, dict):
        raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.")
    with context.push(**layer):
        rendered = self.nodelist.render(context)
    return rendered
Render the tag, with extra context layer.
def word_diff(a, b):
    """Do a diff on words but return character offsets.

    Splits both strings into words (preserving separators), diffs the
    word sequences, then translates the diff back to character offsets.
    """
    return translate_diff(a, rediff(splitpreserve(a), splitpreserve(b)))
do diff on words but return character offsets
def async_refresh_state(self):
    """Request each state to provide a status update.

    Sends an extended status request (0x2E/0x00, cmd2=0x02) to the device,
    then asks the temperature state to refresh itself.
    """
    _LOGGER.debug('Setting up extended status')
    ext_status = ExtendedSend(address=self._address,
                              commandtuple=COMMAND_EXTENDED_GET_SET_0X2E_0X00,
                              cmd2=0x02,
                              userdata=Userdata())
    ext_status.set_crc()
    _LOGGER.debug('Sending ext status: %s', ext_status)
    self._send_msg(ext_status)
    _LOGGER.debug('Sending temp status request')
    self.temperature.async_refresh_state()
Request each state to provide status update.
def default_image_loader(filename, flags, **kwargs):
    """Return a loader callable that simply echoes back *filename* together
    with the rect and flags passed at load time (no decoding is done).

    NOTE: the returned callable's own ``flags`` argument deliberately
    shadows the outer parameter; each load call supplies its own flags.
    """
    return lambda rect=None, flags=None: (filename, rect, flags)
This default image loader just returns filename, rect, and any flags
def _generate_result(self, callname, result):
    """Generate a results object for delivery to the context object.

    Deserializes *result* through the API's result schema (tagged with
    the call name) and hands it to the registered callback.
    """
    schema = self.api.result_schema()
    schema.context['callname'] = callname
    self.callback(schema.load(result), self.context)
Generate a results object for delivery to the context object
def status(self) -> str:
    """Return the workflow stage status, re-reading the config first."""
    config = self._load_config()
    # cache the freshly loaded config, as before
    self._config = config
    return config.get('status')
Return the workflow stage status.
def run(data):
    """HLA typing with bwakit, parsing output from called genotype files.

    Runs bwakit's run-HLA over the HLA fastqs attached to *data*, organizes
    the calls and records the result file under data["hla"].
    """
    bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
    hla_fqs = tz.get_in(["hla", "fastq"], data, [])
    if len(hla_fqs) > 0:
        hla_base = os.path.commonprefix(hla_fqs)
        # trim trailing dots left over from the common prefix
        while hla_base.endswith("."):
            hla_base = hla_base[:-1]
        out_file = hla_base + ".top"
        if not utils.file_exists(out_file):
            cmd = "{bwakit_dir}/run-HLA {hla_base}"
            do.run(cmd.format(**locals()), "HLA typing with bwakit")
        out_file = _organize_calls(out_file, hla_base, data)
        data["hla"].update({"call_file": out_file,
                            "hlacaller": "bwakit"})
    return data
HLA typing with bwakit, parsing output from called genotype files.
async def convert_endpoint(url_string, ts, is_just_checking):
    """Main logic for the HTTP conversion endpoint.

    Serves the cached converted resource when present; otherwise enqueues
    download and conversion and returns a placeholder stream (or, when
    *is_just_checking*, a plain availability response).
    """
    response = singletons.server.response
    singletons.settings  # NOTE(review): bare attribute access, looks like a no-op — confirm intent
    foreign_res = ForeignResource(url_string)
    target_ts = TypeString(ts)
    target_resource = TypedResource(url_string, target_ts)
    # Cache hit: serve the already-converted file directly.
    if target_resource.cache_exists():
        if is_just_checking:
            return _just_checking_response(True, target_resource)
        return await response.file(target_resource.cache_path, headers={
            'Content-Type': target_ts.mimetype,
        })
    # Cache miss: ensure the source is downloaded, then queue conversion.
    if not foreign_res.cache_exists():
        singletons.workers.enqueue_download(foreign_res)
    singletons.workers.enqueue_sync(
        enqueue_conversion_path,
        url_string,
        str(target_ts),
        singletons.workers.enqueue_convert
    )
    if is_just_checking:
        return _just_checking_response(False, target_resource)
    return singletons.placeholders.stream_response(target_ts, response)
Main logic for HTTP endpoint.
def preserve_shape(func):
    """Decorator: reshape *func*'s result back to the input image's shape."""
    @wraps(func)
    def wrapped_function(img, *args, **kwargs):
        original_shape = img.shape
        return func(img, *args, **kwargs).reshape(original_shape)
    return wrapped_function
Preserve shape of the image.
def move_items(self, from_group, to_group):
    """Take all elements from *from_group* and append them to *to_group*.

    No-op when the source group is unknown or empty; the source group is
    deleted afterwards.
    """
    # NOTE(review): membership is tested via self.keys() but storage is
    # self.groups — assumes the two stay in sync; verify.
    if from_group not in self.keys() or len(self.groups[from_group]) == 0:
        return
    self.groups.setdefault(to_group, list()).extend(self.groups.get(from_group, list()))
    if from_group in self.groups:
        del self.groups[from_group]
Take all elements from the from_group and add it to the to_group.
def keys(self):
    """Create an ordered dict of the names and values of key fields.

    Key descriptors are discovered on the class, sorted by their declared
    ``order``, and the current instance value is read for each.
    """
    keys = OrderedDict()
    def order_key(_):
        (k, v) = _
        cache_key = getattr(type(self), k)
        return cache_key.order
    items = [(k, getattr(type(self), k)) for k in dir(type(self))]
    # keep only Key descriptors
    items = [(k, v) for (k, v) in items if isinstance(v, Key)]
    for k, v in sorted(items, key=order_key):
        keys[k] = getattr(self, k)
    return keys
Create an ordered dict of the names and values of key fields.
def _upload_folder_in_background(self, folder_path, container, ignore, upload_key, ttl=None):
    """Run the folder upload in a background FolderUploader thread."""
    uploader = FolderUploader(folder_path, container, ignore, upload_key, self, ttl=ttl)
    uploader.start()
Runs the folder upload in the background.
def add_module(self, module):
    """Add configuration parameters from a Python module.

    Copies every public attribute (name not starting with '__') of
    *module* onto this object.
    """
    for key, value in module.__dict__.items():
        if key[0:2] != '__':
            # The original called self.__setattr__(attr=key, value=value);
            # object.__setattr__ takes positional (name, value), so the
            # keyword form raises TypeError.  Also `.iteritems()` is
            # Python-2-only; `.items()` works on both.
            setattr(self, key, value)
Adds configuration parameters from a Python module.
def snapshot_folder():
    """Use the HEAD commit date, normalized to UTC, as a folder name
    (YYYYmmdd_HHMMSS).  Exits the process with status 2 when git fails."""
    logger.info("Snapshot folder")
    try:
        stdout = subprocess.check_output(["git", "show", "-s", "--format=%cI", "HEAD"])
    except subprocess.CalledProcessError as e:
        logger.error("Error: {}".format(e.output.decode('ascii', 'ignore').strip()))
        sys.exit(2)
    except FileNotFoundError as e:
        logger.error("Error: {}".format(e))
        sys.exit(2)
    ds = stdout.decode('ascii', 'ignore').strip()
    dt = datetime.fromisoformat(ds)
    # normalize the offset-aware commit timestamp to UTC
    utc = dt - dt.utcoffset()
    return utc.strftime("%Y%m%d_%H%M%S")
Use the commit date in UTC as folder name
def create_ini(self, board, project_dir='', sayyes=False):
    """Create a new apio project file for *board* in *project_dir*.

    Validates the board name, then writes the ini file, prompting before
    overwriting an existing file unless *sayyes* is set.
    """
    project_dir = util.check_dir(project_dir)
    ini_path = util.safe_join(project_dir, PROJECT_FILENAME)
    boards = Resources().boards
    if board not in boards.keys():
        click.secho(
            'Error: no such board \'{}\''.format(board),
            fg='red')
        sys.exit(1)
    if isfile(ini_path):
        if sayyes:
            self._create_ini_file(board, ini_path, PROJECT_FILENAME)
        else:
            click.secho(
                'Warning: {} file already exists'.format(PROJECT_FILENAME),
                fg='yellow')
            if click.confirm('Do you want to replace it?'):
                self._create_ini_file(board, ini_path, PROJECT_FILENAME)
            else:
                click.secho('Abort!', fg='red')
    else:
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
Creates a new apio project file
def multi_mask_sequences(records, slices):
    """Replace characters selected by *slices* with gap ('-') characters.

    For each record, every index covered by any slice is masked; the rest
    of the sequence is preserved.  Yields the mutated records.
    """
    for record in records:
        record_indices = list(range(len(record)))
        # indices NOT covered by any slice survive the masking
        keep_indices = reduce(lambda i, s: i - frozenset(record_indices[s]),
                              slices, frozenset(record_indices))
        seq = ''.join(b if i in keep_indices else '-'
                      for i, b in enumerate(str(record.seq)))
        record.seq = Seq(seq)
        yield record
Replace characters sliced by slices with gap characters.
def _load_sequences_to_strain(self, strain_id, force_rerun=False):
    """Load a strain GEMPRO with functional genes defined, attach sequences
    from the strain genome, and save it as a new GEMPRO pickle.

    Returns (strain_id, path_to_pickle_with_sequences).
    """
    gp_seqs_path = op.join(self.model_dir, '{}_gp_withseqs.pckl'.format(strain_id))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=gp_seqs_path):
        gp_noseqs = ssbio.io.load_pickle(self.strain_infodict[strain_id]['gp_noseqs_path'])
        strain_sequences = SeqIO.index(self.strain_infodict[strain_id]['genome_path'], 'fasta')
        for strain_gene in gp_noseqs.functional_genes:
            # map the reference gene id to this strain's gene via the orthology matrix
            strain_gene_key = self.df_orthology_matrix.at[strain_gene.id, strain_id]
            new_id = '{}_{}'.format(strain_gene.id, strain_id)
            if strain_gene.protein.sequences.has_id(new_id):
                continue
            strain_gene.protein.load_manual_sequence(seq=strain_sequences[strain_gene_key],
                                                     ident=new_id,
                                                     set_as_representative=True)
        gp_noseqs.save_pickle(outfile=gp_seqs_path)
    return strain_id, gp_seqs_path
Load strain GEMPRO with functional genes defined, load sequences to it, save as new GEMPRO
def update(self, buffer, length):
    """Add *buffer* (of *length* bytes) into the digest calculation."""
    return lib.zdigest_update(self._as_parameter_, buffer, length)
Add buffer into digest calculation
def draw(self, clear=True):
    """Draw each mesh in the scene from the camera's perspective, lit by
    the scene's light, optionally clearing first.

    Meshes without a draw() method are silently skipped.
    """
    if clear:
        self.clear()
    with self.gl_states, self.camera, self.light:
        for mesh in self.meshes:
            try:
                mesh.draw()
            except AttributeError:
                pass
Draw each visible mesh in the scene from the perspective of the scene's camera and lit by its light.
def construct_user_list(raw_users=None):
    """Construct a Users collection of User objects from a list of dicts.

    Each dict may carry name/passwd/uid/gid/home_dir/gecos/shell,
    base64-encoded public keys and a sudoers entry.  A missing or None
    *raw_users* yields an empty collection (the original iterated None
    and raised TypeError).
    """
    users = Users(oktypes=User)
    if raw_users is None:
        raw_users = []
    for user_dict in raw_users:
        public_keys = None
        if user_dict.get('public_keys'):
            public_keys = [PublicKey(b64encoded=x, raw=None)
                           for x in user_dict.get('public_keys')]
        users.append(User(name=user_dict.get('name'),
                          passwd=user_dict.get('passwd'),
                          uid=user_dict.get('uid'),
                          gid=user_dict.get('gid'),
                          home_dir=user_dict.get('home_dir'),
                          gecos=user_dict.get('gecos'),
                          shell=user_dict.get('shell'),
                          public_keys=public_keys,
                          sudoers_entry=user_dict.get('sudoers_entry')))
    return users
Construct a list of User objects from a list of dicts.
def _read_block(blocksize, stream): blockdata = stream.read(blocksize) return int.from_bytes(blockdata, 'big')
Read block data from network into integer type
def delete_image(self, glance, image):
    """Delete the specified glance image.

    Deprecated: thin wrapper that forwards to delete_resource().
    """
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'delete_resource instead of delete_image.')
    self.log.debug('Deleting glance image ({})...'.format(image))
    return self.delete_resource(glance.images, image, msg='glance image')
Delete the specified image.
def _check_checksum(msg): checksum = int(msg[-2:], 16) for char in msg[:-2]: checksum += ord(char) if (checksum % 256) != 0: raise ValueError("Elk message checksum invalid")
Ensure checksum in message is good.
def parent(self):
    """Get the parent category, or None for a root (empty path)."""
    if self.path:
        return Category(os.path.dirname(self.path))
    return None
Get the parent category
def _inner_default(x1, x2):
    """Default Euclidean inner product implementation.

    Uses tensordot for large real inputs, plain dot for small real ones,
    and conjugating vdot for complex dtypes.
    """
    # match the memory layout of both operands when flattening
    order = 'F' if all(a.data.flags.f_contiguous for a in (x1, x2)) else 'C'
    if is_real_dtype(x1.dtype):
        if x1.size > THRESHOLD_MEDIUM:
            # faster than ravel+dot for large arrays
            return np.tensordot(x1, x2, [range(x1.ndim)] * 2)
        else:
            return np.dot(x1.data.ravel(order), x2.data.ravel(order))
    else:
        # vdot conjugates its first argument, hence x2 first
        return np.vdot(x2.data.ravel(order), x1.data.ravel(order))
Default Euclidean inner product implementation.
def unwatch_zone(self, zone_id):
    """Remove a zone from the watchlist and send the WATCH OFF command."""
    self._watched_zones.remove(zone_id)
    return (yield from self._send_cmd("WATCH %s OFF" % (zone_id.device_str(), )))
Remove a zone from the watchlist.
def _get_attributes(schema, location):
    """Return the schema's children, filtered by location.

    Attributes with no explicit location count as 'body' attributes.
    """
    schema = DottedNameResolver(__name__).maybe_resolve(schema)
    def _filter(attr):
        if not hasattr(attr, "location"):
            valid_location = 'body' in location
        else:
            valid_location = attr.location in to_list(location)
        return valid_location
    return list(filter(_filter, schema().children))
Return the schema's children, filtered by location.
def linkfetch(self):
    """Public method: open the connection, attach headers, then crawl URLs."""
    request, handle = self.open()
    self._add_headers(request)
    if handle:
        self._get_crawled_urls(handle, request)
Public method to call the internal methods
def getAnalyst(self):
    """Return the stored Analyst, falling back to the user who submitted
    the result, or '' when neither is set."""
    analyst = self.getField("Analyst").get(self)
    if not analyst:
        analyst = self.getSubmittedBy()
    return analyst or ""
Returns the stored Analyst or the user who submitted the result
def gradient_component(self, index1):
    """Compute the gradient of the energy for one atom (3-vector).

    Sums product-rule contributions over all pair terms with a positive
    scaling factor.
    """
    result = np.zeros(3, float)
    for index2 in range(self.numc):
        if self.scaling[index1, index2] > 0:
            for (se, ve), (sg, vg) in zip(self.yield_pair_energies(index1, index2),
                                          self.yield_pair_gradients(index1, index2)):
                # d(se*ve)/dr = sg*dir*ve + se*vg, weighted by the pair scaling
                result += (sg*self.directions[index1, index2]*ve + se*vg)*self.scaling[index1, index2]
    return result
Compute the gradient of the energy for one atom
def update_resource(self, resource, underlined=None):
    """Update the cache for global names in *resource*.

    Modules with syntax errors are silently skipped.
    """
    try:
        pymodule = self.project.get_pymodule(resource)
        modname = self._module_name(resource)
        self._add_names(pymodule, modname, underlined)
    except exceptions.ModuleSyntaxError:
        pass
Update the cache for global names in `resource`
def save_lyrics(self, extension='json', overwrite=False, verbose=True, binary_encoding=False):
    """Save lyrics for every song in this Artist object.

    :param extension: output format, 'json' or 'txt' (a leading dot is allowed)
    :raises ValueError: for any other extension
    """
    extension = extension.lstrip(".")
    # Was an assert (stripped under `python -O`) whose message referred to
    # a nonexistent `format_` parameter; raise a proper error instead.
    if extension not in ('json', 'txt'):
        raise ValueError("extension must be 'json' or 'txt'")
    for song in self.songs:
        song.save_lyrics(extension=extension, overwrite=overwrite,
                         verbose=verbose, binary_encoding=binary_encoding)
Allows user to save all lyrics within an Artist object
def _memoize(f): nothing = object() cache = {} lock = threading.RLock() @functools.wraps(f) def wrapper(arg): if cache.get(arg, nothing) is nothing: with lock: if cache.get(arg, nothing) is nothing: cache[arg] = f(arg) return cache[arg] return wrapper
Memoizing decorator for f, which must have exactly 1 hashable argument.
def netdevs():
    """Return RX and TX megabytes for each non-loopback network device,
    parsed from /proc/net/dev, as {name: data(rx, tx)}."""
    with open('/proc/net/dev') as f:
        net_dump = f.readlines()
    device_data = {}
    data = namedtuple('data', ['rx', 'tx'])
    # skip the two /proc/net/dev header lines
    for line in net_dump[2:]:
        line = line.split(':')
        if line[0].strip() != 'lo':
            # columns: 0 = rx bytes, 8 = tx bytes; convert to MiB
            device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0),
                                                float(line[1].split()[8])/(1024.0*1024.0))
    return device_data
RX and TX bytes for each of the network devices
def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None,
                  exedir=None, log_file=None, runtime_file=None, hostfile=None):
    """Run the TauDEM peuker-douglas function (stream skeleton from a
    filled elevation grid), dispatching through TauDEM.run with MPI and
    logging options."""
    fname = TauDEM.func_name('peukerdouglas')
    return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                      {'-fel': fel}, workingdir,
                      None,
                      {'-ss': streamSkeleton},
                      {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file})
Run peuker-douglas function
def dashboard(request):
    """Show the latest results for each source.

    Builds a source -> metrics mapping (sourceless metrics under ''), and
    renders into a PJAX-friendly parent template when requested.
    """
    sources = (models.Source.objects.all().prefetch_related('metric_set')
               .order_by('name'))
    metrics = SortedDict([(src, src.metric_set.all()) for src in sources])
    no_source_metrics = models.Metric.objects.filter(source__isnull=True)
    if no_source_metrics:
        metrics[''] = no_source_metrics
    # PJAX requests get a minimal parent template
    if request.META.get('HTTP_X_PJAX', False):
        parent_template = 'pjax.html'
    else:
        parent_template = 'base.html'
    return render(request, 'metrics/dashboard.html', {
        'source_metrics': metrics,
        'parent_template': parent_template
    })
Shows the latest results for each source
def GetColumnNumber (self, columnName):
    """Return the column number for a given column heading name, 0 if not
    found.  Scans every cell, not just the first row."""
    for row in range(1, self.maxRow + 1):
        for column in range(1, self.maxColumn + 1):
            # NOTE(review): GetCellValue is called as (column, row) — verify
            # argument order against its definition.
            if self.GetCellValue(column, row, "") == columnName:
                return column
    return 0
Returns the column number for a given column heading name, or 0 if it is not found.
def encode_set(dynamizer, value):
    """Encode a set for the DynamoDB format: ('<T>S', [encoded values]).

    The element type is inferred from an arbitrary member.
    NOTE(review): an empty set raises StopIteration here — confirm callers
    never pass one (DynamoDB forbids empty sets).
    """
    inner_value = next(iter(value))
    inner_type = dynamizer.raw_encode(inner_value)[0]
    return inner_type + 'S', [dynamizer.raw_encode(v)[1] for v in value]
Encode a set for the DynamoDB format
def compute_distance(x_ori, x_pert, constraint='l2'):
    """Compute the distance between two images.

    :param constraint: 'l2' (Euclidean norm) or 'linf' (max abs difference)
    :raises ValueError: for any other constraint (the original fell
        through to an UnboundLocalError)
    """
    if constraint == 'l2':
        return np.linalg.norm(x_ori - x_pert)
    if constraint == 'linf':
        return np.max(abs(x_ori - x_pert))
    raise ValueError("unknown constraint: {!r} (expected 'l2' or 'linf')".format(constraint))
Compute the distance between two images.
def _copytree(src, dst): try: os.makedirs(dst) except OSError: pass for file in os.listdir(src): try: shutil.copy2('%s/%s' % (src, file), '%s/%s' % (dst, file)) except IOError, e: try: shutil.copytree('%s/%s' % (src, file), '%s/%s' % (dst, file)) except OSError: raise e
Similar to shutils.copytree, except that dst is already there
def viewBoxAxisRange(viewBox, axisNumber):
    """Calculate the range of one axis of a viewBox from its children's
    bounding rect.

    :param axisNumber: X_AXIS or Y_AXIS
    :raises ValueError: for any other axis number
    :raises AssertionError: when there is no children bounding rect
    """
    rect = viewBox.childrenBoundingRect()
    if rect is not None:
        if axisNumber == X_AXIS:
            return rect.left(), rect.right()
        elif axisNumber == Y_AXIS:
            # Y grows downward in scene coordinates, hence bottom first
            return rect.bottom(), rect.top()
        else:
            raise ValueError("axisNumber should be 0 or 1, got: {}".format(axisNumber))
    else:
        raise AssertionError("No children bbox. Plot range not updated.")
Calculates the range of an axis of a viewBox.
def getActiveAxes(self):
    """Return a list of the indices of the selected (checked) axes."""
    is_checked = self._menu.IsChecked
    return [idx for idx, axis_id in enumerate(self._axisId) if is_checked(axis_id)]
Return a list of the selected axes.
def dale_chall(self, diff_count, words, sentences):
    """Calculate the Dale-Chall readability score.

    Score = 0.1579 * %difficult-words + 0.0496 * avg-sentence-length,
    with the 3.6365 adjustment added when difficult words exceed 5%.
    """
    pct_difficult = diff_count / words * 100
    avg_sentence_len = words / sentences
    score = 0.1579 * pct_difficult + 0.0496 * avg_sentence_len
    if pct_difficult > 5:
        score += 3.6365
    return score
Calculate Dale-Chall readability score.
def build_year(self, dt):
    """Build the page for the provided year (taken from *dt*)."""
    self.year = str(dt.year)
    logger.debug("Building %s" % self.year)
    self.request = self.create_request(self.get_url())
    target_path = self.get_build_path()
    self.build_file(target_path, self.get_content())
Build the page for the provided year.
def populate(cls, graph):
    """Populate an rdflib graph with these curies (prefix -> IRI bindings)."""
    # A plain loop, not a throwaway list comprehension, for side effects.
    for prefix, iri in cls._dict.items():
        graph.bind(prefix, iri)
populate an rdflib graph with these curies
def generate_schema_file(config_file):
    """Generate a basic confirm schema file from a configuration file.

    Every section/option found gets a placeholder description.
    """
    config = utils.load_config_from_ini_file(config_file)
    schema = {}
    for section_name in config:
        for option_name in config[section_name]:
            schema.setdefault(section_name, {}).setdefault(option_name, {})
            schema[section_name][option_name]['description'] = 'No description provided.'
    return utils.dump_schema_file(schema)
Generates a basic confirm schema file from a configuration file.
def write_readme(self, role):
    """Render the readme template and record it in the generation report
    for *role*."""
    j2_out = self.readme_template.render(self.readme_template_vars)
    self.update_gen_report(role, "readme", j2_out)
Write out a new readme file.
def sync_config(self, force=False):
    """Sync the file config into the library proxy data in the root dataset."""
    from ambry.library.config import LibraryConfigSyncProxy
    lcsp = LibraryConfigSyncProxy(self)
    lcsp.sync(force=force)
Sync the file config into the library proxy data in the root dataset
def _get_catalog_hierarchy_id(self, catalog_id, proxy, runtime):
    """Get the Id of the catalog hierarchy derived from *catalog_id*."""
    seed_str = convert_catalog_id_to_object_id_string(catalog_id)
    ident = Id(authority=self._authority,
               namespace='hierarchy.Hierarchy',
               identifier=seed_str)
    return HierarchyLookupSession(proxy, runtime).get_hierarchy(ident).get_id()
Gets the catalog hierarchy
def off(self, group):
    """Turn the LED off for *group* (fire-and-forget on the event loop)."""
    asyncio.ensure_future(self._send_led_on_off_request(group, 0),
                          loop=self._loop)
Turn the LED off for a group.
def delif(self, iname):
    """Delete interface *iname* from the bridge via brctl."""
    _runshell([brctlexe, 'delif', self.name, iname],
              "Could not delete interface %s from %s." % (iname, self.name))
Delete an interface from the bridge.
def initial_global_state(self) -> GlobalState:
    """Initialize the execution environment and build the initial global
    state with the fallback function active."""
    environment = Environment(
        self.callee_account,
        self.caller,
        self.call_data,
        self.gas_price,
        self.call_value,
        self.origin,
        # fall back to the callee's code when none was supplied
        code=self.code or self.callee_account.code,
    )
    return super().initial_global_state_from_environment(
        environment, active_function="fallback"
    )
Initialize the execution environment.
def xpath(self, expression):
    """Execute an XPath *expression* against self.tree using the module's
    namespace map."""
    # The original declared `global namespaces`, which is redundant for a
    # read-only access to a module-level name.
    return self.tree.xpath(expression, namespaces=namespaces)
Executes an xpath expression using the correct namespaces
def team_players(self, team):
    """Store output of team players to a JSON file via generate_output.

    Only a fixed subset of fields is kept per player.
    """
    keys = 'shirtNumber name position nationality dateOfBirth'.split()
    data = [{key: player[key] for key in keys} for player in team]
    self.generate_output({'players': data})
Store output of team players to a JSON file
def mean(self):
    """Return the sample mean, or NaN when there are no samples."""
    if not len(self):
        return float('NaN')
    samples = self.samples()
    return sum(samples) / float(len(samples))
Return the sample mean.
def _write(self, data):
    """Serialize *data* as block-style YAML and write it to the config file."""
    stream = yaml.dump(data, default_flow_style=False)
    self.path.write_text(stream)
Write data to config file.
def make_format(self, fmt, width):
    """Make subreport text in the specified format ('text', 'html' or
    'csv'; None defaults to text) for every data item with results."""
    if not self.report_data:
        return
    for data_item in self.report_data:
        if data_item.results:
            if fmt is None or fmt == 'text':
                data_item.make_text(width)
            elif fmt == 'html':
                data_item.make_html()
            elif fmt == 'csv':
                data_item.make_csv()
Make subreport text in a specified format
def _async_poll(self, poller, async_seconds, async_poll_interval):
    """Poll an async job until completion or timeout.

    Hosts still pending after the timeout are reported as failed with a
    'timed out' reason and folded into the contacted results.
    """
    results = poller.wait(async_seconds, async_poll_interval)
    # mark any hosts that never finished as failed
    for host in poller.hosts_to_poll:
        reason = {'failed': 1, 'rc': None, 'msg': 'timed out'}
        self.runner_callbacks.on_failed(host, reason)
        results['contacted'][host] = reason
    return results
launch an async job, if poll_interval is set, wait for completion
def add_term_occurrence(self, term, document):
    """Add an occurrence of *term* in *document*.

    When the index is frozen, unseen terms are ignored (and the document
    counter is left untouched).
    """
    if document not in self._documents:
        self._documents[document] = 0
    if term not in self._terms:
        if self._freeze:
            # frozen: don't grow the vocabulary
            return
        else:
            self._terms[term] = collections.Counter()
    if document not in self._terms[term]:
        self._terms[term][document] = 0
    self._documents[document] += 1
    self._terms[term][document] += 1
Adds an occurrence of the term in the specified document.
def handle_flush_error(cls, exception):
    """Handle a flush error exception.

    Translates a MySQL duplicate-entry trace into AlreadyExistsError;
    re-raises the original exception when the trace does not match.
    """
    trace = exception.args[0]
    m = re.match(cls.MYSQL_FLUSH_ERROR_REGEX, trace)
    if not m:
        raise exception
    entity = m.group('entity')
    eid = m.group('eid')
    raise AlreadyExistsError(entity=entity, eid=eid)
Handle flush error exceptions.
def __get_oauth_url(self, url, method, **kwargs):
    """Generate an OAuth 1.0a signed URL for *url* / *method*.

    An explicit oauth_timestamp may be supplied via kwargs; otherwise the
    current time is used.
    """
    oauth = OAuth(
        url=url,
        consumer_key=self.consumer_key,
        consumer_secret=self.consumer_secret,
        version=self.version,
        method=method,
        oauth_timestamp=kwargs.get("oauth_timestamp", int(time()))
    )
    return oauth.get_oauth_url()
Generate oAuth1.0a URL
def _audit_request(options, func, request_context, *args, **kwargs):
    """Run a request function under audit.

    Captures request, timing and either the response or the raised error,
    then logs the record unless the function is marked to skip logging.
    Exceptions are re-raised after capture.
    """
    logger = getLogger("audit")
    request_info = RequestInfo(options, func, request_context)
    response = None
    request_info.capture_request()
    try:
        with elapsed_time(request_info.timing):
            response = func(*args, **kwargs)
    except Exception as error:
        request_info.capture_error(error)
        raise
    else:
        request_info.capture_response(response)
        return response
    finally:
        # always log, success or failure, unless explicitly skipped
        if not should_skip_logging(func):
            request_info.log(logger)
Run a request function under audit.
def _memcache_key(self, timestamped=False):
    """Make a key suitable as a memcache entry for this resolve.

    The key folds in the requests, repository uids, filters, orderers,
    building flag and config; optionally the resolve timestamp.
    """
    request = tuple(map(str, self.package_requests))
    repo_ids = []
    for path in self.package_paths:
        repo = package_repository_manager.get_repository(path)
        repo_ids.append(repo.uid)
    t = ["resolve",
         request,
         tuple(repo_ids),
         self.package_filter_hash,
         self.package_orderers_hash,
         self.building,
         config.prune_failed_graph]
    if timestamped and self.timestamp:
        t.append(self.timestamp)
    return str(tuple(t))
Makes a key suitable as a memcache entry.
def exception(
        self, msg, *args, exc_info=True, **kwargs
) -> Task:
    """Convenience method for logging an ERROR with exception information."""
    return self.error(msg, *args, exc_info=exc_info, **kwargs)
Convenience method for logging an ERROR with exception information.
def close(self, wait=True, abort=False):
    """Close the socket connection.

    Fires the disconnect event, sends a CLOSE packet unless *abort*, and
    optionally waits for the queue to drain.
    """
    if not self.closed and not self.closing:
        self.closing = True
        self.server._trigger_event('disconnect', self.sid, run_async=False)
        if not abort:
            self.send(packet.Packet(packet.CLOSE))
        self.closed = True
        # sentinel unblocks the queue consumer
        self.queue.put(None)
        if wait:
            self.queue.join()
Close the socket connection.
def my_shared_endpoint_list(endpoint_id):
    """Executor for `globus endpoint my-shared-endpoint-list`."""
    client = get_client()
    ep_iterator = client.my_shared_endpoint_list(endpoint_id)
    formatted_print(ep_iterator, fields=ENDPOINT_LIST_FIELDS)
Executor for `globus endpoint my-shared-endpoint-list`
def num_unused_cpus(thresh=10):
    """Return the number of CPUs whose utilization is below *thresh* percent."""
    import psutil
    per_cpu_usage = psutil.cpu_percent(percpu=True)
    return len([usage for usage in per_cpu_usage if usage < thresh])
Returns the number of cpus with utilization less than `thresh` percent
def main_loop(self):
    """The main loop a worker runs to receive and execute tasks.

    Installs a SIGTERM handler that shuts down cleanly, then blocks
    forever pulling tasks from the raylet.
    """
    def exit(signum, frame):
        shutdown()
        sys.exit(0)
    signal.signal(signal.SIGTERM, exit)
    while True:
        task = self._get_next_task_from_raylet()
        self._wait_for_and_process_task(task)
The main loop a worker runs to receive and execute tasks.
def write_to_disk(filename, delete=False, content=None):
    """Write *content* out to *filename* (UTF-8), or delete the file.

    Does nothing when the parent directory does not exist.  When
    *content* is omitted it defaults to the current time — the original
    default `content=get_time()` was evaluated once at import time, so
    every call wrote the same stale timestamp.
    """
    if content is None:
        content = get_time()
    if not os.path.exists(os.path.dirname(filename)):
        return
    if delete:
        if os.path.lexists(filename):
            os.remove(filename)
    else:
        with open(filename, 'wb') as f:
            f.write(content.encode('utf-8'))
Write filename out to disk
def _out(ins):
    """Translate OUT to asm: value (8-bit) in A, port (16-bit) moved from
    HL into BC, then `out (c), a`."""
    output = _8bit_oper(ins.quad[2])
    output.extend(_16bit_oper(ins.quad[1]))
    output.append('ld b, h')
    output.append('ld c, l')
    output.append('out (c), a')
    return output
Translates OUT to asm.
def partial(cls, prefix, source):
    """Strip `prefix + '.'` from the keys of *source*, returning a new
    instance containing only the stripped key/value pairs.

    :raises ValueError: when no key carries the prefix (the original
        raised a bare ValueError with no message)
    """
    match = prefix + "."
    matches = cls([(key[len(match):], source[key])
                   for key in source if key.startswith(match)])
    if not matches:
        raise ValueError("no keys in source start with %r" % match)
    return matches
Strip a prefix from the keys of another dictionary, returning a Bunch containing only valid key, value pairs.
def _random_block():
    """Generate a random base-36 string padded to BLOCK_SIZE length.

    NOTE(review): uses `random`, not `secrets` — fine for ids, not for
    security tokens.
    """
    random_number = random.randint(0, DISCRETE_VALUES)
    random_string = _to_base36(random_number)
    return _pad(random_string, BLOCK_SIZE)
Generate a random string of `BLOCK_SIZE` length.
def _generate_overlays(self):
    """Return a dict of overlays generated from item metadata:
    {metadata_key: {item_identifier: value}}."""
    overlays = defaultdict(dict)
    for handle in self._storage_broker.iter_item_handles():
        identifier = dtoolcore.utils.generate_identifier(handle)
        item_metadata = self._storage_broker.get_item_metadata(handle)
        for k, v in item_metadata.items():
            overlays[k][identifier] = v
    return overlays
Return dictionary of overlays generated from added item metadata.
def language_to_locale(language):
    """Convert django's `LANGUAGE_CODE` (e.g. 'en-us') to a proper locale
    code ('en_US').  A code with no region is returned unchanged."""
    base, *rest = language.split('-')
    if not rest:
        return base
    return "%s_%s" % (base, rest[0].upper())
Converts django's `LANGUAGE_CODE` settings to a proper locale code.
def init_app(self, app):
    """Initialize the APScheduler with a Flask application instance.

    Registers itself on the app, loads config and jobs, and mounts the
    HTTP API when enabled.
    """
    self.app = app
    self.app.apscheduler = self
    self._load_config()
    self._load_jobs()
    if self.api_enabled:
        self._load_api()
Initialize the APScheduler with a Flask application instance.
def _get_brew_versions():
    """Retrieve versions of tools installed via brew as {name: version}.

    Returns an empty dict when the brew executable is missing.
    """
    from bcbio import install
    tooldir = install.get_defaults().get("tooldir")
    brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew"
    try:
        # decode: check_output returns bytes on Python 3, and the original
        # `vout.split("\n")` fails on bytes
        vout = subprocess.check_output([brew_cmd, "list", "--versions"]).decode()
    except OSError:
        vout = ""
    out = {}
    for vstr in vout.split("\n"):
        if vstr.strip():
            parts = vstr.rstrip().split()
            # `brew list --versions` prints "name v1 [v2 ...]"; keep the last
            out[parts[0]] = parts[-1]
    return out
Retrieve versions of tools installed via brew.
def mks(val):
    """Coerce *val* to a string; on Python 3, bytes are decoded as UTF-8."""
    if sys.version_info > (3, 0, 0):
        if isinstance(val, bytes):
            return str(val, 'utf-8')
        return str(val)
    return str(val)
Ensure the value is a string, handling the Python 3 vs Python 2 bytes/str difference.
def one_mask(self):
    """Return a mask with 0xAA repeated across every byte of the chunk's
    item size (used to test an array chunk for set bits)."""
    item_bytes = self.data.itemsize
    return sum(0xAA << (byte_index << 3) for byte_index in range(item_bytes))
Return a mask to determine whether an array chunk has any ones.
def _ensure_frames(cls, documents):
    """Ensure all items in *documents* are frames, converting those that
    aren't and keeping those that already are."""
    frames = []
    for document in documents:
        if not isinstance(document, Frame):
            frames.append(cls(document))
        else:
            frames.append(document)
    return frames
Ensure all items in a list are frames by converting those that aren't.
def check(func):
    """Decorator: check HTTP method, then redirect appropriately.

    Rejects non-POST requests, returns 'ok' for AJAX, otherwise redirects
    to `next` (GET/POST), the follow target's absolute URL, or the
    referer, with server errors as last resorts.
    """
    def iCheck(request, *args, **kwargs):
        if not request.method == "POST":
            return HttpResponseBadRequest("Must be POST request.")
        follow = func(request, *args, **kwargs)
        if request.is_ajax():
            return HttpResponse('ok')
        try:
            if 'next' in request.GET:
                return HttpResponseRedirect(request.GET.get('next'))
            if 'next' in request.POST:
                return HttpResponseRedirect(request.POST.get('next'))
            return HttpResponseRedirect(follow.target.get_absolute_url())
        except (AttributeError, TypeError):
            if 'HTTP_REFERER' in request.META:
                return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
            if follow:
                return HttpResponseServerError('"%s" object of type ``%s`` has no method ``get_absolute_url()``.' % (
                    unicode(follow.target), follow.target.__class__))
            return HttpResponseServerError('No follow object and `next` parameter found.')
    return iCheck
Check the permissions, http method and login state.
def make_source(self, groups, code_opts, gen_opts):
    """Build the final C source for all modules.

    Returns [c_header, c_source]: typedefs/structs/data in the header and
    hashes/relocations/libprocs in the init source.
    """
    modules = self.make_modules(groups, code_opts)
    var_decls = modules.var_decls
    relocs = AttrsGetter(modules.relocs)
    x86, x64 = relocs.get_attrs('x86', 'x64')
    if code_opts.windll:
        # windll builds contribute extra per-arch relocations
        structs, x86_reloc, x64_reloc = make_windll(
            modules.structs
        )
        x86 += x86_reloc
        x64 += x64_reloc
    else:
        structs = ''.join(modules.structs)
    c_relocs = reloc_both(relocs.strings + x86, x64)
    data = var_decls.strip()
    c_header = make_c_header(
        gen_opts.filename, 'NOTICE',
        modules.typedefs + structs + data
    )
    c_source = make_init(
        modules.hashes + c_relocs + modules.libprocs,
        callable(code_opts.hash_func)
    )
    return [c_header, c_source]
Build the final source code for all modules.
def _alter_umask(self):
    """Context manager body: temporarily alter the process umask to
    self.umask (no-op when it is None), restoring it afterwards."""
    if self.umask is None:
        yield
    else:
        prev_umask = os.umask(self.umask)
        try:
            yield
        finally:
            os.umask(prev_umask)
Temporarily alter umask to custom setting, if applicable
def _setup_genome_annotations(g, args, ann_groups): available_anns = g.get("annotations", []) + g.pop("annotations_available", []) anns = [] for orig_target in args.datatarget: if orig_target in ann_groups: targets = ann_groups[orig_target] else: targets = [orig_target] for target in targets: if target in available_anns: anns.append(target) g["annotations"] = anns if "variation" not in args.datatarget and "validation" in g: del g["validation"] return g
Configure genome annotations to install based on datatarget.
def handleNotification(self, handle, raw_data):
    """Called by the bluepy backend when using wait_for_notification.

    Caches the decoded payload; on a valid cache the read timestamp is
    set to now, otherwise it is backdated so only a short (300s) grace
    period remains before the cache expires.
    """
    if raw_data is None:
        return
    data = raw_data.decode("utf-8").strip(' \n\t')
    self._cache = data
    self._check_data()
    if self.cache_available():
        self._last_read = datetime.now()
    else:
        # invalid data: retry after a short timeout instead of the full one
        self._last_read = datetime.now() - self._cache_timeout + \
            timedelta(seconds=300)
gets called by the bluepy backend when using wait_for_notification
def add_relationship(self, term1, relationship, term2):
    """Create a relationship between three entities in the database.

    Posts the ids and versions of term1, the relationship term, and term2
    to the term/add-relationship endpoint.
    """
    url = self.base_path + 'term/add-relationship'
    data = {'term1_id': term1['id'],
            'relationship_tid': relationship['id'],
            'term2_id': term2['id'],
            'term1_version': term1['version'],
            'relationship_term_version': relationship['version'],
            'term2_version': term2['version']}
    return self.post(url, data)
Creates a relationship between 3 entities in database
def _report_error(self, request, exp):
    """Log a fatal message for a failed request and re-raise it as a
    requests.RequestException."""
    message = (
        "Failure to perform %s due to [ %s ]" % (request, exp)
    )
    self.log.fatal(message)
    raise requests.RequestException(message)
When making the request, if an error happens, log it.
def count_leases_by_owner(self, leases):
    """Return a dictionary mapping each current owner to its lease count."""
    tally = Counter(lease.owner for lease in leases)
    return dict(tally)
Returns a dictionary of leases by current owner.
def _cast(self, value, format=None, **opts):
    """Parse *value* into a datetime, using strptime with *format* when
    given, otherwise dateutil's flexible parser."""
    if format is not None:
        return datetime.strptime(value, format)
    return dateutil.parser.parse(value)
Optionally apply a format string.
def widget_kwargs_for_field(self, field_name):
    """Return widget kwargs configured in Meta for *field_name*, or {}."""
    if self._meta.widgets:
        return self._meta.widgets.get(field_name, {})
    return {}
Returns widget kwargs for given field_name.
def by_date(self, chamber, date):
    "Return votes cast in a chamber on a single day"
    date = parse_date(date)
    # a single day is just a one-day range
    return self.by_range(chamber, date, date)
Return votes cast in a chamber on a single day
def __get_connection_cloudwatch():
    """Ensure a connection to CloudWatch in the configured region.

    Uses explicit credentials from the configuration file when present,
    otherwise boto's own authentication handler.  Connection failures are
    logged and re-raised.
    """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection
Ensure connection to CloudWatch
def save_params(self, **kw):
    """Update the current calculation parameters and save engine metadata
    (version, date, checksum) into the datastore."""
    if ('hazard_calculation_id' in kw and
            kw['hazard_calculation_id'] is None):
        del kw['hazard_calculation_id']
    vars(self.oqparam).update(**kw)
    self.datastore['oqparam'] = self.oqparam
    attrs = self.datastore['/'].attrs
    attrs['engine_version'] = engine_version
    # ISO timestamp truncated to seconds
    attrs['date'] = datetime.now().isoformat()[:19]
    if 'checksum32' not in attrs:
        attrs['checksum32'] = readinput.get_checksum32(self.oqparam)
    self.datastore.flush()
Update the current calculation parameters and save engine_version
def _get_response(self, parse_result=True):
    """Wait for the VW process to emit a line; parse it into a VWResult
    unless *parse_result* is False, in which case return None."""
    self.vw_process.expect_exact('\r\n', searchwindowsize=-1)
    if parse_result:
        output = self.vw_process.before
        result_struct = VWResult(output, active_mode=self.active_mode)
    else:
        result_struct = None
    return result_struct
If 'parse_result' is False, ignore the received output and return None.
def additionalProperties(self):
    """Return the schema for all additional properties, or False.

    :raises SchemaError: when the value is neither false nor an object
    """
    value = self._schema.get("additionalProperties", {})
    if not isinstance(value, dict) and value is not False:
        raise SchemaError(
            "additionalProperties value {0!r} is neither false nor"
            " an object".format(value))
    return value
Schema for all additional properties, or False.
def stop_refresh(self):
    """Stop redrawing the canvas at the previously set timed interval."""
    self.logger.debug("stopping timed refresh")
    self.rf_flags['done'] = True
    self.rf_timer.clear()
Stop redrawing the canvas at the previously set timed interval.
def ec2_fab(service, args):
    """Run Fabric commands against EC2 instances.

    Resolves hosts from the service/ELB, configures the Fabric env, loads
    the fabfile and executes each requested command in parallel.
    """
    instance_ids = args.instances
    instances = service.list(elb=args.elb, instance_ids=instance_ids)
    hosts = service.resolve_hosts(instances)
    fab.env.hosts = hosts
    fab.env.key_filename = settings.get('SSH', 'KEY_FILE')
    fab.env.user = settings.get('SSH', 'USER', getpass.getuser())
    fab.env.parallel = True
    fabfile = find_fabfile(args.file)
    if not fabfile:
        # `print 'x'` was Python-2-only syntax; the call form works on 2 and 3
        print('Couldn\'t find any fabfiles!')
        return
    # NOTE(review): attribute name `real_fabile` looks like a typo for
    # `real_fabfile`, but is kept since external code may read it
    fab.env.real_fabile = fabfile
    docstring, callables, default = load_fabfile(fabfile)
    fab_state.commands.update(callables)
    commands_to_run = parse_arguments(args.methods)
    for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run:
        fab.execute(name, hosts=arg_hosts, roles=arg_roles,
                    exclude_hosts=arg_exclude_hosts, *args, **kwargs)
Run Fabric commands against EC2 instances
def with_dimensions(self, *dimensions):
    """Declare dimensions for this Measurement; returns self for chaining.

    Any cached computation is invalidated.
    """
    self.dimensions = tuple(
        self._maybe_make_dimension(dim) for dim in dimensions)
    self._cached = None
    return self
Declare dimensions for this Measurement, returns self for chaining.
def printBoundingBox(self):
    """Print the latitude/longitude bounding box that this DEM covers."""
    for label, start, end in (
            ("Bounding Latitude: ", self.startlatitude, self.endlatitude),
            ("Bounding Longitude: ", self.startlongitude, self.endlongitude)):
        print(label)
        print(start)
        print(end)
Print the bounding box that this DEM covers
def tokenize_paragraphs(cls, text):
    """Convert an input string into a list of non-empty paragraphs.

    Splits on newlines first, then on runs of 4 or more whitespace
    characters within each line.
    """
    paragraphs = []
    for chunk in text.split('\n'):
        # raw string: '\s' in a plain literal is a DeprecationWarning on
        # modern Python and a future SyntaxError
        paragraphs.extend(re.split(r'\s{4,}', chunk))
    return [p for p in paragraphs if p]
Convert an input string into a list of paragraphs.
def ConsultarDomicilios(self, nro_doc, tipo_doc=80, cat_iva=None):
    """Look up the addresses for a document number, store them on
    self.domicilios, and return how many were found."""
    self.cursor.execute("SELECT direccion FROM domicilio WHERE "
                        " tipo_doc=? AND nro_doc=? ORDER BY id ",
                        [tipo_doc, nro_doc])
    filas = self.cursor.fetchall()
    self.domicilios = [fila['direccion'] for fila in filas]
    return len(filas)
Busca los domicilios, devuelve la cantidad y establece la lista