code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def _bits_to_geohash(value):
    """Convert a list of GeoHash bits to a GeoHash string.

    Args:
        value: sequence of bits (0/1 ints); consumed in 5-bit groups.

    Returns:
        str: the base-32 GeoHash, mapped through the module-level
        BASE32MAPR table.
    """
    ret = []
    # Fix: xrange is Python-2-only (NameError on Python 3); range is the
    # direct equivalent here. Also drop the redundant list inside sum().
    for chunk in (value[i:i + 5] for i in range(0, len(value), 5)):
        # Interpret each 5-bit chunk as a big-endian binary number.
        total = sum(bit * 2 ** count for count, bit in enumerate(chunk[::-1]))
        ret.append(BASE32MAPR[total])
    return "".join(ret)
Convert a list of GeoHash bits to a GeoHash.
def encrypt(source, dest, keys):
    """Encrypt ``source`` to ASCII-armored ``dest`` for the given GPG keys.

    Builds a gpg command line from the module's helper functions and runs
    it via stderr_output.  Always returns True; failures presumably raise
    from the helpers — TODO confirm.
    """
    cmd = flatten([gnupg_bin(), "--armor", "--output", dest, gnupg_verbose(), gnupg_home(), recipients_args(keys), "--encrypt", source])
    stderr_output(cmd)
    return True
Encrypts a file using the given keys
def generate(self, api): for namespace in api.namespaces.values(): with self.output_to_relative_path('%s.stone' % namespace.name): self.emit('namespace %s' % namespace.name) for data_type in namespace.linearize_data_types(): self.generate_data_type...
Main code generator entry point.
def format(self): self.max_lengths = [] for row in self.rows: if len(self.max_lengths) < len(row): self.max_lengths += [0] * (len(row) - len(self.max_lengths)) for idx, val in enumerate(row): len_cell = len(str(val)) if val else 0 i...
Format table to print out
def build_block(self):
    """Assemble the candidate block into its finalized form for broadcast."""
    # Serialize the header, then wrap it with its signature in a Block
    # message and attach all collected batches.
    header_bytes = self.block_header.SerializeToString()
    block = Block(header=header_bytes, header_signature=self._header_signature)
    block.batches.extend(self.batches)
    return block
Assembles the candidate block into its finalized form for broadcast.
def download(args): accessions, infoFTP = set(args['g']), args['i'] search, exclude = args['s'], args['e'] FTPs = getFTPs(accessions, infoFTP, search, exclude, threads = args['t'], convert = args['convert']) if args['test'] is True: for genome in FTPs: print('found:', ';'...
download genomes from NCBI
def debug(self, request, message, extra_tags='', fail_silently=False):
    """Add a message with the ``DEBUG`` level via the module-level add()."""
    add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags, fail_silently=fail_silently)
Add a message with the ``DEBUG`` level.
def close(self):
    """Close the plot and release its memory."""
    from matplotlib.pyplot import close
    # NOTE(review): resetting scales to linear before cla() looks like a
    # workaround so log-scale axes drop cached state — confirm.
    for ax in self.axes[::-1]:
        ax.set_xscale('linear')
        ax.set_yscale('linear')
        ax.cla()
    close(self)
Close the plot and release its memory.
def push(self, obj):
    """Push *obj* onto the thread-local stack, creating it on first use.

    Returns the stack itself after the push.
    """
    stack = getattr(self._local, "stack", None)
    if stack is None:
        stack = []
        self._local.stack = stack
    stack.append(obj)
    return stack
Pushes a new item to the stack
def filter_repeated(self): defaults = { 'occurrences': 1, 'interval': 30, 'refresh': 1800 } if isinstance(self.settings['sensu_plugin'], dict): defaults.update(self.settings['sensu_plugin']) occurrences = int(self.event['check'].get( ...
Determine whether a check is repeating.
def scale_image(image, new_width):
    """Resize *image* to *new_width*, preserving the aspect ratio.

    The width passed to resize is doubled — presumably to compensate for
    non-square character cells in ASCII-art output; TODO confirm with callers.
    """
    width, height = image.size
    ratio = height / float(width)
    scaled_height = int(ratio * new_width)
    return image.resize((new_width * 2, scaled_height))
Resizes an image preserving the aspect ratio.
def run(bam_file, sample, out_dir):
    """Standard QC metrics for chipseq.

    Returns a dict of metrics; currently only reads-in-peaks counts, and
    only when the sample provides a main peaks file.
    """
    out = {}
    peaks = sample.get("peaks_files", {}).get("main")
    if peaks:
        out.update(_reads_in_peaks(bam_file, peaks, sample))
    return out
Standard QC metrics for chipseq
def saveFormatFileEnc(self, filename, encoding, format):
    """Dump an XML document to a file or a URL with the given encoding.

    Thin wrapper over libxml2mod.xmlSaveFormatFileEnc; returns its result
    (bytes written, or -1 on failure, per the libxml2 convention).
    """
    ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format)
    return ret
Dump an XML document to a file or an URL.
def save_file(fullpath, entry): with tempfile.NamedTemporaryFile('w', delete=False) as file: tmpfile = file.name for key, val in entry.items(): print('{}: {}'.format(key, str(val)), file=file) print('', file=file) file.write(entry.get_payload()) shutil.move(tmpfile, f...
Save a message file out, without mangling the headers
def unfix_parameters(self):
    """Unfix every weight and bias parameter of the network, layer by layer."""
    for layer_params in zip(self.W_list, self.b_list):
        for param in layer_params:
            param.unfix()
Helper function that unfixes all parameters
def secure(self): log.debug('ConCache securing sockets') if os.path.exists(self.cache_sock): os.chmod(self.cache_sock, 0o600) if os.path.exists(self.update_sock): os.chmod(self.update_sock, 0o600) if os.path.exists(self.upd_t_sock): os.chmod(self.upd_t...
secure the sockets for root-only access
def _get_snpeff_cmd(cmd_name, datadir, data, out_file): resources = config_utils.get_resources("snpeff", data["config"]) jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"]) jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": ...
Retrieve snpEff base command line.
def delay_1(year):
    """Hebrew-calendar delay test for the start of a new year.

    Computes the elapsed-day count for *year* and postpones it by one day
    when the computed start would fall on a disallowed weekday.
    """
    elapsed_months = trunc((235 * year - 234) / 19)
    parts = 13753 * elapsed_months + 12084
    day = trunc(29 * elapsed_months + parts / 25920)
    if (3 * (day + 1)) % 7 < 3:
        return day + 1
    return day
Test for delay of the start of a new year, to avoid it falling on certain prohibited weekdays.
def draw(self): if not self.visible: return if self.focus: pygame.draw.rect(self.window, self.focusColor, self.focusedImageRect, 1) self.window.blit(self.textImage, self.loc) if self.focus: self.cursorMsCounter = self.cursorMsCounter + self.cloc...
Draws the Text in the window.
def convert(input_file_name, **kwargs): delimiter = kwargs["delimiter"] or "," quotechar = kwargs["quotechar"] or "|" if six.PY2: delimiter = delimiter.encode("utf-8") quotechar = quotechar.encode("utf-8") with open(input_file_name, "rb") as input_file: reader = csv.reader(input_...
Convert CSV file to HTML table
def determine_2(self, container_name, container_alias, meta, val):
    """Default the alias to the name of the container.

    If an explicit alias was given, return it unchanged.  Otherwise derive
    one from the name: the text after the last ':' (the whole name when no
    ':' is present), with '/' replaced by '-'.  ``meta`` and ``val`` are
    unused here.
    """
    if container_alias is not NotSpecified:
        return container_alias
    return container_name[container_name.rfind(":")+1:].replace('/', '-')
Default the alias to the name of the container
def read_nb_content(cells, mod_name):
    "Build a dictionary mapping each shown doc name to its cell index."
    positions = {}
    for idx, cell in enumerate(cells):
        if cell['cell_type'] != 'code':
            continue
        for shown_name in SHOW_DOC_RE.findall(cell['source']):
            positions[shown_name] = idx
    return positions
Build a dictionary containing the position of the `cells`.
def _read_single_point_data(self): temp_dict = read_pattern( self.text, { "final_energy": r"\s*SCF\s+energy in the final basis set\s+=\s*([\d\-\.]+)" }) if temp_dict.get('final_energy') == None: self.data['final_energy'] = None ...
Parses final free energy information from single-point calculations.
def paused_partitions(self):
    """Return the current set of paused TopicPartitions in the assignment."""
    return {tp for tp in self.assignment if self.is_paused(tp)}
Return current set of paused TopicPartitions.
def csvtolist(inputstr):
    """Convert a CSV string into a list of fields (leading spaces stripped)."""
    parsed_rows = csv.reader([inputstr], skipinitialspace=True)
    fields = []
    for row in parsed_rows:
        fields.extend(row)
    return fields
converts a csv string into a list
def benchmark(N, gates): qubits = list(range(0, N)) ket = qf.zero_state(N) for n in range(0, N): ket = qf.H(n).run(ket) for _ in range(0, (gates-N)//3): qubit0, qubit1 = random.sample(qubits, 2) ket = qf.X(qubit0).run(ket) ket = qf.T(qubit1).run(ket) ket = qf.CNOT...
Create and run a circuit with N qubits and given number of gates
def load(self, path):
    """Load a manifest at ``path`` or return a suitable manifest already loaded.

    The cache (``self`` is a mapping) is keyed by normalized path and
    invalidated when the file's mtime changes.
    """
    path = os.path.normpath(path)
    mtime = os.stat(path).st_mtime
    # Rebuild when the path is unseen or its cached entry is stale.
    if path not in self or self[path].mtime != mtime:
        manifest = self.build(path)
        self[path] = self.manifest_mod(manifest, mtime)
    return self[path].manifest
Load a manifest at path or return a suitable manifest already loaded.
def to_snake_case(s):
    """Convert a camel-case identifier to snake-case."""
    def _insert_underscore(match):
        return '{}_{}'.format(match.group(1), match.group(2).lower())
    return re.sub('([^_A-Z])([A-Z])', _insert_underscore, s)
Converts camel-case identifiers to snake-case.
def cleanup(self):
    """Release resources used during shell execution.

    Cancels outstanding futures, shuts down the executor (blocking until
    running tasks finish), and closes the SSH connection if it has an
    active transport.
    """
    for future in self.futures:
        future.cancel()
    # Fix: Executor.shutdown(wait=...) takes a bool, not a timeout; the
    # original wait=10 was truthy and therefore equivalent to wait=True,
    # but read misleadingly like a 10-second timeout.
    self.executor.shutdown(wait=True)
    # Fix: identity comparison with None (PEP 8), not `!= None`.
    if self.ssh.get_transport() is not None:
        self.ssh.close()
Release resources used during shell execution
def division_content(self, election_day, division, special=False): from electionnight.models import PageType division_type = ContentType.objects.get_for_model(division) page_type = PageType.objects.get( model_type=division_type, election_day=election_day, divi...
Return serialized content for a division page.
def filter_objects_by_section(self, rels, section): subtree = section.get_descendants(include_self=True) kwargs_list = [{'%s__in' % rel.field.name: subtree} for rel in rels] q = Q(**kwargs_list[0]) for kwargs in kwargs_list[1:]: q |= Q(**kwargs) return self.get_manage...
Build a queryset containing all objects in the section subtree.
def _checkout(self): cmd = ["atomic", "mount", "--storage", "ostree", self.ref_image_name, self.mount_point] self._run_and_log(cmd, self.ostree_path, "Failed to mount selected image as an ostree repo.")
check out the image filesystem on self.mount_point
def _get_distance_term(self, C, rjb, mag): c_3 = self._get_anelastic_coeff(C) rval = np.sqrt(rjb ** 2. + C["h"] ** 2.) return (C["c1"] + C["c2"] * (mag - self.CONSTS["Mref"])) *\ np.log(rval / self.CONSTS["Rref"]) +\ c_3 * (rval - self.CONSTS["Rref"])
Returns the general distance scaling term - equation 2
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel): file_key = "vcf_files" def split_by_region(data): base, ext = utils.splitext_plus(os.path.basename(out_file)) args = [] for region in [x.name for x in ref.file_contigs(ref_file, config)]: reg...
Combine variants in parallel by chromosome, concatenating final outputs.
def graph_response(graph, format): fmt = guess_format(format) if not fmt: abort(404) headers = { 'Content-Type': RDF_MIME_TYPES[fmt] } kwargs = {} if fmt == 'json-ld': kwargs['context'] = context if isinstance(graph, RdfResource): graph = graph.graph retur...
Return a proper flask response for a RDF resource given an expected format.
def assertpath(path_, msg='', **kwargs): if NO_ASSERTS: return if path_ is None: raise AssertionError('path is None! %s' % (path_, msg)) if path_ == '': raise AssertionError('path=%r is the empty string! %s' % (path_, msg)) if not checkpath(path_, **kwargs): raise Asserti...
Asserts that a path exists
def to_dict(self):
    """Serialize representation of the column for local caching."""
    return dict(schema=self.schema, table=self.table,
                name=self.name, type=self.type)
Serialize representation of the column for local caching.
def apply(self, coordinates):
    """Apply this distortion to Cartesian coordinates, modifying them in place."""
    for atom_index in self.affected_atoms:
        coordinates[atom_index] = self.transformation * coordinates[atom_index]
Apply this distortion to Cartesian coordinates
def hard_reset(self): if self.seq is not None and self.shuffle: random.shuffle(self.seq) if self.imgrec is not None: self.imgrec.reset() self.cur = 0 self._allow_read = True self._cache_data = None self._cache_label = None self._cache_idx =...
Resets the iterator and ignore roll over data
def GenerateFile(self, input_filename=None, output_filename=None): if input_filename is None: input_filename = output_filename + ".in" if output_filename[-3:] == ".in": output_filename = output_filename[:-3] logging.debug("Generating file %s from %s", output_filename, input_filename) with io...
Generates a file from a template, interpolating config values.
def to_task(self): task_args = self.get_task_args() name = task_args.get('name', MESSAGE_PROCESSOR_NAME) if not 'countdown' in task_args: task_args['countdown'] = self.frequency task_args['name'] = "%s-%s-%s-%s" % ( name, self.tag, self.current_batch, self.time_th...
Return a task object representing this MessageProcessor job.
def skull_strip(dset,suffix='_ns',prefix=None,unifize=True): if prefix==None: prefix = nl.suffix(dset,suffix) unifize_dset = nl.suffix(dset,'_u') cmd = bet2 if bet2 else 'bet2' if unifize: info = nl.dset_info(dset) if info==None: nl.notify('Error: could not read info ...
use bet to strip skull from given anatomy
def generate_table(self, rows): table = PrettyTable(**self.kwargs) for row in self.rows: if len(row[0]) < self.max_row_width: appends = self.max_row_width - len(row[0]) for i in range(1, appends): row[0].append("-") if row[1] is...
Generates from a list of rows a PrettyTable object.
def view(self): print("") description, count = "", 0 if self.repo == "sbo": description = SBoGrep(self.name).description() else: PACKAGES_TXT = Utils().read_file(self.lib) for line in PACKAGES_TXT.splitlines(): if line.startswith(self.n...
Print package description by repository
def hash(self):
    """Returns a hash of the cursor as an int.

    Computed lazily via libclang's clang_hashCursor and memoized on the
    instance.
    """
    if not hasattr(self, '_hash'):
        self._hash = conf.lib.clang_hashCursor(self)
    return self._hash
Returns a hash of the cursor as an int.
def get(self, key):
    """Get data associated with the provided key, deserialized from JSON
    into the store's object class."""
    raw = self._db[key]
    return self._object_class(json.loads(raw))
Get data associated with provided key.
def dumps(self):
    """Override the default to avoid duplicate dump: emit the lower-cased
    class name followed by the dumped children."""
    dumped_children = [child.dumps() for child in self.ts]
    return [type(self).__name__.lower(), dumped_children]
Override the default to avoid duplicate dump.
def calculate_hash_of_dir(directory, file_list=None): md5_hash = md5() if not os.path.exists(directory): return -1 try: for subdir, dirs, files in os.walk(directory): for _file in files: file_path = os.path.join(subdir, _file) if file_list is not N...
Calculate hash of directory.
def first_seen(self, first_seen):
    """Set Document first seen, formatted as ``%Y-%m-%dT%H:%M:%SZ``."""
    self._group_data['firstSeen'] = self._utils.format_datetime(
        first_seen, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
Set Document first seen.
def cublasCtpsv(handle, uplo, trans, diag, n, AP, x, incx): status = _libcublas.cublasCtpsv_v2(handle, _CUBLAS_FILL_MODE[uplo], _CUBLAS_OP[trans], _CUBLAS_DIAG[diag], ...
Solve complex triangular-packed system with one right-hand side.
def open_python(self, message, namespace): from code import InteractiveConsole import readline import rlcompleter readline.set_completer(rlcompleter.Completer(namespace).complete) readline.parse_and_bind('tab: complete') console = InteractiveConsole(namespace) con...
Open interactive python console
def example(fn): @functools.wraps(fn) def wrapped(): try: sys.stdout.write('Running: %s\n' % fn.__name__) fn() sys.stdout.write('\n') except KeyboardInterrupt: sys.stdout.write('\nSkipping example.\n\n') time.sleep(0.2) examples.app...
Wrap the examples so they generate readable output
def unit_conversion(thing, units, length=False):
    """Convert base data between metric, imperial, or nautical units.

    Returns the literal string 'n/a' unchanged; otherwise a
    (value, unit-label) tuple, where value is 'fubar' when *thing* is not
    numeric.
    """
    if thing == 'n/a':
        return 'n/a'
    factor_index = 1 if length else 0
    try:
        converted = round(thing * CONVERSION[units][factor_index], 2)
    except TypeError:
        converted = 'fubar'
    return converted, CONVERSION[units][2 + length]
converts base data between metric, imperial, or nautical units
def run_tensorboard(run_id, tflog_id): data = current_app.config["data"] run = data.get_run_dao().get(run_id) base_dir = Path(run["experiment"]["base_dir"]) log_dir = Path(run["info"]["tensorflow"]["logdirs"][tflog_id]) if log_dir.is_absolute(): path_to_log_dir = log_dir else: pa...
Launch TensorBoard for a given run ID and log ID of that run.
def _create_placeholders(self):
    """Create the computational graph's placeholders.

    Both inputs and labels are int32 tensors shaped
    [batch_size, num_steps] — presumably token ids and their targets;
    confirm with the model's feed code.
    """
    self.input_data = tf.placeholder(
        tf.int32, [self.batch_size, self.num_steps])
    self.input_labels = tf.placeholder(
        tf.int32, [self.batch_size, self.num_steps])
Create the computational graph's placeholders.
def cfitsio_version(asfloat=False):
    """Return the cfitsio version as a string (e.g. '3.370'), or as a
    float when ``asfloat`` is True.
    """
    # Format to three decimals first so the float round-trips the same digits.
    ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
    if asfloat:
        return float(ver)
    else:
        return ver
Return the cfitsio version as a string.
def findrec(s, data):
    """Find all records belonging to *s* in *data*.

    Returns fields 1-4, as a list, of every record whose first field
    equals *s*.
    """
    return [[rec[1], rec[2], rec[3], rec[4]] for rec in data if s == rec[0]]
finds all the records belonging to s in data
def remove(id_):
    """Remove the callback and its schedule.

    Pops the thread from the module REGISTRY under LOCK, cancels it if
    present, and returns it (None when the id was unknown).
    """
    with LOCK:
        thread = REGISTRY.pop(id_, None)
        if thread is not None:
            thread.cancel()
        return thread
Remove the callback and its schedule
def right(ctx, text, num_chars):
    """Returns the last ``num_chars`` characters in a text string.

    Raises ValueError for a negative count; returns '' for zero.
    """
    num_chars = conversions.to_integer(num_chars, ctx)
    if num_chars < 0:
        raise ValueError("Number of chars can't be negative")
    elif num_chars == 0:
        # Guard: text[-0:] would be the whole string, not the empty string.
        return ''
    else:
        return conversions.to_string(text, ctx)[-num_chars:]
Returns the last characters in a text string
def fetcher_loop_v1(data_queue, data_buffer, pin_memory=False, pin_device_id=0, data_buffer_lock=None): while True: idx, batch = data_queue.get() if idx is None: break if pin_memory: batch = _as_in_context(batch, context.cpu_pinned(pin_device_id)) ...
Fetcher loop for fetching data from queue and put in reorder dict.
def transform_aglistener_output(result): from collections import OrderedDict from msrestazure.tools import parse_resource_id try: resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group'] output = OrderedDict([('id', result.id), ...
Transforms the result of Availability Group Listener to eliminate unnecessary parameters.
def hparams_to_batching_scheme(hparams, drop_long_sequences=False, shard_multiplier=1, length_multiplier=1): return batching_scheme( batch_size=hparams.batch_size, min_length=hparams.min_length, max_length=h...
Wrapper around _batching_scheme with hparams.
def _ident(*elements): try: return len(set(elements)) == 1 except TypeError: for e1, e2 in zip(elements, elements[1:]): if e1 != e2: return False return True
Return True if all sequences are equal.
def ps():
    """Print a table of all running containers on a host, sorted by name."""
    containers = get_containers()
    containers = [pretty_container(c) for c in containers]
    print_table(containers, ['name', 'ip', 'ports', 'created', 'image'], sort='name')
Print a table of all running containers on a host
def _next_move_direction(self):
    """Pick a move at random from the list of moves.

    Re-rolls while the candidate would reverse the previous move.
    """
    nmoves = len(self.moves)
    # NOTE(review): np.random.randint(1, nmoves+1) yields values in
    # [1, nmoves]; self.moves[move] with move == nmoves would be out of
    # range for a 0-indexed list — presumably self.moves is keyed or
    # padded accordingly; confirm.
    move = np.random.randint(1, nmoves+1)
    while self.prev_move == (move + 3) % nmoves:
        move = np.random.randint(1, nmoves+1)
    self.prev_move = move
    return np.array(self.moves[move])
pick a move at random from the list of moves
def insert(self, resource, doc_or_docs, **kwargs): ids = [] kwargs.update(self._es_args(resource)) for doc in doc_or_docs: self._update_parent_args(resource, kwargs, doc) _id = doc.pop('_id', None) res = self.elastic(resource).index(body=doc, id=_id, **kwargs)...
Insert document, it must be new if there is ``_id`` in it.
def _get_domain_text_of_authoritative_zone(self): from bs4 import BeautifulSoup zones_response = self.session.get(self.URLS['domain_list']) self._log('Zone', zones_response) assert zones_response.status_code == 200, \ 'Could not retrieve domain list due to a network error.' ...
Get the authoritative name zone.
def _getID(self): id = [] for key in self._sqlPrimary: value = self.__dict__[key] if isinstance(value, Forgetter): if value._new: value.save() try: (value,) = value._getID() except: ...
Get the ID values as a tuple annotated by sqlPrimary
def pop_trigger(data): trigger_name = data.pop('trigger') trigger_args = {} if trigger_name == 'date': trigger_arg_names = ('run_date', 'timezone') elif trigger_name == 'interval': trigger_arg_names = ('weeks', 'days', 'hours', 'minutes', 'seconds', 'start_date', 'end_date', 'timezone') ...
Pops trigger and trigger args from a given dict.
def timex_ends(self):
    """The list of end positions of ``timexes`` layer elements.

    Tags the timexes layer on demand before reading it.
    """
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.ends(TIMEXES)
The list of end positions of ``timexes`` layer elements.
def add_definitions(definitions, operations): for definition_schema in iter_definitions(definitions, operations): if definition_schema is None: continue if isinstance(definition_schema, str): continue for name, schema in iter_schemas(definition_schema): de...
Add definitions to swagger.
def reminders_list(self, **kwargs) -> SlackResponse:
    """Lists all reminders created by or for a given user.

    Requires a user (xoxp) token; any kwargs are passed through as query
    parameters to the ``reminders.list`` Web API method.
    """
    self._validate_xoxp_token()
    return self.api_call("reminders.list", http_verb="GET", params=kwargs)
Lists all reminders created by or for a given user.
def make_valid_xml_name(key, attr): LOG.info('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % ( unicode_me(key), unicode_me(attr)) ) key = escape_xml(key) attr = escape_xml(attr) if key_is_valid_xml(key): return key, attr if key.isdigit(): return 'n%s' % ...
Tests an XML name and fixes it if invalid
def shortDescription(self):
    """Overrides property from Event base class: prefer the class
    description's short form, falling back to its long description."""
    class_desc = getattr(self, 'classDescription', None)
    if not class_desc:
        return ''
    short = getattr(class_desc, 'shortDescription', '')
    if short:
        return short
    return getattr(class_desc, 'description', '')
Overrides property from Event base class.
def write_to_file(self, f): for section, values in self._values.iteritems(): try: section_name, subsection_name = section except ValueError: (section_name, ) = section subsection_name = None if subsection_name is None: ...
Write configuration to a file-like object.
def switch_or_run(cmd, venv_name=None): if cmd: return _run(venv_name, cmd) inenv = InenvManager() if not os.getenv(INENV_ENV_VAR): activator_warn(inenv) return else: venv = inenv.get_prepped_venv(venv_name) inenv.clear_extra_source_file() inenv.write_extr...
Switch or run in this env
def from_statement(cls, statement): if statement[0] in ["TS", "TIMESTAMP", "UTCTIMESTAMP", "UTCTS"]: return TimestampFunction.from_statement(statement) elif statement[0] in ["NOW", "UTCNOW"]: return NowFunction.from_statement(statement) else: raise SyntaxError...
Create a selection function from a statement
def Start(self): if self.hunt_obj.Get(self.hunt_obj.Schema.STATE) == "STARTED": return self.context.duration = self.runner_args.expiry_time self.context.next_client_due = rdfvalue.RDFDatetime.Now() self._CreateAuditEvent("HUNT_STARTED") self.hunt_obj.Set(self.hunt_obj.Schema.STATE("STARTED")) ...
This uploads the rules to the foreman and, thus, starts the hunt.
def _is_modification_or_activity(feature): if not (isinstance(feature, _bp('ModificationFeature')) or \ isinstance(feature, _bpimpl('ModificationFeature'))): return None mf_type = feature.getModificationType() if mf_type is None: return None mf_type_terms = mf_type.getTerm()....
Return True if the feature is a modification
def _debug(self, out, print_prefix=True): if self.debug: if print_prefix: pre = self.__class__.__name__ if hasattr(self, 'debug_prefix'): pre = getattr(self, 'debug_prefix') sys.stderr.write("%s: " % pre) sys.stderr.writ...
Print out to stderr, if debugging is enabled.
def lookup(self): assert not self.is_existing try: a = SocialAccount.objects.get(provider=self.account.provider, uid=self.account.uid) a.extra_data = self.account.extra_data self.account = a self.user = self.accoun...
Lookup existing account, if any.
def _get_data_collections(dnr_values, dhr_values, metadata, timestep, is_leap_year): analysis_period = AnalysisPeriod(timestep=timestep, is_leap_year=is_leap_year) dnr_header = Header(data_type=DirectNormalIrradiance(), unit='W/m2', analysis_period...
Return two data collections: Direct Normal and Diffuse Horizontal irradiance.
def dumps():
    """Return a string representation of the FILTERS dictionary (via
    ``_dumps``), keyed by resolved component name with each filter set
    rendered as a list.
    """
    d = {}
    for k, v in FILTERS.items():
        d[dr.get_name(k)] = list(v)
    return _dumps(d)
Returns a string representation of the FILTERS dictionary.
def process_documentline(line, nanopubs_metadata):
    """Process a SET DOCUMENT line in a BEL script.

    Stores the key/value pair into ``nanopubs_metadata`` and returns the
    (mutated) dict.  Surrounding double quotes on the value are stripped.
    """
    # Fix: use a raw string — '\s'/'\w' in a plain literal are invalid
    # escape sequences (DeprecationWarning since Python 3.6, a future
    # error); the matched pattern is unchanged.
    matches = re.match(r'SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line)
    key = matches.group(1)
    val = matches.group(2)
    nanopubs_metadata[key] = val
    return nanopubs_metadata
Process SET DOCUMENT line in BEL script
def total_core(self): corefile = os.path.join(self.reffilepath, self.analysistype, 'Escherichia', 'core_combined.fasta') for record in SeqIO.parse(corefile, 'fasta'): gene_name = record.id.split('-')[0] if gene_name not in self.coregenomes: self.coregenomes.append...
Determine the total number of core genes present
def fai_from_bam(ref_file, bam_file, out_file, data): contigs = set([x.contig for x in idxstats(bam_file, data)]) if not utils.file_uptodate(out_file, bam_file): with open(ref.fasta_idx(ref_file, data["config"])) as in_handle: with file_transaction(data, out_file) as tx_out_file: ...
Create a fai index with only contigs in the input BAM file.
def clear_document(self): self._components.clear() self._sequences.clear() self._namespaces.clear() self._models.clear() self._modules.clear() self._collections.clear() self._annotations.clear() self._functional_component_store.clear() self._collec...
Clears ALL items from the document, resetting it to a clean state.
def process_cpp(self, path, suffix): _cpplint_state.ResetErrorCounts() cpplint.ProcessFile(str(path), _cpplint_state.verbose_level) _cpplint_state.PrintErrorCounts() errors = _cpplint_state.errors_by_category.copy() if suffix == 'h': self.cpp_header_map[str(path)] = e...
Process a cpp file.
def rollforward(self, date):
    """Roll ``date`` forward to the nearest start of year.

    Dates already on the year-start offset are returned unchanged.
    """
    if self.onOffset(date):
        return date
    else:
        return date + YearBegin(month=self.month)
Roll date forward to nearest start of year
def cyvcf_add_filter(rec, name):
    """Add a FILTER value to a cyvcf2 record, avoiding duplicates.

    Returns the same record with FILTER replaced by the updated list.
    """
    existing = rec.FILTER.split(";") if rec.FILTER else []
    if name not in existing:
        existing.append(name)
    rec.FILTER = existing
    return rec
Add a FILTER value to a cyvcf2 record
def lstm_init_states(batch_size):
    """Returns a tuple of names and zero arrays for LSTM init states."""
    hp = Hyperparams()
    # init_shapes is a sequence of (state_name, shape) pairs.
    init_shapes = lstm.init_states(batch_size=batch_size, num_lstm_layer=hp.num_lstm_layer, num_hidden=hp.num_hidden)
    init_names = [s[0] for s in init_shapes]
    init_arrays = [mx.nd.zeros(x[1]) for x in init_shapes]
    return init_names, init_arrays
Returns a tuple of names and zero arrays for LSTM init states
def write_json_plan(self, proposed_layout, proposed_plan_file):
    """Dump the proposed layout as JSON to the given file for future usage."""
    with open(proposed_plan_file, 'w') as plan_out:
        json.dump(proposed_layout, plan_out)
Dump proposed json plan to given output file for future usage.
def transformer_parsing_big():
    """HParams for parsing on WSJ semi-supervised.

    Starts from transformer_big and overrides sequence length, embedding
    sharing, warmup, dropout, batch size, and learning rate.
    """
    hparams = transformer_big()
    hparams.max_length = 512
    hparams.shared_source_target_embedding = False
    hparams.learning_rate_warmup_steps = 4000
    hparams.layer_prepostprocess_dropout = 0.1
    hparams.batch_size = 2048
    hparams.learning_rate = 0.05
    return hparams
HParams for parsing on WSJ semi-supervised.
def tag(self, *tag, **kwtags): if not tag: pass elif len(tag) == 1 and isinstance(tag[0], dict): self._meta.update(tag[0]) else: raise TypeError('Tags must be provided as key-word arguments or ' 'a dictionary') self._meta.up...
Tag a Property instance with metadata dictionary
def cli(env, identifier, allocation, port, routing_type, routing_method): mgr = SoftLayer.LoadBalancerManager(env.client) _, loadbal_id = loadbal.parse_id(identifier) mgr.add_service_group(loadbal_id, allocation=allocation, port=port, ...
Adds a new load_balancer service.
def read_chunked(self):
    """Return an iterator over this data source's partitions, loading
    metadata first."""
    self._load_metadata()
    partition_index = 0
    while partition_index < self.npartitions:
        yield self._get_partition(partition_index)
        partition_index += 1
Return iterator over container fragments of data source
def init(self): if not self.export_enable: return None try: parameters = pika.URLParameters( 'amqp://' + self.user + ':' + self.password + '@' + self.host + ':' + self.port + '/') connection = pika.Blocki...
Init the connection to the rabbitmq server.
def delete_os_dummy_rtr_nwk(self, rtr_id, net_id, subnet_id):
    """Delete the dummy interface to the router, then its network/subnets.

    Returns the falsy interface-removal result when that step fails,
    otherwise the result of deleting the network with all its subnets.
    """
    detached = self.os_helper.delete_intf_router(None, None, rtr_id, {subnet_id})
    if not detached:
        return detached
    return self.os_helper.delete_network_all_subnets(net_id)
Delete the dummy interface to the router.
def sponsor_or_site(self, value): if value not in Comment.VALID_SPONSOR_OR_SITE_RESPONSES: raise AttributeError("%s sponsor_or_site value of %s is not valid" % (self.__class__.__name__, value)) self._sponsor_or...
Set Originator with validation of input
def cli(env, volume_id, reason, immediate): file_storage_manager = SoftLayer.FileStorageManager(env.client) if not (env.skip_confirmations or formatting.no_going_back(volume_id)): raise exceptions.CLIAbort('Aborted') cancelled = file_storage_manager.cancel_snapshot_space( volume_id, reason, ...
Cancel existing snapshot space for a given volume.