code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _initialize_tables(self):
    """Build the structure and values vocabulary tables and record their sizes."""
    self.table_struct, self.idnt_struct_size = self._create_struct_table()
    self.table_values, self.idnt_values_size = self._create_values_table()
Create tables for structure and values, word->vocabulary
def decode_offset_response(cls, response):
    """Decode an OffsetResponse into a flat list of OffsetResponsePayloads."""
    payloads = []
    for topic, partitions in response.topics:
        for partition, error, offsets in partitions:
            payloads.append(
                kafka.structs.OffsetResponsePayload(
                    topic, partition, error, tuple(offsets)))
    return payloads
Decode OffsetResponse into OffsetResponsePayloads Arguments: response: OffsetResponse Returns: list of OffsetResponsePayloads
def aggregate_history(slugs, granularity="daily", since=None, with_data_table=False):
    """Template tag: build the context for a history chart over multiple metrics.

    ``since`` may be a datetime or a string ("YYYY-mm-dd" or
    "YYYY-mm-dd HH:MM:SS"); unparsable strings are passed through unchanged.
    """
    r = get_r()
    slugs = list(slugs)
    try:
        # A 10-char string is a date, a 19-char string is a date + time.
        if since and len(since) == 10:
            since = datetime.strptime(since, "%Y-%m-%d")
        elif since and len(since) == 19:
            since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
    except (TypeError, ValueError):
        # Leave `since` as given (e.g. already a datetime).
        pass
    history = r.get_metric_history_chart_data(
        slugs=slugs, since=since, granularity=granularity)
    return {
        'chart_id': "metric-aggregate-history-{0}".format("-".join(slugs)),
        'slugs': slugs,
        'since': since,
        'granularity': granularity,
        'metric_history': history,
        'with_data_table': with_data_table,
    }
Template Tag to display history for multiple metrics. * ``slugs`` -- A list of slugs to display * ``granularity`` -- the granularity: seconds, minutes, hourly, daily, weekly, monthly, yearly * ``since`` -- a datetime object or a string matching one of the following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for a date & time. * ``with_data_table`` -- if True, prints the raw data in a table.
def translate_month_abbr(date_str, source_lang=DEFAULT_DATE_LANG,
                         target_lang=DEFAULT_DATE_LANG):
    """Translate the month abbreviation in *date_str* from one locale to another."""
    month_num, month_abbr = get_month_from_date_str(date_str, source_lang)
    with calendar.different_locale(LOCALES[target_lang]):
        translated_abbr = calendar.month_abbr[month_num]
    return re.sub(month_abbr, translated_abbr, date_str, flags=re.IGNORECASE)
Translate the month abbreviation from one locale to another.
def discretize(value, factor=100):
    """Multiply *value* by *factor* and truncate to int.

    Scalars return an int; iterables return a list of ints (the input is
    deep-copied, not mutated).
    """
    if not isinstance(value, Iterable):
        return int(value * factor)
    scaled = list(deepcopy(value))
    for idx, item in enumerate(scaled):
        scaled[idx] = int(item * factor)
    return scaled
Discretize the given value, pre-multiplying by the given factor
def drain_to(self, list, max_size=-1):
    """Transfer up to *max_size* available items from this queue into *list*.

    Returns a future resolving to the number of transferred items.
    """
    def drain_result(f):
        resp = f.result()
        list.extend(resp)
        return len(resp)
    return self._encode_invoke(
        queue_drain_to_max_size_codec, max_size=max_size).continue_with(drain_result)
Transfers all available items to the given `list`_ and removes these items from this queue. If a max_size is specified, it transfers at most the given number of items. In case of a failure, an item can exist in both collections or none of them. This operation may be more efficient than polling elements repeatedly and putting into collection. :param list: (`list`_), the list where the items in this queue will be transferred. :param max_size: (int), the maximum number items to transfer (optional). :return: (int), number of transferred items. .. _list: https://docs.python.org/2/library/functions.html#list
def get_limit(self, request):
    """Return the page-size limit from the request, or the configured default."""
    if self.limit_query_param:
        try:
            return _positive_int(
                get_query_param(request, self.limit_query_param),
                strict=True,
                cutoff=self.max_limit,
            )
        except (KeyError, ValueError):
            # Missing or malformed query param: fall back to the default.
            pass
    return self.default_limit
Return limit parameter.
def _reset_plain(self):
    """Flush captured text lines into a BlockText and clear the buffer."""
    if not self._text:
        return
    self._blocks.append(BlockText('\n'.join(self._text)))
    self._text.clear()
Create a BlockText from the captured lines and clear _text.
def validate(self):
    """Run base validation, then check that len(entities) == rows + columns."""
    super().validate()
    nb_entities = len(self.entities)
    expected = self.rows + self.columns
    if nb_entities != expected:
        raise self.error(
            'Number of entities: %s != number of rows + '
            'number of columns: %s+%s=%s' % (
                nb_entities, self.rows, self.columns, expected))
Base validation + entities = rows + columns.
def get_hosting_devices_for_agent(self, context):
    """Return the list of hosting devices assigned to this agent (RPC call)."""
    cctxt = self.client.prepare()
    return cctxt.call(context, 'get_hosting_devices_for_agent', host=self.host)
Get a list of hosting devices assigned to this agent.
def _one_or_more_stages_remain(self, deploymentId):
    """Return True if any API stages are still associated with *deploymentId*."""
    stages = __salt__['boto_apigateway.describe_api_stages'](
        restApiId=self.restApiId,
        deploymentId=deploymentId,
        **self._common_aws_args).get('stages')
    return bool(stages)
Helper function to find whether there are other stages still associated with a deployment
def check_package_exists(package, lib_dir):
    """Return True if *package*'s requirement is met globally or in *lib_dir*."""
    try:
        req = pkg_resources.Requirement.parse(package)
    except ValueError:
        # URL-style requirement: the distribution name lives in the fragment.
        req = pkg_resources.Requirement.parse(urlparse(package).fragment)
    if lib_dir is not None:
        if any(dist in req for dist in pkg_resources.find_distributions(lib_dir)):
            return True
    return any(dist in req for dist in pkg_resources.working_set)
Check if a package is installed globally or in lib_dir. Returns True when the requirement is met. Returns False when the package is not installed or doesn't meet req.
def myGrades(year, candidateNumber, badFormat, length):
    """Return the weighted average result for *candidateNumber* in *year*.

    Arguments:
        year (int): study year, must be 1-4.
        candidateNumber (str): key into badFormat.
        badFormat (dict): candidateNumber -> flat row of results; scores are
            read from the even indices 2, 4, ..., 2*(length-1).
        length (int): half the row length; length-1 scores are read
            (expected to be at most 9 so the weight lists are long enough).

    Returns:
        float: the weighted average.

    Raises:
        ValueError: for a year outside 1-4 (previously this crashed with an
            UnboundLocalError because no branch assigned the result).
    """
    weights1 = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]   # year 1 weighting, sums to 6
    weights2 = [1, 1, 1, 1, 1, 1, 0.5, 0.5]       # years 2-3 weighting, sums to 7
    scores = [int(badFormat[candidateNumber][2 * (i + 1)])
              for i in range(length - 1)]
    if year == 1:
        return sum(s * w for s, w in zip(scores, weights1)) / 6
    if year in (2, 3):
        return sum(s * w for s, w in zip(scores, weights2)) / 7
    if year == 4:
        # Final year: unweighted scores over 8 modules.
        return sum(scores) / 8
    raise ValueError("year must be 1, 2, 3 or 4, got %r" % (year,))
returns final result of candidateNumber in year Arguments: year {int} -- the year the candidate is in candidateNumber {str} -- the candidate number of the student badFormat {dict} -- candNumber : [results for candidate] length {int} -- length of each row in badFormat divided by 2 Returns: float -- a weighted average for a specific candidate number and year
async def probe_message(self, _message, context):
    """Handle a probe message by probing the client identified in *context*."""
    client_id = context.user_data
    await self.probe(client_id)
Handle a probe message. See :meth:`AbstractDeviceAdapter.probe`.
def page(request):
    """Context processor: expose the current Page to templates and run its helpers."""
    context = {}
    current = getattr(request, "page", None)
    if isinstance(current, Page):
        context = {"request": request, "page": current, "_current_page": current}
        current.set_helpers(context)
    return context
Adds the current page to the template context and runs its ``set_helper`` method. This was previously part of ``PageMiddleware``, but moved to a context processor so that we could assign these template context variables without the middleware depending on Django's ``TemplateResponse``.
def build_relation_predicate(relations: Strings) -> EdgePredicate:
    """Build an edge predicate that passes for edges with the given relation(s).

    A single string matches by equality; an iterable matches by set membership.
    """
    if isinstance(relations, str):
        @edge_predicate
        def relation_predicate(edge_data: EdgeData) -> bool:
            return edge_data[RELATION] == relations
    elif isinstance(relations, Iterable):
        relation_set = set(relations)

        @edge_predicate
        def relation_predicate(edge_data: EdgeData) -> bool:
            return edge_data[RELATION] in relation_set
    else:
        raise TypeError
    return relation_predicate
Build an edge predicate that passes for edges with the given relation.
def _check_pub_data(self, pub_data, listen=True):
    """Run common sanity checks on pub_data returned from pub.

    Returns the (possibly unchanged) pub_data, or {} when the publish
    failed; raises EauthAuthenticationError for an auth failure.
    """
    if pub_data == '':
        # An empty string means external auth rejected the user.
        raise EauthAuthenticationError(
            'Failed to authenticate! This is most likely because this '
            'user is not permitted to execute commands, but there is a '
            'small possibility that a disk error occurred (check '
            'disk/inode usage).'
        )
    if 'error' in pub_data:
        print(pub_data['error'])
        log.debug('_check_pub_data() error: %s', pub_data['error'])
        return {}
    elif 'jid' not in pub_data:
        return {}
    if pub_data['jid'] == '0':
        print('Failed to connect to the Master, '
              'is the Salt Master running?')
        return {}
    if not self.opts.get('order_masters'):
        if not pub_data['minions']:
            print('No minions matched the target. '
                  'No command was sent, no jid was assigned.')
            return {}
    if not listen:
        return pub_data
    if self.opts.get('order_masters'):
        self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex')
    self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))
    return pub_data
Common checks on the pub_data data structure returned from running pub
def resolve_input_references(to_resolve, inputs_to_reference):
    """Replace CWL-style input references (e.g. "$(inputs.f.nameroot)") in
    *to_resolve* with values from *inputs_to_reference* and return the result.
    """
    pieces = []
    for part in split_input_references(to_resolve):
        if is_input_reference(part):
            pieces.append(str(resolve_input_reference(part, inputs_to_reference)))
        else:
            pieces.append(part)
    return ''.join(pieces)
Resolves input references given in the string to_resolve by using the inputs_to_reference. See http://www.commonwl.org/user_guide/06-params/index.html for more information. Example: "$(inputs.my_file.nameroot).md" -> "filename.md" :param to_resolve: The path to match :param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename). :return: A string in which the input references are replaced with actual values.
def __update(self):
    """Refresh width/height and the anchor attribute from size/pos.

    Uses object-level __setattr__ to avoid re-triggering attribute callbacks.
    """
    width, height = self.size
    super(BaseWidget, self).__setattr__("width", width)
    super(BaseWidget, self).__setattr__("height", height)
    super(BaseWidget, self).__setattr__(self.anchor, self.pos)
This is called each time an attribute is accessed, to make sure all parameters are up to date, because of callbacks.
def _process_remove_objects_batch(self, bucket_name, objects_batch):
    """POST a multi-object delete request for one batch and parse the response."""
    content = xml_marshal_delete_objects(objects_batch)
    headers = {
        'Content-Md5': get_md5_base64digest(content),
        'Content-Length': len(content),
    }
    query = {'delete': ''}
    content_sha256_hex = get_sha256_hexdigest(content)
    response = self._url_open(
        'POST',
        bucket_name=bucket_name,
        headers=headers,
        body=content,
        query=query,
        content_sha256=content_sha256_hex,
    )
    return parse_multi_object_delete_response(response.data)
Requester and response parser for remove_objects
def get_mor_by_name(si, obj_type, obj_name):
    """Return the managed object of *obj_type* named *obj_name*, or None."""
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None
Get reference to an object of specified object type and name si ServiceInstance for the vSphere or ESXi server (see get_service_instance) obj_type Type of the object (vim.StoragePod, vim.Datastore, etc) obj_name Name of the object
def create_columns(self):
    """Create a TransactionCsvImportColumn for each CSV column (first 20 only).

    When the file has headings, well-known heading names are mapped to
    import fields, each field being inferred at most once.
    """
    reader = self._get_csv_reader()
    headings = six.next(reader)
    try:
        examples = six.next(reader)
    except StopIteration:
        examples = []
    found_fields = set()
    for i, value in enumerate(headings):
        if i >= 20:
            break
        infer_field = self.has_headings and value not in found_fields
        to_field = (
            {
                "date": "date",
                "amount": "amount",
                "description": "description",
                "memo": "description",
                "notes": "description",
            }.get(value.lower(), "")
            if infer_field
            else ""
        )
        if to_field:
            found_fields.add(to_field)
        TransactionCsvImportColumn.objects.update_or_create(
            transaction_import=self,
            column_number=i + 1,
            column_heading=value if self.has_headings else "",
            to_field=to_field,
            example=examples[i].strip() if examples else "",
        )
For each column in file create a TransactionCsvImportColumn
def body(self):
    """Read and return the full response body.

    :raises: :class:`ContentLimitExceeded` when length_limit is exceeded.
    """
    chunks = []
    total = 0
    for chunk in self:
        chunks.append(chunk)
        total += len(chunk)
        if self.length_limit and total > self.length_limit:
            self.close()
            raise ContentLimitExceeded("Content length is more than %d "
                                       "bytes" % self.length_limit)
    return b("").join(chunks)
Response body. :raises: :class:`ContentLimitExceeded`, :class:`ContentDecodingError`
def fit(self, X, y):
    """Determine which features of X are relevant for target y and store them.

    Sets relevant_features, feature_importances_, p_values and features,
    then returns self.
    """
    if not isinstance(X, pd.DataFrame):
        X = pd.DataFrame(X.copy())
    if not isinstance(y, pd.Series):
        y = pd.Series(y.copy())
    relevance_table = calculate_relevance_table(
        X, y,
        ml_task=self.ml_task,
        n_jobs=self.n_jobs,
        chunksize=self.chunksize,
        fdr_level=self.fdr_level,
        hypotheses_independent=self.hypotheses_independent,
        test_for_binary_target_real_feature=self.test_for_binary_target_real_feature)
    self.relevant_features = relevance_table.loc[relevance_table.relevant].feature.tolist()
    # Importance is defined as 1 - p so smaller p-values rank higher.
    self.feature_importances_ = 1.0 - relevance_table.p_value.values
    self.p_values = relevance_table.p_value.values
    self.features = relevance_table.index.tolist()
    return self
Extract the information of which features are relevant using the given target. For more information, please see the :func:`~tsfresh.feature_selection.feature_selector.check_fs_sig_bh` function. All columns in the input data sample are treated as features. The index of all rows in X must be present in y. :param X: data sample with the features, which will be classified as relevant or not :type X: pandas.DataFrame or numpy.array :param y: target vector to be used, to classify the features :type y: pandas.Series or numpy.array :return: the fitted estimator with the information, which features are relevant :rtype: FeatureSelector
def get_disease_mappings(self, att_ind_start):
    """Map each unique disease id to a sequential number starting at *att_ind_start*."""
    unique_diseases = self.get_all_unique_diseases()
    return {dis: num
            for num, dis in enumerate(unique_diseases, start=att_ind_start)}
Get a dictionary of enumerations for diseases. :param int att_ind_start: Starting index for enumeration. :return: Dictionary of disease, number pairs.
def change_message_visibility(self, queue, receipt_handle, visibility_timeout,
                              callback=None):
    """Extend the visibility timeout of the message with *receipt_handle*."""
    params = {'ReceiptHandle': receipt_handle,
              'VisibilityTimeout': visibility_timeout}
    return self.get_status('ChangeMessageVisibility', params, queue.id,
                           callback=callback)
Extends the read lock timeout for the specified message from the specified queue to the specified value. :type queue: A :class:`boto.sqs.queue.Queue` object :param queue: The Queue from which messages are read. :type receipt_handle: str :param receipt_handle: The receipt handle associated with the message whose visibility timeout will be changed. :type visibility_timeout: int :param visibility_timeout: The new value of the message's visibility timeout in seconds.
def _chk_docopt_exit(self, args, exp_letters):
    """Raise RuntimeError if docopt exited because of unknown arguments."""
    if args is None:
        args = sys.argv[1:]
    keys_all = self.exp_keys.union(self.exp_elems)
    if exp_letters:
        keys_all |= exp_letters
    unknown_args = self._chk_docunknown(args, keys_all)
    if unknown_args:
        raise RuntimeError("{USAGE}\n **FATAL: UNKNOWN ARGS: {UNK}".format(
            USAGE=self.doc, UNK=" ".join(unknown_args)))
Check if docopt exit was for an unknown argument.
def has_option(self, option):
    """Return True if *option* is included in this key's options.

    For sized options (tuples), the first element is also compared.
    """
    if not self.options:
        return False
    return any(
        (self._sized_op and entry[0] == option) or entry == option
        for entry in self.options)
Return True if the option is included in this key. Parameters ---------- option : str The option. Returns ------- has : bool True if the option can be found. Otherwise False will be returned.
def register_project(self, path, ensure_uniqueness=False):
    """Register *path* as a ProjectNode under the root node and return it."""
    if ensure_uniqueness:
        if self.get_project_nodes(path):
            raise foundations.exceptions.ProgrammingError(
                "{0} | '{1}' project is already registered!".format(
                    self.__class__.__name__, path))
    LOGGER.debug("> Registering '{0}' project.".format(path))
    row = self.root_node.children_count()
    self.beginInsertRows(self.get_node_index(self.root_node), row, row)
    project_node = ProjectNode(name=os.path.basename(path),
                               path=path,
                               parent=self.root_node)
    self.endInsertRows()
    self.project_registered.emit(project_node)
    return project_node
Registers given path in the Model as a project. :param path: Project path to register. :type path: unicode :param ensure_uniqueness: Ensure registrar uniqueness. :type ensure_uniqueness: bool :return: ProjectNode. :rtype: ProjectNode
def list_instances(self):
    """Return an iterator of Instance objects (lexicographical order)."""
    response = self.get_proto(path='/instances')
    message = rest_pb2.ListInstancesResponse()
    message.ParseFromString(response.content)
    protos = getattr(message, 'instance')
    return iter([Instance(proto) for proto in protos])
Lists the instances. Instances are returned in lexicographical order. :rtype: ~collections.Iterable[.Instance]
def get_count(self, name):
    """Return the current counter for parameter type *name* and increment it.

    The first call for a given name returns 0.
    """
    current = self.mlt_counter.get(name, 0)
    self.mlt_counter[name] = current + 1
    return current
get the latest counter for a certain parameter type. Parameters ---------- name : str the parameter type Returns ------- count : int the latest count for a parameter type Note ---- calling this function increments the counter for the passed parameter type
def n_frames_total(self, stride=1, skip=0):
    r"""Return the total number of frames when iterating with *stride*,
    skipping the first *skip* frames of each trajectory.

    A non-uniform stride is a frame list, so its length is the answer.
    """
    if not IteratorState.is_uniform_stride(stride):
        return len(stride)
    return sum(self.trajectory_lengths(stride=stride, skip=skip))
r"""Returns total number of frames. Parameters ---------- stride : int return value is the number of frames in trajectories when running through them with a step size of `stride`. skip : int, default=0 skip the first initial n frames per trajectory. Returns ------- n_frames_total : int total number of frames.
def startsafter(self, other):
    """Return True if this range starts at or after *other* (range or scalar).

    Only the lower ends are considered; infinite lower bounds and bound
    inclusiveness are taken into account for ranges.
    """
    if self.is_valid_range(other):
        if self.lower == other.lower:
            # Equal bounds: decided by inclusiveness.
            return other.lower_inc or not self.lower_inc
        elif self.lower_inf:
            return False
        elif other.lower_inf:
            return True
        return self.lower > other.lower
    elif self.is_valid_scalar(other):
        return self.lower >= other
    raise TypeError(
        "Unsupported type to test for starts after '{}'".format(
            other.__class__.__name__))
Test if this range starts after `other`. `other` may be either a range or a scalar. This only takes the lower end of the ranges into consideration. If this range's lower end is greater than or equal to the scalar, or to the lower end of the given range, ``True`` is returned. >>> intrange(1, 5).startsafter(0) True >>> intrange(1, 5).startsafter(intrange(0, 5)) True If ``other`` has the same lower bound as this range, the inclusiveness of the bounds determines the result. :param other: Range or scalar to test. :return: ``True`` if this range starts after `other`, otherwise ``False`` :raises TypeError: If `other` is of the wrong type.
def list_files(dir_path, recursive=True):
    """Return a list of file paths under *dir_path*.

    :param dir_path: directory to list.
    :param recursive: when True, include files from all subdirectories.
    :return: list of joined file paths (empty for a missing/empty directory).

    Fixes: the original returned from inside the os.walk loop and recursed
    manually per subdirectory, re-walking subtrees, and returned None when
    os.walk yielded nothing (e.g. nonexistent path).
    """
    result = []
    for root, dirs, files in os.walk(dir_path):
        result.extend(os.path.join(root, f) for f in files)
        if not recursive:
            # Only the top level was requested.
            break
    return result
Return a list of files in dir_path.
def publish_scene_velocity(self, scene_id, velocity):
    """Publish a scene-velocity change message and return its sequence number."""
    self.sequence_number += 1
    self.publisher.send_multipart(
        msgs.MessageBuilder.scene_velocity(self.sequence_number, scene_id, velocity))
    return self.sequence_number
Publish a changed scene velocity.
def _new_java_array(pylist, java_class):
    """Convert a Python list (1D or 2D) into a Java array of *java_class* via Py4J.

    For a 2D input a square, non-jagged array sized to the longest inner
    list is allocated; unfilled slots remain null.
    """
    sc = SparkContext._active_spark_context
    if len(pylist) > 0 and isinstance(pylist[0], list):
        inner_len = 0
        for row in pylist:
            inner_len = max(inner_len, len(row))
        java_array = sc._gateway.new_array(java_class, len(pylist), inner_len)
        for i, row in enumerate(pylist):
            for j, item in enumerate(row):
                java_array[i][j] = item
    else:
        java_array = sc._gateway.new_array(java_class, len(pylist))
        for i, item in enumerate(pylist):
            java_array[i] = item
    return java_array
Create a Java array of given java_class type. Useful for calling a method with a Scala Array from Python with Py4J. If the param pylist is a 2D array, then a 2D java array will be returned. The returned 2D java array is a square, non-jagged 2D array that is big enough for all elements. The empty slots in the inner Java arrays will be filled with null to make the non-jagged 2D array. :param pylist: Python list to convert to a Java Array. :param java_class: Java class to specify the type of Array. Should be in the form of sc._gateway.jvm.* (sc is a valid Spark Context). :return: Java Array of converted pylist. Example primitive Java classes: - basestring -> sc._gateway.jvm.java.lang.String - int -> sc._gateway.jvm.java.lang.Integer - float -> sc._gateway.jvm.java.lang.Double - bool -> sc._gateway.jvm.java.lang.Boolean
def make_dbsource(**kwargs):
    """Return a mapnik SQLite or PostGIS datasource built from Django DB settings."""
    if 'spatialite' in connection.settings_dict.get('ENGINE'):
        kwargs.setdefault('file', connection.settings_dict['NAME'])
        return mapnik.SQLite(wkb_format='spatialite', **kwargs)
    names = (('dbname', 'NAME'), ('user', 'USER'), ('password', 'PASSWORD'),
             ('host', 'HOST'), ('port', 'PORT'))
    for mopt, dopt in names:
        val = connection.settings_dict.get(dopt)
        if val:
            # Explicit kwargs take precedence over Django settings.
            kwargs.setdefault(mopt, val)
    return mapnik.PostGIS(**kwargs)
Returns a mapnik PostGIS or SQLite Datasource.
def save(self, outfile, close_file=True, **kwargs):
    """Render this Element and write the HTML to *outfile* (path or file object)."""
    if isinstance(outfile, (text_type, binary_type)):
        fid = open(outfile, 'wb')
    else:
        fid = outfile
    html = self.get_root().render(**kwargs)
    fid.write(html.encode('utf8'))
    if close_file:
        fid.close()
Saves an Element into a file. Parameters ---------- outfile : str or file object The file (or filename) where you want to output the html. close_file : bool, default True Whether the file has to be closed after write.
def as_singular(result_key):
    """Return the singular form of a plural result key.

    Handles the '-ies', '-uses', 'addresses' and plain '-s' endings; other
    keys are returned unchanged.
    """
    if result_key.endswith('ies'):
        return result_key[:-3] + 'y'
    if result_key.endswith('uses'):
        return result_key[:-4] + 'us'
    if result_key.endswith('addresses'):
        return result_key[:-2]
    if result_key.endswith('s'):
        return result_key[:-1]
    return result_key
Given a result key, return in the singular form
def frange(start, stop, step, digits_to_round=3):
    """Float analogue of range: yield values from *start* (inclusive) up to
    *stop* (exclusive), rounded to *digits_to_round* decimal places.
    """
    current = start
    while current < stop:
        yield round(current, digits_to_round)
        current += step
Works like range for doubles :param start: starting value :param stop: ending value :param step: the increment_value :param digits_to_round: the digits to which to round \ (makes floating-point numbers much easier to work with) :return: generator
def hasIP(self, ip):
    """Return True if any feature records *ip* as a network-interface address."""
    return any(
        feat.prop.startswith("net_interface.")
        and feat.prop.endswith(".ip")
        and feat.value == ip
        for feat in self.features)
Return True if some system has this IP.
def compute_convex_hull(feed: "Feed") -> Polygon:
    """Return a Shapely Polygon: the convex hull of the feed's stop coordinates."""
    points = sg.MultiPoint(feed.stops[["stop_lon", "stop_lat"]].values)
    return points.convex_hull
Return a Shapely Polygon representing the convex hull formed by the stops of the given Feed.
def parse_collection(obj: dict) -> BioCCollection:
    """Deserialize a dict into a BioCCollection."""
    collection = BioCCollection()
    collection.source = obj['source']
    collection.date = obj['date']
    collection.key = obj['key']
    collection.infons = obj['infons']
    for doc in obj['documents']:
        collection.add_document(parse_doc(doc))
    return collection
Deserialize a dict obj to a BioCCollection object
def parse_colors(s, length=1):
    """Parse *s* as a single matplotlib color or a tuple of colors.

    Returns a list of colors either way.
    """
    if length and length > 1:
        return parse_ctuple(s, length=length)
    if re.match('^ *{} *$'.format(isrx_s), s):
        return [s]
    elif re.match('^ *{} *$'.format(rgbrx_s), s):
        # NOTE(review): eval on a string that merely matched the RGB-tuple
        # regex -- fine for trusted config, unsafe on untrusted input.
        return [eval(s)]
    else:
        return parse_ctuple(s, length=length)
helper for parsing a string that can be either a matplotlib color or be a tuple of colors. Returns a tuple of them either way.
def remove_population(self, pop):
    """Remove the population matching *pop* from this PopulationSet.

    *pop* is matched against the registered model names (and each
    population's ``model`` attribute). If no match is found, nothing
    happens.

    Fixes: the original never used *pop* (it compared each model name to
    its own population's model, which is self-referential) and popped at
    the loop's final index ``i`` instead of the matched index.
    """
    iremove = None
    for i in range(len(self.poplist)):
        # NOTE(review): assumed intent -- match the requested population;
        # the original condition did not reference `pop` at all.
        if self.modelnames[i] == pop or self.poplist[i].model == pop:
            iremove = i
    if iremove is not None:
        self.modelnames.pop(iremove)
        self.shortmodelnames.pop(iremove)
        self.poplist.pop(iremove)
Removes population from PopulationSet
def _normalize_xml_search_response(self, xml):
    """Parse an XML search response so PB and HTTP transports return the same value."""
    handler = XMLSearchResult()
    parser = ElementTree.XMLParser(target=handler)
    parser.feed(xml)
    return parser.close()
Normalizes an XML search response so that PB and HTTP have the same return value
def _adjust_regs(self):
    """Shift sp (and bp unless omit_fp) by the stack offset between the GDB
    session and angr. Pointers into the stack may become inconsistent.
    """
    if not self.adjust_stack:
        return
    bp = self.state.arch.register_names[self.state.arch.bp_offset]
    sp = self.state.arch.register_names[self.state.arch.sp_offset]
    stack_shift = self.state.arch.initial_sp - self.real_stack_top
    self.state.registers.store(sp, self.state.regs.sp + stack_shift)
    if not self.omit_fp:
        self.state.registers.store(bp, self.state.regs.bp + stack_shift)
Adjust bp and sp w.r.t. stack difference between GDB session and angr. This matches sp and bp registers, but there is a high risk of pointers inconsistencies.
def refer(self, text):
    """Build a reply message that refers to the current message.

    Args:
        text (str): message content.
    Returns:
        RTMMessage with 'refer_key' set to this message's key.
    """
    data = self.reply(text)
    data['refer_key'] = self['key']
    return data
Refers current message and replys a new message Args: text(str): message content Returns: RTMMessage
def endGroup(self):
    """Close the current settings group, delegating to the custom format if set."""
    if self._customFormat:
        self._customFormat.endGroup()
    else:
        super(XSettings, self).endGroup()
Ends the current group of xml data.
def GET_save_conditionvalues(self) -> None:
    """Store the current condition values under (self._id, state.idx2)."""
    conditions = state.conditions.setdefault(self._id, {})
    conditions[state.idx2] = state.hp.conditions
Save the |StateSequence| and |LogSequence| object values of the current |HydPy| instance for the current simulation endpoint.
def to_tex(self, text_size='large', table_width=5, clear_pages=False):
    """Render the program to a .tex string via the jinja2 template."""
    max_ex_scheme = 0
    if self._rendered:
        for (week, day, dynamic_ex) in self._yield_week_day_dynamic():
            lengths = [len(s) for s in self._rendered[week][day][dynamic_ex]['strings']]
            max_ex_scheme = max(max_ex_scheme, max(lengths))
    # NOTE(review): max_ex_scheme is computed but never passed to the
    # template here -- confirm whether it is still needed.
    env = self.jinja2_environment
    template = env.get_template(self.TEMPLATE_NAMES['tex'])
    return template.render(program=self, text_size=text_size,
                           table_width=table_width, clear_pages=clear_pages)
Write the program information to a .tex file, which can be rendered to .pdf running pdflatex. The program can then be printed and brought to the gym. Parameters ---------- text_size The tex text size, e.g. '\small', 'normalsize', 'large', 'Large' or 'LARGE'. table_width The table with of the .tex code. Returns ------- string Program as tex.
def patch(self, delta):
    """Apply *delta* for the local file to the remote file via the sync API."""
    return self.api.post('path/sync/patch', self.path, delta=delta)
Applies delta for local file to remote file via API.
def run(self):
    """Worker-thread entry point: mark busy, bump the counter ten times over
    roughly five seconds, then mark idle.
    """
    self.busy = True
    for _ in range(9):
        self.counter += 1
        time.sleep(0.5)
    self.counter += 1
    self.busy = False
This method is run by a separated thread
def _parse_size(self, size, has_time=False):
    """Normalize the size input into a fixed size or a binning spec dict."""
    if has_time:
        size = size or 4
    else:
        size = size or 10
    if isinstance(size, str):
        # A bare column name becomes a dict spec.
        size = {'column': size}
    if isinstance(size, dict):
        if 'column' not in size:
            raise ValueError("`size` must include a 'column' key/value")
        if has_time:
            raise ValueError("When time is specified, size can "
                             "only be a fixed size")
        old_size = size
        size = {
            'range': [5, 25],
            'bins': 5,
            'bin_method': BinMethod.quantiles,
        }
        # min/max shortcuts override the corresponding end of the range.
        old_size['range'] = old_size.get('range', size['range'])
        if 'min' in old_size:
            old_size['range'][0] = old_size['min']
            old_size.pop('min')
        if 'max' in old_size:
            old_size['range'][1] = old_size['max']
            old_size.pop('max')
        size.update(old_size)
        self.style_cols[size['column']] = None
    return size
Parse size inputs
def main():
    """Measure liquid depth (inches) in a 31.5 in hole with an HC-SR04 sensor."""
    trig_pin = 17
    echo_pin = 27
    hole_depth = 31.5
    value = sensor.Measurement(trig_pin, echo_pin, temperature=68,
                               unit='imperial', round_to=2)
    raw = value.raw_distance()
    liquid_depth = value.depth_imperial(raw, hole_depth)
    print("Depth = {} inches".format(liquid_depth))
Calculate the depth of a liquid in inches using a HCSR04 sensor and a Raspberry Pi
def url_builder(self, endpoint, *, root=None, params=None, url_params=None):
    """Build a service URL: root + endpoint, optional query string, then
    format-interpolate *params* into the result.
    """
    if root is None:
        root = self.ROOT
    query = '?' + urlencode(url_params) if url_params else ''
    url = root + endpoint + query
    return url.format(**params or {})
Create a URL for the specified endpoint. Arguments: endpoint (:py:class:`str`): The API endpoint to access. root: (:py:class:`str`, optional): The root URL for the service API. params: (:py:class:`dict`, optional): The values for format into the created URL (defaults to ``None``). url_params: (:py:class:`dict`, optional): Parameters to add to the end of the URL (defaults to ``None``). Returns: :py:class:`str`: The resulting URL.
def future_check_sensor(self, name, update=None):
    """Check whether sensor *name* exists in the local index, waiting for
    data sync first and optionally re-inspecting the server before retrying.
    """
    exist = False
    yield self.until_data_synced()
    if name in self._sensors_index:
        exist = True
    else:
        if update or (update is None and self._update_on_lookup):
            yield self.inspect_sensors(name)
            # Retry once without allowing another server round-trip.
            exist = yield self.future_check_sensor(name, False)
    raise tornado.gen.Return(exist)
Check if the sensor exists. Used internally by future_get_sensor. This method is aware of synchronisation in progress and if inspection of the server is allowed. Parameters ---------- name : str Name of the sensor to verify. update : bool or None, optional If a katcp request to the server should be made to check if the sensor is on the server now. Notes ----- Ensure that self.state.data_synced == True if yielding to future_check_sensor from a state-change callback, or a deadlock will occur.
def spawn(mode, func, *args, **kwargs):
    """Spawn a thread-like object running *func* concurrently.

    Supported modes: 'threading' (default), 'gevent', 'eventlet'.
    """
    if mode is None:
        mode = 'threading'
    elif mode not in spawn.modes:
        raise ValueError('Invalid spawn mode: %s' % mode)
    if mode == 'threading':
        return spawn_thread(func, *args, **kwargs)
    elif mode == 'gevent':
        import gevent
        import gevent.monkey
        gevent.monkey.patch_select()
        gevent.monkey.patch_socket()
        return gevent.spawn(func, *args, **kwargs)
    elif mode == 'eventlet':
        import eventlet
        eventlet.patcher.monkey_patch(select=True, socket=True)
        return eventlet.spawn(func, *args, **kwargs)
    assert False
Spawns a thread-like object which runs the given function concurrently. Available modes: - `threading` - `greenlet` - `eventlet`
def makeLabel(self, value):
    """Format *value* with its units into an axis label.

    Precision depends on the magnitude of the value and of self.span
    relative to self.step / self.unitSystem.
    """
    value, prefix = format_units(value, self.step, system=self.unitSystem)
    span, spanPrefix = format_units(self.span, self.step, system=self.unitSystem)
    if prefix:
        prefix += " "
    if value < 0.1:
        return "%g %s" % (float(value), prefix)
    elif value < 1.0:
        return "%.2f %s" % (float(value), prefix)
    if span > 10 or spanPrefix != prefix:
        if type(value) is float:
            return "%.1f %s" % (value, prefix)
        else:
            return "%d %s" % (int(value), prefix)
    elif span > 3:
        return "%.1f %s" % (float(value), prefix)
    elif span > 0.1:
        return "%.2f %s" % (float(value), prefix)
    else:
        return "%g %s" % (float(value), prefix)
Create a label for the specified value. Create a label string containing the value and its units (if any), based on the values of self.step, self.span, and self.unitSystem.
def map2matrix(data_map, layout):
    r"""Transform a 2D map of tiled square images into a 2D matrix.

    Each square tile of *data_map* (tiled according to *layout*, row-major)
    becomes one column of the returned matrix.
    """
    layout = np.array(layout)
    n_obj = np.prod(layout)
    tile_size = (np.array(data_map.shape) // layout)[0]
    columns = []
    for idx in range(n_obj):
        row0 = tile_size * (idx // layout[1])
        col0 = tile_size * (idx % layout[1])
        tile = data_map[row0:row0 + tile_size, col0:col0 + tile_size]
        columns.append(tile.reshape(tile_size ** 2))
    return np.array(columns).T
r"""Map to Matrix This method transforms a 2D map to a 2D matrix Parameters ---------- data_map : np.ndarray Input data map, 2D array layout : tuple 2D layout of 2D images Returns ------- np.ndarray 2D matrix Raises ------ ValueError For invalid layout Examples -------- >>> from modopt.base.transform import map2matrix >>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13], [10, 11, 14, 15]]) >>> map2matrix(a, (2, 2)) array([[ 0, 4, 8, 12], [ 1, 5, 9, 13], [ 2, 6, 10, 14], [ 3, 7, 11, 15]])
def strip_masked(fasta, min_len, print_masked):
    """Yield [0, seq] pairs with masked regions >= min_len removed; when
    print_masked is True, also yield [1, masked_part] for each removal.
    """
    for seq in parse_fasta(fasta):
        nm, masked = parse_masked(seq, min_len)
        nm = ['%s removed_masked >=%s' % (seq[0], min_len), ''.join(nm)]
        yield [0, nm]
        if print_masked is True:
            for i, m in enumerate([i for i in masked if i != []], 1):
                m = ['%s insertion:%s' % (seq[0], i), ''.join(m)]
                yield [1, m]
remove masked regions from fasta file as long as they are longer than min_len
def configure_root_iam_credentials(self, access_key, secret_key, region=None,
                                   iam_endpoint=None, sts_endpoint=None,
                                   max_retries=-1,
                                   mount_point=DEFAULT_MOUNT_POINT):
    """Configure the root IAM credentials Vault uses to talk to AWS.

    POST /{mount_point}/config/root; overwrites existing credentials.
    Returns the requests.Response of the call.
    """
    params = {
        'access_key': access_key,
        'secret_key': secret_key,
        'region': region,
        'iam_endpoint': iam_endpoint,
        'sts_endpoint': sts_endpoint,
        'max_retries': max_retries,
    }
    api_path = '/v1/{mount_point}/config/root'.format(mount_point=mount_point)
    return self._adapter.post(
        url=api_path,
        json=params,
    )
Configure the root IAM credentials to communicate with AWS. There are multiple ways to pass root IAM credentials to the Vault server, specified below with the highest precedence first. If credentials already exist, this will overwrite them. The official AWS SDK is used for sourcing credentials from env vars, shared files, or IAM/ECS instances. * Static credentials provided to the API as a payload * Credentials in the AWS_ACCESS_KEY, AWS_SECRET_KEY, and AWS_REGION environment variables on the server * Shared credentials files * Assigned IAM role or ECS task role credentials At present, this endpoint does not confirm that the provided AWS credentials are valid AWS credentials with proper permissions. Supported methods: POST: /{mount_point}/config/root. Produces: 204 (empty body) :param access_key: Specifies the AWS access key ID. :type access_key: str | unicode :param secret_key: Specifies the AWS secret access key. :type secret_key: str | unicode :param region: Specifies the AWS region. If not set it will use the AWS_REGION env var, AWS_DEFAULT_REGION env var, or us-east-1 in that order. :type region: str | unicode :param iam_endpoint: Specifies a custom HTTP IAM endpoint to use. :type iam_endpoint: str | unicode :param sts_endpoint: Specifies a custom HTTP STS endpoint to use. :type sts_endpoint: str | unicode :param max_retries: Number of max retries the client should use for recoverable errors. The default (-1) falls back to the AWS SDK's default behavior. :type max_retries: int :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
def is_to_be_built_or_is_installed(self, shutit_module_obj):
    """Return True if this module is configured to be built, or if it is
    already installed."""
    shutit_global.shutit_global_object.yield_to_draw()
    module_cfg = self.cfg[shutit_module_obj.module_id]
    # A build flag set in config short-circuits the installed check.
    if module_cfg['shutit.core.module.build']:
        return True
    return self.is_installed(shutit_module_obj)
Returns true if this module is configured to be built, or if it is already installed.
def get(self, filename):
    """Fetch subtitles for *filename* from the napiprojekt.pl API.

    Returns the subtitles as a decoded string, or None when the response
    carries the 'NPc0' marker or cannot be decoded with a known codec.

    :raises NapiprojektException: on an API-level error response.
    """
    # Legacy napiprojekt query parameters; 'f' is the file hash and 't'
    # a checksum derived from it.
    params = {
        "v": 'dreambox',
        "kolejka": "false",
        "nick": "",
        "pass": "",
        "napios": sys.platform,
        "l": self.language.upper(),
        "f": self.prepareHash(filename),
    }
    params['t'] = self.discombobulate(params['f'])
    url = self.url_base + urllib.urlencode(params)
    subs = urllib.urlopen(url).read()
    # Literal Polish error marker returned by the API
    # ("missing temporary file").
    if subs.startswith('brak pliku tymczasowego'):
        raise NapiprojektException('napiprojekt.pl API error')
    # An 'NPc0' prefix means "no subtitles"; otherwise try the two
    # encodings the service is known to emit.
    if subs[0:4] != 'NPc0':
        for cdc in ['cp1250', 'utf8']:
            try:
                return codecs.decode(subs, cdc)
            # NOTE(review): bare except also swallows unrelated errors;
            # consider narrowing to UnicodeDecodeError/LookupError.
            except:
                pass
returns subtitles as string
def confirm_updated(value, check_fun, normalize_ret=False, wait=5):
    """Poll ``check_fun`` for up to ``wait`` seconds until ``value`` appears
    in its result.

    :param str value: the value indicating a successful change
    :param function check_fun: callable whose return is compared with ``value``
    :param bool normalize_ret: normalize the return of ``check_fun`` with
        ``validate_enabled`` before comparing
    :param int wait: maximum number of seconds to wait for the change
    :return: True as soon as ``value`` is seen, False if it never appears
    """
    for _attempt in range(wait):
        result = check_fun()
        if normalize_ret:
            result = validate_enabled(result)
        if value in result:
            return True
        time.sleep(1)
    return False
Wait up to ``wait`` seconds for a system parameter to be changed before deciding it hasn't changed. :param str value: The value indicating a successful change :param function check_fun: The function whose return is compared with ``value`` :param bool normalize_ret: Whether to normalize the return from ``check_fun`` with ``validate_enabled`` :param int wait: The maximum amount of seconds to wait for a system parameter to change
def parse_z(cls, offset):
    """Parse a ``%z`` style UTC offset string ("+HHMM") into a `timedelta`."""
    assert len(offset) == 5, 'Invalid offset string format, must be "+HHMM"'
    sign, hours, minutes = offset[0], offset[1:3], offset[3:]
    # Apply the sign to both components so e.g. "-0130" -> -1h -30m.
    return timedelta(hours=int(sign + hours), minutes=int(sign + minutes))
Parse %z offset into `timedelta`
def get_endpoint_server_root(self):
    """Parse this RemoteLRS object's endpoint and return its root.

    :return: Root of the endpoint (``scheme://host[:port]``)
    :rtype: unicode
    """
    parts = urlparse(self._endpoint)
    root = "{0}://{1}".format(parts.scheme, parts.hostname)
    if parts.port is not None:
        root = root + ":" + unicode(parts.port)
    return root
Parses RemoteLRS object's endpoint and returns its root :return: Root of the RemoteLRS object endpoint :rtype: unicode
def do_serialize(self, line):
    """Serialize the current ontology (or the selected entity) into an RDF
    flavour.

    ``line`` is the raw argument string; its first word selects the output
    format (defaulting to 'turtle' when omitted) and must be one of
    ``self.SERIALIZE_OPTS``.
    """
    opts = self.SERIALIZE_OPTS
    if not self.current:
        # Nothing loaded: show the "no ontology" help and bail out.
        self._help_noontology()
        return
    line = line.split()
    g = self.current['graph']
    if not line:
        # Default output format.
        line = ['turtle']
    if line[0] not in opts:
        # Unknown format: show usage instead of failing.
        self.help_serialize()
        return
    elif self.currentEntity:
        # An entity is selected: serialize just that entity.
        self.currentEntity['object'].printSerialize(line[0])
    else:
        # Otherwise serialize the whole graph.
        self._print(g.rdf_source(format=line[0]))
Serialize an entity into an RDF flavour
def get_user(self, username):
    """Fetch a user row from the database by username.

    :param username: the username to look up
    :return: the matching row, or None if the user is not found
    """
    # BUG FIX: the original read ``sql = self._db_curs.execute(sql, ...)``,
    # referencing ``sql`` before assignment (NameError) and clobbering it
    # with the cursor. Define the query and use the cursor correctly.
    # NOTE(review): table/column names assumed — confirm against the schema.
    sql = "SELECT * FROM users WHERE username = ?"
    self._db_curs.execute(sql, (username, ))
    user = self._db_curs.fetchone()
    return user
Fetch the user from the database The function will return None if the user is not found
def write(self):
    """Persist the current settings to the config file, then reload storage."""
    serialized = self.as_dict()
    with open(storage.config_file, 'w') as handle:
        yaml.dump(serialized, handle, default_flow_style=False)
    storage.refresh()
write the current settings to the config file
def remove_from_known_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False):
    """Remove the remote hosts' SSH public keys from the `known_hosts` file.

    :param hosts: the list of the remote `Host` objects.
    :param known_hosts: the `known_hosts` file storing the SSH public keys.
    :param dry: perform a dry run (log the command without executing it).
    """
    for host in hosts:
        logger.info('[%s] Removing the remote host SSH public key from [%s]...',
                    host.hostname, known_hosts)
        command = ['ssh-keygen', '-f', known_hosts, '-R', host.hostname]
        logger.debug('Call: %s', ' '.join(command))
        if dry:
            continue
        try:
            subprocess.check_call(command)
        except subprocess.CalledProcessError as ex:
            # Best-effort: log the failure and carry on with the next host.
            logger.error(format_error(format_exception(ex)))
Remove the remote host SSH public key from the `known_hosts` file. :param hosts: the list of the remote `Host` objects. :param known_hosts: the `known_hosts` file that stores the SSH public keys. :param dry: perform a dry run.
def parse_eddystone_service_data(data):
    """Parse Eddystone service data into the matching frame object.

    Returns None for unknown frame types or unknown TLM versions.
    """
    frame_type = data['frame_type']
    if frame_type == EDDYSTONE_UID_FRAME:
        return EddystoneUIDFrame(data['frame'])
    if frame_type == EDDYSTONE_TLM_FRAME:
        version = data['frame']['tlm_version']
        if version == EDDYSTONE_TLM_ENCRYPTED:
            return EddystoneEncryptedTLMFrame(data['frame']['data'])
        if version == EDDYSTONE_TLM_UNENCRYPTED:
            return EddystoneTLMFrame(data['frame']['data'])
        return None
    if frame_type == EDDYSTONE_URL_FRAME:
        return EddystoneURLFrame(data['frame'])
    if frame_type == EDDYSTONE_EID_FRAME:
        return EddystoneEIDFrame(data['frame'])
    return None
Parse Eddystone service data.
def check_type_of_param_list_elements(param_list):
    """Ensure all elements of ``param_list`` are ndarrays or None.

    ``param_list[0]`` must be an ndarray; every other element must be an
    ndarray or None.

    :raises TypeError: with a descriptive message if the check fails.
    """
    msg = "param_list[0] must be a numpy array."
    msg_2 = "All other elements must be numpy arrays or None."
    # Direct checks instead of the original assert-inside-try pattern:
    # asserts are stripped under ``python -O``, which would silently
    # disable this validation.
    valid = (isinstance(param_list[0], np.ndarray) and
             all(x is None or isinstance(x, np.ndarray) for x in param_list))
    if not valid:
        raise TypeError(msg + "\n" + msg_2)
    return None
Ensures that all elements of param_list are ndarrays or None. Raises a helpful TypeError otherwise.
def _subtoken_id_to_subtoken_string(self, subtoken): if 0 <= subtoken < self.vocab_size: return self._all_subtoken_strings[subtoken] return u""
Converts a subtoken integer ID to a subtoken string.
async def list(source):
    """Generate a single list from an asynchronous sequence.

    Yields exactly one item: a list of everything produced by ``source``.
    NOTE(review): intentionally shadows the ``list`` builtin — presumably
    an operator-module naming convention; confirm at call sites.
    """
    result = []
    # streamcontext manages the lifetime of the underlying stream.
    async with streamcontext(source) as streamer:
        async for item in streamer:
            result.append(item)
    yield result
Generate a single list from an asynchronous sequence.
def _AsList(arg):
    """Encapsulate an argument in a list, if it's not already iterable.

    Strings are treated as scalars (wrapped), not as iterables.
    """
    # FIX: ``collections.Iterable`` was deprecated in 3.3 and removed in
    # Python 3.10; the ABC lives in ``collections.abc``.
    if (isinstance(arg, string_types) or
            not isinstance(arg, collections.abc.Iterable)):
        return [arg]
    else:
        return list(arg)
Encapsulates an argument in a list, if it's not already iterable.
def least_squares(Cui, X, Y, regularization, num_threads=0):
    """Recompute the factors X for each user in Cui using least squares on Y.

    Note: this is at least 10 times slower than the cython version
    included here.
    """
    n_users, n_factors = X.shape
    # Hoist the Gram matrix: it is shared by every user's solve.
    YtY = Y.T.dot(Y)
    for user_id in range(n_users):
        X[user_id] = user_factor(Y, YtY, Cui, user_id, regularization, n_factors)
For each user in Cui, calculate factors Xu for them using least squares on Y. Note: this is at least 10 times slower than the cython version included here.
def StartingAgeEnum(ctx):
    """Starting Age Enumeration."""
    # Maps recorded starting-age codes to symbolic names.
    # ``what=-2`` / ``unset=-1`` are sentinel values.
    # NOTE(review): value 5 is skipped — presumably unused by the format,
    # and 6 appears to be the deathmatch post-imperial variant; confirm
    # against the game-record specification.
    return Enum(
        ctx,
        what=-2,
        unset=-1,
        dark=0,
        feudal=1,
        castle=2,
        imperial=3,
        postimperial=4,
        dmpostimperial=6
    )
Starting Age Enumeration.
def show_floating_ip(kwargs=None, call=None):
    """Show the details of a floating IP

    .. versionadded:: 2016.3.0

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
    """
    # Guard clauses: must be invoked as a function with a floating_ip kwarg.
    if call != 'function':
        log.error(
            'The show_floating_ip function must be called with -f or --function.'
        )
        return False

    kwargs = kwargs or {}
    if 'floating_ip' not in kwargs:
        log.error('A floating IP is required.')
        return False

    floating_ip = kwargs['floating_ip']
    log.debug('Floating ip is %s', floating_ip)
    return query(method='floating_ips', command=floating_ip)
Show the details of a floating IP .. versionadded:: 2016.3.0 CLI Examples: .. code-block:: bash salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
def get_inheritors(cls):
    """Get a set of all classes that inherit (directly or transitively)
    from the given class."""
    found = set()
    pending = [cls]
    # Breadth-unordered traversal of the subclass graph.
    while pending:
        current = pending.pop()
        for subclass in current.__subclasses__():
            if subclass in found:
                continue
            found.add(subclass)
            pending.append(subclass)
    return found
Get a set of all classes that inherit from the given class.
def _parse_incval(incunit, incval): try: retn = [int(val) for val in incval.split(',')] except ValueError: return None return retn[0] if len(retn) == 1 else retn
Parse a non-day increment value. Should be an integer or a comma-separated integer list.
def options(self, context, module_options):
    """
    URL    URL for the download cradle
    """
    if 'URL' not in module_options:
        context.log.error('URL option is required!')
        exit(1)
    self.url = module_options['URL']
URL URL for the download cradle
def getDate():
    """Return the current local date and time as a 'YYYY-mm-ddTHH:MM:SS' string."""
    now = _time.localtime(_time.time())
    return _time.strftime('%Y-%m-%dT%H:%M:%S', now)
Returns a formatted string with the current date and time.
def CheckTaskReadyForMerge(self, task):
    """Checks if a task is ready for merging with this session storage.

    If the task is ready to be merged, this method also sets the task's
    storage file size.

    Args:
      task (Task): task.

    Returns:
      bool: True if the task is ready to be merged.

    Raises:
      IOError: if the storage type is not supported or if the processed
          task storage path is not set.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError('Unsupported storage type.')
    if not self._processed_task_storage_path:
        raise IOError('Missing processed task storage path.')
    processed_storage_file_path = self._GetProcessedStorageFilePath(task)
    try:
        stat_info = os.stat(processed_storage_file_path)
    except (IOError, OSError):
        # Processed storage file does not exist (yet): not ready to merge.
        return False
    # Record the size so the merge step can account for it.
    task.storage_file_size = stat_info.st_size
    return True
Checks if a task is ready for merging with this session storage. If the task is ready to be merged, this method also sets the task's storage file size. Args: task (Task): task. Returns: bool: True if the task is ready to be merged. Raises: IOError: if the storage type is not supported or if the processed task storage path is not set. OSError: if the storage type is not supported or if the processed task storage path is not set.
def find_span_linear(degree, knot_vector, num_ctrlpts, knot, **kwargs):
    """Find the span of a single knot over the knot vector using linear search.

    Alternative implementation for Algorithm A2.1 from The NURBS Book by
    Piegl & Tiller.

    :param degree: degree, :math:`p`
    :param knot_vector: knot vector, :math:`U`
    :param num_ctrlpts: number of control points, :math:`n + 1`
    :param knot: knot or parameter, :math:`u`
    :return: knot span
    :rtype: int
    """
    # First index whose knot exceeds ``knot`` marks the end of the span.
    for idx in range(num_ctrlpts):
        if knot_vector[idx] > knot:
            return idx - 1
    return num_ctrlpts - 1
Finds the span of a single knot over the knot vector using linear search. Alternative implementation for the Algorithm A2.1 from The NURBS Book by Piegl & Tiller. :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param num_ctrlpts: number of control points, :math:`n + 1` :type num_ctrlpts: int :param knot: knot or parameter, :math:`u` :type knot: float :return: knot span :rtype: int
def check_units_and_type(input, expected_units, num=None, is_scalar=False):
    """Check whether a variable has the expected units and type.

    If ``input`` has no units and ``expected_units`` is not None, the
    output is assigned those units; conflicting units raise a ValueError.

    Parameters
    ----------
    input : array_like or float
        1D array-like or scalar, possibly carrying astropy units.
    expected_units : astropy.units or None
        Unit expected for ``input`` (None means dimensionless).
    num : int, optional
        Expected length for array-like input.
    is_scalar : bool, optional
        Set True to skip array/list conversion and check a scalar only.

    Returns
    -------
    ndarray or float, with astropy.units
        The input with the expected units attached.
    """
    # Anything with a ``.unit`` attribute is treated as a unit-carrying
    # quantity (e.g. astropy Quantity).
    if hasattr(input, 'unit'):
        if expected_units is None:
            raise ValueError('Expecting dimensionless input')
        elif input.unit != expected_units:
            raise ValueError('Expecting input units of ' + str(expected_units))
        else:
            # Strip the units; they are re-attached uniformly below.
            dimensionless = input.value
    else:
        dimensionless = input
    if is_scalar is False:
        # Normalize lists/tuples into an array form.
        dimensionfull = check_array_or_list(dimensionless)
    else:
        dimensionfull = dimensionless
    if expected_units is not None:
        # Attach (or re-attach) the expected units.
        dimensionfull = dimensionfull * expected_units
    if num is not None:
        check_input_size(dimensionfull, num)
    return dimensionfull
Check whether variable has expected units and type. If input does not have units and expected units is not None, then the output will be assigned those units. If input has units that conflict with expected units a ValueError will be raised. Parameters ---------- input : array_like or float Variable that will be checked for units and type. Variable should be 1D or scalar. expected_units : astropy.units or None Unit expected for input. num : int, optional Length expected for input, if it is an array or list. is_scalar : bool, optional Sets whether the input is a scalar quantity. Default is False for array_like inputs; set is_scalar=True to check scalar units only. Returns ---------- ndarray or float, with astropy.units Returns the input array or scalar with expected units, unless a conflict of units or array length occurs, which raise errors.
def linear_gradient(start_hex, finish_hex, n=10):
    """Interpolate a linear color gradient of ``n`` steps between two hex colors."""
    start_rgb = hex2rgb(start_hex)
    end_rgb = hex2rgb(finish_hex)
    gradient = [start_rgb]
    for step in range(1, n):
        frac = float(step) / (n - 1)
        gradient.append([int(start_rgb[ch] + frac * (end_rgb[ch] - start_rgb[ch]))
                         for ch in range(3)])
    # rgb2hex expects channel values in [0, 1].
    return [rgb2hex([c / 255. for c in rgb]) for rgb in gradient]
Interpolates a linear color gradient of ``n`` steps between two hex colors.
def combine_recs(rec_list, key):
    """Use a common key to combine a list of recs.

    Records sharing the same value for ``key`` are merged into one dict.

    :raises Exception: when two records disagree on a shared field.
    """
    merged = {}
    for rec in rec_list:
        rec_key = rec[key]
        if rec_key not in merged:
            merged[rec_key] = rec
            continue
        # FIX: dict.iteritems() is Python 2 only; items() behaves the same
        # for this use on both Python 2 and 3.
        for field, value in rec.items():
            if field in merged[rec_key] and merged[rec_key][field] != value:
                raise Exception("Mis-match for key '%s'" % field)
            merged[rec_key][field] = value
    # Return a list so the result type is the same on Python 2 and 3
    # (dict.values() is a view on Python 3).
    return list(merged.values())
Use a common key to combine a list of recs
def Pc(x, y):
    r'''Helper for the P-NTU method for plate exchangers: accepts a
    transformed NTU1 (`x`) and R1 (`y`) and evaluates

    .. math::
        P_c(x, y) = \frac{1 - \exp[-x(1 - y)]}{1 - y\exp[-x(1 - y)]}

    At the singular point the limit :math:`z = \frac{x}{1+x}` is returned.

    Examples
    --------
    >>> Pc(5, .7)
    0.9206703686051108
    '''
    try:
        exp_term = exp(-x*(1. - y))
        return (1. - exp_term)/(1. - y*exp_term)
    except ZeroDivisionError:
        # Denominator vanishes; use the analytic limit.
        return x/(1. + x)
r'''Basic helper calculator which accepts a transformed R1 and NTU1 as inputs for a common term used in the calculation of the P-NTU method for plate exchangers. Returns a value which is normally used in other calculations before the actual P1 is calculated. Nominally used in counterflow calculations .. math:: P_c(x, y) = \frac{1 - \exp[-x(1 - y)]}{1 - y\exp[-x(1 - y)]} Parameters ---------- x : float A modification of NTU1, the Thermal Number of Transfer Units [-] y : float A modification of R1, the thermal effectiveness [-] Returns ------- z : float Just another term in the calculation, [-] Notes ----- Used with the P-NTU plate method for heat exchanger design. At y = 1, this function has a ZeroDivisionError but can be evaluated at the limit to be :math:`z = \frac{x}{1+x}`. Examples -------- >>> Pc(5, .7) 0.9206703686051108 References ---------- .. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002. .. [2] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998.
def get_bin_indices(self, values):
    """Return the index tuple of the histogram bin which contains ``values``."""
    indices = []
    for axis in range(self.dimensions):
        indices.append(self.get_axis_bin_index(values[axis], axis))
    return tuple(indices)
Returns index tuple in histogram of bin which contains value
def replace_with(self, other):
    """Replace this element with the given DOMElement and return it."""
    # Insert the replacement immediately after this node, then detach
    # this node from its parent by its own index.
    self.after(other)
    self.parent.pop(self._own_index)
    return other
Replace this element with the given DOMElement.
def parse_compound(compound_def, context=None):
    """Parse a structured compound definition as obtained from a YAML file.

    Returns a CompoundEntry.
    """
    # Validate the identifier before building the entry.
    _check_id(compound_def.get('id'), 'Compound')
    return CompoundEntry(compound_def, FileMark(context, None, None))
Parse a structured compound definition as obtained from a YAML file Returns a CompoundEntry.
def get_section_by_label(label, include_instructor_not_on_time_schedule=True):
    """Return a uw_sws.models.Section object for the passed section label."""
    validate_section_label(label)
    section_url = "{}/{}.json".format(
        course_res_url_prefix, encode_section_label(label))
    return get_section_by_url(
        section_url, include_instructor_not_on_time_schedule)
Returns a uw_sws.models.Section object for the passed section label.
def _find_batch_containing_event(self, uuid): if self.estore.key_exists(uuid): return self.batchno else: for batchno in range(self.batchno - 1, -1, -1): db = self._open_event_store(batchno) with contextlib.closing(db): if db.key_exists(uuid): return batchno return None
Find the batch number that contains a certain event. Parameters: uuid -- the event uuid to search for. returns -- a batch number, or None if not found.
def _get_normalized_args(parser): env = os.environ if '_' in env and env['_'] != sys.argv[0] and len(sys.argv) >= 1 and " " in sys.argv[1]: return parser.parse_args(shlex.split(sys.argv[1]) + sys.argv[2:]) else: return parser.parse_args()
Return the parsed command line arguments. Support the case when executed from a shebang, where all the parameters come in sys.argv[1] in a single string separated by spaces (in this case, the third parameter is what is being executed)
def get_labels(self, field):
    """Return the labels of all clusters, for a given field."""
    labels = {}
    for cluster in self.clustering.cluster_ids:
        labels[cluster] = self.cluster_meta.get(field, cluster)
    return labels
Return the labels of all clusters, for a given field.
def _write_wrapped(self, line, sep=" ", indent="", width=78):
    """Word-wrap a line of RiveScript code for being written to a file.

    :param str line: The original line of text to word-wrap.
    :param str sep: The word separator.
    :param str indent: The indentation to use (as a set of spaces).
    :param int width: The character width to constrain each line to.

    :return str: The reformatted line(s), joined with RiveScript ``^``
        continuation syntax.
    """
    words = line.split(sep)
    lines = []
    line = ""
    buf = []
    # Greedily pack words until the joined line exceeds ``width``, then
    # push the overflowing word back and flush the buffer as one line.
    while len(words):
        buf.append(words.pop(0))
        line = sep.join(buf)
        if len(line) > width:
            words.insert(0, buf.pop())
            lines.append(sep.join(buf))
            buf = []
            line = ""
    if line:
        lines.append(line)
    # First physical line stays as-is; the rest become ``^`` continuations.
    result = lines.pop(0)
    if len(lines):
        eol = ""
        if sep == " ":
            # Literal RiveScript "\s" escape so the separating space
            # survives the line continuation (not a regex class).
            eol = "\s"
        for item in lines:
            result += eol + "\n" + indent + "^ " + item
    return result
Word-wrap a line of RiveScript code for being written to a file. :param str line: The original line of text to word-wrap. :param str sep: The word separator. :param str indent: The indentation to use (as a set of spaces). :param int width: The character width to constrain each line to. :return str: The reformatted line(s).
def compute_dual_rmetric(self, Ynew=None):
    """Compute the dual Riemannian metric of the given (or stored) embedding."""
    embedding = self.Y if Ynew is None else Ynew
    return RiemannMetric(embedding, self.laplacian_matrix).get_dual_rmetric()
Helper function to calculate the dual Riemannian metric of the stored (or newly supplied) embedding.
def contains_exclusive(self, x, y):
    """Return True if (x, y) lies within the bounding box, where the
    bottom and right boundaries are considered exclusive."""
    left, bottom, right, top = self._aarect.lbrt()
    if not (left <= x < right):
        return False
    return bottom < y <= top
Return True if the given point is contained within the bounding box, where the bottom and right boundaries are considered exclusive.
def get(cls, object_version, key):
    """Get the tag object for the given object version and key, or None."""
    version_id = as_object_version_id(object_version)
    matching = cls.query.filter_by(version_id=version_id, key=key)
    return matching.one_or_none()
Get the tag object.