def get_income_statement(self, **kwargs):
    """Income Statement

    Pulls income statement data. Available quarterly (4 quarters) or
    annually (4 years).

    Reference: https://iexcloud.io/docs/api/#income-statement

    Data Weighting: ``1000`` per symbol per period

    .. warning:: This endpoint is only available using IEX Cloud. See
        :ref:`Migrating` for more information.

    Parameters
    ----------
    period: str, default 'quarterly', optional
        Allows you to specify annual or quarterly income statement.
        Defaults to quarterly. Values should be annual or quarter.

    Returns
    -------
    list or pandas.DataFrame
        Stocks Income Statement endpoint data
    """
    def fmt(out):
        return {symbol: out[symbol]["income"] for symbol in self.symbols}

    def fmt_p(out):
        data = {(symbol, sheet["reportDate"]): sheet
                for symbol in out
                for sheet in out[symbol]["income"]}
        return pd.DataFrame(data)

    return self._get_endpoint("income", fmt_j=fmt, fmt_p=fmt_p, params=kwargs)
def add_validation_patch(self, patch):
    """
    Extracts ground truth and classification results from the EOPatch
    and aggregates the results.
    """
    # 2. Convert the 8-bit mask
    self._transform_truth(patch)
    # 3. Count truth-labeled pixels
    self._count_truth_pixels()
    # 5. Perform classification
    self._classify(patch)
    # 6. Count pixels classified as class i
    self._count_classified_pixels()

    self.n_validation_sets = self.n_validation_sets + 1
def list_proxy(root_package='vlcp'):
    '''
    Walk through all the sub-modules, find subclasses of
    vlcp.server.module._ProxyModule, and list their default values.
    '''
    proxy_dict = OrderedDict()
    pkg = __import__(root_package, fromlist=['_'])
    for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
        m = __import__(module, fromlist=['_'])
        for _, v in vars(m).items():
            if v is not None and isinstance(v, type) and issubclass(v, _ProxyModule) \
                    and v is not _ProxyModule \
                    and v.__module__ == module \
                    and hasattr(v, '_default'):
                name = v.__name__.lower()
                if name not in proxy_dict:
                    proxy_dict[name] = {
                        'defaultmodule': v._default.__name__.lower(),
                        'class': repr(v._default.__module__ + '.' + v._default.__name__)
                    }
    return proxy_dict
def _cmp(self, other):
    """
    Compare two Project Haystack version strings, then return
    -1 if self < other, 0 if self == other, or 1 if self > other.
    """
    if not isinstance(other, Version):
        other = Version(other)

    num1 = self.version_nums
    num2 = other.version_nums

    # Pad both to be the same length
    ver_len = max(len(num1), len(num2))
    num1 += tuple([0 for n in range(len(num1), ver_len)])
    num2 += tuple([0 for n in range(len(num2), ver_len)])

    # Compare the versions
    for (p1, p2) in zip(num1, num2):
        if p1 < p2:
            return -1
        elif p1 > p2:
            return 1

    # All the same; compare the extra strings.
    # A version that is missing the extra part is considered to come *before*
    # one that has it.
    if self.version_extra is None:
        if other.version_extra is None:
            return 0
        else:
            return -1
    elif other.version_extra is None:
        return 1
    elif self.version_extra == other.version_extra:
        return 0
    elif self.version_extra < other.version_extra:
        return -1
    else:
        return 1
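A minimal standalone sketch of the numeric half of this comparison (the helper name and sample tuples are hypothetical, not part of the library): pad the shorter tuple with zeros, then compare element-wise.

def compare_version_nums(num1, num2):
    # Pad with zeros so that (3,) compares equal to (3, 0, 0).
    ver_len = max(len(num1), len(num2))
    num1 = num1 + (0,) * (ver_len - len(num1))
    num2 = num2 + (0,) * (ver_len - len(num2))
    for p1, p2 in zip(num1, num2):
        if p1 != p2:
            return -1 if p1 < p2 else 1
    return 0

assert compare_version_nums((2, 0), (2, 0, 1)) == -1  # 2.0 < 2.0.1
assert compare_version_nums((3,), (3, 0, 0)) == 0     # 3 == 3.0.0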
def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Generates training/dev data.

    Args:
      data_dir: a string
      tmp_dir: a string
      task_id: an optional integer

    Returns:
      shard or shards for which data was generated.
    """
    tf.logging.info("generate_data task_id=%s" % task_id)
    encoder = self.get_or_create_vocab(data_dir, tmp_dir)
    assert task_id >= 0 and task_id < self.num_generate_tasks
    if task_id < self.num_train_shards:
        out_file = self.training_filepaths(
            data_dir, self.num_train_shards, shuffled=False)[task_id]
    else:
        out_file = self.dev_filepaths(
            data_dir, self.num_dev_shards,
            shuffled=False)[task_id - self.num_train_shards]
    generator_utils.generate_files(
        self.example_generator(encoder, tmp_dir, task_id), [out_file])
    generator_utils.shuffle_dataset([out_file])
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'gateways') and self.gateways is not None:
        _dict['gateways'] = [x._to_dict() for x in self.gateways]
    return _dict
def build_cpp(build_context, target, compiler_config, workspace_dir):
    """Compile and link a C++ binary for `target`."""
    rmtree(workspace_dir)
    binary = join(*split(target.name))
    objects = link_cpp_artifacts(build_context, target, workspace_dir, True)
    buildenv_workspace = build_context.conf.host_to_buildenv_path(workspace_dir)
    objects.extend(compile_cc(
        build_context, compiler_config, target.props.in_buildenv,
        get_source_files(target, build_context),
        workspace_dir, buildenv_workspace, target.props.cmd_env))
    bin_file = join(buildenv_workspace, binary)
    link_cmd = (
        [compiler_config.linker, '-o', bin_file] + objects +
        compiler_config.link_flags)
    build_context.run_in_buildenv(
        target.props.in_buildenv, link_cmd, target.props.cmd_env)
    target.artifacts.add(
        AT.binary,
        relpath(join(workspace_dir, binary), build_context.conf.project_root),
        binary)
def merge_perchrom_vcfs(job, perchrom_vcfs, tool_name, univ_options):
    """
    Merge per-chromosome vcf files into a single genome-level vcf.

    :param dict perchrom_vcfs: Dictionary with chromosome name as key and fsID
           of the corresponding vcf as value
    :param str tool_name: Name of the tool that generated the vcfs
    :param dict univ_options: Dict of universal arguments used by almost all tools
    :returns: fsID for the merged vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {''.join([chrom, '.vcf']): jsid
                   for chrom, jsid in perchrom_vcfs.items()}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    first = True
    with open(''.join([work_dir, '/', 'all_merged.vcf']), 'w') as outvcf:
        # Strip the '.vcf' suffix explicitly; the original used
        # x.rstrip('.vcf'), which strips any trailing 'v', 'c', 'f' or '.'
        # characters from the chromosome name rather than the suffix.
        for chromvcfname in chrom_sorted([x[:-len('.vcf')]
                                          for x in input_files.keys()]):
            with open(input_files[chromvcfname + '.vcf'], 'r') as infile:
                for line in infile:
                    line = line.strip()
                    if line.startswith('#'):
                        # Write the header only once, from the first file.
                        if first:
                            print(line, file=outvcf)
                        continue
                    first = False
                    print(line, file=outvcf)
    output_file = job.fileStore.writeGlobalFile(outvcf.name)
    export_results(job, output_file, outvcf.name, univ_options,
                   subfolder='mutations/' + tool_name)
    job.fileStore.logToMaster('Ran merge_perchrom_vcfs for %s successfully' % tool_name)
    return output_file
def create(host, port, result_converter=None, testcase_converter=None, args=None):
    """
    Function which is called by Icetea to create an instance of the cloud
    client. This function must exist. It must not return None: either return
    an instance of Client or raise.
    """
    return SampleClient(host, port, result_converter, testcase_converter, args)
def get_dashboards(self):
    '''**Description**
        Return the list of dashboards available under the given user account.
        This includes the dashboards created by the user and the ones shared
        with her by other users.

    **Success Return Value**
        A dictionary containing the list of available dashboards.

    **Example**
        `examples/list_dashboards.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_dashboards.py>`_
    '''
    res = requests.get(self.url + self._dashboards_api_endpoint,
                       headers=self.hdrs, verify=self.ssl_verify)
    return self._request_result(res)
def extractColumns(TableName, SourceParameterName, ParameterFormats,
                   ParameterNames=None, FixCol=False):
    """
    INPUT PARAMETERS:
        TableName:           name of source table                      (required)
        SourceParameterName: name of source column to process          (required)
        ParameterFormats:    C formats of unpacked parameters          (required)
        ParameterNames:      list of resulting parameter names         (optional)
        FixCol:              column-fixed (True) format of source column (optional)
    OUTPUT PARAMETERS:
        none
    ---
    DESCRIPTION:
        Note that this function is aimed to do some extra job on
        interpreting string parameters which is normally supposed
        to be done by the user.
    ---
    EXAMPLE OF USAGE:
        extractColumns('sampletab', SourceParameterName='p5',
                       ParameterFormats=('%d', '%d', '%d'),
                       ParameterNames=('p5_1', 'p5_2', 'p5_3'))
        This example extracts three integer parameters from a source
        column 'p5' and puts the results in ('p5_1', 'p5_2', 'p5_3').
    ---
    """
    # ParameterNames = just the names without expressions
    # ParameterFormats contains Python formats for parameter extraction
    # Example: ParameterNames=('v1','v2','v3')
    #          ParameterFormats=('%1s','%1s','%1s')
    # By default the format of parameters is column-fixed.
    if type(LOCAL_TABLE_CACHE[TableName]['header']['default'][SourceParameterName]) \
            not in set([str, unicode]):
        raise Exception('Source parameter must be a string')
    i = -1  # bug when (a,) != (a)
    if ParameterNames and type(ParameterNames) not in set([list, tuple]):
        ParameterNames = [ParameterNames]
    if ParameterFormats and type(ParameterFormats) not in set([list, tuple]):
        ParameterFormats = [ParameterFormats]
    # if ParameterNames is empty, fill it with #1-2-3-...
    if not ParameterNames:
        ParameterNames = []
        # using the naming convention #i, i=0,1,2,3...
        for par_format in ParameterFormats:
            while True:
                i += 1
                par_name = '#%d' % i
                fmt = LOCAL_TABLE_CACHE[TableName]['header']['format'].get(par_name, None)
                if not fmt:
                    break
            ParameterNames.append(par_name)
    # check if ParameterNames are valid
    Intersection = set(ParameterNames).intersection(
        LOCAL_TABLE_CACHE[TableName]['header']['order'])
    if Intersection:
        raise Exception('Parameters %s already exist' % str(list(Intersection)))
    # loop over ParameterNames to prepare LOCAL_TABLE_CACHE
    i = 0
    for par_name in ParameterNames:
        par_format = ParameterFormats[i]
        LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name] = par_format
        LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
        i += 1
    # append the new parameters to the order list
    LOCAL_TABLE_CACHE[TableName]['header']['order'] += ParameterNames
    # cope with default values
    i = 0
    format_regex = []
    format_types = []
    for par_format in ParameterFormats:
        par_name = ParameterNames[i]
        regex = FORMAT_PYTHON_REGEX
        (lng, trail, lngpnt, ty) = re.search(regex, par_format).groups()
        ty = ty.lower()
        if ty == 'd':
            par_type = int
            if FixCol:
                format_regex_part = REGEX_INTEGER_FIXCOL(lng)
            else:
                format_regex_part = REGEX_INTEGER
        elif ty == 's':
            par_type = str
            if FixCol:
                format_regex_part = REGEX_STRING_FIXCOL(lng)
            else:
                format_regex_part = REGEX_STRING
        elif ty == 'f':
            par_type = float
            if FixCol:
                format_regex_part = REGEX_FLOAT_F_FIXCOL(lng)
            else:
                format_regex_part = REGEX_FLOAT_F
        elif ty == 'e':
            par_type = float
            if FixCol:
                format_regex_part = REGEX_FLOAT_E_FIXCOL(lng)
            else:
                format_regex_part = REGEX_FLOAT_E
        else:
            raise Exception('Unknown data type')
        format_regex.append('(' + format_regex_part + ')')
        format_types.append(par_type)
        def_val = getDefaultValue(par_type)
        LOCAL_TABLE_CACHE[TableName]['header']['default'][par_name] = def_val
        i += 1
    format_regex = r'\s*'.join(format_regex)
    # loop through the values of the source parameter
    for SourceParameterString in LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName]:
        try:
            ExtractedValues = list(re.search(format_regex, SourceParameterString).groups())
        except Exception:
            raise Exception('Error with line "%s"' % SourceParameterString)
        i = 0
        # loop through all parameters which are supposed to be extracted
        for par_name in ParameterNames:
            par_value = format_types[i](ExtractedValues[i])
            LOCAL_TABLE_CACHE[TableName]['data'][par_name].append(par_value)
            i += 1
    # explicitly check that the numbers of rows are equal
    number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    number_of_rows2 = len(LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName])
    number_of_rows3 = len(LOCAL_TABLE_CACHE[TableName]['data'][ParameterNames[0]])
    if not (number_of_rows == number_of_rows2 == number_of_rows3):
        raise Exception('Error while extracting parameters: check your regexp')
def get_areas(self, area_id=None, **kwargs):
    """
    Alias for get_elements() but filter the result by Area

    :param area_id: The Id of the area
    :type area_id: Integer
    :return: List of elements
    """
    return self.get_elements(Area, elem_id=area_id, **kwargs)
async def send_script(self, conn_id, data):
    """Send a script to a device.

    See :meth:`AbstractDeviceAdapter.send_script`.
    """
    progress_callback = functools.partial(_on_progress, self, 'script', conn_id)
    resp = await self._execute(self._adapter.send_script_sync, conn_id,
                               data, progress_callback)
    # The original passed 'send_rpc' here, apparently a copy-paste slip;
    # the operation being reported on is the script transfer.
    _raise_error(conn_id, 'send_script', resp)
def report_hit_filename(zipfilename: str, contentsfilename: str,
                        show_inner_file: bool) -> None:
    """
    For "hits": prints either the ``.zip`` filename, or the ``.zip``
    filename and the inner filename.

    Args:
        zipfilename: filename of the ``.zip`` file
        contentsfilename: filename of the inner file
        show_inner_file: if ``True``, show both; if ``False``, show just
            the ``.zip`` filename

    Returns:
        None
    """
    if show_inner_file:
        print("{} [{}]".format(zipfilename, contentsfilename))
    else:
        print(zipfilename)
def bam2fastq(job, bamfile, univ_options):
    """
    Split an input bam into paired fastqs.

    ARGUMENTS
    1. bamfile: Path to a bam file
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
              |- 'dockerhub': <dockerhub to use>
              +- 'java_Xmx': value for max heap passed to java
    """
    work_dir = os.path.split(bamfile)[0]
    base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
    parameters = ['SamToFastq',
                  ''.join(['I=', docker_path(bamfile)]),
                  ''.join(['F=/data/', base_name, '_1.fastq']),
                  ''.join(['F2=/data/', base_name, '_2.fastq']),
                  ''.join(['FU=/data/', base_name, '_UP.fastq'])]
    docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                java_opts=univ_options['java_Xmx'])
    first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq'])
    assert os.path.exists(first_fastq)
    return first_fastq
def combinations(l):
    """Pure-Python implementation of itertools.combinations(l, 2)."""
    result = []
    for x in xrange(len(l) - 1):
        ls = l[x + 1:]
        for y in ls:
            result.append((l[x], y))
    return result
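A quick sanity check against the standard library (Python 3 spelling, so `range` replaces the `xrange` above):

import itertools

def combinations2(l):
    result = []
    for x in range(len(l) - 1):
        for y in l[x + 1:]:
            result.append((l[x], y))
    return result

assert combinations2([1, 2, 3]) == list(itertools.combinations([1, 2, 3], 2))
# [(1, 2), (1, 3), (2, 3)]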
def find_worst(rho, pval, m=1, rlim=.10, plim=.35):
    """Find the N "worst", i.e. insignificant/random and low, correlations

    Parameters
    ----------
    rho : ndarray, list
        1D array with correlation coefficients
    pval : ndarray, list
        1D array with p-values
    m : int
        The desired number of indices to return
        (How many "worst" correlations to find?)
    rlim : float
        Desired maximum absolute correlation coefficient (Default: 0.10)
    plim : float
        Desired minimum p-value (Default: 0.35)

    Returns
    -------
    selected : list
        Indices of rho and pval of the "worst" correlations.
    """
    # convert to lists
    n = len(rho)
    r = list(np.abs(rho))
    p = list(pval)
    i = list(range(n))

    # check m
    if m > n:
        warnings.warn(
            'm is bigger than the available correlations in rho and pval.')
        m = n

    # selected indices
    selected = list()

    # (1) Pick the highest/worst p-value with
    #     |r| <= rlim and p > plim
    it = 0
    while (len(selected) < m) and (it < n):
        temp = p.index(max(p))  # temporary index among the remaining values
        worst = i[temp]  # store the original index as 'worst' before the loop aborts
        if (r[temp] <= rlim) and (p[temp] > plim):
            # delete from the working lists
            r.pop(temp)
            p.pop(temp)
            i.pop(temp)
            # append to the result
            selected.append(worst)
        it = it + 1

    # (2) Just pick the highest/worst p-value of the remaining
    #     bad correlations, |r| <= rlim
    it = 0
    n2 = len(i)
    while (len(selected) < m) and (it < n2):
        temp = p.index(max(p))
        worst = i[temp]
        if r[temp] <= rlim:
            r.pop(temp)
            p.pop(temp)
            i.pop(temp)
            selected.append(worst)
        it = it + 1

    # (3) Pick the lowest correlations
    it = 0
    n3 = len(i)
    while (len(selected) < m) and (it < n3):
        temp = r.index(min(r))  # find the smallest correlation (not p-value)
        worst = i[temp]
        r.pop(temp)
        p.pop(temp)
        i.pop(temp)
        selected.append(worst)
        it = it + 1

    return selected
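A hypothetical usage, assuming numpy and warnings are imported as the function requires: index 3 has the worst qualifying p-value, index 1 the next worst.

import numpy as np

rho = [0.90, 0.05, 0.20, 0.01]
pval = [0.001, 0.60, 0.30, 0.80]
print(find_worst(rho, pval, m=2))  # -> [3, 1]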
def error_count(self):
    """Returns the total number of validation errors for this row."""
    count = 0
    for error_list in self.error_dict.values():
        count += len(error_list)
    return count
def _setStartSegment(self, segmentIndex, **kwargs):
    """
    Subclasses may override this method.
    """
    segments = self.segments
    oldStart = segments[-1]
    oldLast = segments[0]
    # If the contour ends with a curve on top of a move,
    # delete the move.
    if oldLast.type == "curve" or oldLast.type == "qcurve":
        startOn = oldStart.onCurve
        lastOn = oldLast.onCurve
        if startOn.x == lastOn.x and startOn.y == lastOn.y:
            self.removeSegment(0)
            # Shift the new start index.
            segmentIndex = segmentIndex - 1
            segments = self.segments
    # If the first point is a move, convert it to a line.
    if segments[0].type == "move":
        segments[0].type = "line"
    # Reorder the points internally.
    segments = segments[segmentIndex - 1:] + segments[:segmentIndex - 1]
    points = []
    for segment in segments:
        for point in segment:
            points.append(((point.x, point.y), point.type, point.smooth,
                           point.name, point.identifier))
    # Clear the points.
    for point in self.points:
        self.removePoint(point)
    # Add the points.
    for point in points:
        position, type, smooth, name, identifier = point
        self.appendPoint(
            position,
            type=type,
            smooth=smooth,
            name=name,
            identifier=identifier
        )
def is_dsub_operation(op):
    """Determine if a pipelines operation is a dsub request.

    We don't have a rigorous way to identify an operation as being submitted
    by dsub. Our best option is to check for certain fields that have always
    been part of dsub operations.

    - labels: job-id, job-name, and user-id have always existed. The
      dsub-version label has always existed for the google-v2 provider.

    Args:
      op: a pipelines operation.

    Returns:
      Boolean, true if the pipeline run was generated by dsub.
    """
    if not is_pipeline(op):
        return False

    for name in ['dsub-version', 'job-id', 'job-name', 'user-id']:
        if not get_label(op, name):
            return False

    return True
def get_rotations(self):
    """Return all rotations, including inversions for centrosymmetric
    crystals."""
    if self.centrosymmetric:
        return np.vstack((self.rotations, -self.rotations))
    else:
        return self.rotations
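A small illustration of the stacking step with a hypothetical two-matrix rotation set (the identity and a 90-degree rotation about z); for a centrosymmetric group the set is doubled with the negated matrices.

import numpy as np

rotations = np.array([np.eye(3),
                      [[0, -1, 0], [1, 0, 0], [0, 0, 1]]])  # identity, 90 deg about z
all_rots = np.vstack((rotations, -rotations))
print(all_rots.shape)  # (4, 3, 3): each rotation plus its inversion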
def wrap_url(s, l):
    """Wrap a URL string to at most `l` characters per line, breaking only
    at '/' boundaries."""
    parts = s.split('/')
    if len(parts) == 1:
        return parts[0]
    else:
        i = 0
        lines = []
        for j in range(i, len(parts) + 1):
            tv = '/'.join(parts[i:j])
            nv = '/'.join(parts[i:j + 1])
            if len(nv) > l or nv == tv:
                i = j
                lines.append(tv)
        return '/\n'.join(lines)
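For example, with a 20-character limit (tracing the loop above on an illustrative URL):

print(wrap_url('https://example.com/a/very/long/path/segment', 20))
# https://example.com/
# a/very/long/path/
# segment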
def report(self, item_id, report_format="json"):
    """Retrieves the specified report for the analyzed item, referenced by item_id.

    Available formats include: json, html, all, dropped, package_files.

    :type item_id: int
    :param item_id: Task ID number
    :type report_format: str
    :param report_format: Return format
    :rtype: dict
    :return: Dictionary representing the JSON parsed data or raw, for other
             formats / JSON parsing failure.
    """
    report_format = report_format.lower()

    response = self._request("tasks/report/{id}/{format}".format(
        id=item_id, format=report_format))

    # if the response is JSON, return it as an object
    if report_format == "json":
        try:
            return json.loads(response.content.decode('utf-8'))
        except ValueError:
            pass

    # otherwise, return the raw content
    return response.content
def _group(self, element):
    """Parses the XML element as a group of an [unknown] number of lines."""
    for v in _get_xml_version(element):
        if "name" in element.attrib:
            g = TemplateGroup(element, self.versions[v].comment)
            self.versions[v].entries[g.identifier] = g
            self.versions[v].order.append(g.identifier)
        else:
            msg.warn("no name element in {}. Ignored. (_group)".format(element))
def process(self):
    """
    Calls the external cleanser scripts to (optionally) purge the metadata
    and then send the contents of the dropbox via email.
    """
    if self.num_attachments > 0:
        self.status = u'100 processor running'
        fs_dirty_archive = self._create_backup()
        # calling _process_attachments has the side effect of updating
        # `send_attachments`
        self._process_attachments()

    if self.status_int < 500 and not self.send_attachments:
        self._create_archive()

    if self.status_int >= 500 and self.status_int < 600:
        # cleansing failed
        # if configured, we need to move the uncleansed archive to
        # the appropriate folder and notify the editors
        if 'dropbox_dirty_archive_url_format' in self.settings:
            # create_archive
            shutil.move(
                fs_dirty_archive,
                '%s/%s.zip.pgp' % (self.container.fs_archive_dirty, self.drop_id))
            # update status
            # it's now considered 'successful-ish' again
            self.status = '490 cleanser failure but notify success'

    if self.status_int == 800:
        # at least one attachment was not supported
        # if configured, we need to move the uncleansed archive to
        # the appropriate folder and notify the editors
        if 'dropbox_dirty_archive_url_format' in self.settings:
            # create_archive
            shutil.move(
                fs_dirty_archive,
                '%s/%s.zip.pgp' % (self.container.fs_archive_dirty, self.drop_id))

    if self.status_int < 500 or self.status_int == 800:
        try:
            if self._notify_editors() > 0:
                if self.status_int < 500:
                    self.status = '900 success'
            else:
                self.status = '605 smtp failure'
        except Exception:
            import traceback
            tb = traceback.format_exc()
            self.status = '610 smtp error (%s)' % tb
    self.cleanup()
    return self.status
def from_folder(cls, path:PathOrStr, train:str='train', valid:str='valid',
                test:Optional[str]=None, classes:Collection[Any]=None,
                tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000,
                max_vocab:int=60000, min_freq:int=2, mark_fields:bool=False,
                include_bos:bool=True, include_eos:bool=False, **kwargs):
    "Create a `TextDataBunch` from text files in folders."
    path = Path(path).absolute()
    processor = [OpenFileProcessor()] + _get_processor(
        tokenizer=tokenizer, vocab=vocab, chunksize=chunksize,
        max_vocab=max_vocab, min_freq=min_freq, mark_fields=mark_fields,
        include_bos=include_bos, include_eos=include_eos)
    src = (TextList.from_folder(path, processor=processor)
                   .split_by_folder(train=train, valid=valid))
    src = src.label_for_lm() if cls == TextLMDataBunch \
          else src.label_from_folder(classes=classes)
    if test is not None:
        src.add_test_folder(path/test)
    return src.databunch(**kwargs)
def _setup_profiles(self, conversion_profiles):
    '''Add the given conversion profiles, checking for invalid profiles.'''
    # Check for invalid profiles
    for key, path in conversion_profiles.items():
        if isinstance(path, str):
            path = (path, )
        for left, right in pair_looper(path):
            pair = (_format(left), _format(right))
            if pair not in self.converters:
                msg = 'Invalid conversion profile %s, unknown step %s'
                log.warning(msg % (repr(key), repr(pair)))
                break
        else:
            # The loop did not break, so add to the conversion profiles.
            self.conversion_profiles[key] = path
def set_title(self, s, panel='top'):
    "set plot title"
    panel = self.get_panel(panel)
    panel.set_title(s)
def get_child_bank_ids(self, bank_id):
    """Gets the child ``Ids`` of the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` to query
    return: (osid.id.IdList) - the children of the bank
    raise:  NotFound - ``bank_id`` is not found
    raise:  NullArgument - ``bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_child_bin_ids
    if self._catalog_session is not None:
        return self._catalog_session.get_child_catalog_ids(catalog_id=bank_id)
    return self._hierarchy_session.get_children(id_=bank_id)
def on_channel_closed(self, channel, reply_code, reply_text):
    """
    Invoked by pika when RabbitMQ unexpectedly closes the channel. Channels
    are usually closed if you attempt to do something that violates the
    protocol, such as re-declaring an exchange or queue with different
    parameters. In this case, we'll close the connection to shut down the
    object.

    :param pika.channel.Channel channel: The closed channel
    :param int reply_code: The numeric reason the channel was closed
    :param str reply_text: The text reason the channel was closed
    """
    self._logger.warning('Channel was closed: (%s) %s', reply_code, reply_text)
    if not self._closing:
        self.close_connection()
def run(self):
    """Thread main loop"""
    retries = 0
    try:
        while not self._stopping:
            try:
                data = self.notifications_api.long_poll_notifications()
            except mds.rest.ApiException as e:
                # An HTTP 410 can be raised when stopping, so don't log anything
                if not self._stopping:
                    backoff = 2 ** retries - random.randint(int(retries / 2), retries)
                    LOG.error('Notification long poll failed with exception '
                              '(retry in %d seconds):\n%s', backoff, e)
                    retries += 1
                    # Back off for an increasing amount of time until we have
                    # tried 10 times, then reset the backoff.
                    if retries >= 10:
                        retries = 0
                    time.sleep(backoff)
            else:
                handle_channel_message(
                    db=self.db,
                    queues=self.queues,
                    b64decode=self._b64decode,
                    notification_object=data
                )
                if self.subscription_manager:
                    self.subscription_manager.notify(data.to_dict())
    finally:
        self._stopped.set()
def get_random_name(retry=False):
    """
    Generates a random name from the lists of adjectives and birds in this
    package, formatted as "adjective_surname". For example 'loving_sugarbird'.
    If retry is True, a random integer between 0 and 100 will be added to the
    end of the name, e.g. `loving_sugarbird3`.
    """
    name = "%s_%s" % (left[random.randint(0, len(left) - 1)],
                      right[random.randint(0, len(right) - 1)])
    if retry is True:
        name = "%s%d" % (name, random.randint(0, 100))
    return name
def new(params, event_shape=(), validate_args=False, name=None):
    """Create the distribution instance from a `params` vector."""
    with tf.compat.v1.name_scope(name, 'IndependentLogistic',
                                 [params, event_shape]):
        params = tf.convert_to_tensor(value=params, name='params')
        event_shape = dist_util.expand_to_vector(
            tf.convert_to_tensor(
                value=event_shape, name='event_shape', dtype_hint=tf.int32),
            tensor_name='event_shape')
        output_shape = tf.concat([
            tf.shape(input=params)[:-1],
            event_shape,
        ], axis=0)
        loc_params, scale_params = tf.split(params, 2, axis=-1)
        return tfd.Independent(
            tfd.Logistic(
                loc=tf.reshape(loc_params, output_shape),
                scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),
                validate_args=validate_args),
            reinterpreted_batch_ndims=tf.size(input=event_shape),
            validate_args=validate_args)
def gen_data_files(src_dir):
    """
    Generates a list of the files contained in the given directory (and its
    subdirectories) in the format required by the ``package_data`` parameter
    of the ``setuptools.setup`` function.

    Parameters
    ----------
    src_dir : str
        (relative) path to the directory structure containing the files to
        be included in the package distribution

    Returns
    -------
    fpaths : list(str)
        a list of file paths
    """
    fpaths = []
    base = os.path.dirname(src_dir)
    for root, dirs, files in os.walk(src_dir):
        if len(files) != 0:
            for f in files:
                fpaths.append(os.path.relpath(os.path.join(root, f), base))
    return fpaths
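A hedged usage sketch in a setup.py; the package and directory names are illustrative. Since paths come back relative to the parent of `src_dir`, passing 'mypkg/data' yields entries like 'data/foo.csv', which is what `package_data` expects.

from setuptools import setup

setup(
    name='mypkg',
    packages=['mypkg'],
    # Include everything under mypkg/data in the built distribution.
    package_data={'mypkg': gen_data_files('mypkg/data')},
)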
def safe_display_name(numobj, lang, script=None, region=None):
    """Gets the name of the carrier for the given PhoneNumber object only when
    it is 'safe' to display to users. A carrier name is considered safe if the
    number is valid and for a region that doesn't support mobile number
    portability (http://en.wikipedia.org/wiki/Mobile_number_portability).

    This function explicitly checks the validity of the number passed in.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a carrier name.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
          which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
          ISO script code as defined in ISO 15924, separated by an
          underscore (e.g. "Hant")
    region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")

    Returns a carrier name that is safe to display to users, or the empty string.
    """
    if is_mobile_number_portable_region(region_code_for_number(numobj)):
        return U_EMPTY_STRING
    return name_for_number(numobj, lang, script, region)
def reorder_categories(self, new_categories, ordered=None, inplace=False):
    """
    Reorder categories as specified in new_categories.

    `new_categories` need to include all old categories and no new category
    items.

    Parameters
    ----------
    new_categories : Index-like
       The categories in new order.
    ordered : bool, optional
       Whether or not the categorical is treated as an ordered categorical.
       If not given, do not change the ordered information.
    inplace : bool, default False
       Whether or not to reorder the categories inplace or return a copy of
       this categorical with reordered categories.

    Returns
    -------
    cat : Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If the new categories do not contain all old category items or any
        new ones

    See Also
    --------
    rename_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if set(self.dtype.categories) != set(new_categories):
        raise ValueError("items in new_categories are not the same as in "
                         "old categories")
    return self.set_categories(new_categories, ordered=ordered,
                               inplace=inplace)
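A short example through the public pandas API: reordering changes the category order (and hence the sort order) without touching the values themselves.

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a', 'c'], categories=['a', 'b', 'c'])
reordered = cat.reorder_categories(['c', 'b', 'a'], ordered=True)
print(reordered.categories.tolist())  # ['c', 'b', 'a']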
def sort_return_tuples(response, **options):
    """
    If ``groups`` is specified, return the response as a list of n-element
    tuples with n being the value found in options['groups'].
    """
    if not response or not options.get('groups'):
        return response
    n = options['groups']
    return list(izip(*[response[i::n] for i in range(n)]))
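A sketch of the grouping trick itself (using the built-in zip; the original uses the Python 2 izip): a flat response of n-grouped fields becomes a list of n-tuples.

response = ['a', 1, 'b', 2, 'c', 3]
n = 2
print(list(zip(*[response[i::n] for i in range(n)])))
# [('a', 1), ('b', 2), ('c', 3)]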
def _key(self, username, frozen=False):
    """Translate a username into a key for Redis."""
    if frozen:
        return self.frozen + username
    return self.prefix + username
def get_app_logger_color(appname, app_log_level=logging.INFO,
                         log_level=logging.WARN, logfile=None):
    """ Configure the logging for an app using reliure (it logs both the app
    and the reliure lib)

    :param appname: the name of the application to log
    :param app_log_level: log level for the app
    :param log_level: log level for reliure
    :param logfile: file that stores the log; a time-rotating file (by day),
        or no file if None
    """
    # create the lib handler
    stderr_handler = logging.StreamHandler()
    stderr_handler.setLevel(log_level)
    # create a formatter and add it to the handler
    name = "reliure"
    name += "_" * (max(0, len(appname) - len(name)))
    formatter = ColorFormatter(
        '$BG-BLUE$WHITE%s$RESET:%%(asctime)s:$COLOR%%(levelname)s$RESET:$BOLD%%(name)s$RESET: %%(message)s' % name)
    stderr_handler.setFormatter(formatter)
    # get the logger itself
    logger = logging.getLogger("reliure")
    logger.setLevel(logging.DEBUG)
    # add the handler to the logger
    logger.addHandler(stderr_handler)

    # create the app handler
    app_stderr_handler = logging.StreamHandler()
    app_stderr_handler.setLevel(app_log_level)
    # create a formatter and add it to the handler
    app_formatter = ColorFormatter(
        "$BG-CYAN$WHITE%s$RESET:%%(asctime)s:$COLOR%%(levelname)s$RESET:$BOLD%%(name)s$RESET: %%(message)s" % appname.upper())
    app_stderr_handler.setFormatter(app_formatter)
    # get the logger itself
    app_logger = logging.getLogger(appname)
    app_logger.setLevel(logging.DEBUG)
    # add the handler to the logger
    app_logger.addHandler(app_stderr_handler)

    if logfile is not None:
        file_format = '%(asctime)s:%(levelname)s:%(name)s: %(message)s'
        from logging.handlers import TimedRotatingFileHandler
        file_handler = TimedRotatingFileHandler(logfile, when="D", interval=1,
                                                backupCount=7)
        file_handler.setFormatter(logging.Formatter(file_format))
        # add the file handler to both loggers
        logger.addHandler(file_handler)
        app_logger.addHandler(file_handler)
    return app_logger
def plot_di_mean_ellipse(dictionary, fignum=1, color='k', marker='o',
                         markersize=20, label='', legend='no'):
    """
    Plot a mean direction (declination, inclination) confidence ellipse.

    Parameters
    -----------
    dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent
        functions
    """
    pars = []
    pars.append(dictionary['dec'])
    pars.append(dictionary['inc'])
    pars.append(dictionary['Zeta'])
    pars.append(dictionary['Zdec'])
    pars.append(dictionary['Zinc'])
    pars.append(dictionary['Eta'])
    pars.append(dictionary['Edec'])
    pars.append(dictionary['Einc'])

    DI_dimap = pmag.dimap(dictionary['dec'], dictionary['inc'])
    if dictionary['inc'] < 0:
        plt.scatter(DI_dimap[0], DI_dimap[1],
                    edgecolors=color, facecolors='white',
                    marker=marker, s=markersize, label=label)
    if dictionary['inc'] >= 0:
        plt.scatter(DI_dimap[0], DI_dimap[1],
                    edgecolors=color, facecolors=color,
                    marker=marker, s=markersize, label=label)
    pmagplotlib.plot_ell(fignum, pars, color, 0, 1)
def Xor(bytestr, key):
    """Returns a `bytes` object where each byte has been xored with key."""
    # TODO(hanuszczak): Remove this import when the string migration is done.
    # pytype: disable=import-error
    from builtins import bytes  # pylint: disable=redefined-builtin, g-import-not-at-top
    # pytype: enable=import-error
    precondition.AssertType(bytestr, bytes)

    # TODO: This seemingly no-op operation actually changes things.
    # In Python 2 this function receives a `str` object which has different
    # iterator semantics. So we use a `bytes` wrapper from the `future` package
    # to get the Python 3 behaviour. In Python 3 this should indeed be a no-op.
    # Once the migration is completed and support for Python 2 is dropped,
    # this line can be removed.
    bytestr = bytes(bytestr)

    return bytes([byte ^ key for byte in bytestr])
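A quick demonstration of the core operation under Python 3 semantics, where iterating bytes yields ints (the helper name is ours, not the library's):

def xor_bytes(bytestr, key):
    return bytes(b ^ key for b in bytestr)

assert xor_bytes(b'\x01\x02', 0xFF) == b'\xfe\xfd'
assert xor_bytes(xor_bytes(b'secret', 42), 42) == b'secret'  # XOR is its own inverse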
def _run_check(self, check_method, ds, max_level):
    """
    Runs a check and appends a result to the values list.

    @param bound method check_method: a given check method
    @param netCDF4 dataset ds
    @param int max_level: check level
    @return list: list of Result objects
    """
    val = check_method(ds)

    if isinstance(val, list):
        check_val = []
        for v in val:
            res = fix_return_value(v, check_method.__func__.__name__,
                                   check_method, check_method.__self__)
            if max_level is None or res.weight > max_level:
                check_val.append(res)
        return check_val
    else:
        check_val = fix_return_value(val, check_method.__func__.__name__,
                                     check_method, check_method.__self__)
        if max_level is None or check_val.weight > max_level:
            return [check_val]
        else:
            return []
def update_query(self, *args, **kwargs):
    """Return a new URL with the query part updated."""
    s = self._get_str_query(*args, **kwargs)
    new_query = MultiDict(parse_qsl(s, keep_blank_values=True))
    query = MultiDict(self.query)
    query.update(new_query)
    return URL(self._val._replace(query=self._get_str_query(query)),
               encoded=True)
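This appears to be yarl's `URL.update_query`; a hedged usage example with that library, where existing query keys are kept and the new ones merged in:

from yarl import URL

url = URL('https://example.com/path?a=1')
print(url.update_query({'b': '2'}))  # https://example.com/path?a=1&b=2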
def determine_master(port=4000):
    """Determine the address of the master so that workers can connect to it.
    If the environment variable SPARK_LOCAL_IP is set, that address will be
    used.

    :param port: port on which the application runs
    :return: Master address

    Example usage:
        SPARK_LOCAL_IP=127.0.0.1 spark-submit --master \
            local[8] examples/mllib_mlp.py
    """
    if os.environ.get('SPARK_LOCAL_IP'):
        return os.environ['SPARK_LOCAL_IP'] + ":" + str(port)
    else:
        return gethostbyname(gethostname()) + ":" + str(port)
def update_course_enrollment(self, email, course_url, purchase_incomplete, mode,
                             unit_cost=None, course_id=None, currency=None,
                             message_id=None, site_code=None, sku=None):
    """Adds/updates Sailthru when a user adds to cart/purchases/upgrades a course

    Args:
        email(str): The user's email address
        course_url(str): Course home page url
        purchase_incomplete(boolean): True if adding to cart
        mode(string): enroll mode (audit, verified, ...)
        unit_cost(decimal): cost if purchase event
        course_id(CourseKey): course id
        currency(str): currency if purchase event - currently ignored since
            Sailthru only supports USD
        message_id(str): value from Sailthru marketing campaign cookie
        site_code(str): site code
        sku(str): the product SKU

    Returns:
        None
    """
    # Get the configuration
    config = get_sailthru_configuration(site_code)

    try:
        sailthru_client = get_sailthru_client(site_code)
    except SailthruError:
        # NOTE: We rely on the function to log the error for us
        return

    # Use the event type to figure out the processing required
    new_enroll = False
    send_template = None

    if not purchase_incomplete:
        if mode == 'verified':
            # upgrade complete
            send_template = config.get('SAILTHRU_UPGRADE_TEMPLATE')
        elif mode == 'audit' or mode == 'honor':
            # free enroll
            new_enroll = True
            send_template = config.get('SAILTHRU_ENROLL_TEMPLATE')
        else:
            # paid course purchase complete
            new_enroll = True
            send_template = config.get('SAILTHRU_PURCHASE_TEMPLATE')

    # calculate the price in pennies for Sailthru
    # https://getstarted.sailthru.com/new-for-developers-overview/advanced-features/purchase/
    cost_in_cents = int(unit_cost * 100)

    # update the "unenrolled" course array in the user record on Sailthru
    # if this is a new enroll or an unenroll
    if new_enroll:
        if not _update_unenrolled_list(sailthru_client, email, course_url, False):
            schedule_retry(self, config)

    # Get the course data from the Sailthru content library or cache
    course_data = _get_course_content(course_id, course_url, sailthru_client,
                                      site_code, config)

    # build the item description
    item = _build_purchase_item(course_id, course_url, cost_in_cents, mode,
                                course_data, sku)

    # build the purchase API options list
    options = {}
    if purchase_incomplete and config.get('SAILTHRU_ABANDONED_CART_TEMPLATE'):
        options['reminder_template'] = config.get('SAILTHRU_ABANDONED_CART_TEMPLATE')
        # Sailthru's reminder time format is '+n time unit'
        options['reminder_time'] = "+{} minutes".format(
            config.get('SAILTHRU_ABANDONED_CART_DELAY'))

    # add the appropriate send template
    if send_template:
        options['send_template'] = send_template

    if not _record_purchase(sailthru_client, email, item, purchase_incomplete,
                            message_id, options):
        schedule_retry(self, config)
def unalias(self, annotationtype, alias):
    """Return the set for an alias (if applicable, raises an exception otherwise)"""
    if inspect.isclass(annotationtype):
        annotationtype = annotationtype.ANNOTATIONTYPE
    return self.alias_set[annotationtype][alias]
def import_sql_select(connection_url, select_query, username, password,
                      optimize=True, use_temp_table=None, temp_table_name=None,
                      fetch_mode=None):
    """
    Import the SQL table that is the result of the specified SQL query to
    H2OFrame in memory.

    Creates a temporary SQL table from the specified sql_query. Runs multiple
    SELECT SQL queries on the temporary table concurrently for parallel
    ingestion, then drops the table. Be sure to start the h2o.jar in the
    terminal with your downloaded JDBC driver in the classpath::

        java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp

    Also see h2o.import_sql_table. Currently supported SQL databases are
    MySQL, PostgreSQL, MariaDB, Hive, Oracle and Microsoft SQL Server.

    :param connection_url: URL of the SQL database connection as specified by
        the Java Database Connectivity (JDBC) Driver. For example,
        "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
    :param select_query: SQL query starting with `SELECT` that returns rows
        from one or more database tables.
    :param username: username for SQL server
    :param password: password for SQL server
    :param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize
        import of SQL table for faster imports.
    :param use_temp_table: whether a temporary table should be created from
        select_query
    :param temp_table_name: name of temporary table to be created from
        select_query
    :param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set
        to SINGLE to force a sequential read by a single node from the
        database.

    :returns: an :class:`H2OFrame` containing data of the specified SQL query.

    :examples:

    >>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
    >>> select_query = "SELECT bikeid from citibike20k"
    >>> username = "root"
    >>> password = "abc123"
    >>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
    ...                                          username, password, fetch_mode)
    """
    assert_is_type(connection_url, str)
    assert_is_type(select_query, str)
    assert_is_type(username, str)
    assert_is_type(password, str)
    assert_is_type(optimize, bool)
    assert_is_type(use_temp_table, bool, None)
    assert_is_type(temp_table_name, str, None)
    assert_is_type(fetch_mode, str, None)
    p = {"connection_url": connection_url, "select_query": select_query,
         "username": username, "password": password,
         "use_temp_table": use_temp_table, "temp_table_name": temp_table_name,
         "fetch_mode": fetch_mode}
    j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
    return get_frame(j.dest_key)
def makeServiceDocXML(title, collections):
    """
    Make an ATOM service doc here. The 'collections' parameter is a list of
    dictionaries; the keys 'href', 'title' and 'accept' are recognized.
    """
    serviceTag = etree.Element("service")
    workspaceTag = etree.SubElement(serviceTag, "workspace")
    titleTag = etree.SubElement(workspaceTag, ATOM + "title", nsmap=ATOM_NSMAP)
    titleTag.text = title

    for collection in collections:
        collectionTag = etree.SubElement(workspaceTag, "collection")
        if 'href' in collection:
            collectionTag.set("href", collection['href'])
        if 'title' in collection:
            colTitleTag = etree.SubElement(
                collectionTag, ATOM + "title", nsmap=ATOM_NSMAP
            )
            colTitleTag.text = collection['title']
        if 'accept' in collection:
            acceptTag = etree.SubElement(collectionTag, "accept")
            acceptTag.text = collection['accept']

    return serviceTag
def start_listener_thread(self, timeout_ms: int = 30000,
                          exception_handler: Callable = None):
    """
    Start a listener greenlet to listen for events in the background.

    Args:
        timeout_ms: How long to poll the Home Server for before retrying.
        exception_handler: Optional exception handler function which can be
            used to handle exceptions in the caller thread.
    """
    assert not self.should_listen and self.sync_thread is None, 'Already running'
    self.should_listen = True
    self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms,
                                    exception_handler)
    self.sync_thread.name = f'GMatrixClient.listen_forever user_id:{self.user_id}'
def uncloak(request):
    """
    Undo a masquerade session and redirect the user back to where they started
    cloaking from (or wherever the "next" POST parameter points).
    """
    try:
        del request.session[SESSION_USER_KEY]
    except KeyError:
        pass  # who cares

    # figure out where to redirect
    next = request.POST.get(REDIRECT_FIELD_NAME) or \
        request.session.get(SESSION_REDIRECT_KEY)
    if next and is_safe_url(next, request.get_host()):
        return HttpResponseRedirect(next)

    return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
def attribute(element, attribute, default=None):
    """
    Returns the value of an attribute, or a default if it's not defined.

    :param element: The XML Element object
    :type element: etree._Element
    :param attribute: The name of the attribute to evaluate
    :type attribute: basestring
    :param default: The default value to return if the attribute is not defined
    """
    attribute_value = element.get(attribute)
    return attribute_value if attribute_value is not None else default
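A hedged usage example with lxml.etree (which the type annotations above suggest):

from lxml import etree

elem = etree.fromstring('<node enabled="true"/>')
print(attribute(elem, 'enabled'))         # 'true'
print(attribute(elem, 'missing', 'n/a'))  # 'n/a'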
def _pwr_optfcn(df, loc):
    '''Function to find power from ``i_from_v``.'''
    I = _lambertw_i_from_v(df['r_sh'], df['r_s'],  # noqa: E741, N806
                           df['nNsVth'], df[loc], df['i_0'], df['i_l'])
    return I * df[loc]
def geo_max_distance(left, right):
    """Returns the 2-dimensional maximum distance between two geometries in
    projected units. If left and right are the same geometry, the function
    will return the distance between the two vertices most distant from each
    other in that geometry.

    Parameters
    ----------
    left : geometry
    right : geometry

    Returns
    -------
    MaxDistance : double scalar
    """
    op = ops.GeoMaxDistance(left, right)
    return op.to_expr()
def _isdictclass(obj):
    '''Return True for known dict objects.'''
    c = getattr(obj, '__class__', None)
    return c and c.__name__ in _dict_classes.get(c.__module__, ())
def mock(config_or_spec=None, spec=None, strict=OMITTED): """Create 'empty' objects ('Mocks'). Will create an empty unconfigured object, that you can pass around. All interactions (method calls) will be recorded and can be verified using :func:`verify` et.al. A plain `mock()` will be not `strict`, and thus all methods regardless of the arguments will return ``None``. .. note:: Technically all attributes will return an internal interface. Because of that a simple ``if mock().foo:`` will surprisingly pass. If you set strict to ``True``: ``mock(strict=True)`` all unexpected interactions will raise an error instead. You configure a mock using :func:`when`, :func:`when2` or :func:`expect`. You can also very conveniently just pass in a dict here:: response = mock({'text': 'ok', 'raise_for_status': lambda: None}) You can also create an empty Mock which is specced against a given `spec`: ``mock(requests.Response)``. These mock are by default strict, thus they raise if you want to stub a method, the spec does not implement. Mockito will also match the function signature. You can pre-configure a specced mock as well:: response = mock({'json': lambda: {'status': 'Ok'}}, spec=requests.Response) Mocks are by default callable. Configure the callable behavior using `when`:: dummy = mock() when(dummy).__call_(1).thenReturn(2) All other magic methods must be configured this way or they will raise an AttributeError. See :func:`verify` to verify your interactions after usage. """ if type(config_or_spec) is dict: config = config_or_spec else: config = {} spec = config_or_spec if strict is OMITTED: strict = False if spec is None else True class Dummy(_Dummy): if spec: __class__ = spec # make isinstance work def __getattr__(self, method_name): if strict: raise AttributeError( "'Dummy' has no attribute %r configured" % method_name) return functools.partial( remembered_invocation_builder, theMock, method_name) def __repr__(self): name = 'Dummy' if spec: name += spec.__name__ return "<%s id=%s>" % (name, id(self)) # That's a tricky one: The object we will return is an *instance* of our # Dummy class, but the mock we register will point and patch the class. # T.i. so that magic methods (`__call__` etc.) can be configured. obj = Dummy() theMock = Mock(Dummy, strict=strict, spec=spec) for n, v in config.items(): if inspect.isfunction(v): invocation.StubbedInvocation(theMock, n)(Ellipsis).thenAnswer(v) else: setattr(obj, n, v) mock_registry.register(obj, theMock) return obj
Create 'empty' objects ('Mocks').

Will create an empty unconfigured object, that you can pass around.
All interactions (method calls) will be recorded and can be verified
using :func:`verify` et.al.

A plain `mock()` will not be `strict`, and thus all methods regardless
of the arguments will return ``None``.

.. note:: Technically all attributes will return an internal interface.
    Because of that a simple ``if mock().foo:`` will surprisingly pass.

If you set strict to ``True``: ``mock(strict=True)`` all unexpected
interactions will raise an error instead.

You configure a mock using :func:`when`, :func:`when2` or :func:`expect`.
You can also very conveniently just pass in a dict here::

    response = mock({'text': 'ok', 'raise_for_status': lambda: None})

You can also create an empty Mock which is specced against a given
`spec`: ``mock(requests.Response)``. These mocks are strict by default,
thus they raise if you try to stub a method the spec does not implement.
Mockito will also match the function signature.

You can pre-configure a specced mock as well::

    response = mock({'json': lambda: {'status': 'Ok'}},
                    spec=requests.Response)

Mocks are by default callable. Configure the callable behavior using
`when`::

    dummy = mock()
    when(dummy).__call__(1).thenReturn(2)

All other magic methods must be configured this way or they will raise an
AttributeError.

See :func:`verify` to verify your interactions after usage.
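A short usage sketch, assuming the usual mockito-python entry points (`mock`, `when`, `verify`) are importable from the `mockito` package:

from mockito import mock, when, verify

# Dict-configured dumb stub: attributes resolve to the configured values.
response = mock({'text': 'ok', 'status_code': 200})
assert response.text == 'ok'

# Explicit stubbing plus interaction verification.
service = mock()
when(service).fetch('id-1').thenReturn({'name': 'Alice'})
assert service.fetch('id-1') == {'name': 'Alice'}
verify(service).fetch('id-1')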
def cubic_bezier(document, coords):
    "cubic bezier polyline"
    element = document.createElement('path')
    points = [(coords[i], coords[i + 1]) for i in range(0, len(coords), 2)]
    path = ["M%s %s" % points[0]]
    for n in range(1, len(points), 3):
        A, B, C = points[n:n + 3]
        path.append("C%s,%s %s,%s %s,%s" % (A[0], A[1], B[0], B[1],
                                            C[0], C[1]))
    element.setAttribute('d', ' '.join(path))
    return element
cubic bezier polyline
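A minimal sketch of calling the helper with a DOM document; the coordinate layout (a start point followed by control-point/anchor triples) is inferred from the path construction above:

from xml.dom.minidom import Document

doc = Document()
# One start point plus one cubic segment (two control points, one end point).
coords = [0, 0, 10, 20, 30, 20, 40, 0]
path_el = cubic_bezier(doc, coords)
print(path_el.toxml())  # <path d="M0 0 C10,20 30,20 40,0"/>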
def shot_end_data(shot, role): """Return the data for endframe :param shot: the shot that holds the data :type shot: :class:`jukeboxcore.djadapter.models.Shot` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the end :rtype: depending on role :raises: None """ if role == QtCore.Qt.DisplayRole: return str(shot.endframe)
Return the data for endframe :param shot: the shot that holds the data :type shot: :class:`jukeboxcore.djadapter.models.Shot` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the end :rtype: depending on role :raises: None
def array_map2(*referls, **kwargs):
    '''
        obsolete; kept only for backward compatibility
        from elist.elist import *
        ol = [1,2,3,4]
        refl1 = ['+','+','+','+']
        refl2 = [7,7,7,7]
        refl3 = ['=','=','=','=']
        def map_func(ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):
            s = prefix+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix
            return(s)
        ####
        rslt = array_map2(ol,refl1,refl2,refl3,map_func=map_func,map_func_args=['Q','?'])
        pobj(rslt)
    '''
    map_func = kwargs['map_func']
    if('map_func_args' in kwargs):
        map_func_args = kwargs['map_func_args']
    else:
        map_func_args = []
    length = len(referls)
    rslt = []
    anum = len(referls[0])
    for j in range(0, anum):
        args = []
        for i in range(0, length):
            refl = referls[i]
            args.append(refl[j])
        args.extend(map_func_args)
        v = map_func(*args)
        rslt.append(v)
    return(rslt)
obsolete; kept only for backward compatibility
from elist.elist import *
ol = [1,2,3,4]
refl1 = ['+','+','+','+']
refl2 = [7,7,7,7]
refl3 = ['=','=','=','=']
def map_func(ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):
    s = prefix+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix
    return(s)
####
rslt = array_map2(ol,refl1,refl2,refl3,map_func=map_func,map_func_args=['Q','?'])
pobj(rslt)
def do_types_overlap(schema, type_a, type_b): """Check whether two types overlap in a given schema. Provided two composite types, determine if they "overlap". Two composite types overlap when the Sets of possible concrete types for each intersect. This is often used to determine if a fragment of a given type could possibly be visited in a context of another type. This function is commutative. """ # Equivalent types overlap if type_a is type_b: return True if is_abstract_type(type_a): if is_abstract_type(type_b): # If both types are abstract, then determine if there is any intersection # between possible concrete types of each. return any( schema.is_possible_type(type_b, type_) for type_ in schema.get_possible_types(type_a) ) # Determine if latter type is a possible concrete type of the former. return schema.is_possible_type(type_a, type_b) if is_abstract_type(type_b): # Determine if former type is a possible concrete type of the latter. return schema.is_possible_type(type_b, type_a) # Otherwise the types do not overlap. return False
Check whether two types overlap in a given schema. Provided two composite types, determine if they "overlap". Two composite types overlap when the Sets of possible concrete types for each intersect. This is often used to determine if a fragment of a given type could possibly be visited in a context of another type. This function is commutative.
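A sketch built with graphql-core's public type constructors; the `Pet`/`Dog`/`Person` types are illustrative, not from the source:

from graphql import (GraphQLField, GraphQLInterfaceType, GraphQLObjectType,
                     GraphQLSchema, GraphQLString)

pet = GraphQLInterfaceType('Pet', {'name': GraphQLField(GraphQLString)})
dog = GraphQLObjectType('Dog', {'name': GraphQLField(GraphQLString)},
                        interfaces=[pet])
person = GraphQLObjectType('Person', {'name': GraphQLField(GraphQLString)})
schema = GraphQLSchema(query=person, types=[dog])

assert do_types_overlap(schema, pet, dog)         # Dog is a possible Pet
assert not do_types_overlap(schema, dog, person)  # distinct concrete types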
def getRgbdData(self): ''' Returns last RgbdData. @return last JdeRobotTypes Rgbd saved ''' self.lock.acquire() data = self.data self.lock.release() return data
Returns last RgbdData. @return last JdeRobotTypes Rgbd saved
def from_geometry(cls, molecule, do_orders=False, scaling=1.0):
    """Construct a MolecularGraph object based on interatomic distances

       All short distances are computed with the binning module and compared
       with a database of bond lengths. Based on this comparison, bonded
       atoms are detected.

       Before marking a pair of atoms A and B as bonded, it is also checked
       that there is no third atom C somewhat between A and B. When an atom
       C exists that is closer to B (than A) and the angle A-B-C is less
       than 45 degrees, atoms A and B are not bonded. Similarly if C is
       closer to A (than B) and the angle B-A-C is less than 45 degrees,
       A and B are not connected.

       Argument:
        | ``molecule``  --  The molecule to derive the graph from

       Optional arguments:
        | ``do_orders``  --  set to True to estimate the bond order
        | ``scaling``  --  scale the threshold for the connectivity.
                           increase this to 1.5 in case of transition states
                           when a fully connected topology is required.
    """
    from molmod.bonds import bonds

    unit_cell = molecule.unit_cell
    pair_search = PairSearchIntra(
        molecule.coordinates,
        bonds.max_length*bonds.bond_tolerance*scaling,
        unit_cell
    )

    orders = []
    lengths = []
    edges = []

    for i0, i1, delta, distance in pair_search:
        bond_order = bonds.bonded(molecule.numbers[i0], molecule.numbers[i1], distance/scaling)
        if bond_order is not None:
            if do_orders:
                orders.append(bond_order)
            lengths.append(distance)
            edges.append((i0, i1))

    if do_orders:
        result = cls(edges, molecule.numbers, orders, symbols=molecule.symbols)
    else:
        result = cls(edges, molecule.numbers, symbols=molecule.symbols)

    # run a check on all neighbors. if two bonds point in a direction that
    # differs only by 45 deg. the longest of the two is discarded. the
    # double loop over the neighbors is done such that the longest bonds
    # are eliminated first
    slated_for_removal = set([])
    threshold = 0.5**0.5
    for c, ns in result.neighbors.items():
        lengths_ns = []
        for n in ns:
            delta = molecule.coordinates[n] - molecule.coordinates[c]
            if unit_cell is not None:
                delta = unit_cell.shortest_vector(delta)
            length = np.linalg.norm(delta)
            lengths_ns.append([length, delta, n])
        lengths_ns.sort(reverse=True, key=(lambda r: r[0]))
        for i0, (length0, delta0, n0) in enumerate(lengths_ns):
            for i1, (length1, delta1, n1) in enumerate(lengths_ns[:i0]):
                if length1 == 0.0:
                    continue
                cosine = np.dot(delta0, delta1)/length0/length1
                if cosine > threshold:
                    # length1 > length0
                    slated_for_removal.add((c, n1))
                    lengths_ns[i1][0] = 0.0
    # construct a mask
    mask = np.ones(len(edges), bool)
    for i0, i1 in slated_for_removal:
        edge_index = result.edge_index.get(frozenset([i0, i1]))
        if edge_index is None:
            raise ValueError('Could not find edge that has to be removed: %i %i' % (i0, i1))
        mask[edge_index] = False
    # actual removal
    edges = [edges[i] for i in range(len(edges)) if mask[i]]
    if do_orders:
        orders = [orders[i] for i in range(len(orders)) if mask[i]]
        result = cls(edges, molecule.numbers, orders, symbols=molecule.symbols)
    else:
        result = cls(edges, molecule.numbers, symbols=molecule.symbols)
    lengths = [lengths[i] for i in range(len(lengths)) if mask[i]]
    result.bond_lengths = np.array(lengths)

    return result
Construct a MolecularGraph object based on interatomic distances

All short distances are computed with the binning module and compared
with a database of bond lengths. Based on this comparison, bonded atoms
are detected.

Before marking a pair of atoms A and B as bonded, it is also checked
that there is no third atom C somewhat between A and B. When an atom C
exists that is closer to B (than A) and the angle A-B-C is less than
45 degrees, atoms A and B are not bonded. Similarly if C is closer to A
(than B) and the angle B-A-C is less than 45 degrees, A and B are not
connected.

Argument:
 | ``molecule``  --  The molecule to derive the graph from

Optional arguments:
 | ``do_orders``  --  set to True to estimate the bond order
 | ``scaling``  --  scale the threshold for the connectivity. increase
                    this to 1.5 in case of transition states when a
                    fully connected topology is required.
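A hedged usage sketch with molmod; the filename is illustrative and `Molecule.from_file` is assumed to be available as in recent molmod releases:

from molmod import Molecule

mol = Molecule.from_file('water.xyz')  # hypothetical input structure
graph = MolecularGraph.from_geometry(mol, do_orders=True)
print(graph.edges)         # detected bonds as vertex index pairs
print(graph.bond_lengths)  # matching bond lengths set by this constructor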
def copy(self): """ Returns a copy of ClusterGraph. Returns ------- ClusterGraph: copy of ClusterGraph Examples ------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> G = ClusterGraph() >>> G.add_nodes_from([('a', 'b'), ('b', 'c')]) >>> G.add_edge(('a', 'b'), ('b', 'c')) >>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4)) >>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4)) >>> G.add_factors(phi1, phi2) >>> graph_copy = G.copy() >>> graph_copy.factors [<DiscreteFactor representing phi(a:2, b:2) at 0xb71b19cc>, <DiscreteFactor representing phi(b:2, c:2) at 0xb4eaf3ac>] >>> graph_copy.edges() [(('a', 'b'), ('b', 'c'))] >>> graph_copy.nodes() [('a', 'b'), ('b', 'c')] """ copy = ClusterGraph(self.edges()) if self.factors: factors_copy = [factor.copy() for factor in self.factors] copy.add_factors(*factors_copy) return copy
Returns a copy of ClusterGraph. Returns ------- ClusterGraph: copy of ClusterGraph Examples ------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> G = ClusterGraph() >>> G.add_nodes_from([('a', 'b'), ('b', 'c')]) >>> G.add_edge(('a', 'b'), ('b', 'c')) >>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4)) >>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4)) >>> G.add_factors(phi1, phi2) >>> graph_copy = G.copy() >>> graph_copy.factors [<DiscreteFactor representing phi(a:2, b:2) at 0xb71b19cc>, <DiscreteFactor representing phi(b:2, c:2) at 0xb4eaf3ac>] >>> graph_copy.edges() [(('a', 'b'), ('b', 'c'))] >>> graph_copy.nodes() [('a', 'b'), ('b', 'c')]
def get_flagged_args():
    """get_flagged_args

    Collects from the execution statement the arguments provided to this
    script. The items are then interpreted and returned. The expected
    key-value pairs are:
        --os_type - the operating system type to be built
        --os_version - the operating system version to be built

    NOTE: by not using these options, both Debian .deb and Redhat .rpm files
    are generated for the operating systems and versions natively as set by
    a global variable at the top of this script.

    FURTHER NOTE: there should be a
        dist_dir/Docker/<os_type>/<os_version>/DockerFile*
    present for this script to work.

    CONFIGURATION: It is part of the standard for this script to run its own
    configuration parameters to generate a:
        dist_dir/scripts/config.JSON
    This is part of a separate script and is executed by this one in an
    effort to keep this code translatable from project segment to segment.
    """
    expected = ['os_type', 'os_version']
    arguments = {}
    try:
        opts, adds = \
            getopt.getopt(sys.argv[1:], '',
                          [x + "=" for x in expected])
    except getopt.GetoptError as Error:
        print(str(Error))
        print("Defaulting to standard run...")
        return arguments
    for o, a in opts:
        opt = re.sub('^-+', '', o)
        if opt in expected:
            arguments[opt] = a
    if arguments:
        if 'os_type' not in arguments:
            print("Unsupported means of operation!")
            print("You can either specify both os_type and os_version " +
                  "or just os_type")
            arguments = {}
    return arguments
get_flagged_args

Collects from the execution statement the arguments provided to this
script. The items are then interpreted and returned. The expected
key-value pairs are:
    --os_type - the operating system type to be built
    --os_version - the operating system version to be built

NOTE: by not using these options, both Debian .deb and Redhat .rpm files
are generated for the operating systems and versions natively as set by
a global variable at the top of this script.

FURTHER NOTE: there should be a
    dist_dir/Docker/<os_type>/<os_version>/DockerFile*
present for this script to work.

CONFIGURATION: It is part of the standard for this script to run its own
configuration parameters to generate a:
    dist_dir/scripts/config.JSON
This is part of a separate script and is executed by this one in an
effort to keep this code translatable from project segment to segment.
def find_skew(self): """Returns a tuple (deskew angle in degrees, confidence value). Returns (None, None) if no angle is available. """ with _LeptonicaErrorTrap(): angle = ffi.new('float *', 0.0) confidence = ffi.new('float *', 0.0) result = lept.pixFindSkew(self._cdata, angle, confidence) if result == 0: return (angle[0], confidence[0]) else: return (None, None)
Returns a tuple (deskew angle in degrees, confidence value). Returns (None, None) if no angle is available.
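A hedged sketch of the intended call pattern; `Pix.open` is an assumption about how this leptonica wrapper loads an image:

pix = Pix.open('scanned_page.png')  # hypothetical loader on this wrapper
angle, confidence = pix.find_skew()
if angle is not None:
    print('deskew by %.2f degrees (confidence %.2f)' % (angle, confidence))
else:
    print('no reliable skew estimate available')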
def write_json(dictionary, filename): """Write dictionary to JSON""" with open(filename, 'w') as data_file: json.dump(dictionary, data_file, indent=4, sort_keys=True) print('--> Wrote ' + os.path.basename(filename))
Write dictionary to JSON
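For example, persisting a small settings dict (the path is illustrative):

settings = {'retries': 3, 'verbose': True}
write_json(settings, '/tmp/settings.json')
# --> Wrote settings.json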
def import_pyqt4(version=2): """ Import PyQt4 Parameters ---------- version : 1, 2, or None Which QString/QVariant API to use. Set to None to use the system default ImportErrors raised within this function are non-recoverable """ # The new-style string API (version=2) automatically # converts QStrings to Unicode Python strings. Also, automatically unpacks # QVariants to their underlying objects. import sip if version is not None: sip.setapi('QString', version) sip.setapi('QVariant', version) from PyQt4 import QtGui, QtCore, QtSvg if not check_version(QtCore.PYQT_VERSION_STR, '4.7'): raise ImportError("IPython requires PyQt4 >= 4.7, found %s" % QtCore.PYQT_VERSION_STR) # Alias PyQt-specific functions for PySide compatibility. QtCore.Signal = QtCore.pyqtSignal QtCore.Slot = QtCore.pyqtSlot # query for the API version (in case version == None) version = sip.getapi('QString') api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT return QtCore, QtGui, QtSvg, api
Import PyQt4 Parameters ---------- version : 1, 2, or None Which QString/QVariant API to use. Set to None to use the system default ImportErrors raised within this function are non-recoverable
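A short sketch of the call; it returns the bound Qt modules plus an API tag, with PySide-style `Signal`/`Slot` aliases already in place:

QtCore, QtGui, QtSvg, api = import_pyqt4(version=2)

timer = QtCore.QTimer()
timer.timeout.connect(lambda: print('tick'))  # connect works as usual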
def role_create(auth=None, **kwargs): ''' Create a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_create name=role1 salt '*' keystoneng.role_create name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_role(**kwargs)
Create a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_create name=role1 salt '*' keystoneng.role_create name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e
def _deps_only_toggled(self, widget, data=None): """ Function deactivate options in case of deps_only and opposite """ active = widget.get_active() self.dir_name.set_sensitive(not active) self.entry_project_name.set_sensitive(not active) self.dir_name_browse_btn.set_sensitive(not active) self.run_btn.set_sensitive(active or not self.project_name_shown or self.entry_project_name.get_text() != "")
Function deactivate options in case of deps_only and opposite
def _schema_get_docstring(starting_class): """ Given a class, return its docstring. If no docstring is present for the class, search base classes in MRO for a docstring. """ for cls in inspect.getmro(starting_class): if inspect.getdoc(cls): return inspect.getdoc(cls)
Given a class, return its docstring. If no docstring is present for the class, search base classes in MRO for a docstring.
def _add_timedelta(self, delta): """ Add timedelta duration to the instance. :param delta: The timedelta instance :type delta: pendulum.Duration or datetime.timedelta :rtype: Date """ if isinstance(delta, pendulum.Duration): return self.add( years=delta.years, months=delta.months, weeks=delta.weeks, days=delta.remaining_days, ) return self.add(days=delta.days)
Add timedelta duration to the instance. :param delta: The timedelta instance :type delta: pendulum.Duration or datetime.timedelta :rtype: Date
def update_vip_request(self, vip_request, vip_request_id):
    """
    Method to update vip request

    :param vip_request: vip_request object
    :param vip_request_id: vip_request id
    """

    uri = 'api/v3/vip-request/%s/' % vip_request_id

    data = dict()
    data['vips'] = list()
    data['vips'].append(vip_request)

    return super(ApiVipRequest, self).put(uri, data)
Method to update vip request

:param vip_request: vip_request object
:param vip_request_id: vip_request id
def _handle_input_request(self, msg): """Save history and add a %plot magic.""" if self._hidden: raise RuntimeError('Request for raw input during hidden execution.') # Make sure that all output from the SUB channel has been processed # before entering readline mode. self.kernel_client.iopub_channel.flush() def callback(line): # Save history to browse it later if not (len(self._control.history) > 0 and self._control.history[-1] == line): # do not save pdb commands cmd = line.split(" ")[0] if "do_" + cmd not in dir(pdb.Pdb): self._control.history.append(line) # This is the Spyder addition: add a %plot magic to display # plots while debugging if line.startswith('%plot '): line = line.split()[-1] code = "__spy_code__ = get_ipython().run_cell('%s')" % line self.kernel_client.input(code) else: self.kernel_client.input(line) if self._reading: self._reading = False self._readline(msg['content']['prompt'], callback=callback, password=msg['content']['password'])
Save history and add a %plot magic.
def paragraph( self, nb_sentences=3, variable_nb_sentences=True, ext_word_list=None): """ :returns: A single paragraph. For example: 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.' Keyword arguments: :param nb_sentences: around how many sentences the paragraph should contain :param variable_nb_sentences: set to false if you want exactly ``nb`` sentences returned, otherwise the result may include a number of sentences of ``nb`` +/-40% (with a minimum of 1) :param ext_word_list: a list of words you would like to have instead of 'Lorem ipsum'. :rtype: str """ if nb_sentences <= 0: return '' if variable_nb_sentences: nb_sentences = self.randomize_nb_elements(nb_sentences, min=1) para = self.word_connector.join(self.sentences( nb_sentences, ext_word_list=ext_word_list, )) return para
:returns: A single paragraph. For example: 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.' Keyword arguments: :param nb_sentences: around how many sentences the paragraph should contain :param variable_nb_sentences: set to false if you want exactly ``nb`` sentences returned, otherwise the result may include a number of sentences of ``nb`` +/-40% (with a minimum of 1) :param ext_word_list: a list of words you would like to have instead of 'Lorem ipsum'. :rtype: str
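A usage sketch with Faker's standard generator object:

from faker import Faker

fake = Faker()
print(fake.paragraph(nb_sentences=5))
# Exactly five sentences, drawn from a custom vocabulary:
print(fake.paragraph(nb_sentences=5, variable_nb_sentences=False,
                     ext_word_list=['abc', 'def', 'ghi', 'jkl']))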
def datetime_from_iso_format(string): """ Return a datetime object from an iso 8601 representation. Return None if string is non conforming. """ match = DATE_ISO_REGEX.match(string) if match: date = datetime.datetime(year=int(match.group(DATE_ISO_YEAR_GRP)), month=int(match.group(DATE_ISO_MONTH_GRP)), day=int(match.group(DATE_ISO_DAY_GRP)), hour=int(match.group(DATE_ISO_HOUR_GRP)), second=int(match.group(DATE_ISO_SEC_GRP)), minute=int(match.group(DATE_ISO_MIN_GRP))) return date else: return None
Return a datetime object from an iso 8601 representation. Return None if string is non conforming.
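A hedged example; the exact accepted layout depends on `DATE_ISO_REGEX`, which is defined elsewhere, so the common 'YYYY-MM-DDTHH:MM:SS' form is assumed here:

dt = datetime_from_iso_format('2021-03-04T12:30:45')
print(dt)                                 # 2021-03-04 12:30:45
print(datetime_from_iso_format('bogus'))  # None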
def next_frame_glow_hparams(): """Hparams for next_frame_glow.""" hparams = glow.glow_hparams() # Possible modes are conditional and unconditional hparams.add_hparam("gen_mode", "conditional") hparams.add_hparam("learn_top_scale", False) hparams.add_hparam("condition_all_levels", True) # For each video, substitutes "num_input_frames + num_output_frames" with a # randomly sampled patch of length "num_train_frames" during training. # -1 indicates that the entire video is used for training. hparams.add_hparam("num_train_frames", -1) # The following are hparams that model the latent transitions. # Encoder that maps the latents to a Gaussian distribution. # This function is used to model the prior over z_{t}. Can be, # Pointwise -> point-wise multiplication of z_{t-1}. # conv_net -> one-layer convolution over z_{t-1} .. z_{t - num_cond_latents} # conv3d_net or conv_lstm hparams.add_hparam("latent_dist_encoder", "conv_net") # Number of latents used in the encoder above. hparams.add_hparam("num_cond_latents", 1) hparams.add_hparam("latent_architecture", "glow_resnet") hparams.add_hparam("latent_apply_dilations", False) hparams.add_hparam("latent_dilation_rates", [1, 3]) # Use latent skip connections hparams.add_hparam("model_input", False) hparams.add_hparam("cond_first_frame", False) hparams.add_hparam("latent_skip", True) hparams.add_hparam("latent_encoder_depth", 2) hparams.add_hparam("latent_encoder_width", 512) hparams.add_hparam("latent_dropout", 0.0) hparams.add_hparam("latent_pre_output_channels", 512) hparams.add_hparam("latent_activation", "relu") hparams.add_hparam("latent_noise", 0.0) # Pretrains the glow encoder for "pretrain_steps" number of steps. # By default, don't pretrain and learn end-to-end hparams.add_hparam("pretrain_steps", -1) hparams.bottom = { "inputs": modalities.video_raw_bottom, "targets": modalities.video_raw_targets_bottom, } hparams.loss = { "targets": modalities.video_l1_raw_loss, } hparams.top = { "targets": modalities.video_raw_top, } hparams.init_batch_size = 256 hparams.batch_size = 32 # Possible options: are prev_frame, single_conv and normal hparams.top_prior = "single_conv" return hparams
Hparams for next_frame_glow.
def __build_libxml2(target, source, env): """ General XSLT builder (HTML/FO), using the libxml2 module. """ xsl_style = env.subst('$DOCBOOK_XSL') styledoc = libxml2.parseFile(xsl_style) style = libxslt.parseStylesheetDoc(styledoc) doc = libxml2.readFile(str(source[0]),None,libxml2.XML_PARSE_NOENT) # Support for additional parameters parampass = {} if parampass: result = style.applyStylesheet(doc, parampass) else: result = style.applyStylesheet(doc, None) style.saveResultToFilename(str(target[0]), result, 0) style.freeStylesheet() doc.freeDoc() result.freeDoc() return None
General XSLT builder (HTML/FO), using the libxml2 module.
def getComment(self, repo_user, repo_name, comment_id):
    """
    GET /repos/:owner/:repo/pulls/comments/:number

    :param comment_id: The review comment's ID.
    """
    return self.api.makeRequest(
        ['repos', repo_user, repo_name,
         'pulls', 'comments', str(comment_id)])
GET /repos/:owner/:repo/pulls/comments/:number

:param comment_id: The review comment's ID.
def transform(source):
    '''Used to convert the source code, making use of known transformers.

       "transformers" are modules which must contain a function

           transform_source(source)

       which returns a transformed source.
       Some transformers (for example, those found in the standard library
       module lib2to3) cannot cope with non-standard syntax; as a result,
       they may fail during a first attempt. We keep track of all failing
       transformers and keep retrying them until either they have all
       succeeded or a fixed set of them fails twice in a row.
    '''
    source = extract_transformers_from_source(source)

    # Some transformers fail when multiple non-Python constructs
    # are present. So, we loop multiple times keeping track of
    # which transformations have been unsuccessfully performed.
    not_done = transformers
    while True:
        failed = {}
        for name in not_done:
            tr_module = import_transformer(name)
            try:
                source = tr_module.transform_source(source)
            except Exception as e:
                failed[name] = tr_module
                # from traceback import print_exc
                # print("Unexpected exception in transforms.transform",
                #       e.__class__.__name__)
                # print_exc()

        if not failed:
            break
        # Insanity is doing the same thing over and over again and
        # expecting different results ...
        # If the exact same set of transformations fails twice in a row,
        # there is no point in trying a third time.
        if failed == not_done:
            print("Warning: the following transforms could not be done:")
            for key in failed:
                print(key)
            break
        not_done = failed  # attempt another pass

    return source
Used to convert the source code, making use of known transformers.

"transformers" are modules which must contain a function

    transform_source(source)

which returns a transformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result,
they may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they have all
succeeded or a fixed set of them fails twice in a row.
def get(self, name, default=None): """ Returns the value of the given variable, or the given default value if the variable is not defined. :type name: string :param name: The name of the variable. :type default: object :param default: The default value. :rtype: object :return: The value of the variable. """ if self.vars is None: return default return self.vars.get(name, default)
Returns the value of the given variable, or the given default value if the variable is not defined. :type name: string :param name: The name of the variable. :type default: object :param default: The default value. :rtype: object :return: The value of the variable.
def _get_url(self, resource, item, sys_id=None): """Takes table and sys_id (if present), and returns a URL :param resource: API resource :param item: API resource item :param sys_id: Record sys_id :return: - url string """ url_str = '%(base_url)s/%(base_path)s/%(resource)s/%(item)s' % ( { 'base_url': self.base_url, 'base_path': self.base_path, 'resource': resource, 'item': item } ) if sys_id: return "%s/%s" % (url_str, sys_id) return url_str
Takes table and sys_id (if present), and returns a URL :param resource: API resource :param item: API resource item :param sys_id: Record sys_id :return: - url string
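Illustration of the composed URLs; `client`, the base values, and the table name are assumptions for the example:

# With base_url='https://instance.service-now.com' and base_path='api/now':
client._get_url('table', 'incident')
# -> 'https://instance.service-now.com/api/now/table/incident'
client._get_url('table', 'incident', sys_id='abc123')
# -> 'https://instance.service-now.com/api/now/table/incident/abc123'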
def get_resource(self, name=None, store=None, workspace=None): ''' returns a single resource object. Will return None if no resource is found. Will raise an error if more than one resource with the same name is found. ''' resources = self.get_resources(names=name, stores=store, workspaces=workspace) return self._return_first_item(resources)
returns a single resource object. Will return None if no resource is found. Will raise an error if more than one resource with the same name is found.
def load_csv(ctx, model, path, header=None, header_exclude=None, **fmtparams): """Load a CSV from a file path. :param ctx: Anthem context :param model: Odoo model name or model klass from env :param path: absolute or relative path to CSV file. If a relative path is given you must provide a value for `ODOO_DATA_PATH` in your environment or set `--odoo-data-path` option. :param header: whitelist of CSV columns to load :param header_exclude: blacklist of CSV columns to not load :param fmtparams: keyword params for `csv_unireader` Usage example:: from pkg_resources import Requirement, resource_string req = Requirement.parse('my-project') load_csv(ctx, ctx.env['res.users'], resource_string(req, 'data/users.csv'), delimiter=',') """ if not os.path.isabs(path): if ctx.options.odoo_data_path: path = os.path.join(ctx.options.odoo_data_path, path) else: raise AnthemError( 'Got a relative path. ' 'Please, provide a value for `ODOO_DATA_PATH` ' 'in your environment or set `--odoo-data-path` option.' ) with open(path, 'rb') as data: load_csv_stream(ctx, model, data, header=header, header_exclude=header_exclude, **fmtparams)
Load a CSV from a file path. :param ctx: Anthem context :param model: Odoo model name or model klass from env :param path: absolute or relative path to CSV file. If a relative path is given you must provide a value for `ODOO_DATA_PATH` in your environment or set `--odoo-data-path` option. :param header: whitelist of CSV columns to load :param header_exclude: blacklist of CSV columns to not load :param fmtparams: keyword params for `csv_unireader` Usage example:: from pkg_resources import Requirement, resource_string req = Requirement.parse('my-project') load_csv(ctx, ctx.env['res.users'], resource_string(req, 'data/users.csv'), delimiter=',')
def Vgg19_simple_api(rgb): """ Build the VGG 19 Model Parameters ----------- rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1] """ start_time = time.time() print("build model started") rgb_scaled = rgb * 255.0 # Convert RGB to BGR red, green, blue = tf.split(rgb_scaled, 3, 3) if red.get_shape().as_list()[1:] != [224, 224, 1]: raise Exception("image size unmatch") if green.get_shape().as_list()[1:] != [224, 224, 1]: raise Exception("image size unmatch") if blue.get_shape().as_list()[1:] != [224, 224, 1]: raise Exception("image size unmatch") bgr = tf.concat([ blue - VGG_MEAN[0], green - VGG_MEAN[1], red - VGG_MEAN[2], ], axis=3) if bgr.get_shape().as_list()[1:] != [224, 224, 3]: raise Exception("image size unmatch") # input layer net_in = InputLayer(bgr, name='input') # conv1 net = Conv2d(net_in, 64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_1') net = Conv2d(net, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2') net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1') # conv2 net = Conv2d(net, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1') net = Conv2d(net, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2') net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2') # conv3 net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1') net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2') net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_3') net = Conv2d(net, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_4') net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3') # conv4 net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1') net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2') net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3') net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_4') net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4') # conv5 net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1') net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2') net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3') net = Conv2d(net, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_4') net = MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5') # fc 6~8 net = FlattenLayer(net, name='flatten') net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6') net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7') net = DenseLayer(net, n_units=1000, act=None, name='fc8') print("build model finished: %fs" % (time.time() - start_time)) return net
Build the VGG 19 Model Parameters ----------- rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1]
def add_price_entity(self, price: dal.Price): """ Adds the price """ from decimal import Decimal # check if the price already exists in db. repo = self.get_price_repository() existing = ( repo.query .filter(dal.Price.namespace == price.namespace) .filter(dal.Price.symbol == price.symbol) .filter(dal.Price.date == price.date) .filter(dal.Price.time == price.time) .first() ) if existing: # Update existing price. new_value = Decimal(price.value) / Decimal(price.denom) self.logger.info(f"Exists: {price}") if price.currency != existing.currency: raise ValueError( f"The currency is different for price {price}!") if existing.value != price.value: existing.value = price.value self.logger.info(f"Updating to {new_value}.") if existing.denom != price.denom: existing.denom = price.denom else: # Insert new price self.session.add(price) self.logger.info(f"Added {price}")
Adds the price
def update_payload(self, fields=None): """Wrap submitted data within an extra dict.""" payload = super(ProvisioningTemplate, self).update_payload(fields) if 'template_combinations' in payload: payload['template_combinations_attributes'] = payload.pop( 'template_combinations') return {u'provisioning_template': payload}
Wrap submitted data within an extra dict.
def insert_sections_some(ol,*secs,**kwargs): ''' ol = initRange(0,20,1) ol loc = 6 rslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc) rslt #### ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" loc = kwargs['index'] secs = list(secs) secs = [concat(*secs)] locs = [loc] return(insert_sections_many(ol,secs,locs,mode=mode))
ol = initRange(0,20,1) ol loc = 6 rslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc) rslt ####
def rmswidth(self, floor=0): """Calculate :ref:`pysynphot-formula-rmswidth`. Parameters ---------- floor : float Throughput values equal or below this threshold are not included in the calculation. By default (0), all points are included. Returns ------- ans : float RMS band width. """ mywaveunits = self.waveunits.name self.convert('angstroms') wave = self.wave thru = self.throughput self.convert(mywaveunits) if floor != 0: idx = N.where(thru >= floor) wave = wave[idx] thru = thru[idx] integrand = (wave-self.avgwave())**2 * thru num = self.trapezoidIntegration(wave, integrand) den = self.trapezoidIntegration(wave, thru) if 0.0 in (num, den): return 0.0 else: ans = math.sqrt(num/den) return ans
Calculate :ref:`pysynphot-formula-rmswidth`. Parameters ---------- floor : float Throughput values equal or below this threshold are not included in the calculation. By default (0), all points are included. Returns ------- ans : float RMS band width.
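A stand-alone numpy check of the same quantity: the square root of the throughput-weighted variance of wavelength about the average wavelength. For a Gaussian passband it should recover the Gaussian sigma (50 Angstroms here); the trapezoidal integrals mirror `trapezoidIntegration`.

import numpy as np

wave = np.linspace(4000.0, 6000.0, 2001)
thru = np.exp(-0.5 * ((wave - 5000.0) / 50.0) ** 2)

avg = np.trapz(wave * thru, wave) / np.trapz(thru, wave)
rms = np.sqrt(np.trapz((wave - avg) ** 2 * thru, wave)
              / np.trapz(thru, wave))
print(rms)  # ~50.0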
def collapse( self, direction ):
    """
    Collapses this splitter handle before or after other widgets based on \
    the given CollapseDirection.

    :param      direction | <XSplitterHandle.CollapseDirection>

    :return     <bool> | success
    """
    if ( self.isCollapsed() ):
        return False

    splitter = self.parent()
    if ( not splitter ):
        return False

    sizes = splitter.sizes()
    handles = [splitter.handle(i) for i in range(len(sizes))]
    index = handles.index(self)

    self.markCollapsed(direction, sizes)

    # determine the sizes to use based on the direction
    if ( direction == XSplitterHandle.CollapseDirection.Before ):
        sizes = [0] * index + sizes[index + 1:]
    else:
        sizes = sizes[:index] + [0] * (len(sizes) - index)

    splitter.setSizes(sizes)
    return True
Collapses this splitter handle before or after other widgets based on \
the given CollapseDirection.

:param      direction | <XSplitterHandle.CollapseDirection>

:return     <bool> | success
def tail(
        self, line_prefix=None, callback=None, output_callback=None,
        stop_callback=lambda x: False,
        timeout=None
):
    """
    This function takes control of an SSH channel and displays line by
    line of output as \n is received. This function is specifically made
    for tail-like commands.

    :param line_prefix: Text to append to the left of each line of output.
                        This is especially useful if you are using my
                        MultiSSH class to run tail commands over multiple
                        servers.
    :param callback: You may optionally supply a callback function which
                     takes two parameters. The first is the line prefix
                     and the second is the current line of output. The
                     callback should return the string that is to be
                     displayed (including the \n character). This allows
                     users to grep the output or manipulate it as required.
    :param output_callback: A function used to print ssh output. Printed to
                            stdout by default. A user-defined logger may be
                            passed like output_callback=lambda m: mylog.debug(m)
    :param stop_callback: A function used to stop the tail; when the
                          function returns True the tail will stop, by
                          default stop_callback=lambda x: False
    :param timeout: how much time to wait for data; defaults to None,
                    which means almost forever.
    """
    output_callback = output_callback if output_callback else self.output_callback
    # Set the channel timeout to the maximum integer the server allows,
    # setting this to None breaks the KeyboardInterrupt exception and
    # won't allow us to Ctrl+C out of the script
    timeout = timeout if timeout else 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
    self.channel.settimeout(timeout)

    # Create an empty line buffer and a line counter
    current_line = b''
    line_counter = 0
    line_feed_byte = '\n'.encode(self.encoding)

    # Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
    while True:
        # Read the output one byte at a time so we can detect \n correctly
        buffer = self.channel.recv(1)

        # If we have an empty buffer, then the SSH session has been closed
        if len(buffer) == 0:
            break

        # Add the currently read buffer to the current line output
        current_line += buffer

        # Display the last read line in realtime when we reach a \n
        # character
        if buffer == line_feed_byte:
            current_line_decoded = current_line.decode(self.encoding)
            if line_counter:
                if callback:
                    output_callback(callback(line_prefix, current_line_decoded))
                else:
                    if line_prefix:
                        output_callback(line_prefix)
                    output_callback(current_line_decoded)
            if stop_callback(current_line_decoded):
                break
            line_counter += 1
            current_line = b''
This function takes control of an SSH channel and displays line by line
of output as \n is received. This function is specifically made for
tail-like commands.

:param line_prefix: Text to append to the left of each line of output.
                    This is especially useful if you are using my MultiSSH
                    class to run tail commands over multiple servers.
:param callback: You may optionally supply a callback function which takes
                 two parameters. The first is the line prefix and the
                 second is the current line of output. The callback should
                 return the string that is to be displayed (including the
                 \n character). This allows users to grep the output or
                 manipulate it as required.
:param output_callback: A function used to print ssh output. Printed to
                        stdout by default. A user-defined logger may be
                        passed like output_callback=lambda m: mylog.debug(m)
:param stop_callback: A function used to stop the tail; when the function
                      returns True the tail will stop, by default
                      stop_callback=lambda x: False
:param timeout: how much time to wait for data; defaults to None, which
                means almost forever.
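A hedged usage sketch; `ssh` stands in for an instance of this class whose channel is already running a tail-like remote command:

ssh.tail(
    line_prefix='[web1] ',
    # Grep-style filter: only lines containing ERROR are displayed.
    callback=lambda prefix, line: prefix + line if 'ERROR' in line else '',
    stop_callback=lambda line: 'shutting down' in line,
    timeout=300,
)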
def setParametersFromFile(dna, filename, parameters=None, bp=None):
    """Read a specific parameter from the do_x3dna output file.

    It automatically loads the input parameters from a file into the dna
    object or HDF5 file. It automatically decides the format of the input
    file from the input parameter names.

    Parameters
    ----------
    dna : :class:`DNA`
        Input :class:`DNA` instance.

    filename : str
        Input filename. This file should be output from do_x3dna.

    parameters : str, list, None
        Name of the parameter. For details about accepted keywords, see
        ``parameter`` in the method :meth:`DNA.get_parameters`. Note that
        parameters that are calculated from do_x3dna cannot be used here.
        In case of ``None``, parameter names will be automatically
        determined from the input file.

    bp : list
        List containing lower and higher limit of base-pair/step range.
            * This list should not contain more than two numbers.
            * First number should be less than second number.

        Example for base-pairs/steps 4 to 15:
            ``bp = [4,15]         # step_range = True``

        If ``None``, all base-pairs/steps will be considered.

    """
    gotParameterList = False
    param_type = None

    # In case of None, try to determine the parameters from the file
    if parameters is None:
        parameters = checkParametersInputFile(filename)
        if parameters is None:
            raise AssertionError(" Cannot determine the parameters name from file {0}.".format(filename))

    if isinstance(parameters, list) or isinstance(parameters, np.ndarray):
        gotParameterList = True
        parameter = list(parameters)
        param_type = getParameterType(parameter[0])
    else:
        param_type = getParameterType(parameters)

    if bp is None:
        if param_type == 'bps':
            bp = [dna.startBP, dna.num_step]
        else:
            bp = [dna.startBP, dna.num_bp]

    if len(bp) == 1:
        bp_range = False
    else:
        bp_range = True

    if not gotParameterList:
        tempParamName = parameters
        inputParameter = [parameters]
    else:
        tempParamName = parameters[0]
        inputParameter = parameter

    sys.stdout.write('\nLoading parameters: {0}'.format(inputParameter))

    success = False
    if tempParamName in basePairParameters:
        dna.set_base_pair_parameters(filename, bp, parameters=inputParameter, bp_range=bp_range)
        success = True

    if tempParamName in baseStepParameters:
        dna.set_base_step_parameters(filename, bp, parameters=inputParameter, step_range=bp_range, helical=False)
        success = True

    if tempParamName in helicalBaseStepParameters:
        dna.set_base_step_parameters(filename, bp, parameters=inputParameter, step_range=bp_range, helical=True)
        success = True

    if tempParamName in groovesParameters:
        dna.set_major_minor_groove(filename, bp, parameters=inputParameter, step_range=bp_range)
        success = True

    if tempParamName in backboneDihedrals:
        dna.set_backbone_dihedrals(filename, bp, parameters=inputParameter, bp_range=bp_range)
        success = True

    if tempParamName in helicalRadiusParameters:
        dna.set_helical_radius(filename, bp, full=True, bp_range=bp_range)
        success = True

    if tempParamName in helicalAxisParameters:
        if len(bp) == 1:
            raise AssertionError("Axis cannot be read for a single base-step.\n Use a segment spanned over several basepairs.")
        dna.set_helical_axis(filename, step_range=True, step=bp)
        success = True

    if not success:
        raise ValueError('Not able to load these parameters: {0}...'.format(inputParameter))
Read a specific parameter from the do_x3dna output file.

It automatically loads the input parameters from a file into the dna
object or HDF5 file. It automatically decides the format of the input
file from the input parameter names.

Parameters
----------
dna : :class:`DNA`
    Input :class:`DNA` instance.

filename : str
    Input filename. This file should be output from do_x3dna.

parameters : str, list, None
    Name of the parameter. For details about accepted keywords, see
    ``parameter`` in the method :meth:`DNA.get_parameters`. Note that
    parameters that are calculated from do_x3dna cannot be used here.
    In case of ``None``, parameter names will be automatically determined
    from the input file.

bp : list
    List containing lower and higher limit of base-pair/step range.
        * This list should not contain more than two numbers.
        * First number should be less than second number.

    Example for base-pairs/steps 4 to 15:
        ``bp = [4,15]         # step_range = True``

    If ``None``, all base-pairs/steps will be considered.
def load_project_definition(path: str) -> dict: """ Load the cauldron.json project definition file for the given path. The path can be either a source path to the cauldron.json file or the source directory where a cauldron.json file resides. :param path: The source path or directory where the definition file will be loaded """ source_path = get_project_source_path(path) if not os.path.exists(source_path): raise FileNotFoundError('Missing project file: {}'.format(source_path)) with open(source_path, 'r') as f: out = json.load(f) project_folder = os.path.split(os.path.dirname(source_path))[-1] if 'id' not in out or not out['id']: out['id'] = project_folder return out
Load the cauldron.json project definition file for the given path. The path can be either a source path to the cauldron.json file or the source directory where a cauldron.json file resides. :param path: The source path or directory where the definition file will be loaded
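Both call forms below resolve to the same cauldron.json file; the paths are illustrative:

definition = load_project_definition('/projects/demo')
definition = load_project_definition('/projects/demo/cauldron.json')
print(definition['id'])  # falls back to the folder name, e.g. 'demo'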
def reply_message(self, message_url, body):
    """Reply to a private (on-site) message

    :param message_url: the page URL of the message
    :param body: the reply content (must not exceed 1024 characters)
    """
    id = re.findall(r'(\d+)/?$', message_url)[0]
    api = 'http://www.shanbay.com/api/v1/message/%s/reply/'
    url = api % id
    data = {
        'body': body
    }
    response = self.request(url, 'post', data=data)
    return response.json()['status_code'] == 0
Reply to a private (on-site) message

:param message_url: the page URL of the message
:param body: the reply content (must not exceed 1024 characters)
def blockvisit(self, nodes, frame):
    """Visit a list of nodes as block in a frame.  If the current frame
    is not a buffer, a dummy ``if 0: yield None`` is written automatically.
    """
    if frame.buffer is None:
        self.writeline('if 0: yield None')
    else:
        self.writeline('pass')
    try:
        for node in nodes:
            self.visit(node, frame)
    except CompilerExit:
        pass
Visit a list of nodes as block in a frame.  If the current frame
is not a buffer, a dummy ``if 0: yield None`` is written automatically.
def _check_pillar(kwargs, pillar=None):
    '''
    Check the pillar for errors. Refuse to run the state (return False) if
    the pillar contains errors, unless ``force`` is set; return True
    otherwise.
    '''
    if kwargs.get('force'):
        return True
    pillar_dict = pillar if pillar is not None else __pillar__
    if '_errors' in pillar_dict:
        return False
    return True
Check the pillar for errors. Refuse to run the state (return False) if
the pillar contains errors, unless ``force`` is set; return True
otherwise.
def _generate_create_dict(self, hostname=None, domain=None, flavor=None, router=None, datacenter=None, hourly=True): """Translates args into a dictionary for creating a dedicated host.""" package = self._get_package() item = self._get_item(package, flavor) location = self._get_location(package['regions'], datacenter) price = self._get_price(item) routers = self._get_backend_router( location['location']['locationPackageDetails'], item) router = self._get_default_router(routers, router) hardware = { 'hostname': hostname, 'domain': domain, 'primaryBackendNetworkComponent': { 'router': { 'id': router } } } complex_type = "SoftLayer_Container_Product_Order_Virtual_DedicatedHost" order = { "complexType": complex_type, "quantity": 1, 'location': location['keyname'], 'packageId': package['id'], 'prices': [{'id': price}], 'hardware': [hardware], 'useHourlyPricing': hourly, } return order
Translates args into a dictionary for creating a dedicated host.
def get_gender(data): """Retrieve gender from metadata, codified as male/female/unknown. """ g = str(dd.get_gender(data)) if g and str(g).lower() in ["male", "m", "1"]: return "male" elif g and str(g).lower() in ["female", "f", "2"]: return "female" else: return "unknown"
Retrieve gender from metadata, codified as male/female/unknown.
def write_yum_repo(content, filename='ceph.repo'):
    """Add a yum repo file under /etc/yum.repos.d/"""
    repo_path = os.path.join('/etc/yum.repos.d', filename)
    if not isinstance(content, str):
        content = content.decode('utf-8')
    write_file(repo_path, content.encode('utf-8'))
Add a yum repo file under /etc/yum.repos.d/
def task_add(self, description, tags=None, **kw):
    """ Add a new task.

    Takes any of the keywords allowed by taskwarrior like proj or prior.
    """
    task = self._stub_task(description, tags, **kw)

    # Check if there are annotations, if so remove them from the
    # task and add them after we've added the task.
    annotations = self._extract_annotations_from_task(task)

    # With older versions of taskwarrior, you can specify whatever uuid you
    # want when adding a task.
    if self.get_version() < LooseVersion('2.4'):
        task['uuid'] = str(uuid.uuid4())
    elif 'uuid' in task:
        del task['uuid']

    if self._marshal:
        args = taskw.utils.encode_task_experimental(task.serialized())
    else:
        args = taskw.utils.encode_task_experimental(task)

    stdout, stderr = self._execute('add', *args)

    # However, in 2.4 and later, you cannot specify whatever uuid you want
    # when adding a task.  Instead, you have to specify rc.verbose=new-uuid
    # and then parse the assigned uuid out from stdout.
    if self.get_version() >= LooseVersion('2.4'):
        task['uuid'] = stdout.strip().split()[-1].strip('.')

    id, added_task = self.get_task(uuid=task['uuid'])

    # Check if 'uuid' is in the task we just added.
    if 'uuid' not in added_task:
        raise KeyError(
            'Error encountered while creating task; '
            'STDOUT: %s; STDERR: %s' % (
                stdout, stderr,
            )
        )

    if annotations and 'uuid' in added_task:
        for annotation in annotations:
            self.task_annotate(added_task, annotation)

    id, added_task = self.get_task(uuid=added_task[six.u('uuid')])

    return added_task
Add a new task. Takes any of the keywords allowed by taskwarrior like proj or prior.
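A hedged usage sketch, assuming the package exposes a `TaskWarrior` entry point wrapping this method:

from taskw import TaskWarrior

w = TaskWarrior()
task = w.task_add('Write release notes', tags=['docs'], priority='H')
print(task['uuid'], task['description'])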
def collect_results(): """Runs all platforms/backends/benchmarks and returns as list of BenchmarkResults, sorted by benchmark and time taken. """ results = [] for exe, backendname in EXE_BACKEND_MATRIX: results.extend(benchmark_process_and_backend(exe, backendname)) results.extend(benchmark_go()) results.sort( key=lambda br: (br.benchmark, float(br.time), br.platform, br.backend)) return results
Runs all platforms/backends/benchmarks and returns as list of BenchmarkResults, sorted by benchmark and time taken.
def phases_with(self, **kwargs) -> [PhaseOutput]: """ Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with phase, data or pipeline as the key. Parameters ---------- kwargs Filters, e.g. pipeline=pipeline1 """ return [phase for phase in self.phases if all([getattr(phase, key) == value for key, value in kwargs.items()])]
Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with phase, data or pipeline as the key. Parameters ---------- kwargs Filters, e.g. pipeline=pipeline1
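A hedged sketch; `agg` is assumed to be the aggregator object holding these phases:

matching = agg.phases_with(pipeline='pipeline1', data='dataset_1')
for phase in matching:
    print(phase.pipeline, phase.data)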