text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def send_group_image(self, sender, receiver, media_id):
    """Send an image message to a group chat.

    :param sender: the sending user
    :param receiver: the conversation (chat) ID
    :param media_id: media-file id of the image, obtained from the
        upload-media API
    :return: the JSON payload returned by the server
    """
    chat_type = 'group'
    return self.send_image(sender, chat_type, receiver, media_id)
[ "def", "send_group_image", "(", "self", ",", "sender", ",", "receiver", ",", "media_id", ")", ":", "return", "self", ".", "send_image", "(", "sender", ",", "'group'", ",", "receiver", ",", "media_id", ")" ]
29.3
14.9
def dicom_to_nifti(dicom_input, output_file):
    """Convert an anatomical dicom series to a nifti image.

    Examples: See unit test

    :param dicom_input: directory with the dicom files for a single scan,
        or list of read-in dicoms
    :param output_file: filepath to the output nifti (or None to skip saving)
    :return: dict with the output filepath, the nibabel image and the
        maximum slice increment found
    :raises ConversionError: when no dicom files are provided
    """
    if len(dicom_input) <= 0:
        raise ConversionError('NO_DICOM_FILES_FOUND')

    # remove duplicate slices based on position and data
    dicom_input = _remove_duplicate_slices(dicom_input)
    # remove localizers based on image type
    dicom_input = _remove_localizers_by_imagetype(dicom_input)
    if settings.validate_slicecount:
        # remove localizers based on image orientation (only valid if slicecount is validated)
        dicom_input = _remove_localizers_by_orientation(dicom_input)
        # validate all the dicom files for correct orientations
        common.validate_slicecount(dicom_input)
    if settings.validate_orientation:
        # validate that all slices have the same orientation
        common.validate_orientation(dicom_input)
    if settings.validate_orthogonal:
        # validate that we have an orthogonal image (to detect gantry tilting etc)
        common.validate_orthogonal(dicom_input)

    # sort the dicoms
    dicom_input = common.sort_dicoms(dicom_input)

    # validate slice increment inconsistent
    slice_increment_inconsistent = False
    if settings.validate_slice_increment:
        # validate that all slices have a consistent slice increment
        common.validate_slice_increment(dicom_input)
    elif common.is_slice_increment_inconsistent(dicom_input):
        slice_increment_inconsistent = True

    # if inconsistent increment and we allow resampling then do the resampling based
    # conversion to maintain the correct geometric shape
    if slice_increment_inconsistent and settings.resample:
        nii_image, max_slice_increment = _convert_slice_incement_inconsistencies(dicom_input)
    # do the normal conversion
    else:
        # Get data; originally z,y,x, transposed to x,y,z
        data = common.get_volume_pixeldata(dicom_input)
        affine, max_slice_increment = common.create_affine(dicom_input)
        # Convert to nifti
        nii_image = nibabel.Nifti1Image(data, affine)

    # Set TR and TE if available.
    # BUG FIX: the original tested Tag(0x0018, 0x0081) (EchoTime) twice;
    # RepetitionTime is Tag(0x0018, 0x0080), so check both distinct tags.
    if Tag(0x0018, 0x0080) in dicom_input[0] and Tag(0x0018, 0x0081) in dicom_input[0]:
        common.set_tr_te(nii_image,
                         float(dicom_input[0].RepetitionTime),
                         float(dicom_input[0].EchoTime))

    # Save to disk
    if output_file is not None:
        logger.info('Saving nifti to disk %s' % output_file)
        nii_image.to_filename(output_file)

    return {'NII_FILE': output_file,
            'NII': nii_image,
            'MAX_SLICE_INCREMENT': max_slice_increment}
[ "def", "dicom_to_nifti", "(", "dicom_input", ",", "output_file", ")", ":", "if", "len", "(", "dicom_input", ")", "<=", "0", ":", "raise", "ConversionError", "(", "'NO_DICOM_FILES_FOUND'", ")", "# remove duplicate slices based on position and data", "dicom_input", "=", ...
41.227273
22.924242
def prop_budget(self, budget):
    """Set a limit on the number of propagations."""
    # Nothing to do when no underlying solver instance exists.
    if not self.glucose:
        return
    pysolvers.glucose3_pbudget(self.glucose, budget)
[ "def", "prop_budget", "(", "self", ",", "budget", ")", ":", "if", "self", ".", "glucose", ":", "pysolvers", ".", "glucose3_pbudget", "(", "self", ".", "glucose", ",", "budget", ")" ]
26.857143
14
def updatetext(self):
    """Recompute the textual value from the text content of the children.

    Only supported on elements that are a ``TEXTCONTAINER``.
    """
    if not self.TEXTCONTAINER:
        return
    parts = []
    for child in self:
        if isinstance(child, AbstractElement):
            # Recurse first so the child's text is up to date.
            child.updatetext()
            parts.append(child.text())
        elif isstring(child):
            parts.append(child)
    self.data = ["".join(parts)]
[ "def", "updatetext", "(", "self", ")", ":", "if", "self", ".", "TEXTCONTAINER", ":", "s", "=", "\"\"", "for", "child", "in", "self", ":", "if", "isinstance", "(", "child", ",", "AbstractElement", ")", ":", "child", ".", "updatetext", "(", ")", "s", "...
41.636364
9.636364
def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
    """Rename a DN in the LDAP database; see the ldap module.

    Does not return a result if transactions are enabled.
    """
    # Abstract operation: concrete backends must override this.
    raise NotImplementedError()
[ "def", "rename", "(", "self", ",", "dn", ":", "str", ",", "new_rdn", ":", "str", ",", "new_base_dn", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "raise", "NotImplementedError", "(", ")" ]
42.333333
14
def configureLastWill(self, topic, payload, QoS):
    """Configure the last-will topic, payload and QoS of the client.

    Should be called before connect. This is a public facing API inherited
    by application level public clients.

    **Syntax**

    .. code:: python

      myShadowClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)
      myJobsClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)

    **Parameters**

    *topic* - Topic name that last will publishes to.

    *payload* - Payload to publish for last will.

    *QoS* - Quality of Service. Could be 0 or 1.

    **Returns**

    None
    """
    # Pure delegation to the wrapped AWSIoTMQTTClient instance.
    self._AWSIoTMQTTClient.configureLastWill(topic, payload, QoS)
[ "def", "configureLastWill", "(", "self", ",", "topic", ",", "payload", ",", "QoS", ")", ":", "# AWSIoTMQTTClient.configureLastWill(srcTopic, srcPayload, srcQos)", "self", ".", "_AWSIoTMQTTClient", ".", "configureLastWill", "(", "topic", ",", "payload", ",", "QoS", ")"...
29.310345
30.068966
def getRolesForUser(self, username, filter=None, maxCount=None):
    """Return the list of role names assigned to a particular user account.

    Inputs:
       username - name of the user for whom the roles are returned
       filter - filter to be applied to the resultant role set.
       maxCount - maximum number of results to return for this query
    """
    params = {"f": "json",
              "username": username}
    # Optional query parameters are only sent when explicitly supplied.
    for key, value in (("filter", filter), ("maxCount", maxCount)):
        if value is not None:
            params[key] = value
    uURL = self._url + "/roles/getRolesForUser"
    return self._post(url=uURL,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
[ "def", "getRolesForUser", "(", "self", ",", "username", ",", "filter", "=", "None", ",", "maxCount", "=", "None", ")", ":", "uURL", "=", "self", ".", "_url", "+", "\"/roles/getRolesForUser\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"username\...
41.086957
17.347826
def resetPassword(self, userId):
    """Change a user's password to a system-generated value."""
    # Headers must be prepared for this specific SOAP call first.
    self._setHeaders('resetPassword')
    service = self._sforce.service
    return service.resetPassword(userId)
[ "def", "resetPassword", "(", "self", ",", "userId", ")", ":", "self", ".", "_setHeaders", "(", "'resetPassword'", ")", "return", "self", ".", "_sforce", ".", "service", ".", "resetPassword", "(", "userId", ")" ]
32.333333
18
def create_widget(self):
    """Create the underlying CoordinatorLayout widget."""
    decl = self.declaration
    ctx = self.get_context()
    self.widget = CoordinatorLayout(ctx, None, decl.style)
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "self", ".", "widget", "=", "CoordinatorLayout", "(", "self", ".", "get_context", "(", ")", ",", "None", ",", "d", ".", "style", ")" ]
29.666667
17
def _quantize_params(qsym, params, th_dict): """Given a quantized symbol and a dict of params that have not been quantized, generate quantized params. Currently only supports quantizing the arg_params with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols that are excluded from being quantized, their corresponding params will not be quantized, but saved together with quantized params of the symbols that have been quantized. Parameters ---------- qsym : Symbol Quantized symbol from FP32 symbol. params : dict of str->NDArray th_dict: dict of min/max pairs of layers' output """ inputs_name = qsym.list_arguments() quantized_params = {} for name in inputs_name: if name.endswith(('weight_quantize', 'bias_quantize')): original_name = name[:-len('_quantize')] param = params[original_name] val, vmin, vmax = ndarray.contrib.quantize(data=param, min_range=ndarray.min(param), max_range=ndarray.max(param), out_type='int8') quantized_params[name] = val quantized_params[name+'_min'] = vmin quantized_params[name+'_max'] = vmax elif name in params: quantized_params[name] = params[name] elif name.endswith(('_min')): output = name[: - len('_min')] if output in th_dict: quantized_params[name] = ndarray.array([th_dict[output][0]]) elif name.endswith(('_max')): output = name[: - len('_min')] if output in th_dict: quantized_params[name] = ndarray.array([th_dict[output][1]]) return quantized_params
[ "def", "_quantize_params", "(", "qsym", ",", "params", ",", "th_dict", ")", ":", "inputs_name", "=", "qsym", ".", "list_arguments", "(", ")", "quantized_params", "=", "{", "}", "for", "name", "in", "inputs_name", ":", "if", "name", ".", "endswith", "(", ...
46.538462
17.128205
def sample_to(self, count, skip_header_rows, strategy, target):
    """Sample rows from GCS or local file and save results to target file.

    Args:
      count: number of rows to sample. If strategy is "BIGQUERY", it is used
          as approximate number.
      skip_header_rows: whether to skip first row when reading from source.
      strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling happens
          in local memory, and number of resulting rows matches count. If
          BigQuery, sampling is done with BigQuery in cloud, and the number
          of resulting rows will be approximated to count.
      target: The target file path, can be GCS or local path.

    Raises:
      Exception if strategy is "BIGQUERY" but source is not a GCS path.
    """
    # TODO(qimingj) Add unit test
    # Read data from source into DataFrame.
    if sys.version_info.major > 2:
        xrange = range  # for python 3 compatibility
    # NOTE(review): assigning `xrange` makes it local to the whole function;
    # under Python 2 this branch never runs, so the later xrange(...) call
    # would raise UnboundLocalError — confirm Python 3 is the only target.
    if strategy == 'BIGQUERY':
        import datalab.bigquery as bq
        if not self.path.startswith('gs://'):
            raise Exception('Cannot use BIGQUERY if data is not in GCS')
        # Approximate sampling: ask BigQuery for a random percentage of rows
        # sized so the result is roughly `count` rows.
        federated_table = self._create_federated_table(skip_header_rows)
        row_count = self._get_gcs_csv_row_count(federated_table)
        query = bq.Query('SELECT * from data', data_sources={'data': federated_table})
        sampling = bq.Sampling.random(count * 100 / float(row_count))
        sample = query.sample(sampling=sampling)
        df = sample.to_dataframe()
    elif strategy == 'LOCAL':
        local_file = self.path
        if self.path.startswith('gs://'):
            # Stage the GCS object into a local temp file before reading.
            local_file = tempfile.mktemp()
            datalab.utils.gcs_copy_file(self.path, local_file)
        with open(local_file) as f:
            row_count = sum(1 for line in f)
        # Pick exactly `count` rows by telling pandas which row indexes to skip.
        start_row = 1 if skip_header_rows is True else 0
        skip_count = row_count - count - 1 if skip_header_rows is True else row_count - count
        skip = sorted(random.sample(xrange(start_row, row_count), skip_count))
        header_row = 0 if skip_header_rows is True else None
        df = pd.read_csv(local_file, skiprows=skip, header=header_row, delimiter=self._delimiter)
        if self.path.startswith('gs://'):
            # Clean up the staged temp copy.
            os.remove(local_file)
    else:
        raise Exception('strategy must be BIGQUERY or LOCAL')
    # Write to target.
    if target.startswith('gs://'):
        # Write through a temp file, then copy the bytes up to GCS.
        with tempfile.NamedTemporaryFile() as f:
            df.to_csv(f, header=False, index=False)
            f.flush()
            datalab.utils.gcs_copy_file(f.name, target)
    else:
        with open(target, 'w') as f:
            df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
[ "def", "sample_to", "(", "self", ",", "count", ",", "skip_header_rows", ",", "strategy", ",", "target", ")", ":", "# TODO(qimingj) Add unit test", "# Read data from source into DataFrame.", "if", "sys", ".", "version_info", ".", "major", ">", "2", ":", "xrange", "...
46.490909
21.018182
def get_input_info_dict(self, signature=None):
    """Describe the inputs required by a signature.

    Args:
      signature: A string with the signature to get inputs information for.
        If None, the default signature is used if defined.

    Returns:
      The result of ModuleSpec.get_input_info_dict() for the given signature,
      and the graph variant selected by `tags` when this Module was
      initialized.

    Raises:
      KeyError: if there is no such signature.
    """
    spec = self._spec
    return spec.get_input_info_dict(signature=signature, tags=self._tags)
[ "def", "get_input_info_dict", "(", "self", ",", "signature", "=", "None", ")", ":", "return", "self", ".", "_spec", ".", "get_input_info_dict", "(", "signature", "=", "signature", ",", "tags", "=", "self", ".", "_tags", ")" ]
36.8
25.933333
def trim_docstring(docstring):
    """Remove indentation from a triple-quoted string.

    Implements the docstring-trimming algorithm specified in PEP 257:
    https://www.python.org/dev/peps/pep-0257/.

    Args:
      docstring: str, a python docstring.

    Returns:
      str, docstring with indentation removed.
    """
    if not docstring:
        return ''
    # Sentinel larger than any plausible indentation.
    max_indent = 1 << 29
    # Tabs become spaces (normal Python rules); then split into lines.
    lines = docstring.expandtabs().splitlines()
    body = lines[1:]
    # Minimum indentation over the non-blank lines after the first.
    measured = [len(ln) - len(ln.lstrip()) for ln in body if ln.lstrip()]
    indent = min(measured, default=max_indent)
    # The first line is special: strip it completely.
    trimmed = [lines[0].strip()]
    if indent < max_indent:
        trimmed.extend(ln[indent:].rstrip() for ln in body)
    # Strip off trailing and leading blank lines.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    return '\n'.join(trimmed)
[ "def", "trim_docstring", "(", "docstring", ")", ":", "if", "not", "docstring", ":", "return", "''", "# If you've got a line longer than this you have other problems...", "max_indent", "=", "1", "<<", "29", "# Convert tabs to spaces (following the normal Python rules)", "# and s...
28.55
17.875
def add_definition_tags(self, tags, project, definition_id):
    """AddDefinitionTags.

    [Preview API] Adds multiple tags to a definition.

    :param [str] tags: The tags to add.
    :param str project: Project ID or project name
    :param int definition_id: The ID of the definition.
    :rtype: [str]
    """
    routes = {}
    if project is not None:
        routes['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        routes['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    payload = self._serialize.body(tags, '[str]')
    raw = self._send(http_method='POST',
                     location_id='cb894432-134a-4d31-a839-83beceaace4b',
                     version='5.0-preview.2',
                     route_values=routes,
                     content=payload)
    return self._deserialize('[str]', self._unwrap_collection(raw))
[ "def", "add_definition_tags", "(", "self", ",", "tags", ",", "project", ",", "definition_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", ...
51.35
17.95
def make_butterworth_b_a(lowcut, highcut, SampleFreq, order=5, btype='band'):
    """Generate the b and a coefficients for a Butterworth IIR filter.

    Parameters
    ----------
    lowcut : float
        frequency of lower bandpass limit
    highcut : float
        frequency of higher bandpass limit
    SampleFreq : float
        Sample frequency of filter
    order : int, optional
        order of IIR filter. Is 5 by default
    btype : string, optional
        type of filter to make e.g. (band, low, high)

    Returns
    -------
    b : ndarray
        coefficients multiplying the current and past inputs
        (feedforward coefficients)
    a : ndarray
        coefficients multiplying the past outputs (feedback coefficients)
    """
    # Normalise the cutoff frequencies by the Nyquist frequency.
    nyq = 0.5 * SampleFreq
    low = lowcut / nyq
    high = highcut / nyq
    kind = btype.lower()
    if kind == 'band':
        critical = [low, high]
    elif kind == 'low':
        critical = low
    elif kind == 'high':
        critical = high
    else:
        raise ValueError('Filter type unknown')
    return scipy.signal.butter(order, critical, btype=btype)
[ "def", "make_butterworth_b_a", "(", "lowcut", ",", "highcut", ",", "SampleFreq", ",", "order", "=", "5", ",", "btype", "=", "'band'", ")", ":", "nyq", "=", "0.5", "*", "SampleFreq", "low", "=", "lowcut", "/", "nyq", "high", "=", "highcut", "/", "nyq", ...
32.111111
19.611111
def make_cdisk_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                      normalize=False):
    """Make a kernel for a PSF-convolved 2D disk.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    sigma : float
        68% containment radius in degrees.
    """
    # Convert 68% containment radius to the disk radius parameter.
    sigma /= 0.8246211251235321
    dtheta = psf.dtheta
    egy = psf.energies
    # Pixel-centre distances from (xpix, ypix), scaled to degrees.
    dist = make_pixel_distance(npix, xpix, ypix)
    dist *= cdelt
    kernel = np.zeros((len(egy), npix, npix))
    for i, _ in enumerate(egy):
        # Disk convolution of the PSF profile at this energy plane.
        psfc = convolve2d_disk(lambda t: psf.eval(i, t, scale_fn=psf_scale_fn),
                               dtheta, sigma)
        kernel[i] = np.interp(np.ravel(dist), dtheta, psfc).reshape(dist.shape)
    if normalize:
        kernel /= (np.sum(kernel, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return kernel
[ "def", "make_cdisk_kernel", "(", "psf", ",", "sigma", ",", "npix", ",", "cdelt", ",", "xpix", ",", "ypix", ",", "psf_scale_fn", "=", "None", ",", "normalize", "=", "False", ")", ":", "sigma", "/=", "0.8246211251235321", "dtheta", "=", "psf", ".", "dtheta...
25.096774
22.903226
def execute(self):
    """Execute the search and return an instance of ``Response`` wrapping
    all the data.
    """
    # Results are cached; repeated calls return the same response object.
    if hasattr(self, "_executed"):
        return self._executed
    es = connections.get_connection(self._using)
    raw = es.search(index=self._index,
                    doc_type=self._doc_type,
                    body=self.to_dict(),
                    **self._params)
    if getattr(self, "_full", False) is False:
        self._executed = ShallowResponse(raw, callbacks=self._doc_type_map)
    else:
        self._executed = FullResponse(raw)
    return self._executed
[ "def", "execute", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"_executed\"", ")", ":", "return", "self", ".", "_executed", "es", "=", "connections", ".", "get_connection", "(", "self", ".", "_using", ")", "if", "getattr", "(", "self", "...
42.608696
23.565217
def serialize_html_fragment(el, skip_outer=False):
    """Serialize a single lxml element as HTML.

    The serialized form includes the element's tail.  If ``skip_outer`` is
    true, then the outermost tag is not serialized.
    """
    assert not isinstance(el, basestring), (
        "You should pass in an element, not a string like %r" % el)
    html = etree.tostring(el, method="html", encoding=_unicode)
    if not skip_outer:
        return html
    # Drop everything up to and including the opening tag's '>' and the
    # closing tag itself, keeping only the inner markup.
    inner = html[html.find('>') + 1:]
    inner = inner[:inner.rfind('<')]
    return inner.strip()
[ "def", "serialize_html_fragment", "(", "el", ",", "skip_outer", "=", "False", ")", ":", "assert", "not", "isinstance", "(", "el", ",", "basestring", ")", ",", "(", "\"You should pass in an element, not a string like %r\"", "%", "el", ")", "html", "=", "etree", "...
37
13.705882
def find_first_tag(tags, entity_type, after_index=-1):
    """Search ``tags`` for the first entity of ``entity_type`` past an index.

    Args:
        tags (list): tags with entity types to compare against entity_type
        entity_type (str): the entity type to look for in tags
        after_index (int): the tag's start token must be greater than this

    Returns:
        tuple: ``(tag, v, confidence)`` where ``tag`` is the matching tag
        dict, ``v`` is the matched word and ``confidence`` is a measure of
        accuracy (1 is full confidence, 0 is none); ``(None, None, None)``
        when nothing matches.
    """
    wanted = entity_type.lower()
    for tag in tags:
        past_index = tag.get('start_token', 0) > after_index
        for entity in tag.get('entities'):
            for value, etype in entity.get('data'):
                if past_index and etype.lower() == wanted:
                    return tag, value, entity.get('confidence')
    return None, None, None
[ "def", "find_first_tag", "(", "tags", ",", "entity_type", ",", "after_index", "=", "-", "1", ")", ":", "for", "tag", "in", "tags", ":", "for", "entity", "in", "tag", ".", "get", "(", "'entities'", ")", ":", "for", "v", ",", "t", "in", "entity", "."...
40.904762
22.857143
def createLrrBafPlot(raw_dir, problematic_samples, format, dpi, out_prefix):
    """Creates the LRR and BAF plot.

    :param raw_dir: the directory containing the intensities.
    :param problematic_samples: the file containing the problematic samples.
    :param format: the format of the plot.
    :param dpi: the DPI of the resulting images.
    :param out_prefix: the prefix of the output file.

    :type raw_dir: str
    :type problematic_samples: str
    :type format: str
    :type out_prefix: str

    Creates the LRR (Log R Ratio) and BAF (B Allele Frequency) of the
    problematic samples using the
    :py:mod:`pyGenClean.SexCheck.baf_lrr_plot` module.
    """
    # First, we create an output directory
    dir_name = out_prefix + ".LRR_BAF"
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)

    # Command-line style options forwarded to the plotting module
    baf_lrr_plot_options = [
        "--problematic-samples", problematic_samples,
        "--raw-dir", raw_dir,
        "--format", format,
        "--dpi", str(dpi),
        "--out", os.path.join(dir_name, "baf_lrr"),
    ]

    try:
        baf_lrr_plot.main(baf_lrr_plot_options)
    except baf_lrr_plot.ProgramError as e:
        raise ProgramError("BAF LRR plot: {}".format(e))
[ "def", "createLrrBafPlot", "(", "raw_dir", ",", "problematic_samples", ",", "format", ",", "dpi", ",", "out_prefix", ")", ":", "# First, we create an output directory", "dir_name", "=", "out_prefix", "+", "\".LRR_BAF\"", "if", "not", "os", ".", "path", ".", "isdir...
36.588235
19.558824
def __var_find_to_py_ast(
    var_name: str, ns_name: str, py_var_ctx: ast.AST
) -> GeneratedPyAST:
    """Generate Var.find calls for the named symbol."""
    # sym.symbol(var_name, ns=ns_name)
    sym_node = ast.Call(
        func=_NEW_SYM_FN_NAME,
        args=[ast.Str(var_name)],
        keywords=[ast.keyword(arg="ns", value=ast.Str(ns_name))],
    )
    # Var.find(sym)
    find_node = ast.Call(func=_FIND_VAR_FN_NAME, args=[sym_node], keywords=[])
    # Var.find(sym).value with the requested expression context
    return GeneratedPyAST(
        node=ast.Attribute(value=find_node, attr="value", ctx=py_var_ctx)
    )
[ "def", "__var_find_to_py_ast", "(", "var_name", ":", "str", ",", "ns_name", ":", "str", ",", "py_var_ctx", ":", "ast", ".", "AST", ")", "->", "GeneratedPyAST", ":", "return", "GeneratedPyAST", "(", "node", "=", "ast", ".", "Attribute", "(", "value", "=", ...
30.714286
16.47619
def parse(md, model, encoding='utf-8', config=None):
    """
    Translate the Versa Markdown syntax into Versa model relationships

    md -- markdown source text
    model -- Versa model to take the output relationship
    encoding -- character encoding (defaults to UTF-8)
    config -- optional dict of parsing conventions (auto-typing by header
        level, value interpretations)

    Returns: The overall base URI (`@base`) specified in the Markdown file, or None

    >>> from versa.driver import memory
    >>> from versa.reader.md import from_markdown
    >>> m = memory.connection()
    >>> from_markdown(open('test/resource/poetry.md').read(), m)
    'http://uche.ogbuji.net/poems/'
    >>> m.size()
    40
    >>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
    (I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {})
    """
    #Set up configuration to interpret the conventions for the Markdown
    config = config or {}
    #This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources
    syntaxtypemap = {}
    if config.get('autotype-h1'): syntaxtypemap['h1'] = config.get('autotype-h1')
    if config.get('autotype-h2'): syntaxtypemap['h2'] = config.get('autotype-h2')
    if config.get('autotype-h3'): syntaxtypemap['h3'] = config.get('autotype-h3')
    interp_stanza = config.get('interpretations', {})
    interpretations = {}

    def setup_interpretations(interp):
        #Map the interpretation IRIs to functions to do the data prep
        for prop, interp_key in interp.items():
            if interp_key.startswith('@'):
                # '@'-prefixed keys are relative to the Versa base IRI
                interp_key = iri.absolutize(interp_key[1:], VERSA_BASEIRI)
            if interp_key in PREP_METHODS:
                interpretations[prop] = PREP_METHODS[interp_key]
            else:
                #just use the identity, i.e. no-op
                interpretations[prop] = lambda x, **kwargs: x

    setup_interpretations(interp_stanza)

    #Prep ID generator, in case needed
    idg = idgen(None)

    #Parse the Markdown
    #Alternately:
    #from xml.sax.saxutils import escape, unescape
    #h = markdown.markdown(escape(md.decode(encoding)), output_format='html5')
    #Note: even using safe_mode this should not be presumed safe from tainted input
    #h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5')
    comments = mkdcomments.CommentsExtension()
    h = markdown.markdown(md, safe_mode='escape', output_format='html5', extensions=[comments])

    #doc = html.markup_fragment(inputsource.text(h.encode('utf-8')))
    # Wrap in a root element so the tree builder sees a single document
    tb = treebuilder()
    h = '<html>' + h + '</html>'
    root = tb.parse(h)

    #Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest
    first_h1 = next(select_name(descendants(root), 'h1'))
    #top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2'))

    #Extract header elements. Notice I use an empty element with an empty parent as the default result
    docheader = next(select_value(select_name(descendants(root), 'h1'), '@docheader'), element('empty', parent=root)) # //h1[.="@docheader"]
    sections = filter(lambda x: x.xml_value != '@docheader', select_name_pattern(descendants(root), HEADER_PAT)) # //h1[not(.="@docheader")]|h2[not(.="@docheader")]|h3[not(.="@docheader")]

    def fields(sect):
        '''
        Each section represents a resource and contains a list with its properties
        This generator parses the list and yields the key value pairs representing the properties
        Some properties have attributes, expressed in markdown as a nested list. If present these attributes
        Are yielded as well, else None is yielded
        '''
        #import logging; logging.debug(repr(sect))
        #Pull all the list elements until the next header. This accommodates multiple lists in a section
        sect_body_items = itertools.takewhile(lambda x: HEADER_PAT.match(x.xml_name) is None, select_elements(following_siblings(sect)))
        #results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3')
        #field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ]
        field_list = [ li for elem in select_name(sect_body_items, 'ul') for li in select_name(elem, 'li') ]

        def parse_li(pair):
            '''
            Parse each list item into a property pair
            '''
            if pair.strip():
                matched = REL_PAT.match(pair)
                if not matched: raise ValueError(_('Syntax error in relationship expression: {0}'.format(pair)))
                #print matched.groups()
                # Groups 3/4 carry the property name; groups 7/9/11/12 carry the
                # value in its different syntactic forms (resource, quoted text,
                # alternate quoting, or bare token)
                if matched.group(3): prop = matched.group(3).strip()
                if matched.group(4): prop = matched.group(4).strip()
                if matched.group(7):
                    val = matched.group(7).strip()
                    typeindic = RES_VAL
                elif matched.group(9):
                    val = matched.group(9).strip()
                    typeindic = TEXT_VAL
                elif matched.group(11):
                    val = matched.group(11).strip()
                    typeindic = TEXT_VAL
                elif matched.group(12):
                    val = matched.group(12).strip()
                    typeindic = UNKNOWN_VAL
                else:
                    val = ''
                    typeindic = UNKNOWN_VAL
                #prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ]
                #import logging; logging.debug(repr((prop, val)))
                return prop, val, typeindic
            return None, None, None

        #Go through each list item
        for li in field_list:
            #Is there a nested list, which expresses attributes on a property
            if list(select_name(li, 'ul')):
                #main = ''.join([ node.xml_value
                #        for node in itertools.takewhile(
                #            lambda x: x.xml_name != 'ul', select_elements(li)
                #            )
                #    ])
                # The text nodes preceding the nested list form the main property
                main = ''.join(itertools.takewhile(
                        lambda x: isinstance(x, text), li.xml_children
                        ))
                #main = li.xml_select('string(ul/preceding-sibling::node())')
                prop, val, typeindic = parse_li(main)
                subfield_list = [ parse_li(sli.xml_value) for e in select_name(li, 'ul') for sli in ( select_name(e, 'li') ) ]
                subfield_list = [ (p, v, t) for (p, v, t) in subfield_list if p is not None ]
                #Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader
                if val is None: val = ''
                yield prop, val, typeindic, subfield_list
            #Just a regular, unadorned property
            else:
                prop, val, typeindic = parse_li(li.xml_value)
                if prop: yield prop, val, typeindic, None

    iris = {}

    #Gather the document-level metadata from the @docheader section
    base = propbase = rtbase = document_iri = default_lang = None
    for prop, val, typeindic, subfield_list in fields(docheader):
        #The @iri section is where key IRI prefixes can be set
        if prop == '@iri':
            # NOTE(review): this loop rebinds `typeindic` from the enclosing
            # for — harmless as written, but confirm it is intentional
            for (k, uri, typeindic) in subfield_list:
                if k == '@base':
                    base = propbase = rtbase = uri
                elif k == '@property':
                    propbase = uri
                elif k == '@resource-type':
                    rtbase = uri
                else:
                    iris[k] = uri
        #The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship
        elif prop == '@interpretations':
            #Iterate over items from the @docheader/@interpretations section to set up for further parsing
            interp = {}
            for k, v, x in subfield_list:
                interp[I(iri.absolutize(k, propbase))] = v
            setup_interpretations(interp)
        #Setting an IRI for this very document being parsed
        elif prop == '@document':
            document_iri = val
        elif prop == '@language':
            default_lang = val
        #If we have a resource to which to attach them, just attach all other properties
        elif document_iri or base:
            rid = document_iri or base
            fullprop = I(iri.absolutize(prop, propbase or base))
            if fullprop in interpretations:
                val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
                if val is not None: model.add(rid, fullprop, val)
            else:
                model.add(rid, fullprop, val)

    #Default IRI prefixes if @iri/@base is set
    if not propbase: propbase = base
    if not rtbase: rtbase = base
    if not document_iri: document_iri = base

    #Go through the resources expressed in remaining sections
    for sect in sections:
        #if U(sect) == '@docheader': continue #Not needed because excluded by ss
        #The header can take one of 4 forms: "ResourceID" "ResourceID [ResourceType]" "[ResourceType]" or "[]"
        #The 3rd form is for an anonymous resource with specified type and the 4th an anonymous resource with unspecified type
        matched = RESOURCE_PAT.match(sect.xml_value)
        if not matched: raise ValueError(_('Syntax error in resource header: {0}'.format(sect.xml_value)))
        rid = matched.group(1)
        rtype = matched.group(3)
        if rtype: rtype = I(iri.absolutize(rtype, base))
        if rid: rid = I(iri.absolutize(rid, base))
        if not rid: rid = next(idg)

        #Resource type might be set by syntax config
        if not rtype: rtype = syntaxtypemap.get(sect.xml_name)
        if rtype: model.add(rid, TYPE_REL, rtype)

        #Add the property
        for prop, val, typeindic, subfield_list in fields(sect):
            attrs = {}
            for (aprop, aval, atype) in subfield_list or ():
                if atype == RES_VAL:
                    # Expand prefix:suffix abbreviations against the @iri map,
                    # otherwise absolutize against the resource-type base
                    valmatch = URI_ABBR_PAT.match(aval)
                    if valmatch:
                        uri = iris[valmatch.group(1)]
                        attrs[aprop] = URI_ABBR_PAT.sub(uri + '\\2\\3', aval)
                    else:
                        attrs[aprop] = I(iri.absolutize(aval, rtbase))
                elif atype == TEXT_VAL:
                    attrs[aprop] = aval
                elif atype == UNKNOWN_VAL:
                    attrs[aprop] = aval
                    if aprop in interpretations:
                        aval = interpretations[aprop](aval, rid=rid, fullprop=aprop, base=base, model=model)
                        if aval is not None: attrs[aprop] = aval
                    else:
                        attrs[aprop] = aval

            propmatch = URI_ABBR_PAT.match(prop)
            if propmatch:
                uri = iris[propmatch.group(1)]
                fullprop = URI_ABBR_PAT.sub(uri + '\\2\\3', prop)
            else:
                fullprop = I(iri.absolutize(prop, propbase))
            if typeindic == RES_VAL:
                # NOTE(review): this matches against `aval` (the last attribute
                # value from the loop above) but substitutes into `val` — looks
                # like a copy-paste slip; confirm whether `val` was intended
                valmatch = URI_ABBR_PAT.match(aval)
                if valmatch:
                    uri = iris[valmatch.group(1)]
                    val = URI_ABBR_PAT.sub(uri + '\\2\\3', val)
                else:
                    val = I(iri.absolutize(val, rtbase))
                model.add(rid, fullprop, val, attrs)
            elif typeindic == TEXT_VAL:
                if '@lang' not in attrs: attrs['@lang'] = default_lang
                model.add(rid, fullprop, val, attrs)
            elif typeindic == UNKNOWN_VAL:
                if fullprop in interpretations:
                    val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
                    if val is not None: model.add(rid, fullprop, val)
                else:
                    model.add(rid, fullprop, val, attrs)

            #resinfo = AB_RESOURCE_PAT.match(val)
            #if resinfo:
            #    val = resinfo.group(1)
            #    valtype = resinfo.group(3)
            #    if not val: val = model.generate_resource()
            #    if valtype: attrs[TYPE_REL] = valtype

    return document_iri
[ "def", "parse", "(", "md", ",", "model", ",", "encoding", "=", "'utf-8'", ",", "config", "=", "None", ")", ":", "#Set up configuration to interpret the conventions for the Markdown", "config", "=", "config", "or", "{", "}", "#This mapping takes syntactical elements such...
48.273438
23.492188
def lhood(self, trsig, recalc=False, cachefile=None): """Returns likelihood of transit signal Returns sum of ``trsig`` MCMC samples evaluated at ``self.kde``. :param trsig: :class:`vespa.TransitSignal` object. :param recalc: (optional) Whether to recalculate likelihood (if calculation is cached). :param cachefile: (optional) File that holds likelihood calculation cache. """ if not hasattr(self,'kde'): self._make_kde() if cachefile is None: cachefile = self.lhoodcachefile if cachefile is None: cachefile = 'lhoodcache.dat' lhoodcache = _loadcache(cachefile) key = hashcombine(self, trsig) if key in lhoodcache and not recalc: return lhoodcache[key] if self.is_ruled_out: return 0 N = trsig.kde.dataset.shape[1] lh = self.kde(trsig.kde.dataset).sum() / N with open(cachefile, 'a') as fout: fout.write('%i %g\n' % (key, lh)) return lh
[ "def", "lhood", "(", "self", ",", "trsig", ",", "recalc", "=", "False", ",", "cachefile", "=", "None", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'kde'", ")", ":", "self", ".", "_make_kde", "(", ")", "if", "cachefile", "is", "None", ":", ...
27
17.65
def cond_remove_all(ol,**kwargs): ''' from elist.elist import * ol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9] id(ol) def afterCH(ele,ch): cond = (ord(str(ele)) > ord(ch)) return(cond) new = cond_remove_all(ol,cond_func=afterCH,cond_func_args=['B']) ol new id(ol) id(new) #### ol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9] id(ol) rslt = cond_remove_all(ol,cond_func=afterCH,cond_func_args=['B'],mode='original') ol rslt id(ol) id(rslt) ''' cond_func = kwargs['cond_func'] if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" new = copy.deepcopy(ol) selected = find_all(new,cond_func,*cond_func_args) selected_indexes = array_map(selected,lambda ele:ele['index']) new = pop_indexes(new,selected_indexes)['list'] if(mode == "new"): return(new) else: ol.clear() ol.extend(new) return(ol)
[ "def", "cond_remove_all", "(", "ol", ",", "*", "*", "kwargs", ")", ":", "cond_func", "=", "kwargs", "[", "'cond_func'", "]", "if", "(", "'cond_func_args'", "in", "kwargs", ")", ":", "cond_func_args", "=", "kwargs", "[", "'cond_func_args'", "]", "else", ":"...
26.209302
21.651163
def _handle_response(self, response): """Returns the given response or raises an APIError for non-2xx responses. :param requests.Response response: HTTP response :returns: requested data :rtype: requests.Response :raises APIError: for non-2xx responses """ if not str(response.status_code).startswith('2'): raise get_api_error(response) return response
[ "def", "_handle_response", "(", "self", ",", "response", ")", ":", "if", "not", "str", "(", "response", ".", "status_code", ")", ".", "startswith", "(", "'2'", ")", ":", "raise", "get_api_error", "(", "response", ")", "return", "response" ]
31.75
13.333333
def distances(self): """The matrix with the all-pairs shortest path lenghts""" from molmod.ext import graphs_floyd_warshall distances = np.zeros((self.num_vertices,)*2, dtype=int) #distances[:] = -1 # set all -1, which is just a very big integer #distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero for i, j in self.edges: # set edges to one distances[i, j] = 1 distances[j, i] = 1 graphs_floyd_warshall(distances) return distances
[ "def", "distances", "(", "self", ")", ":", "from", "molmod", ".", "ext", "import", "graphs_floyd_warshall", "distances", "=", "np", ".", "zeros", "(", "(", "self", ".", "num_vertices", ",", ")", "*", "2", ",", "dtype", "=", "int", ")", "#distances[:] = -...
47.454545
15
def rst2node(doc_name, data): """Converts a reStructuredText into its node """ if not data: return parser = docutils.parsers.rst.Parser() document = docutils.utils.new_document('<%s>' % doc_name) document.settings = docutils.frontend.OptionParser().get_default_values() document.settings.tab_width = 4 document.settings.pep_references = False document.settings.rfc_references = False document.settings.env = Env() parser.parse(data, document) if len(document.children) == 1: return document.children[0] else: par = docutils.nodes.paragraph() for child in document.children: par += child return par
[ "def", "rst2node", "(", "doc_name", ",", "data", ")", ":", "if", "not", "data", ":", "return", "parser", "=", "docutils", ".", "parsers", ".", "rst", ".", "Parser", "(", ")", "document", "=", "docutils", ".", "utils", ".", "new_document", "(", "'<%s>'"...
34.1
11.45
def run(self): """ Perform phantomas run """ self._logger.info("running for <{url}>".format(url=self._url)) args = format_args(self._options) self._logger.debug("command: `{cmd}` / args: {args}". format(cmd=self._cmd, args=args)) # run the process try: process = Popen( args=[self._cmd] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE ) pid = process.pid self._logger.debug("running as PID #{pid}".format(pid=pid)) except OSError as ex: raise PhantomasRunError( "Failed to run phantomas: {0}".format(ex), ex.errno) # wait to complete try: stdout, stderr = process.communicate() returncode = process.returncode except Exception: raise PhantomasRunError("Failed to complete the run") # for Python 3.x - decode bytes to string stdout = stdout.decode('utf8') stderr = stderr.decode('utf8') # check the response code self._logger.debug("completed with return code #{returncode}". format(returncode=returncode)) if stderr != '': self._logger.debug("stderr: {stderr}".format(stderr=stderr)) raise PhantomasFailedError(stderr.strip(), returncode) # try parsing the response try: results = json.loads(stdout) except Exception: raise PhantomasResponseParsingError("Unable to parse the response") if self._options.get("runs", 0) > 1: return Runs(self._url, results) else: return Results(self._url, results)
[ "def", "run", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"running for <{url}>\"", ".", "format", "(", "url", "=", "self", ".", "_url", ")", ")", "args", "=", "format_args", "(", "self", ".", "_options", ")", "self", ".", "_log...
32.45283
20.132075
def search( self, search_space, valid_data, init_args=[], train_args=[], init_kwargs={}, train_kwargs={}, module_args={}, module_kwargs={}, max_search=None, shuffle=True, verbose=True, **score_kwargs, ): """ Args: search_space: see config_generator() documentation valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split init_args: (list) positional args for initializing the model train_args: (list) positional args for training the model init_kwargs: (dict) keyword args for initializing the model train_kwargs: (dict) keyword args for training the model module_args: (dict) Dictionary of lists of module args module_kwargs: (dict) Dictionary of dictionaries of module kwargs max_search: see config_generator() documentation shuffle: see config_generator() documentation Returns: best_model: the highest performing trained model Note: Initialization is performed by ModelTuner instead of passing a pre-initialized model so that tuning may be performed over all model parameters, including the network architecture (which is defined before the train loop). """ raise NotImplementedError()
[ "def", "search", "(", "self", ",", "search_space", ",", "valid_data", ",", "init_args", "=", "[", "]", ",", "train_args", "=", "[", "]", ",", "init_kwargs", "=", "{", "}", ",", "train_kwargs", "=", "{", "}", ",", "module_args", "=", "{", "}", ",", ...
38.810811
22.810811
def _flatten_subsection(subsection, _type, offset, parent): '''Flatten a subsection from its nested version Args: subsection: Nested subsection as produced by _parse_section, except one level in _type: type of section, ie: AXON, etc parent: first element has this as it's parent offset: position in the final array of the first element Returns: Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID] ''' for row in subsection: # TODO: Figure out what these correspond to in neurolucida if row in ('Low', 'Generated', 'High', ): continue elif isinstance(row[0], StringType): if len(row) in (4, 5, ): if len(row) == 5: assert row[4][0] == 'S', \ 'Only known usage of a fifth member is Sn, found: %s' % row[4][0] yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2., _type, offset, parent) parent = offset offset += 1 elif isinstance(row[0], list): split_parent = offset - 1 start_offset = 0 slices = [] start = 0 for i, value in enumerate(row): if value == '|': slices.append(slice(start + start_offset, i)) start = i + 1 slices.append(slice(start + start_offset, len(row))) for split_slice in slices: for _row in _flatten_subsection(row[split_slice], _type, offset, split_parent): offset += 1 yield _row
[ "def", "_flatten_subsection", "(", "subsection", ",", "_type", ",", "offset", ",", "parent", ")", ":", "for", "row", "in", "subsection", ":", "# TODO: Figure out what these correspond to in neurolucida", "if", "row", "in", "(", "'Low'", ",", "'Generated'", ",", "'...
40.238095
19.666667
def occurrences_after(self, after=None): """ It is often useful to know what the next occurrence is given a list of events. This function produces a generator that yields the the most recent occurrence after the date ``after`` from any of the events in ``self.events`` """ from schedule.models import Occurrence if after is None: after = timezone.now() occ_replacer = OccurrenceReplacer( Occurrence.objects.filter(event__in=self.events)) generators = [event._occurrences_after_generator(after) for event in self.events] occurrences = [] for generator in generators: try: heapq.heappush(occurrences, (next(generator), generator)) except StopIteration: pass while occurrences: generator = occurrences[0][1] try: next_occurrence = heapq.heapreplace(occurrences, (next(generator), generator))[0] except StopIteration: next_occurrence = heapq.heappop(occurrences)[0] yield occ_replacer.get_occurrence(next_occurrence)
[ "def", "occurrences_after", "(", "self", ",", "after", "=", "None", ")", ":", "from", "schedule", ".", "models", "import", "Occurrence", "if", "after", "is", "None", ":", "after", "=", "timezone", ".", "now", "(", ")", "occ_replacer", "=", "OccurrenceRepla...
38.4
20.6
def add_error(self, txt): """Add a message in the configuration errors list so we can print them all in one place Set the object configuration as not correct :param txt: error message :type txt: str :return: None """ self.configuration_errors.append(txt) self.conf_is_correct = False
[ "def", "add_error", "(", "self", ",", "txt", ")", ":", "self", ".", "configuration_errors", ".", "append", "(", "txt", ")", "self", ".", "conf_is_correct", "=", "False" ]
29
14.583333
def df(unit = 'GB'): '''A wrapper for the df shell command.''' details = {} headers = ['Filesystem', 'Type', 'Size', 'Used', 'Available', 'Capacity', 'MountedOn'] n = len(headers) unit = df_conversions[unit] p = subprocess.Popen(args = ['df', '-TP'], stdout = subprocess.PIPE) # -P prevents line wrapping on long filesystem names stdout, stderr = p.communicate() lines = stdout.split("\n") lines[0] = lines[0].replace("Mounted on", "MountedOn").replace("1K-blocks", "Size").replace("1024-blocks", "Size") assert(lines[0].split() == headers) lines = [l.strip() for l in lines if l.strip()] for line in lines[1:]: tokens = line.split() if tokens[0] == 'none': # skip uninteresting entries continue assert(len(tokens) == n) d = {} for x in range(1, len(headers)): d[headers[x]] = tokens[x] d['Size'] = float(d['Size']) / unit assert(d['Capacity'].endswith("%")) d['Use%'] = d['Capacity'] d['Used'] = float(d['Used']) / unit d['Available'] = float(d['Available']) / unit d['Using'] = 100*(d['Used']/d['Size']) # same as Use% but with more precision if d['Type'].startswith('ext'): pass d['Using'] += 5 # ext2, ext3, and ext4 reserve 5% by default else: ext3_filesystems = ['ganon:', 'kortemmelab:', 'albana:'] for e3fs in ext3_filesystems: if tokens[0].find(e3fs) != -1: d['Using'] += 5 # ext3 reserves 5% break details[tokens[0]] = d return details
[ "def", "df", "(", "unit", "=", "'GB'", ")", ":", "details", "=", "{", "}", "headers", "=", "[", "'Filesystem'", ",", "'Type'", ",", "'Size'", ",", "'Used'", ",", "'Available'", ",", "'Capacity'", ",", "'MountedOn'", "]", "n", "=", "len", "(", "header...
36.386364
21.659091
def get_tamil_words( letters ): """ reverse a Tamil word according to letters, not unicode-points """ if not isinstance(letters,list): raise Exception("metehod needs to be used with list generated from 'tamil.utf8.get_letters(...)'") return [word for word in get_words_iterable( letters, tamil_only = True )]
[ "def", "get_tamil_words", "(", "letters", ")", ":", "if", "not", "isinstance", "(", "letters", ",", "list", ")", ":", "raise", "Exception", "(", "\"metehod needs to be used with list generated from 'tamil.utf8.get_letters(...)'\"", ")", "return", "[", "word", "for", "...
64.8
23.4
def add_options(self): """ Add configuration options. """ super(ScriptBaseWithConfig, self).add_options() self.add_value_option("--config-dir", "DIR", help="configuration directory [{}]".format(os.environ.get('PYRO_CONFIG_DIR', self.CONFIG_DIR_DEFAULT))) self.add_value_option("--config-file", "PATH", action="append", default=[], help="additional config file(s) to read") self.add_value_option("-D", "--define", "KEY=VAL [-D ...]", default=[], action="append", dest="defines", help="override configuration attributes")
[ "def", "add_options", "(", "self", ")", ":", "super", "(", "ScriptBaseWithConfig", ",", "self", ")", ".", "add_options", "(", ")", "self", ".", "add_value_option", "(", "\"--config-dir\"", ",", "\"DIR\"", ",", "help", "=", "\"configuration directory [{}]\"", "."...
47.384615
18.692308
def normalLines(actor, ratio=1, c=(0.6, 0.6, 0.6), alpha=0.8): """ Build an ``vtkActor`` made of the normals at vertices shown as lines. """ maskPts = vtk.vtkMaskPoints() maskPts.SetOnRatio(ratio) maskPts.RandomModeOff() actor = actor.computeNormals() src = actor.polydata() maskPts.SetInputData(src) arrow = vtk.vtkLineSource() arrow.SetPoint1(0, 0, 0) arrow.SetPoint2(0.75, 0, 0) glyph = vtk.vtkGlyph3D() glyph.SetSourceConnection(arrow.GetOutputPort()) glyph.SetInputConnection(maskPts.GetOutputPort()) glyph.SetVectorModeToUseNormal() b = src.GetBounds() sc = max([b[1] - b[0], b[3] - b[2], b[5] - b[4]]) / 20.0 glyph.SetScaleFactor(sc) glyph.OrientOn() glyph.Update() glyphActor = Actor(glyph.GetOutput(), c=vc.getColor(c), alpha=alpha) glyphActor.mapper.SetScalarModeToUsePointFieldData() glyphActor.PickableOff() prop = vtk.vtkProperty() prop.DeepCopy(actor.GetProperty()) glyphActor.SetProperty(prop) return glyphActor
[ "def", "normalLines", "(", "actor", ",", "ratio", "=", "1", ",", "c", "=", "(", "0.6", ",", "0.6", ",", "0.6", ")", ",", "alpha", "=", "0.8", ")", ":", "maskPts", "=", "vtk", ".", "vtkMaskPoints", "(", ")", "maskPts", ".", "SetOnRatio", "(", "rat...
34.827586
13.103448
def serve_file(self, load): ''' Serve up a chunk of a file ''' ret = {'data': '', 'dest': ''} if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'path' not in load or 'loc' not in load or 'saltenv' not in load: return ret if not isinstance(load['saltenv'], six.string_types): load['saltenv'] = six.text_type(load['saltenv']) fnd = self.find_file(load['path'], load['saltenv']) if not fnd.get('back'): return ret fstr = '{0}.serve_file'.format(fnd['back']) if fstr in self.servers: return self.servers[fstr](load, fnd) return ret
[ "def", "serve_file", "(", "self", ",", "load", ")", ":", "ret", "=", "{", "'data'", ":", "''", ",", "'dest'", ":", "''", "}", "if", "'env'", "in", "load", ":", "# \"env\" is not supported; Use \"saltenv\".", "load", ".", "pop", "(", "'env'", ")", "if", ...
31.26087
19.782609
def instr(str, substr): """ Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(instr(df.s, 'b').alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
[ "def", "instr", "(", "str", ",", "substr", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "instr", "(", "_to_java_column", "(", "str", ")", ",", "substr", ")", ")" ]
38.071429
21.214286
def as_cql_query(self, formatted=False): """ Returns a CQL query that can be used to recreate this function. If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable. """ sep = '\n ' if formatted else ' ' keyspace = protect_name(self.keyspace) name = protect_name(self.name) arg_list = ', '.join(["%s %s" % (protect_name(n), t) for n, t in zip(self.argument_names, self.argument_types)]) typ = self.return_type lang = self.language body = self.body on_null = "CALLED" if self.called_on_null_input else "RETURNS NULL" return "CREATE FUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s" \ "%(on_null)s ON NULL INPUT%(sep)s" \ "RETURNS %(typ)s%(sep)s" \ "LANGUAGE %(lang)s%(sep)s" \ "AS $$%(body)s$$" % locals()
[ "def", "as_cql_query", "(", "self", ",", "formatted", "=", "False", ")", ":", "sep", "=", "'\\n '", "if", "formatted", "else", "' '", "keyspace", "=", "protect_name", "(", "self", ".", "keyspace", ")", "name", "=", "protect_name", "(", "self", ".", "n...
44.714286
15.095238
def from_uri(cls, uri: URI, w3: Web3) -> "Package": """ Returns a Package object instantiated by a manifest located at a content-addressed URI. A valid ``Web3`` instance is also required. URI schemes supported: - IPFS `ipfs://Qm...` - HTTP `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha` - Registry `ercXXX://registry.eth/greeter?version=1.0.0` .. code:: python OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501 """ contents = to_text(resolve_uri_contents(uri)) validate_raw_manifest_format(contents) manifest = json.loads(contents) return cls(manifest, w3, uri)
[ "def", "from_uri", "(", "cls", ",", "uri", ":", "URI", ",", "w3", ":", "Web3", ")", "->", "\"Package\"", ":", "contents", "=", "to_text", "(", "resolve_uri_contents", "(", "uri", ")", ")", "validate_raw_manifest_format", "(", "contents", ")", "manifest", "...
44.647059
21.235294
def get_user_permissions(uid, **kwargs): """ Get the roles for a user. @param user_id """ try: _get_user(uid) user_perms = db.DBSession.query(Perm).filter(Perm.id==RolePerm.perm_id, RolePerm.role_id==Role.id, Role.id==RoleUser.role_id, RoleUser.user_id==uid).all() return user_perms except: raise HydraError("Permissions not found for user (user_id={})".format(uid))
[ "def", "get_user_permissions", "(", "uid", ",", "*", "*", "kwargs", ")", ":", "try", ":", "_get_user", "(", "uid", ")", "user_perms", "=", "db", ".", "DBSession", ".", "query", "(", "Perm", ")", ".", "filter", "(", "Perm", ".", "id", "==", "RolePerm"...
37.8
23.4
def delete(self, role, commit=True): """ Delete a role """ events.role_deleted_event.send(role) return super().delete(role, commit)
[ "def", "delete", "(", "self", ",", "role", ",", "commit", "=", "True", ")", ":", "events", ".", "role_deleted_event", ".", "send", "(", "role", ")", "return", "super", "(", ")", ".", "delete", "(", "role", ",", "commit", ")" ]
38
2.75
def render_impl(self, template, context, **options): """ Inherited class must implement this! :param template: Template file path :param context: A dict or dict-like object to instantiate given template file :param options: Same options as :meth:`renders_impl` - at_paths: Template search paths (common option) - at_encoding: Template encoding (common option) - safe: Safely substitute parameters in templates, that is, original template content will be returned if some of template parameters are not found in given context :return: To be rendered string in inherited classes """ ropts = dict((k, v) for k, v in options.items() if k != "safe") tmpl = anytemplate.engines.base.fallback_render(template, {}, **ropts) return self.renders_impl(tmpl, context, **options)
[ "def", "render_impl", "(", "self", ",", "template", ",", "context", ",", "*", "*", "options", ")", ":", "ropts", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "options", ".", "items", "(", ")", "if", "k", "!=", "\"safe\...
45.3
21.7
def sinc(self, high_pass_frequency=None, low_pass_frequency=None, left_t=None, left_n=None, right_t=None, right_n=None, attenuation=None, beta=None, phase=None, M=None, I=None, L=None): """sinc takes 12 parameters: high_pass_frequency in Hz, low_pass_frequency in Hz, left_t, left_n, right_t, right_n, attenuation in dB, beta, phase, M, I, L This effect creates a steep bandpass or bandreject filter. You may specify as few as the first two parameters. Setting the high-pass parameter to a lower value than the low-pass creates a band-reject filter. """ self.command.append("sinc") if not mutually_exclusive(attenuation, beta): raise ValueError("Attenuation (-a) and beta (-b) are mutually exclusive arguments.") if attenuation is not None and beta is None: self.command.append('-a') self.command.append(str(attenuation)) elif attenuation is None and beta is not None: self.command.append('-b') self.command.append(str(beta)) if not mutually_exclusive(phase, M, I, L): raise ValueError("Phase (-p), -M, L, and -I are mutually exclusive arguments.") if phase is not None: self.command.append('-p') self.command.append(str(phase)) elif M is not None: self.command.append('-M') elif I is not None: self.command.append('-I') elif L is not None: self.command.append('-L') if not mutually_exclusive(left_t, left_t): raise ValueError("Transition bands options (-t or -n) are mutually exclusive.") if left_t is not None: self.command.append('-t') self.command.append(str(left_t)) if left_n is not None: self.command.append('-n') self.command.append(str(left_n)) if high_pass_frequency is not None and low_pass_frequency is None: self.command.append(str(high_pass_frequency)) elif high_pass_frequency is not None and low_pass_frequency is not None: self.command.append(str(high_pass_frequency) + '-' + str(low_pass_frequency)) elif high_pass_frequency is None and low_pass_frequency is not None: self.command.append(str(low_pass_frequency)) if not mutually_exclusive(right_t, right_t): raise 
ValueError("Transition bands options (-t or -n) are mutually exclusive.") if right_t is not None: self.command.append('-t') self.command.append(str(right_t)) if right_n is not None: self.command.append('-n') self.command.append(str(right_n)) return self
[ "def", "sinc", "(", "self", ",", "high_pass_frequency", "=", "None", ",", "low_pass_frequency", "=", "None", ",", "left_t", "=", "None", ",", "left_n", "=", "None", ",", "right_t", "=", "None", ",", "right_n", "=", "None", ",", "attenuation", "=", "None"...
36.4
17.6875
def has(self, name, ignore_empty=False): """Return ``True`` if any parameter in the template is named *name*. With *ignore_empty*, ``False`` will be returned even if the template contains a parameter with the name *name*, if the parameter's value is empty. Note that a template may have multiple parameters with the same name, but only the last one is read by the MediaWiki parser. """ name = str(name).strip() for param in self.params: if param.name.strip() == name: if ignore_empty and not param.value.strip(): continue return True return False
[ "def", "has", "(", "self", ",", "name", ",", "ignore_empty", "=", "False", ")", ":", "name", "=", "str", "(", "name", ")", ".", "strip", "(", ")", "for", "param", "in", "self", ".", "params", ":", "if", "param", ".", "name", ".", "strip", "(", ...
44.6
17.466667
def rdl_decomposition(T, k=None, norm='auto', ncv=None, reversible=False, mu=None): r"""Compute the decomposition into left and right eigenvectors. Parameters ---------- T : sparse matrix Transition matrix k : int (optional) Number of eigenvector/eigenvalue pairs norm: {'standard', 'reversible', 'auto'} standard: (L'R) = Id, L[:,0] is a probability distribution, the stationary distribution mu of T. Right eigenvectors R have a 2-norm of 1. reversible: R and L are related via L=L[:,0]*R. auto: will be reversible if T is reversible, otherwise standard. ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- R : (M, M) ndarray The normalized ("unit length") right eigenvectors, such that the column ``R[:,i]`` is the right eigenvector corresponding to the eigenvalue ``w[i]``, ``dot(T,R[:,i])``=``w[i]*R[:,i]`` D : (M, M) ndarray A diagonal matrix containing the eigenvalues, each repeated according to its multiplicity L : (M, M) ndarray The normalized (with respect to `R`) left eigenvectors, such that the row ``L[i, :]`` is the left eigenvector corresponding to the eigenvalue ``w[i]``, ``dot(L[i, :], T)``=``w[i]*L[i, :]`` """ if k is None: raise ValueError("Number of eigenvectors required for decomposition of sparse matrix") # auto-set norm if norm == 'auto': if is_reversible(T): norm = 'reversible' else: norm = 'standard' if reversible: return rdl_decomposition_rev(T, k, norm=norm, ncv=ncv, mu=mu) else: return rdl_decomposition_nrev(T, k, norm=norm, ncv=ncv)
[ "def", "rdl_decomposition", "(", "T", ",", "k", "=", "None", ",", "norm", "=", "'auto'", ",", "ncv", "=", "None", ",", "reversible", "=", "False", ",", "mu", "=", "None", ")", ":", "if", "k", "is", "None", ":", "raise", "ValueError", "(", "\"Number...
38.98
21.42
def get_single_by_flags(self, flags): """Get the register info matching the flag. Raises ValueError if more than one are found.""" regs = list(self.get_by_flags(flags)) if len(regs) != 1: raise ValueError("Flags do not return unique resigter. {!r}", regs) return regs[0]
[ "def", "get_single_by_flags", "(", "self", ",", "flags", ")", ":", "regs", "=", "list", "(", "self", ".", "get_by_flags", "(", "flags", ")", ")", "if", "len", "(", "regs", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Flags do not return unique resig...
44.142857
17
def validate(self): """ validate: Makes sure input question is valid Args: None Returns: boolean indicating if input question is valid """ try: assert self.question_type == exercises.INPUT_QUESTION, "Assumption Failed: Question should be input answer type" assert len(self.answers) > 0, "Assumption Failed: Multiple selection question should have answers" for a in self.answers: assert 'answer' in a, "Assumption Failed: Answers must have an answer field" try: float(a['answer']) except ValueError: assert False, "Assumption Failed: Answer {} must be numeric".format(a['answer']) for h in self.hints: assert isinstance(h, str), "Assumption Failed: Hint in hints list is not a string" return super(InputQuestion, self).validate() except AssertionError as ae: raise InvalidQuestionException("Invalid question: {0}".format(self.__dict__))
[ "def", "validate", "(", "self", ")", ":", "try", ":", "assert", "self", ".", "question_type", "==", "exercises", ".", "INPUT_QUESTION", ",", "\"Assumption Failed: Question should be input answer type\"", "assert", "len", "(", "self", ".", "answers", ")", ">", "0",...
55.210526
27.789474
def __get_html(self, body=None): """ Returns the html content with given body tag content. :param body: Body tag content. :type body: unicode :return: Html. :rtype: unicode """ output = [] output.append("<html>") output.append("<head>") for javascript in (self.__jquery_javascript, self.__crittercism_javascript, self.__reporter_javascript): output.append("<script type=\"text/javascript\">") output.append(javascript) output.append("</script>") output.append("<style type=\"text/css\">") output.append(self.__style) output.append("</style>") output.append("</head>") if body is not None: output.append(body) else: output.append("<body>") output.append("<div id=\"report\">") output.append("</div>") output.append("</body>") output.append("</html>") return "\n".join(output)
[ "def", "__get_html", "(", "self", ",", "body", "=", "None", ")", ":", "output", "=", "[", "]", "output", ".", "append", "(", "\"<html>\"", ")", "output", ".", "append", "(", "\"<head>\"", ")", "for", "javascript", "in", "(", "self", ".", "__jquery_java...
32.71875
12.03125
def getElementsByAttr(self, attr, value): ''' getElementsByAttr - Get elements within this collection posessing a given attribute/value pair @param attr - Attribute name (lowercase) @param value - Matching value @return - TagCollection of all elements matching name/value ''' ret = TagCollection() if len(self) == 0: return ret attr = attr.lower() _cmpFunc = lambda tag : tag.getAttribute(attr) == value for tag in self: TagCollection._subset(ret, _cmpFunc, tag) return ret
[ "def", "getElementsByAttr", "(", "self", ",", "attr", ",", "value", ")", ":", "ret", "=", "TagCollection", "(", ")", "if", "len", "(", "self", ")", "==", "0", ":", "return", "ret", "attr", "=", "attr", ".", "lower", "(", ")", "_cmpFunc", "=", "lamb...
31.736842
23.736842
def results_from_cli(opts, load_samples=True, **kwargs): """Loads an inference result file along with any labels associated with it from the command line options. Parameters ---------- opts : ArgumentParser options The options from the command line. load_samples : bool, optional Load the samples from the file. Returns ------- fp_all : (list of) BaseInferenceFile type The result file as an hdf file. If more than one input file, then it returns a list. parameters : list of str List of the parameters to use, parsed from the parameters option. labels : dict Dictionary of labels to associate with the parameters. samples_all : (list of) FieldArray(s) or None If load_samples, the samples as a FieldArray; otherwise, None. If more than one input file, then it returns a list. \**kwargs : Any other keyword arguments that are passed to read samples using samples_from_cli """ # lists for files and samples from all input files fp_all = [] samples_all = [] input_files = opts.input_file if isinstance(input_files, str): input_files = [input_files] # loop over all input files for input_file in input_files: logging.info("Reading input file %s", input_file) # read input file fp = loadfile(input_file, "r") # load the samples if load_samples: logging.info("Loading samples") # check if need extra parameters for a non-sampling parameter file_parameters, ts = _transforms.get_common_cbc_transforms( opts.parameters, fp.variable_params) # read samples from file samples = fp.samples_from_cli(opts, parameters=file_parameters, **kwargs) logging.info("Using {} samples".format(samples.size)) # add parameters not included in file samples = _transforms.apply_transforms(samples, ts) # else do not read samples else: samples = None # add results to lists from all input files if len(input_files) > 1: fp_all.append(fp) samples_all.append(samples) # else only one input file then do not return lists else: fp_all = fp samples_all = samples return fp_all, opts.parameters, opts.parameters_labels, samples_all
[ "def", "results_from_cli", "(", "opts", ",", "load_samples", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# lists for files and samples from all input files", "fp_all", "=", "[", "]", "samples_all", "=", "[", "]", "input_files", "=", "opts", ".", "input_file...
32.108108
20.716216
def pause(path, service_names=None): ''' Pause running containers in the docker-compose file, service_names is a python list, if omitted pause all containers path Path where the docker-compose file is stored on the server service_names If specified will pause only the specified services CLI Example: .. code-block:: bash salt myminion dockercompose.pause /path/where/docker-compose/stored salt myminion dockercompose.pause /path/where/docker-compose/stored '[janus]' ''' project = __load_project(path) debug_ret = {} result = {} if isinstance(project, dict): return project else: try: project.pause(service_names) if debug: for container in project.containers(): if service_names is None or container.get('Name')[1:] in service_names: container.inspect_if_not_inspected() debug_ret[container.get('Name')] = container.inspect() result[container.get('Name')] = 'paused' except Exception as inst: return __handle_except(inst) return __standardize_result(True, 'Pausing containers via docker-compose', result, debug_ret)
[ "def", "pause", "(", "path", ",", "service_names", "=", "None", ")", ":", "project", "=", "__load_project", "(", "path", ")", "debug_ret", "=", "{", "}", "result", "=", "{", "}", "if", "isinstance", "(", "project", ",", "dict", ")", ":", "return", "p...
35.542857
25.714286
def close( self, exc_info: Union[ None, bool, BaseException, Tuple[ "Optional[Type[BaseException]]", Optional[BaseException], Optional[TracebackType], ], ] = False, ) -> None: """Close this stream. If ``exc_info`` is true, set the ``error`` attribute to the current exception from `sys.exc_info` (or if ``exc_info`` is a tuple, use that instead of `sys.exc_info`). """ if not self.closed(): if exc_info: if isinstance(exc_info, tuple): self.error = exc_info[1] elif isinstance(exc_info, BaseException): self.error = exc_info else: exc_info = sys.exc_info() if any(exc_info): self.error = exc_info[1] if self._read_until_close: self._read_until_close = False self._finish_read(self._read_buffer_size, False) if self._state is not None: self.io_loop.remove_handler(self.fileno()) self._state = None self.close_fd() self._closed = True self._signal_closed()
[ "def", "close", "(", "self", ",", "exc_info", ":", "Union", "[", "None", ",", "bool", ",", "BaseException", ",", "Tuple", "[", "\"Optional[Type[BaseException]]\"", ",", "Optional", "[", "BaseException", "]", ",", "Optional", "[", "TracebackType", "]", ",", "...
33.842105
13.842105
def method_call(receiver, message, args, pseudo_type=None): '''A shortcut for a method call, expands a str receiver to a identifier''' if not isinstance(receiver, Node): receiver = local(receiver) return Node('method_call', receiver=receiver, message=message, args=args, pseudo_type=pseudo_type)
[ "def", "method_call", "(", "receiver", ",", "message", ",", "args", ",", "pseudo_type", "=", "None", ")", ":", "if", "not", "isinstance", "(", "receiver", ",", "Node", ")", ":", "receiver", "=", "local", "(", "receiver", ")", "return", "Node", "(", "'m...
51.833333
27.833333
def _compute_mean(self, C, mag, ztor, rrup): """ Compute mean value as in ``subroutine getGeom`` in ``hazgridXnga2.f`` """ gc0 = 0.2418 ci = 0.3846 gch = 0.00607 g4 = 1.7818 ge = 0.554 gm = 1.414 mean = ( gc0 + ci + ztor * gch + C['gc1'] + gm * mag + C['gc2'] * (10 - mag) ** 3 + C['gc3'] * np.log(rrup + g4 * np.exp(ge * mag)) ) return mean
[ "def", "_compute_mean", "(", "self", ",", "C", ",", "mag", ",", "ztor", ",", "rrup", ")", ":", "gc0", "=", "0.2418", "ci", "=", "0.3846", "gch", "=", "0.00607", "g4", "=", "1.7818", "ge", "=", "0.554", "gm", "=", "1.414", "mean", "=", "(", "gc0",...
25.444444
19.888889
def CMOVNP(cpu, dest, src): """ Conditional move - Not parity/parity odd. Tests the status flags in the EFLAGS register and moves the source operand (second operand) to the destination operand (first operand) if the given test condition is true. :param cpu: current CPU. :param dest: destination operand. :param src: source operand. """ dest.write(Operators.ITEBV(dest.size, cpu.PF == False, src.read(), dest.read()))
[ "def", "CMOVNP", "(", "cpu", ",", "dest", ",", "src", ")", ":", "dest", ".", "write", "(", "Operators", ".", "ITEBV", "(", "dest", ".", "size", ",", "cpu", ".", "PF", "==", "False", ",", "src", ".", "read", "(", ")", ",", "dest", ".", "read", ...
37.461538
19.615385
def is_prime(n): """ Miller-Rabin primality test. Keep in mind that this is not a deterministic algorithm: if it return True, it means that n is probably a prime. Args: n (int): the integer to check Returns: True if n is probably a prime number, False if it is not Raises: TypeError: if n is not an integer Note: Adapted from https://rosettacode.org/wiki/Miller%E2%80%93Rabin_primality_test#Python """ if not isinstance(n, int): raise TypeError("Expecting an integer") if n < 2: return False if n in __known_primes: return True if any((n % p) == 0 for p in __known_primes): return False d, s = n - 1, 0 while not d % 2: d, s = d >> 1, s + 1 def try_composite(a): if pow(a, d, n) == 1: return False for i in range(s): if pow(a, 2 ** i * d, n) == n - 1: return False return True return not any(try_composite(a) for a in __known_primes[:16])
[ "def", "is_prime", "(", "n", ")", ":", "if", "not", "isinstance", "(", "n", ",", "int", ")", ":", "raise", "TypeError", "(", "\"Expecting an integer\"", ")", "if", "n", "<", "2", ":", "return", "False", "if", "n", "in", "__known_primes", ":", "return",...
24.512195
22.536585
def future(self, request_iterator, timeout=None, metadata=None, credentials=None): """Asynchronously invokes the underlying RPC on the client. Args: request_iterator: An ASYNC iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. If None, the timeout is considered infinite. metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. Returns: An object that is both a Call for the RPC and a Future. In the event of RPC completion, the return Call-Future's result value will be the response message of the RPC. Should the event terminate with non-OK status, the returned Call-Future's exception value will be an RpcError. """ return _utils.wrap_future_call( self._inner.future( _utils.WrappedAsyncIterator(request_iterator, self._loop), timeout, metadata, credentials ), self._loop, self._executor)
[ "def", "future", "(", "self", ",", "request_iterator", ",", "timeout", "=", "None", ",", "metadata", "=", "None", ",", "credentials", "=", "None", ")", ":", "return", "_utils", ".", "wrap_future_call", "(", "self", ".", "_inner", ".", "future", "(", "_ut...
42
19.733333
def unproject(self, image_points): """Find (up to scale) 3D coordinate of an image point This is the inverse of the `project` function. The resulting 3D points are only valid up to an unknown scale. Parameters ---------------------- image_points : (2, N) ndarray Image points Returns ---------------------- points : (3, N) ndarray 3D coordinates (valid up to scale) """ undist_image_points = cv2.undistortPoints(image_points.T.reshape(1,-1,2), self.camera_matrix, self.dist_coefs, P=self.camera_matrix) world_points = np.dot(self.inv_camera_matrix, to_homogeneous(undist_image_points.reshape(-1,2).T)) return world_points
[ "def", "unproject", "(", "self", ",", "image_points", ")", ":", "undist_image_points", "=", "cv2", ".", "undistortPoints", "(", "image_points", ".", "T", ".", "reshape", "(", "1", ",", "-", "1", ",", "2", ")", ",", "self", ".", "camera_matrix", ",", "s...
38.631579
23.684211
def begin(self): """ Begin recording coverage information. """ log.debug("Coverage begin") self.skipModules = sys.modules.keys()[:] if self.coverErase: log.debug("Clearing previously collected coverage statistics") self.coverInstance.combine() self.coverInstance.erase() self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]') self.coverInstance.load() self.coverInstance.start()
[ "def", "begin", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Coverage begin\"", ")", "self", ".", "skipModules", "=", "sys", ".", "modules", ".", "keys", "(", ")", "[", ":", "]", "if", "self", ".", "coverErase", ":", "log", ".", "debug", "("...
37.769231
11
def get_build_logs_zip(self, project, build_id, **kwargs): """GetBuildLogsZip. Gets the logs for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') response = self._send(http_method='GET', location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df', version='5.0', route_values=route_values, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback)
[ "def", "get_build_logs_zip", "(", "self", ",", "project", ",", "build_id", ",", "*", "*", "kwargs", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize",...
45.136364
16.5
def profile(): """View for editing a profile.""" # Create forms verification_form = VerificationForm(formdata=None, prefix="verification") profile_form = profile_form_factory() # Process forms form = request.form.get('submit', None) if form == 'profile': handle_profile_form(profile_form) elif form == 'verification': handle_verification_form(verification_form) return render_template( current_app.config['USERPROFILES_PROFILE_TEMPLATE'], profile_form=profile_form, verification_form=verification_form,)
[ "def", "profile", "(", ")", ":", "# Create forms", "verification_form", "=", "VerificationForm", "(", "formdata", "=", "None", ",", "prefix", "=", "\"verification\"", ")", "profile_form", "=", "profile_form_factory", "(", ")", "# Process forms", "form", "=", "requ...
33.235294
15.882353
def set_option(self, key, value): """Sets general options used by plugins and streams originating from this session object. :param key: key of the option :param value: value to set the option to **Available options**: ======================== ========================================= hds-live-edge ( float) Specify the time live HDS streams will start from the edge of stream, default: ``10.0`` hds-segment-attempts (int) How many attempts should be done to download each HDS segment, default: ``3`` hds-segment-threads (int) The size of the thread pool used to download segments, default: ``1`` hds-segment-timeout (float) HDS segment connect and read timeout, default: ``10.0`` hds-timeout (float) Timeout for reading data from HDS streams, default: ``60.0`` hls-live-edge (int) How many segments from the end to start live streams on, default: ``3`` hls-segment-attempts (int) How many attempts should be done to download each HLS segment, default: ``3`` hls-segment-threads (int) The size of the thread pool used to download segments, default: ``1`` hls-segment-timeout (float) HLS segment connect and read timeout, default: ``10.0`` hls-timeout (float) Timeout for reading data from HLS streams, default: ``60.0`` http-proxy (str) Specify a HTTP proxy to use for all HTTP requests https-proxy (str) Specify a HTTPS proxy to use for all HTTPS requests http-cookies (dict or str) A dict or a semi-colon (;) delimited str of cookies to add to each HTTP request, e.g. ``foo=bar;baz=qux`` http-headers (dict or str) A dict or semi-colon (;) delimited str of headers to add to each HTTP request, e.g. ``foo=bar;baz=qux`` http-query-params (dict or str) A dict or a ampersand (&) delimited string of query parameters to add to each HTTP request, e.g. 
``foo=bar&baz=qux`` http-trust-env (bool) Trust HTTP settings set in the environment, such as environment variables (HTTP_PROXY, etc) and ~/.netrc authentication http-ssl-verify (bool) Verify SSL certificates, default: ``True`` http-ssl-cert (str or tuple) SSL certificate to use, can be either a .pem file (str) or a .crt/.key pair (tuple) http-timeout (float) General timeout used by all HTTP requests except the ones covered by other options, default: ``20.0`` http-stream-timeout (float) Timeout for reading data from HTTP streams, default: ``60.0`` subprocess-errorlog (bool) Log errors from subprocesses to a file located in the temp directory subprocess-errorlog-path (str) Log errors from subprocesses to a specific file ringbuffer-size (int) The size of the internal ring buffer used by most stream types, default: ``16777216`` (16MB) rtmp-proxy (str) Specify a proxy (SOCKS) that RTMP streams will use rtmp-rtmpdump (str) Specify the location of the rtmpdump executable used by RTMP streams, e.g. ``/usr/local/bin/rtmpdump`` rtmp-timeout (float) Timeout for reading data from RTMP streams, default: ``60.0`` ffmpeg-ffmpeg (str) Specify the location of the ffmpeg executable use by Muxing streams e.g. ``/usr/local/bin/ffmpeg`` ffmpeg-verbose (bool) Log stderr from ffmpeg to the console ffmpeg-verbose-path (str) Specify the location of the ffmpeg stderr log file ffmpeg-video-transcode (str) The codec to use if transcoding video when muxing with ffmpeg e.g. ``h264`` ffmpeg-audio-transcode (str) The codec to use if transcoding audio when muxing with ffmpeg e.g. ``aac`` stream-segment-attempts (int) How many attempts should be done to download each segment, default: ``3``. General option used by streams not covered by other options. stream-segment-threads (int) The size of the thread pool used to download segments, default: ``1``. General option used by streams not covered by other options. stream-segment-timeout (float) Segment connect and read timeout, default: ``10.0``. 
General option used by streams not covered by other options. stream-timeout (float) Timeout for reading data from stream, default: ``60.0``. General option used by streams not covered by other options. locale (str) Locale setting, in the RFC 1766 format eg. en_US or es_ES default: ``system locale``. user-input-requester (UserInputRequester) instance of UserInputRequester to collect input from the user at runtime. Must be set before the plugins are loaded. default: ``UserInputRequester``. ======================== ========================================= """ # Backwards compatibility if key == "rtmpdump": key = "rtmp-rtmpdump" elif key == "rtmpdump-proxy": key = "rtmp-proxy" elif key == "errorlog": key = "subprocess-errorlog" elif key == "errorlog-path": key = "subprocess-errorlog-path" if key == "http-proxy": self.http.proxies["http"] = update_scheme("http://", value) elif key == "https-proxy": self.http.proxies["https"] = update_scheme("https://", value) elif key == "http-cookies": if isinstance(value, dict): self.http.cookies.update(value) else: self.http.parse_cookies(value) elif key == "http-headers": if isinstance(value, dict): self.http.headers.update(value) else: self.http.parse_headers(value) elif key == "http-query-params": if isinstance(value, dict): self.http.params.update(value) else: self.http.parse_query_params(value) elif key == "http-trust-env": self.http.trust_env = value elif key == "http-ssl-verify": self.http.verify = value elif key == "http-disable-dh": if value: requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH' try: requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \ requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii") except AttributeError: # no ssl to disable the cipher on pass elif key == "http-ssl-cert": self.http.cert = value elif key == "http-timeout": self.http.timeout = value else: self.options.set(key, value)
[ "def", "set_option", "(", "self", ",", "key", ",", "value", ")", ":", "# Backwards compatibility", "if", "key", "==", "\"rtmpdump\"", ":", "key", "=", "\"rtmp-rtmpdump\"", "elif", "key", "==", "\"rtmpdump-proxy\"", ":", "key", "=", "\"rtmp-proxy\"", "elif", "k...
44.368687
24.661616
def find(cls, device=None): """ Factory method that returns the requested :py:class:`USBDevice` device, or the first device. :param device: Tuple describing the USB device to open, as returned by find_all(). :type device: tuple :returns: :py:class:`USBDevice` object utilizing the specified device :raises: :py:class:`~alarmdecoder.util.NoDeviceError` """ if not have_pyftdi: raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.') cls.find_all() if len(cls.__devices) == 0: raise NoDeviceError('No AD2USB devices present.') if device is None: device = cls.__devices[0] vendor, product, sernum, ifcount, description = device return USBDevice(interface=sernum, vid=vendor, pid=product)
[ "def", "find", "(", "cls", ",", "device", "=", "None", ")", ":", "if", "not", "have_pyftdi", ":", "raise", "ImportError", "(", "'The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.'", ")", "cls", ".", "find_all", "(", ")", "if", "len",...
34.038462
25.576923
def find_proc_date(header): """Search the HISTORY fields of a header looking for the FLIPS processing date. """ import string, re for h in header.ascardlist(): if h.key=="HISTORY": g=h.value if ( string.find(g,'FLIPS 1.0 -:') ): result=re.search('imred: FLIPS 1.0 - \S{3} (.*) - ([\s\d]\d:\d\d:\d\d)\s*$',g) if result: date=result.group(1) time=result.group(2) datetime=date+" "+time return datetime return None
[ "def", "find_proc_date", "(", "header", ")", ":", "import", "string", ",", "re", "for", "h", "in", "header", ".", "ascardlist", "(", ")", ":", "if", "h", ".", "key", "==", "\"HISTORY\"", ":", "g", "=", "h", ".", "value", "if", "(", "string", ".", ...
35.25
12.4375
def evaluate(args): """ %prog evaluate prediction.bed reality.bed fastafile Make a truth table like: True False --- Reality True TP FP False FN TN |----Prediction Sn = TP / (all true in reality) = TP / (TP + FN) Sp = TP / (all true in prediction) = TP / (TP + FP) Ac = (TP + TN) / (TP + FP + FN + TN) """ from jcvi.formats.sizes import Sizes p = OptionParser(evaluate.__doc__) p.add_option("--query", help="Chromosome location [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) prediction, reality, fastafile = args query = opts.query prediction = mergeBed(prediction) reality = mergeBed(reality) sizes = Sizes(fastafile) sizesfile = sizes.filename prediction_complement = complementBed(prediction, sizesfile) reality_complement = complementBed(reality, sizesfile) TPbed = intersectBed(prediction, reality) FPbed = intersectBed(prediction, reality_complement) FNbed = intersectBed(prediction_complement, reality) TNbed = intersectBed(prediction_complement, reality_complement) beds = (TPbed, FPbed, FNbed, TNbed) if query: subbeds = [] rr = query_to_range(query, sizes) ce = 'echo "{0}"'.format("\t".join(str(x) for x in rr)) for b in beds: subbed = ".".join((b, query)) cmd = ce + " | intersectBed -a stdin -b {0}".format(b) sh(cmd, outfile=subbed) subbeds.append(subbed) beds = subbeds be = BedEvaluate(*beds) print(be, file=sys.stderr) if query: for b in subbeds: os.remove(b) return be
[ "def", "evaluate", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "sizes", "import", "Sizes", "p", "=", "OptionParser", "(", "evaluate", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--query\"", ",", "help", "=", "\"Chromosome location ...
28.644068
17.830508
def _valid_folder(self, base, name): """Return whether a folder can be searched.""" valid = True fullpath = os.path.join(base, name) if ( not self.recursive or ( self.folder_exclude_check is not None and not self.compare_directory(fullpath[self._base_len:] if self.dir_pathname else name) ) ): valid = False if valid and (not self.show_hidden and util.is_hidden(fullpath)): valid = False return self.on_validate_directory(base, name) if valid else valid
[ "def", "_valid_folder", "(", "self", ",", "base", ",", "name", ")", ":", "valid", "=", "True", "fullpath", "=", "os", ".", "path", ".", "join", "(", "base", ",", "name", ")", "if", "(", "not", "self", ".", "recursive", "or", "(", "self", ".", "fo...
36.6875
22.4375
def k_weights_int(self): """ Returns ------- ndarray Geometric k-point weights (number of arms of k-star in BZ). dtype='intc' shape=(irreducible_kpoints,) """ nk = np.prod(self.k_mesh) _weights = self.k_weights * nk weights = np.rint(_weights).astype('intc') assert (np.abs(weights - _weights) < 1e-7 * nk).all() return np.array(weights, dtype='intc')
[ "def", "k_weights_int", "(", "self", ")", ":", "nk", "=", "np", ".", "prod", "(", "self", ".", "k_mesh", ")", "_weights", "=", "self", ".", "k_weights", "*", "nk", "weights", "=", "np", ".", "rint", "(", "_weights", ")", ".", "astype", "(", "'intc'...
30.266667
14.933333
def _handle_request_exception(request): """Raise the proper exception based on the response""" try: data = request.json() except: data = {} code = request.status_code if code == requests.codes.bad: raise BadRequestException(response=data) if code == requests.codes.unauthorized: raise UnauthorizedException(response=data) if code == requests.codes.not_found: raise NotFoundException(response=data) # Generic error fallback request.raise_for_status()
[ "def", "_handle_request_exception", "(", "request", ")", ":", "try", ":", "data", "=", "request", ".", "json", "(", ")", "except", ":", "data", "=", "{", "}", "code", "=", "request", ".", "status_code", "if", "code", "==", "requests", ".", "codes", "."...
29.842105
16.263158
def path_fraction_point(points, fraction): '''Computes the point which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points. Args: points: an iterable of indexable objects with indices 0, 1, 2 correspoding to 3D cartesian coordinates fraction: path length fraction (0 <= fraction <= 1) Returns: The 3D coordinates of the aforementioned point ''' seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True) return linear_interpolate(points[seg_id], points[seg_id + 1], offset)
[ "def", "path_fraction_point", "(", "points", ",", "fraction", ")", ":", "seg_id", ",", "offset", "=", "path_fraction_id_offset", "(", "points", ",", "fraction", ",", "relative_offset", "=", "True", ")", "return", "linear_interpolate", "(", "points", "[", "seg_id...
41.266667
24.2
def write_netrc(host, entity, key): """Add our host and key to .netrc""" if len(key) != 40: click.secho( 'API-key must be exactly 40 characters long: {} ({} chars)'.format(key, len(key))) return None try: normalized_host = host.split("/")[-1].split(":")[0] print("Appending key for %s to your netrc file: %s" % (normalized_host, os.path.expanduser('~/.netrc'))) machine_line = 'machine %s' % normalized_host path = os.path.expanduser('~/.netrc') orig_lines = None try: with open(path) as f: orig_lines = f.read().strip().split('\n') except (IOError, OSError) as e: pass with open(path, 'w') as f: if orig_lines: # delete this machine from the file if it's already there. skip = 0 for line in orig_lines: if machine_line in line: skip = 2 elif skip: skip -= 1 else: f.write('%s\n' % line) f.write(textwrap.dedent("""\ machine {host} login {entity} password {key} """).format(host=normalized_host, entity=entity, key=key)) os.chmod(os.path.expanduser('~/.netrc'), stat.S_IRUSR | stat.S_IWUSR) return True except IOError as e: click.secho("Unable to read ~/.netrc", fg="red") return None
[ "def", "write_netrc", "(", "host", ",", "entity", ",", "key", ")", ":", "if", "len", "(", "key", ")", "!=", "40", ":", "click", ".", "secho", "(", "'API-key must be exactly 40 characters long: {} ({} chars)'", ".", "format", "(", "key", ",", "len", "(", "k...
37.8
14.25
def dataset_walker(datasets): """Walk through *datasets* and their ancillary data. Yields datasets and their parent. """ for dataset in datasets: yield dataset, None for anc_ds in dataset.attrs.get('ancillary_variables', []): try: anc_ds.attrs yield anc_ds, dataset except AttributeError: continue
[ "def", "dataset_walker", "(", "datasets", ")", ":", "for", "dataset", "in", "datasets", ":", "yield", "dataset", ",", "None", "for", "anc_ds", "in", "dataset", ".", "attrs", ".", "get", "(", "'ancillary_variables'", ",", "[", "]", ")", ":", "try", ":", ...
30
12.846154
def setup(self, glbls): """ Sets up the resource manager as modular functions. :param glbls | <dict> """ if not self.pluginPath() in sys.path: log.debug(self.pluginPath()) sys.path.append(self.pluginPath()) glbls['find'] = self.find glbls['listdir'] = self.listdir glbls['load'] = self.load glbls['read'] = self.read glbls['exists'] = self.exists glbls['setdefault'] = self.setDefault glbls['basePath'] = self.basePath glbls['setBasePath'] = self.setBasePath glbls['walk'] = self.walk glbls['isdir'] = self.isdir glbls['isfile'] = self.isfile glbls['init'] = self.init # setup the build system if 'python' in sys.executable and not self.useFilepath(): self.build()
[ "def", "setup", "(", "self", ",", "glbls", ")", ":", "if", "not", "self", ".", "pluginPath", "(", ")", "in", "sys", ".", "path", ":", "log", ".", "debug", "(", "self", ".", "pluginPath", "(", ")", ")", "sys", ".", "path", ".", "append", "(", "s...
34
9.615385
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.lognorm.interval(self.i, s, loc=0, scale=em) for s, em in zip(self.sigma, self.emu)]
[ "def", "bounds", "(", "self", ")", ":", "return", "[", "scipy", ".", "stats", ".", "lognorm", ".", "interval", "(", "self", ".", "i", ",", "s", ",", "loc", "=", "0", ",", "scale", "=", "em", ")", "for", "s", ",", "em", "in", "zip", "(", "self...
49.571429
28.714286
def center_origin(self): """Sets the origin to the center of the image.""" self.set_origin(Vector2(self.image.get_width() / 2.0, self.image.get_height() / 2.0))
[ "def", "center_origin", "(", "self", ")", ":", "self", ".", "set_origin", "(", "Vector2", "(", "self", ".", "image", ".", "get_width", "(", ")", "/", "2.0", ",", "self", ".", "image", ".", "get_height", "(", ")", "/", "2.0", ")", ")" ]
58
23
def _get_input(self, length): """! @brief Extract requested amount of data from the read buffer.""" self._buffer_lock.acquire() try: if length == -1: actualLength = len(self._buffer) else: actualLength = min(length, len(self._buffer)) if actualLength: data = self._buffer[:actualLength] self._buffer = self._buffer[actualLength:] else: data = bytearray() return data finally: self._buffer_lock.release()
[ "def", "_get_input", "(", "self", ",", "length", ")", ":", "self", ".", "_buffer_lock", ".", "acquire", "(", ")", "try", ":", "if", "length", "==", "-", "1", ":", "actualLength", "=", "len", "(", "self", ".", "_buffer", ")", "else", ":", "actualLengt...
35.75
13.6875
def _pad(arr, n, dir='right'): """Pad an array with zeros along the first axis. Parameters ---------- n : int Size of the returned array in the first axis. dir : str Direction of the padding. Must be one 'left' or 'right'. """ assert dir in ('left', 'right') if n < 0: raise ValueError("'n' must be positive: {0}.".format(n)) elif n == 0: return np.zeros((0,) + arr.shape[1:], dtype=arr.dtype) n_arr = arr.shape[0] shape = (n,) + arr.shape[1:] if n_arr == n: assert arr.shape == shape return arr elif n_arr < n: out = np.zeros(shape, dtype=arr.dtype) if dir == 'left': out[-n_arr:, ...] = arr elif dir == 'right': out[:n_arr, ...] = arr assert out.shape == shape return out else: if dir == 'left': out = arr[-n:, ...] elif dir == 'right': out = arr[:n, ...] assert out.shape == shape return out
[ "def", "_pad", "(", "arr", ",", "n", ",", "dir", "=", "'right'", ")", ":", "assert", "dir", "in", "(", "'left'", ",", "'right'", ")", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "\"'n' must be positive: {0}.\"", ".", "format", "(", "n", ")"...
26.621622
17.621622
def accept(self): """ This is the other part of the shutdown() workaround. Since servers create new sockets, we have to infect them with our magic. :) """ c, a = self.__dict__["conn"].accept() return (SSLWrapper(c), a)
[ "def", "accept", "(", "self", ")", ":", "c", ",", "a", "=", "self", ".", "__dict__", "[", "\"conn\"", "]", ".", "accept", "(", ")", "return", "(", "SSLWrapper", "(", "c", ")", ",", "a", ")" ]
33.375
10.375
def beginlock(self, container): "Start to acquire lock in another routine. Call trylock or lock later to acquire the lock. Call unlock to cancel the lock routine" if self.locked: return True if self.lockroutine: return False self.lockroutine = container.subroutine(self._lockroutine(container), False) return self.locked
[ "def", "beginlock", "(", "self", ",", "container", ")", ":", "if", "self", ".", "locked", ":", "return", "True", "if", "self", ".", "lockroutine", ":", "return", "False", "self", ".", "lockroutine", "=", "container", ".", "subroutine", "(", "self", ".", ...
47.125
28.375
def get_composition_query_session_for_repository(self, repository_id): """Gets a composition query session for the given repository. arg: repository_id (osid.id.Id): the ``Id`` of the repository return: (osid.repository.CompositionQuerySession) - a ``CompositionQuerySession`` raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_composition_query()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_query()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_composition_query(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.CompositionQuerySession(repository_id, runtime=self._runtime)
[ "def", "get_composition_query_session_for_repository", "(", "self", ",", "repository_id", ")", ":", "if", "not", "self", ".", "supports_composition_query", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalo...
48.826087
21.434783
def load_app(self, app): """ Tries to load an initial data class for a specified app. If the specified file does not exist, an error will be raised. If the class does exist, but it isn't a subclass of `BaseInitialData` then None will be returned. :param app: The name of the app in which to load the initial data class. This should be the same path as defined in settings.INSTALLED_APPS :type app: str :return: A subclass instance of BaseInitialData or None :rtype: BaseInitialData or None """ if self.loaded_apps.get(app): return self.loaded_apps.get(app) self.loaded_apps[app] = None initial_data_class = import_string(self.get_class_path(app)) if issubclass(initial_data_class, BaseInitialData): self.log('Loaded app {0}'.format(app)) self.loaded_apps[app] = initial_data_class return self.loaded_apps[app]
[ "def", "load_app", "(", "self", ",", "app", ")", ":", "if", "self", ".", "loaded_apps", ".", "get", "(", "app", ")", ":", "return", "self", ".", "loaded_apps", ".", "get", "(", "app", ")", "self", ".", "loaded_apps", "[", "app", "]", "=", "None", ...
45.285714
20.52381
def _unquote(self, value): """Return an unquoted version of a value""" if not value: # should only happen during parsing of lists raise SyntaxError if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value
[ "def", "_unquote", "(", "self", ",", "value", ")", ":", "if", "not", "value", ":", "# should only happen during parsing of lists", "raise", "SyntaxError", "if", "(", "value", "[", "0", "]", "==", "value", "[", "-", "1", "]", ")", "and", "(", "value", "["...
37.25
14.125
def hxbyterle_decode(output_size, data):
    """Decode an HxRLE data stream.

    Delegates to ``byterle_decoder`` (the slower Python implementation is
    used when the C extension is not compiled).

    :param int output_size: the number of items when ``data`` is uncompressed
    :param str data: a raw stream of data to be unpacked
    :return numpy.array output: an array of ``numpy.uint8``
    """
    decoded = byterle_decoder(data, output_size)
    # Sanity check: the decoder must produce exactly the declared length.
    assert len(decoded) == output_size
    return decoded
[ "def", "hxbyterle_decode", "(", "output_size", ",", "data", ")", ":", "output", "=", "byterle_decoder", "(", "data", ",", "output_size", ")", "assert", "len", "(", "output", ")", "==", "output_size", "return", "output" ]
38.25
17.583333
def get_SCAT(points, low_bound, high_bound, x_max, y_max):
    """
    Run the SCAT test: check whether every point falls inside the SCAT box.

    Parameters
    ----------
    points : sequence
        Items whose first two elements are the (x, y) coordinates to test.
    low_bound, high_bound :
        Bound lines of the SCAT box (passed through to in_SCAT_box).
    x_max, y_max :
        Box limits (passed through to in_SCAT_box).

    Returns
    -------
    bool
        True if all points pass, False if any point falls outside the box.
    """
    for point in points:
        # Short-circuit on the first failing point; the previous version
        # kept scanning every remaining point after a failure.
        # (Assumes in_SCAT_box returns a bool, so `not result` matches the
        # old `result == False` check.)
        if not in_SCAT_box(point[0], point[1], low_bound, high_bound,
                           x_max, y_max):
            return False
    return True
[ "def", "get_SCAT", "(", "points", ",", "low_bound", ",", "high_bound", ",", "x_max", ",", "y_max", ")", ":", "# iterate through all relevant points and see if any of them fall outside of your SCAT box", "SCAT", "=", "True", "for", "point", "in", "points", ":", "result",...
35.916667
18.083333
def tensor_components_to_use(mrr, mtt, mpp, mrt, mrp, mtp):
    '''
    Converts components to Up, South, East definition::

        USE = [[mrr, mrt, mrp],
               [mrt, mtt, mtp],
               [mrp, mtp, mpp]]

    The returned moment tensor is symmetric.  (The previous docstring
    showed ``mtt`` twice in the second row, which did not match the
    matrix actually returned.)
    '''
    return np.array([[mrr, mrt, mrp],
                     [mrt, mtt, mtp],
                     [mrp, mtp, mpp]])
[ "def", "tensor_components_to_use", "(", "mrr", ",", "mtt", ",", "mpp", ",", "mrt", ",", "mrp", ",", "mtp", ")", ":", "return", "np", ".", "array", "(", "[", "[", "mrr", ",", "mrt", ",", "mrp", "]", ",", "[", "mrt", ",", "mtt", ",", "mtp", "]", ...
31.555556
23.111111
def restore_configuration_files(self):
    """Restore previously saved configuration files (e.g. postgresql.conf).

    For each file listed in ``self._configuration_to_save`` that is
    missing from the config directory, copy it back from the ``.backup``
    copy kept in the data directory.  ``pg_ident.conf`` is special-cased:
    it was historically never backed up, so a missing file is recreated
    empty instead.
    """
    try:
        for name in self._configuration_to_save:
            target = os.path.join(self._config_dir, name)
            if os.path.isfile(target):
                continue
            backup = os.path.join(self._data_dir, name + '.backup')
            if os.path.isfile(backup):
                shutil.copy(backup, target)
            elif name == 'pg_ident.conf':
                # No backup ever existed for this file; an empty one is
                # enough for the server to start.
                open(target, 'w').close()
    except IOError:
        logger.exception('unable to restore configuration files from backup')
[ "def", "restore_configuration_files", "(", "self", ")", ":", "try", ":", "for", "f", "in", "self", ".", "_configuration_to_save", ":", "config_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_config_dir", ",", "f", ")", "backup_file", "=", ...
54.357143
19.785714
def exception_wrapper(f):
    """Decorator converting a raised DBus exception into a pympris one."""
    @wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except dbus.exceptions.DBusException as exc:
            # Re-raise under our own exception type, preserving the
            # original exception's arguments.
            raise PyMPRISException(*exc.args)
    return inner
[ "def", "exception_wrapper", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwds", ")", "except", "dbus", ".", ...
32.3
13.2
def success(self, buf, newline=True):
    """Write *buf* via ``self.write``, green-colored when coloring is on.

    `buf`
        Data buffer to write.
    `newline`
        Append newline character to buffer before writing.
    """
    if self._colored:
        # Wrap the payload in the green/clear escape codes.
        buf = self.ESCAPE_GREEN + buf + self.ESCAPE_CLEAR
    self.write(buf, newline)
[ "def", "success", "(", "self", ",", "buf", ",", "newline", "=", "True", ")", ":", "if", "self", ".", "_colored", ":", "buf", "=", "self", ".", "ESCAPE_GREEN", "+", "buf", "+", "self", ".", "ESCAPE_CLEAR", "self", ".", "write", "(", "buf", ",", "new...
29
18.307692
def fit(self, X, chunks):
    """Learn the RCA model.

    Parameters
    ----------
    data : (n x d) data matrix
        Each row corresponds to a single instance
    chunks : (n,) array of ints
        When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
        When ``chunks[i] == j``, point i belongs to chunklet j.
    """
    X = self._prepare_inputs(X, ensure_min_samples=2)

    # PCA projection to remove noise and redundant information.
    if self.pca_comps is not None:
        pca = decomposition.PCA(n_components=self.pca_comps)
        X_t = pca.fit_transform(X)
        M_pca = pca.components_
    else:
        # No PCA requested: just center the data; M_pca=None flags that
        # no extra projection needs to be composed at the end.
        X_t = X - X.mean(axis=0)
        M_pca = None

    chunks = np.asanyarray(chunks, dtype=int)
    chunk_mask, chunked_data = _chunk_mean_centering(X_t, chunks)

    # Within-chunklet ("inner") covariance of the mean-centered chunk data.
    inner_cov = np.atleast_2d(np.cov(chunked_data, rowvar=0, bias=1))
    dim = self._check_dimension(np.linalg.matrix_rank(inner_cov), X_t)

    # Fisher Linear Discriminant projection
    if dim < X_t.shape[1]:
        # Rank-deficient inner covariance: first project onto the `dim`
        # smallest-eigenvalue directions of total_cov^-1 * inner_cov.
        total_cov = np.cov(X_t[chunk_mask], rowvar=0)
        tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
        vals, vecs = np.linalg.eig(tmp)
        inds = np.argsort(vals)[:dim]
        A = vecs[:, inds]
        inner_cov = np.atleast_2d(A.T.dot(inner_cov).dot(A))
        # Whitening transform of the reduced inner covariance, composed
        # with the FLD projection.
        self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
    else:
        self.transformer_ = _inv_sqrtm(inner_cov).T

    if M_pca is not None:
        # Compose with the PCA projection applied at the start.
        self.transformer_ = np.atleast_2d(self.transformer_.dot(M_pca))

    return self
[ "def", "fit", "(", "self", ",", "X", ",", "chunks", ")", ":", "X", "=", "self", ".", "_prepare_inputs", "(", "X", ",", "ensure_min_samples", "=", "2", ")", "# PCA projection to remove noise and redundant information.", "if", "self", ".", "pca_comps", "is", "no...
33.136364
19.818182
def doubleclickrowindex(self, window_name, object_name, row_index, col_index=0):
    """
    Double click the table row at the given index

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param row_index: Row index to click
    @type row_index: integer
    @param col_index: Column index to click
    @type col_index: integer

    @return: row index matching the text on success.
    @rtype: integer
    """
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    count = len(object_handle.AXRows)
    # Valid indices are 0 .. count-1.  The old check used `row_index > count`,
    # which accepted row_index == count and then crashed below with a raw
    # IndexError instead of a clean server error.
    if row_index < 0 or row_index >= count:
        raise LdtpServerException('Row index out of range: %d' % row_index)
    cell = object_handle.AXRows[row_index]
    self._grabfocus(cell)
    x, y, width, height = self._getobjectsize(cell)
    # Mouse double click on the center of the cell.
    cell.doubleClickMouse((x + width / 2, y + height / 2))
    return 1
[ "def", "doubleclickrowindex", "(", "self", ",", "window_name", ",", "object_name", ",", "row_index", ",", "col_index", "=", "0", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "not", "objec...
41.483871
16.451613
def tokenize(self, docs):
    """
    The first pass consists of converting documents into "transactions"
    (sets of their tokens) and the initial frequency/support filtering.
    Then iterate until we close in on a final set.

    `docs` can be any iterator or generator so long as it yields lists.
    Each list represents a document (i.e. is a list of tokens).  For
    example, it can be a list of lists of nouns and noun phrases if
    trying to identify aspects, where each list represents a sentence
    or document.

    `min_sup` defines the minimum frequency (as a ratio over the total)
    necessary to keep a candidate.
    """
    # Materialize `docs` so generators/iterators actually work as the
    # docstring promises: len() is needed for the min_sup check and the
    # documents are iterated a second time at the end.
    docs = list(docs)

    if self.min_sup < 1/len(docs):
        raise Exception('`min_sup` must be greater than or equal to `1/len(docs)`.')

    # First pass
    candidates = set()
    transactions = []

    # Use nouns and noun phrases.
    for doc in POSTokenizer().tokenize(docs):
        transaction = set(doc)
        candidates = candidates.union({(t,) for t in transaction})
        transactions.append(transaction)
    freq_set = filter_support(candidates, transactions, self.min_sup)

    # Iterate: grow candidate itemsets of size k until no candidate
    # survives the support filter; `last_set` holds the final survivors.
    k = 2
    last_set = set()
    while freq_set:
        last_set = freq_set
        cands = generate_candidates(freq_set, k)
        freq_set = filter_support(cands, transactions, self.min_sup)
        k += 1

    # Map documents to their keywords.
    keywords = flatten(last_set)
    return prune([[kw for kw in keywords if kw in doc] for doc in docs])
[ "def", "tokenize", "(", "self", ",", "docs", ")", ":", "if", "self", ".", "min_sup", "<", "1", "/", "len", "(", "docs", ")", ":", "raise", "Exception", "(", "'`min_sup` must be greater than or equal to `1/len(docs)`.'", ")", "# First pass", "candidates", "=", ...
37.880952
22.071429
def factory_chat(js_obj, driver=None):
    """Create the appropriate Chat object for a selenium JS chat object."""
    kind = js_obj["kind"]
    if kind not in ("chat", "group", "broadcast"):
        raise AssertionError(
            "Expected chat, group or broadcast object, got {0}".format(kind))
    # Group flag takes precedence over the kind field.
    if js_obj["isGroup"]:
        return GroupChat(js_obj, driver)
    if kind == "broadcast":
        return BroadcastChat(js_obj, driver)
    return UserChat(js_obj, driver)
[ "def", "factory_chat", "(", "js_obj", ",", "driver", "=", "None", ")", ":", "if", "js_obj", "[", "\"kind\"", "]", "not", "in", "[", "\"chat\"", ",", "\"group\"", ",", "\"broadcast\"", "]", ":", "raise", "AssertionError", "(", "\"Expected chat, group or broadca...
38.833333
19.416667
def copy_groups_to_folder(dicom_groups, folder_path, groupby_field_name):
    """Copy the DICOM file groups to folder_path. Each group will be copied into
    a subfolder with named given by groupby_field.

    Parameters
    ----------
    dicom_groups: boyle.dicom.sets.DicomFileSet

    folder_path: str
     Path to where copy the DICOM files.

    groupby_field_name: str
     DICOM field name. Will get the value of this field to name the group
     folder.
    """
    if dicom_groups is None or not dicom_groups:
        raise ValueError('Expected a boyle.dicom.sets.DicomFileSet.')

    if not os.path.exists(folder_path):
        os.makedirs(folder_path, exist_ok=False)

    for dcmg in dicom_groups:
        if groupby_field_name is not None and len(groupby_field_name) > 0:
            # Build the subfolder name by joining the values of each
            # requested DICOM field, read from the group's key file.
            dfile = DicomFile(dcmg)
            dir_name = ''
            for att in groupby_field_name:
                dir_name = os.path.join(dir_name, dfile.get_attributes(att))
            dir_name = str(dir_name)
        else:
            # No grouping field: fall back to the basename of the group key.
            dir_name = os.path.basename(dcmg)

        group_folder = os.path.join(folder_path, dir_name)
        # NOTE(review): exist_ok=False means two groups mapping to the same
        # folder name raise FileExistsError -- presumably group names are
        # unique; confirm with callers.
        os.makedirs(group_folder, exist_ok=False)

        log.debug('Copying files to {}.'.format(group_folder))

        import shutil
        dcm_files = dicom_groups[dcmg]

        for srcf in dcm_files:
            destf = os.path.join(group_folder, os.path.basename(srcf))
            # Avoid clobbering an existing file: append '+' until free.
            while os.path.exists(destf):
                destf += '+'
            shutil.copy2(srcf, destf)
[ "def", "copy_groups_to_folder", "(", "dicom_groups", ",", "folder_path", ",", "groupby_field_name", ")", ":", "if", "dicom_groups", "is", "None", "or", "not", "dicom_groups", ":", "raise", "ValueError", "(", "'Expected a boyle.dicom.sets.DicomFileSet.'", ")", "if", "n...
34.162791
19.209302
def set_objective(self, objective, extraobjexpr=None):
    """Set or change the objective function of the polynomial optimization
    problem.

    :param objective: Describes the objective function.
    :type objective: :class:`sympy.core.expr.Expr`
    :param extraobjexpr: Optional parameter of a string expression of a
                         linear combination of moment matrix elements to
                         be included in the objective function
    :type extraobjexpr: str.
    """
    if objective is not None and self.matrix_var_dim is not None:
        # Matrix-valued variables: translate the objective into trace
        # coefficients.  facvar[0] is the constant term; the remaining
        # entries are the coefficients of the moment-matrix variables.
        facvar = self.__get_trace_facvar(objective)
        self.obj_facvar = facvar[1:]
        self.constant_term = facvar[0]
        if self.verbose > 0 and facvar[0] != 0:
            # The constant term cannot be represented in the SDP
            # objective itself, so warn the user that it is dropped.
            print("Warning: The objective function has a non-zero %s "
                  "constant term. It is not included in the SDP objective."
                  % facvar[0])
    else:
        # Scalar case (or no objective): defer to the parent class.
        super(SteeringHierarchy, self).\
            set_objective(objective, extraobjexpr=extraobjexpr)
[ "def", "set_objective", "(", "self", ",", "objective", ",", "extraobjexpr", "=", "None", ")", ":", "if", "objective", "is", "not", "None", "and", "self", ".", "matrix_var_dim", "is", "not", "None", ":", "facvar", "=", "self", ".", "__get_trace_facvar", "("...
49.409091
18.545455
def query_remote_ref(self, remote, ref):
    """Query remote repo about given ref.

    :return: ``('tag', sha)`` if ref is a tag in remote
             ``('branch', sha)`` if ref is branch (aka "head") in remote
             ``(None, ref)`` if ref does not exist in remote. This happens
             notably if ref if a commit sha (they can't be queried)
    """
    output = self.log_call(['git', 'ls-remote', remote, ref],
                           cwd=self.cwd,
                           callwith=subprocess.check_output).strip()
    # Each ls-remote line is "<sha>\t<fullref>".
    for line in output.splitlines():
        sha, fullref = line.split()
        if fullref == 'refs/heads/' + ref:
            return 'branch', sha
        if fullref == 'refs/tags/' + ref:
            return 'tag', sha
        if fullref == ref and ref == 'HEAD':
            return 'HEAD', sha
    # Not found in the remote (e.g. a bare commit sha).
    return None, ref
[ "def", "query_remote_ref", "(", "self", ",", "remote", ",", "ref", ")", ":", "out", "=", "self", ".", "log_call", "(", "[", "'git'", ",", "'ls-remote'", ",", "remote", ",", "ref", "]", ",", "cwd", "=", "self", ".", "cwd", ",", "callwith", "=", "sub...
49.333333
14.333333
def jdn_to_gdate(jdn):
    """
    Convert from the Julian Day Number to the Gregorian date.

    Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
    The article gives no meaning for its intermediate variables, so the
    names below are descriptive approximations only.

    Return: datetime.date (day, month, year)
    """
    t = jdn + 68569
    quad_cent = (4 * t) // 146097
    t -= (146097 * quad_cent + 3) // 4
    years = (4000 * (t + 1)) // 1461001  # that's 1,461,001
    t = t - (1461 * years) // 4 + 31
    months = (80 * t) // 2447
    day = t - (2447 * months) // 80
    t = months // 11
    month = months + 2 - 12 * t
    year = 100 * (quad_cent - 49) + years + t
    return datetime.date(year, month, day)
[ "def", "jdn_to_gdate", "(", "jdn", ")", ":", "# pylint: disable=invalid-name", "# The algorithm is a verbatim copy from Peter Meyer's article", "# No explanation in the article is given for the variables", "# Hence the exceptions for pylint and for flake8 (E741)", "l", "=", "jdn", "+", "...
32.92
16.92
def p_suffix(self, length=None, elipsis=False):
    """Return the unconsumed remainder of the input.

    With *length* given, at most that many characters are returned;
    when *elipsis* is true and the result fills the whole window,
    "..." is appended to signal truncation.
    """
    if length is None:
        return self.input[self.pos:]
    result = self.input[self.pos:self.pos + length]
    if elipsis and len(result) == length:
        result += "..."
    return result
[ "def", "p_suffix", "(", "self", ",", "length", "=", "None", ",", "elipsis", "=", "False", ")", ":", "if", "length", "is", "not", "None", ":", "result", "=", "self", ".", "input", "[", "self", ".", "pos", ":", "self", ".", "pos", "+", "length", "]...
39.375
9.375
def name(self):
    """Application name, used as the process name.

    Falls back to the parent implementation when the ``[application]``
    section has no ``name`` option (or the parser raises).
    """
    try:
        value = self.config_parser.get('application', 'name')
    except CONFIGPARSER_EXC:
        return super(IniConfig, self).name
    return value
[ "def", "name", "(", "self", ")", ":", "try", ":", "return", "self", ".", "config_parser", ".", "get", "(", "'application'", ",", "'name'", ")", "except", "CONFIGPARSER_EXC", ":", "return", "super", "(", "IniConfig", ",", "self", ")", ".", "name" ]
30.625
13.125
def com_adobe_fonts_check_cff_call_depth(ttFont):
    """Is the CFF subr/gsubr call depth > 10?"""
    any_failures = False
    cff = ttFont['CFF '].cff
    for top_dict in cff.topDictIndex:
        if hasattr(top_dict, 'FDArray'):
            # CID-keyed font: each FDArray element carries its own
            # Private dict (if any).
            for fd_index, font_dict in enumerate(top_dict.FDArray):
                private_dict = getattr(font_dict, 'Private', None)
                failed = yield from _check_call_depth(
                    top_dict, private_dict, fd_index)
                any_failures = any_failures or failed
        else:
            private_dict = getattr(top_dict, 'Private', None)
            failed = yield from _check_call_depth(top_dict, private_dict)
            any_failures = any_failures or failed
    if not any_failures:
        yield PASS, 'Maximum call depth not exceeded.'
[ "def", "com_adobe_fonts_check_cff_call_depth", "(", "ttFont", ")", ":", "any_failures", "=", "False", "cff", "=", "ttFont", "[", "'CFF '", "]", ".", "cff", "for", "top_dict", "in", "cff", ".", "topDictIndex", ":", "if", "hasattr", "(", "top_dict", ",", "'FDA...
38.84
14.92
def validateEmail(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value is not an email address. Returns the value argument. * value (str): The value being validated as an email address. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateEmail('al@inventwithpython.com') 'al@inventwithpython.com' >>> pysv.validateEmail('alinventwithpython.com') Traceback (most recent call last): ... pysimplevalidate.ValidationException: 'alinventwithpython.com' is not a valid email address. """ # Reuse the logic in validateRegex() try: result = validateRegex(value=value, regex=EMAIL_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) if result is not None: return result except ValidationException: _raiseValidationException(_('%r is not a valid email address.') % (value), excMsg)
[ "def", "validateEmail", "(", "value", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "excMsg", "=", "None", ")", ":", "# Reuse the logic in validateRegex()", "try", ":", ...
58.555556
36.407407