text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def to_json(self):
    """Build the JSON representation of this content type field validation."""
    return {camel_case(key): value for key, value in self._data.items()}
[ "def", "to_json", "(", "self", ")", ":", "result", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "_data", ".", "items", "(", ")", ":", "result", "[", "camel_case", "(", "k", ")", "]", "=", "v", "return", "result" ]
25.888889
15.888889
def setPageSizeOptions(self, options):
    """
    Sets the options that will be displayed for this default size.

    :param options | [<str>, ..]
    """
    self._pageSizeCombo.blockSignals(True)
    self._pageSizeCombo.addItems(options)
    ssize = nativestring(self.pageSize())
    if ssize == '0':
        ssize = ''
    # bugfix: findText() was called with no argument, so the computed
    # current size (ssize) was never used and the call raises a
    # TypeError in Qt bindings; pass the text to search for.
    index = self._pageSizeCombo.findText(ssize)
    self._pageSizeCombo.setCurrentIndex(index)
    self._pageSizeCombo.blockSignals(False)
[ "def", "setPageSizeOptions", "(", "self", ",", "options", ")", ":", "self", ".", "_pageSizeCombo", ".", "blockSignals", "(", "True", ")", "self", ".", "_pageSizeCombo", ".", "addItems", "(", "options", ")", "ssize", "=", "nativestring", "(", "self", ".", "...
33.875
12.5
def next(self, match, predicate=None, index=None):
    """
    Retrieves the nearest next matches.

    Scans forward from the position just after ``match.start`` up to
    ``self._max_end`` and returns the first non-empty set of matches
    starting there, filtered by ``predicate``/``index``.  Falls back to
    an empty base collection when nothing is found.
    """
    for position in range(match.start + 1, self._max_end + 1):
        candidates = self.starting(position)
        if candidates:
            return filter_index(candidates, predicate, index)
    return filter_index(_BaseMatches._base(), predicate, index)
[ "def", "next", "(", "self", ",", "match", ",", "predicate", "=", "None", ",", "index", "=", "None", ")", ":", "current", "=", "match", ".", "start", "+", "1", "while", "current", "<=", "self", ".", "_max_end", ":", "next_matches", "=", "self", ".", ...
30.947368
14
def editCell(self, vcolidx=None, rowidx=None, **kwargs):
    'Call `editText` at its place on the screen.  Returns the new value, properly typed'
    if vcolidx is None:
        vcolidx = self.cursorVisibleColIndex
    x, w = self.visibleColLayout.get(vcolidx, (0, 0))
    col = self.visibleCols[vcolidx]
    if rowidx is None:
        rowidx = self.cursorRowIndex
    if rowidx < 0:  # header
        y = 0
        currentValue = col.name
    else:
        y = self.rowLayout.get(rowidx, 0)
        # bugfix: read the value of the requested row, not the cursor row.
        # Previously an explicit rowidx positioned the editor on the right
        # row but edited the cursor row's value.
        currentValue = col.getDisplayValue(self.rows[rowidx])
    editargs = dict(value=currentValue, fillchar=options.disp_edit_fill, truncchar=options.disp_truncator)
    editargs.update(kwargs)  # update with user-specified args
    r = self.vd.editText(y, x, w, **editargs)
    if rowidx >= 0:  # if not header
        r = col.type(r)  # convert input to column type, let exceptions be raised
    return r
[ "def", "editCell", "(", "self", ",", "vcolidx", "=", "None", ",", "rowidx", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "vcolidx", "is", "None", ":", "vcolidx", "=", "self", ".", "cursorVisibleColIndex", "x", ",", "w", "=", "self", ".", "...
39.115385
20.269231
def translate_text(
    self,
    contents,
    target_language_code,
    mime_type=None,
    source_language_code=None,
    parent=None,
    model=None,
    glossary_config=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Translates input text and returns translated text.

    Example:
        >>> from google.cloud import translate_v3beta1
        >>> client = translate_v3beta1.TranslationServiceClient()
        >>> response = client.translate_text([''], 'fr')

    Args:
        contents (list[str]): Required. The content of the input in string
            format. The total contents should be less than 30k codepoints;
            use BatchTranslateText for larger text.
        target_language_code (str): Required. The BCP-47 language code to
            translate the input text into.
        mime_type (str): Optional. Format of the source text, e.g.
            "text/html" or "text/plain". Defaults to "text/html" if blank.
        source_language_code (str): Optional. BCP-47 language code of the
            input text, if known; otherwise the API auto-detects it.
        parent (str): Optional. Only used for regionalized calls. Format:
            projects/{project-id}/locations/{location-id}.
        model (str): Optional. The ``model`` requested for this translation,
            e.g. projects/{project-id}/locations/{location-id}/models/{model-id}
            for custom models or .../models/general/nmt for built-in ones.
        glossary_config (Union[dict, ~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig]):
            Optional. Glossary to be applied; must be in the same region as
            the model or an INVALID_ARGUMENT error is returned.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` disables retries.
        timeout (Optional[float]): Seconds to wait for the request; applies
            per attempt when ``retry`` is set.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.cloud.translate_v3beta1.types.TranslateTextResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed.
        google.api_core.exceptions.RetryError: If retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped callable is cached on first use so defaults from the
    # method config are only resolved once.
    if "translate_text" not in self._inner_api_calls:
        self._inner_api_calls[
            "translate_text"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.translate_text,
            default_retry=self._method_configs["TranslateText"].retry,
            default_timeout=self._method_configs["TranslateText"].timeout,
            client_info=self._client_info,
        )

    # Build the protobuf request from the plain arguments.
    request = translation_service_pb2.TranslateTextRequest(
        contents=contents,
        target_language_code=target_language_code,
        mime_type=mime_type,
        source_language_code=source_language_code,
        parent=parent,
        model=model,
        glossary_config=glossary_config,
    )
    if metadata is None:
        metadata = []
    # Copy so the caller's sequence is never mutated below.
    metadata = list(metadata)
    # Attach a routing header derived from `parent` (generated boilerplate;
    # the try/else mirrors the upstream template).
    try:
        routing_header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["translate_text"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
[ "def", "translate_text", "(", "self", ",", "contents", ",", "target_language_code", ",", "mime_type", "=", "None", ",", "source_language_code", "=", "None", ",", "parent", "=", "None", ",", "model", "=", "None", ",", "glossary_config", "=", "None", ",", "ret...
46.5
27.434426
def get_attr_text(self):
    """Render this element's attributes as an HTML attribute string."""
    rendered = ('{}="{}"'.format(key, value)
                for key, value in self.attr.items())
    return ' '.join(rendered)
[ "def", "get_attr_text", "(", "self", ")", ":", "return", "' '", ".", "join", "(", "[", "'{}=\"{}\"'", ".", "format", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "self", ".", "attr", ".", "items", "(", ")", "]", ")" ]
33.333333
11.333333
def dict_stack(dict_list, key_prefix=''):
    r"""
    Stacks values from a list of dicts into a new dict whose values are
    lists of the input values; keys are the union of the input keys.

    DEPRECATE in favor of dict_stack2

    Args:
        dict_list (list): list of dicts with similar keys
        key_prefix (str): prefix prepended to every output key

    Returns:
        dict: dict_stacked

    Example:
        >>> dict1_ = {'a': 1, 'b': 2}
        >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
        >>> result = dict_stack([dict1_, dict2_])
        >>> sorted(result.items())
        [('a', [1, 2]), ('b', [2, 3]), ('c', [4])]
    """
    dict_stacked_ = defaultdict(list)
    for dict_ in dict_list:
        # Iterate natively instead of via six.iteritems: identical behavior
        # on Python 2 and 3, and drops the third-party `six` dependency here.
        for key, val in dict_.items():
            dict_stacked_[key_prefix + key].append(val)
    return dict(dict_stacked_)
[ "def", "dict_stack", "(", "dict_list", ",", "key_prefix", "=", "''", ")", ":", "dict_stacked_", "=", "defaultdict", "(", "list", ")", "for", "dict_", "in", "dict_list", ":", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "dict_", ")", ":...
34.104167
16.75
def plot(self, win=None, newfig=True, figsize=None, orientation='hor',
         topfigfrac=0.8):
    """Plot layout

    Parameters
    ----------
    win : list or tuple
        [x1, x2, y1, y2]
    newfig : bool
        create a new figure; otherwise the axes of the current figure
        are reused
    figsize : tuple or None
        forwarded to plt.figure when newfig is True
    orientation : str
        'hor', 'ver', or 'both' (only the first 3 characters of
        'hor'/'ver' are checked)
    topfigfrac : float
        fraction of the vertical space given to the top (horizontal)
        axes when orientation == 'both'
    """
    # ax1 is the plan-view (horizontal) axes, ax2 the cross-section
    # (vertical) axes; either may stay None depending on orientation.
    if newfig:
        plt.figure(figsize=figsize)
        ax1 = None
        ax2 = None
        if orientation == 'both':
            # Two stacked axes sharing the x-axis; topfigfrac sets the split.
            ax1 = plt.axes([0.125, 0.18 + (1 - topfigfrac) * 0.7,
                            (0.9 - 0.125), topfigfrac * 0.7])
            ax2 = plt.axes([0.125, 0.11, (0.9 - 0.125),
                            (1 - topfigfrac) * 0.7], sharex=ax1)
        elif orientation[:3] == 'hor':
            ax1 = plt.subplot()
        elif orientation[:3] == 'ver':
            ax2 = plt.subplot()
    else:
        # Reuse the axes of the current figure.
        # NOTE(review): assumes the current figure already has axes laid
        # out to match `orientation` — confirm at call sites.
        if orientation == 'both':
            fig = plt.gcf()
            ax1 = fig.axes[0]
            ax2 = fig.axes[1]
        elif orientation[:3] == 'hor':
            fig = plt.gcf()
            ax1 = fig.axes[0]
            ax2 = None
        elif orientation[:3] == 'ver':
            fig = plt.gcf()
            ax1 = None
            ax2 = fig.axes[0]
    if ax1 is not None:
        # Plan view: draw every element of the model.
        plt.sca(ax1)
        for e in self.elementlist:
            e.plot()
        if orientation[:3] == 'hor':
            plt.axis('scaled')
        elif orientation == 'both':
            plt.axis('equal')  # cannot be 'scaled' when sharing axes
        if win is not None:
            plt.axis(win)
    if ax2 is not None:
        # Cross-section: shade leaky layers ('l') and the interfaces
        # between adjacent aquifer layers ('a').
        plt.sca(ax2)
        for i in range(self.aq.nlayers):
            if self.aq.ltype[i] == 'l':
                plt.axhspan(ymin=self.aq.z[i + 1], ymax=self.aq.z[i],
                            color=[0.8, 0.8, 0.8])
        for i in range(1, self.aq.nlayers):
            if self.aq.ltype[i] == 'a' and self.aq.ltype[i - 1] == 'a':
                plt.axhspan(ymin=self.aq.z[i], ymax=self.aq.z[i],
                            color=[0.8, 0.8, 0.8])
[ "def", "plot", "(", "self", ",", "win", "=", "None", ",", "newfig", "=", "True", ",", "figsize", "=", "None", ",", "orientation", "=", "'hor'", ",", "topfigfrac", "=", "0.8", ")", ":", "if", "newfig", ":", "plt", ".", "figure", "(", "figsize", "=",...
36.830189
16.018868
def remove_folder(self, folder, recurse=False, force=False, **kwargs):
    """
    :param folder: Full path to the folder to remove
    :type folder: string
    :param recurse: If True, recursively remove all objects and subfolders in the folder
    :type recurse: bool
    :param force: If True, will suppress errors for folders that do not exist
    :type force: bool

    Removes the specified folder from the project or container.  The
    folder must be empty unless *recurse* is True.  The removal is done
    in partial batches: the API is called repeatedly until it reports
    the operation completed.  Removal propagates to any hidden objects
    that become unreachable as a result (only possible if *recurse* is
    True).
    """
    if isinstance(self, DXProject):
        api_method = dxpy.api.project_remove_folder
    else:
        api_method = dxpy.api.container_remove_folder
    payload = {"folder": folder, "recurse": recurse, "force": force,
               "partial": True}
    while True:
        resp = api_method(self._dxid, payload,
                          always_retry=force,  # api call is idempotent under 'force' semantics
                          **kwargs)
        if 'completed' not in resp:
            raise DXError('Error removing folder')
        if resp['completed']:
            break
[ "def", "remove_folder", "(", "self", ",", "folder", ",", "recurse", "=", "False", ",", "force", "=", "False", ",", "*", "*", "kwargs", ")", ":", "api_method", "=", "dxpy", ".", "api", ".", "container_remove_folder", "if", "isinstance", "(", "self", ",", ...
44.322581
22.709677
def derivativeZ(self, x, y, z):
    '''
    Evaluate the first derivative with respect to z of the function at
    the given state space points.

    Parameters
    ----------
    x : np.array
        First input values.
    y : np.array
        Second input values; should be of same shape as x.
    z : np.array
        Third input values; should be of same shape as x.

    Returns
    -------
    np.array
        First derivative of the function with respect to the third
        input, evaluated at (x,y,z), of same shape as the inputs.
    '''
    shifted_x = x - self.lowerBound(y)
    return self.func.derivativeZ(shifted_x, y, z)
[ "def", "derivativeZ", "(", "self", ",", "x", ",", "y", ",", "z", ")", ":", "xShift", "=", "self", ".", "lowerBound", "(", "y", ")", "dfdz_out", "=", "self", ".", "func", ".", "derivativeZ", "(", "x", "-", "xShift", ",", "y", ",", "z", ")", "ret...
30.869565
22.173913
def to_full_path(cls, path):
    """
    :return: string with a full repository-relative path which can be used to
        initialize a Reference instance, for instance by using ``Reference.from_path``"""
    if isinstance(path, SymbolicReference):
        path = path.path
    prefix = cls._common_path_default
    # Prepend the common prefix unless it is empty or already present.
    if prefix and not path.startswith(prefix + "/"):
        return '%s/%s' % (prefix, path)
    return path
[ "def", "to_full_path", "(", "cls", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "SymbolicReference", ")", ":", "path", "=", "path", ".", "path", "full_ref_path", "=", "path", "if", "not", "cls", ".", "_common_path_default", ":", "return", ...
45.75
14
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)
    C = self.COEFFS[imt]
    mean = self._compute_mean(C, rup.mag, dists.rrup)
    # idiom: single isinstance with a tuple instead of two chained calls
    if isinstance(imt, (SA, PGA)):
        # Convert from m/s**2 to g
        mean = mean - np.log(g)
    elif isinstance(imt, PGV):
        # Convert from m/s to cm/s
        mean = mean + np.log(100.0)
    stddevs = self._get_stddevs(C, stddev_types, rup.mag,
                                dists.rrup.shape[0])
    return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "assert", "all", "(", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "for", "stddev_type", "in", "stddev_typ...
41.05
15.55
def scatterAlign(seq1, seq2, window=7):
    """
    Visually align two sequences as a dot plot.

    Every exact k-mer (length ``window``) shared between the sequences
    contributes one point at (position in seq1, position in seq2).
    """
    def index_windows(seq):
        # Map each window-sized substring to all of its start positions.
        positions = defaultdict(list)
        for start in range(len(seq) - window):
            positions[seq[start:start + window]].append(start)
        return positions

    d1 = index_windows(seq1)
    d2 = index_windows(seq2)
    matches = set(d1).intersection(d2)
    print('%i unique matches' % len(matches))
    points = [(i, j)
              for section in matches
              for i in d1[section]
              for j in d2[section]]
    x = [p[0] for p in points]
    y = [p[1] for p in points]
    # plt.cla()  # clear any prior graph
    plt.gray()
    plt.scatter(x, y)
    plt.xlim(0, len(seq1) - window)
    plt.ylim(0, len(seq2) - window)
    plt.xlabel('length %i bp' % (len(seq1)))
    plt.ylabel('length %i bp' % (len(seq2)))
    plt.title('Dot plot using window size %i\n(allowing no mis-matches)' % window)
    plt.show()
[ "def", "scatterAlign", "(", "seq1", ",", "seq2", ",", "window", "=", "7", ")", ":", "d1", "=", "defaultdict", "(", "list", ")", "d2", "=", "defaultdict", "(", "list", ")", "for", "(", "seq", ",", "section_dict", ")", "in", "[", "(", "seq1", ",", ...
31.241379
11.172414
def get_boto_client(
        client,
        region=None,
        aws_access_key_id=None,
        aws_secret_access_key=None,
        aws_session_token=None,
        endpoint_url=None
):
    """Get a boto3 client connection."""
    cache_key = '{0}:{1}:{2}:{3}'.format(
        client,
        region,
        aws_access_key_id,
        endpoint_url or ''
    )
    # Only serve from the cache for non-token credentials.
    if not aws_session_token and cache_key in CLIENT_CACHE:
        return CLIENT_CACHE[cache_key]
    session = get_boto_session(
        region,
        aws_access_key_id,
        aws_secret_access_key,
        aws_session_token
    )
    if not session:
        logging.error("Failed to get {0} client.".format(client))
        return None
    boto_client = session.client(client, endpoint_url=endpoint_url)
    CLIENT_CACHE[cache_key] = boto_client
    return boto_client
[ "def", "get_boto_client", "(", "client", ",", "region", "=", "None", ",", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "aws_session_token", "=", "None", ",", "endpoint_url", "=", "None", ")", ":", "cache_key", "=", "'{0}:{1...
25.151515
16.848485
def get_all_unresolved(self):
    """Returns a set of all unresolved imports."""
    assert self.final, 'Call build() before using the graph.'
    # Union of every broken-dependency set; empty dict yields empty set.
    return set().union(*self.broken_deps.values())
[ "def", "get_all_unresolved", "(", "self", ")", ":", "assert", "self", ".", "final", ",", "'Call build() before using the graph.'", "out", "=", "set", "(", ")", "for", "v", "in", "self", ".", "broken_deps", ".", "values", "(", ")", ":", "out", "|=", "v", ...
35.428571
14.571429
def handle_stream_features(self, stream, features):
    """Process incoming StartTLS related element of <stream:features/>.

    [initiating entity only]

    Returns None when StartTLS is absent and not required, a
    StreamFeatureHandled when negotiation is started, or a
    StreamFeatureNotHandled when the feature is present but not acted
    upon (already established, or disabled in settings).
    """
    # One handler instance is bound to a single stream for its lifetime.
    if self.stream and stream is not self.stream:
        raise ValueError("Single StreamTLSHandler instance can handle"
                         " only one stream")
    self.stream = stream
    logger.debug(" tls: handling features")
    element = features.find(STARTTLS_TAG)
    if element is None:
        # Peer does not offer StartTLS at all.
        logger.debug(" tls: no starttls feature found")
        if self.settings["tls_require"]:
            raise TLSNegotiationFailed("StartTLS required,"
                                       " but not supported by peer")
        return None
    # StartTLS is implicitly mandatory when it is the only feature
    # offered; otherwise check for an explicit <required/> child.
    if len(features) == 1:
        required = True
    else:
        required = element.find(REQUIRED_TAG) is not None
    if stream.tls_established:
        logger.warning("StartTLS offerred when already established")
        return StreamFeatureNotHandled("StartTLS", mandatory = required)
    if self.settings["starttls"]:
        logger.debug("StartTLS negotiated")
        self._request_tls()
        return StreamFeatureHandled("StartTLS", mandatory = required)
    else:
        logger.debug(" tls: not enabled")
        return StreamFeatureNotHandled("StartTLS", mandatory = required)
[ "def", "handle_stream_features", "(", "self", ",", "stream", ",", "features", ")", ":", "if", "self", ".", "stream", "and", "stream", "is", "not", "self", ".", "stream", ":", "raise", "ValueError", "(", "\"Single StreamTLSHandler instance can handle\"", "\" only o...
43.121212
19
def GetRarPassword(skipUserInput):
    """
    Get password for rar archive from user input.

    Parameters
    ----------
    skipUserInput : boolean
        Set to skip user input.

    Returns
    ----------
    string or boolean
        False when no password is given (skipped), otherwise the user's
        response string.  An 'exit' response terminates the program.
    """
    goodlogging.Log.Info("EXTRACT", "RAR file needs password to extract")
    if skipUserInput is False:
        prompt = "Enter password, 'x' to skip this file or 'exit' to quit this program: "
        response = util.CheckEmptyResponse(goodlogging.Log.Input("EXTRACT", prompt))
    else:
        response = 'x'
    answer = response.lower()
    if answer == 'x':
        goodlogging.Log.Info("EXTRACT", "File extraction skipped without password")
        return False
    if answer == 'exit':
        goodlogging.Log.Fatal("EXTRACT", "Program terminated by user 'exit'")
    else:
        return response
[ "def", "GetRarPassword", "(", "skipUserInput", ")", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"EXTRACT\"", ",", "\"RAR file needs password to extract\"", ")", "if", "skipUserInput", "is", "False", ":", "prompt", "=", "\"Enter password, 'x' to skip this file or...
29.166667
22.433333
def make(self):
    """
    Creates this directory and any of the missing directories in the
    path.  Any errors that may occur are eaten; returns self so calls
    can be chained.
    """
    try:
        already_there = self.exists
        if not already_there:
            logger.info("Creating %s" % self.path)
            os.makedirs(self.path)
    except os.error:
        # Deliberately swallowed (e.g. concurrent creation race).
        pass
    return self
[ "def", "make", "(", "self", ")", ":", "try", ":", "if", "not", "self", ".", "exists", ":", "logger", ".", "info", "(", "\"Creating %s\"", "%", "self", ".", "path", ")", "os", ".", "makedirs", "(", "self", ".", "path", ")", "except", "os", ".", "e...
29.416667
15.083333
def get_type_string(self, data, type_string):
    """
    Gets the type string for 'data'.

    A non-``None`` 'type_string' overrides the lookup, which makes it
    easier for subclasses to write something the parent marshaller can
    handle while still recording the right type string.  Otherwise the
    string is looked up in ``type_to_typestring`` first by the type
    object itself, then by its dotted ``module.name`` path.

    Parameters
    ----------
    data : type to be marshalled
        The Python object that is being written to disk.
    type_string : str or None
        If it is a ``str``, it overrides any looked up type string.
        ``None`` means don't override.

    Returns
    -------
    str
        The type string associated with 'data'; 'type_string' when that
        is not ``None``.
    """
    if type_string is not None:
        return type_string
    tp = type(data)
    if tp in self.type_to_typestring:
        return self.type_to_typestring[tp]
    # Fall back to the fully qualified name key (may raise KeyError).
    return self.type_to_typestring[tp.__module__ + '.' + tp.__name__]
[ "def", "get_type_string", "(", "self", ",", "data", ",", "type_string", ")", ":", "if", "type_string", "is", "not", "None", ":", "return", "type_string", "else", ":", "tp", "=", "type", "(", "data", ")", "try", ":", "return", "self", ".", "type_to_typest...
33.894737
20.368421
def _is_wildcard_match(self, domain_labels, valid_domain_labels): """ Determines if the labels in a domain are a match for labels from a wildcard valid domain name :param domain_labels: A list of unicode strings, with A-label form for IDNs, of the labels in the domain name to check :param valid_domain_labels: A list of unicode strings, with A-label form for IDNs, of the labels in a wildcard domain pattern :return: A boolean - if the domain matches the valid domain """ first_domain_label = domain_labels[0] other_domain_labels = domain_labels[1:] wildcard_label = valid_domain_labels[0] other_valid_domain_labels = valid_domain_labels[1:] # The wildcard is only allowed in the first label, so if # The subsequent labels are not equal, there is no match if other_domain_labels != other_valid_domain_labels: return False if wildcard_label == '*': return True wildcard_regex = re.compile('^' + wildcard_label.replace('*', '.*') + '$') if wildcard_regex.match(first_domain_label): return True return False
[ "def", "_is_wildcard_match", "(", "self", ",", "domain_labels", ",", "valid_domain_labels", ")", ":", "first_domain_label", "=", "domain_labels", "[", "0", "]", "other_domain_labels", "=", "domain_labels", "[", "1", ":", "]", "wildcard_label", "=", "valid_domain_lab...
33.861111
22.361111
def update_models(new_obj, current_table, tables, relations):
    """
    Update the state of the parsing.

    Dispatches on the type of ``new_obj`` (Table, Relation or Column)
    and returns the new (current_table, tables, relations) state; a
    ValueError is raised for any other type.
    """
    _update_check_inputs(current_table, tables, relations)
    _check_no_current_table(new_obj, current_table)

    if isinstance(new_obj, Table):
        existing_tables = [t.name for t in tables]
        _check_not_creating_duplicates(new_obj.name, existing_tables,
                                       'table', DuplicateTableException)
        return new_obj, tables + [new_obj], relations

    if isinstance(new_obj, Relation):
        existing_tables = [t.name for t in tables]
        _check_colname_in_lst(new_obj.right_col, existing_tables)
        _check_colname_in_lst(new_obj.left_col, existing_tables)
        return current_table, tables, relations + [new_obj]

    if isinstance(new_obj, Column):
        existing_columns = [c.name for c in current_table.columns]
        _check_not_creating_duplicates(new_obj.name, existing_columns,
                                       'column', DuplicateColumnException)
        current_table.columns.append(new_obj)
        return current_table, tables, relations

    raise ValueError("new_obj cannot be of type {}".format(
        new_obj.__class__.__name__))
[ "def", "update_models", "(", "new_obj", ",", "current_table", ",", "tables", ",", "relations", ")", ":", "_update_check_inputs", "(", "current_table", ",", "tables", ",", "relations", ")", "_check_no_current_table", "(", "new_obj", ",", "current_table", ")", "if",...
46.125
20.458333
def get_cpds(self):
    """
    Adds tables to BIF.

    Returns
    -------
    dict: dict of type {variable: array}
        One flattened (raveled) value array per CPD variable in the
        model.

    Example
    -------
    >>> from pgmpy.readwrite import BIFReader, BIFWriter
    >>> model = BIFReader('dog-problem.bif').get_model()
    >>> writer = BIFWriter(model)
    >>> writer.get_cpds()
    {'bowel-problem': array([ 0.01, 0.99]), ...}
    """
    return {cpd.variable: cpd.values.ravel()
            for cpd in self.model.get_cpds()}
[ "def", "get_cpds", "(", "self", ")", ":", "cpds", "=", "self", ".", "model", ".", "get_cpds", "(", ")", "tables", "=", "{", "}", "for", "cpd", "in", "cpds", ":", "tables", "[", "cpd", ".", "variable", "]", "=", "cpd", ".", "values", ".", "ravel",...
32.08
17.52
def log(self, output, exit_status):
    """Log given CompletedProcess output on failure and return the exit status code."""
    if exit_status == 0:
        return exit_status
    self.logger.error(f'Error running command! Exit status: {exit_status}, {output}')
    return exit_status
[ "def", "log", "(", "self", ",", "output", ",", "exit_status", ")", ":", "if", "exit_status", "!=", "0", ":", "self", ".", "logger", ".", "error", "(", "f'Error running command! Exit status: {exit_status}, {output}'", ")", "return", "exit_status" ]
41.833333
20.666667
def add_shellwidget(self, shellwidget):
    """
    Register shell with figure explorer.

    This function opens a new FigureBrowser for browsing the figures
    in the shell.  If the shell is already registered nothing happens
    and None is returned; otherwise the new browser is returned and
    made current.
    """
    # Shells are keyed by object identity.
    shellwidget_id = id(shellwidget)
    if shellwidget_id not in self.shellwidgets:
        self.options_button.setVisible(True)
        fig_browser = FigureBrowser(
            self, options_button=self.options_button,
            background_color=MAIN_BG_COLOR)
        fig_browser.set_shellwidget(shellwidget)
        fig_browser.setup(**self.get_settings())
        # Forward option changes and stdio redirection requests to the
        # plugin / main window.
        fig_browser.sig_option_changed.connect(
            self.sig_option_changed.emit)
        fig_browser.thumbnails_sb.redirect_stdio.connect(
            self.main.redirect_internalshell_stdio)
        self.register_widget_shortcuts(fig_browser)
        self.add_widget(fig_browser)
        # Record the browser and make it the active one.
        self.shellwidgets[shellwidget_id] = fig_browser
        self.set_shellwidget_from_id(shellwidget_id)
        return fig_browser
[ "def", "add_shellwidget", "(", "self", ",", "shellwidget", ")", ":", "shellwidget_id", "=", "id", "(", "shellwidget", ")", "if", "shellwidget_id", "not", "in", "self", ".", "shellwidgets", ":", "self", ".", "options_button", ".", "setVisible", "(", "True", "...
43.208333
11.458333
def _parse_index(self, section, content): """ .. index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out
[ "def", "_parse_index", "(", "self", ",", "section", ",", "content", ")", ":", "def", "strip_each_in", "(", "lst", ")", ":", "return", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "lst", "]", "out", "=", "{", "}", "section", "=", "section", ...
29.5
14.277778
def lookup(self, hostname):
    """
    Find a hostkey entry for a given hostname or IP.  If no entry is
    found, C{None} is returned.  Otherwise a dictionary of keytype to
    key is returned.  The keytype will be either C{"ssh-rsa"} or
    C{"ssh-dss"}.

    @param hostname: the hostname (or IP) to lookup
    @type hostname: str
    @return: keys associated with this host (or C{None})
    @rtype: dict(str, L{PKey})
    """
    # Dict-like view over the entries matching one hostname; writes
    # through to the parent HostKeys collection.
    class SubDict (UserDict.DictMixin):
        def __init__(self, hostname, entries, hostkeys):
            self._hostname = hostname
            self._entries = entries
            self._hostkeys = hostkeys

        def __getitem__(self, key):
            # Linear scan keyed by key type name (e.g. "ssh-rsa").
            for e in self._entries:
                if e.key.get_name() == key:
                    return e.key
            raise KeyError(key)

        def __setitem__(self, key, val):
            for e in self._entries:
                if e.key is None:
                    continue
                if e.key.get_name() == key:
                    # replace
                    e.key = val
                    break
            else:
                # add a new one
                e = HostKeyEntry([hostname], val)
                self._entries.append(e)
                self._hostkeys._entries.append(e)

        def keys(self):
            return [e.key.get_name() for e in self._entries
                    if e.key is not None]

    # Collect entries whose hostname list matches either literally or
    # via the '|1|...' hashed-hostname form.
    entries = []
    for e in self._entries:
        for h in e.hostnames:
            if (h.startswith('|1|') and (self.hash_host(hostname, h) == h)) or (h == hostname):
                entries.append(e)
    if len(entries) == 0:
        return None
    return SubDict(hostname, entries, self)
[ "def", "lookup", "(", "self", ",", "hostname", ")", ":", "class", "SubDict", "(", "UserDict", ".", "DictMixin", ")", ":", "def", "__init__", "(", "self", ",", "hostname", ",", "entries", ",", "hostkeys", ")", ":", "self", ".", "_hostname", "=", "hostna...
37.583333
14.75
def xzhdr(self, header, msgid_range=None):
    """XZHDR command.

    Args:
        header: The header field to retrieve.
        msgid_range: A message-id as a string, or an article number as an
            integer, or a tuple of specifying a range of article numbers in
            the form (first, [last]) - if last is omitted then all articles
            after first are included. A msgid_range of None (the default)
            uses the current article.

    Raises:
        NNTPReplyError: If the server does not reply with code 221.
    """
    if msgid_range is None:
        args = header
    else:
        args = " ".join([header, utils.unparse_msgid_range(msgid_range)])
    code, message = self.command("XZHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)
    return self.info(code, message, compressed=True)
[ "def", "xzhdr", "(", "self", ",", "header", ",", "msgid_range", "=", "None", ")", ":", "args", "=", "header", "if", "msgid_range", "is", "not", "None", ":", "args", "+=", "\" \"", "+", "utils", ".", "unparse_msgid_range", "(", "msgid_range", ")", "code",...
39
21.105263
def pca(df, n_components=2, mean_center=False, fcol=None, ecol=None, marker='o', markersize=40, threshold=None, label_threshold=None, label_weights=None, label_scores=None, return_df=False, show_covariance_ellipse=False, *args, **kwargs): """ Perform Principal Component Analysis (PCA) from input DataFrame and generate scores and weights plots. Principal Component Analysis is a technique for identifying the largest source of variation in a dataset. This function uses the implementation available in scikit-learn. The PCA is calculated via `analysis.pca` and will therefore give identical results. Resulting scores and weights plots are generated showing the distribution of samples within the resulting PCA space. Sample color and marker size can be controlled by label, lookup and calculation (lambda) to generate complex plots highlighting sample separation. For further information see the examples included in the documentation. :param df: Pandas `DataFrame` :param n_components: `int` number of Principal components to return :param mean_center: `bool` mean center the data before performing PCA :param fcol: `dict` of indexers:colors, where colors are hex colors or matplotlib color names :param ecol: `dict` of indexers:colors, where colors are hex colors or matplotlib color names :param marker: `str` matplotlib marker name (default "o") :param markersize: `int` or `callable` which returns an `int` for a given indexer :param threshold: `float` weight threshold for plot (horizontal line) :param label_threshold: `float` weight threshold over which to draw labels :param label_weights: `list` of `str` :param label_scores: `list` of `str` :param return_df: `bool` return the resulting scores, weights as pandas DataFrames :param show_covariance_ellipse: `bool` show the covariance ellipse around each group :param args: additional arguments passed to analysis.pca :param kwargs: additional arguments passed to analysis.pca :return: """ scores, weights = analysis.pca(df, 
n_components=n_components, *args, **kwargs) scores_ax = _pca_scores(scores, fcol=fcol, ecol=ecol, marker=marker, markersize=markersize, label_scores=label_scores, show_covariance_ellipse=show_covariance_ellipse) weights_ax = [] for pc in range(0, weights.shape[1]): weights_ax.append( _pca_weights(weights, pc, threshold=threshold, label_threshold=label_threshold, label_weights=label_weights) ) if return_df: return scores, weights else: return scores_ax, weights_ax
[ "def", "pca", "(", "df", ",", "n_components", "=", "2", ",", "mean_center", "=", "False", ",", "fcol", "=", "None", ",", "ecol", "=", "None", ",", "marker", "=", "'o'", ",", "markersize", "=", "40", ",", "threshold", "=", "None", ",", "label_threshol...
58.568182
40.022727
def network_create(self, name, **kwargs): ''' Create extra private network ''' nt_ks = self.compute_conn kwargs['label'] = name kwargs = self._sanatize_network_params(kwargs) net = nt_ks.networks.create(**kwargs) return net.__dict__
[ "def", "network_create", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "nt_ks", "=", "self", ".", "compute_conn", "kwargs", "[", "'label'", "]", "=", "name", "kwargs", "=", "self", ".", "_sanatize_network_params", "(", "kwargs", ")", "net"...
32
12.444444
def _getPropertyValue(schema, propertyName, options): """Checks to see if property is specified in 'options'. If not, reads the default value from the schema""" if propertyName not in options: paramsSchema = schema['properties'][propertyName] if 'default' in paramsSchema: options[propertyName] = paramsSchema['default'] else: options[propertyName] = None
[ "def", "_getPropertyValue", "(", "schema", ",", "propertyName", ",", "options", ")", ":", "if", "propertyName", "not", "in", "options", ":", "paramsSchema", "=", "schema", "[", "'properties'", "]", "[", "propertyName", "]", "if", "'default'", "in", "paramsSche...
37.7
13
def connect(signal, receiver): """Register `receiver` method/function as a receiver for the `signal`. When the signal is emitted, this receiver will be invoked along with all other associated signals. Args: signal: A signal identifier (e.g., a signal name) receiver: A callable object to connect to the signal. """ __check_receiver(receiver) if __is_bound_method(receiver): ref = WeakMethod else: ref = weakref.ref with __lock: __purge() __receivers[signal].append(ref(receiver))
[ "def", "connect", "(", "signal", ",", "receiver", ")", ":", "__check_receiver", "(", "receiver", ")", "if", "__is_bound_method", "(", "receiver", ")", ":", "ref", "=", "WeakMethod", "else", ":", "ref", "=", "weakref", ".", "ref", "with", "__lock", ":", "...
27.4
20.55
def insert_file(file, media_type): """Upsert the ``file`` and ``media_type`` into the files table. Returns the ``fileid`` and ``sha1`` of the upserted file. """ resource_hash = get_file_sha1(file) with db_connect() as db_conn: with db_conn.cursor() as cursor: cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (resource_hash,)) try: fileid = cursor.fetchone()[0] except (IndexError, TypeError): cursor.execute("INSERT INTO files (file, media_type) " "VALUES (%s, %s)" "RETURNING fileid", (psycopg2.Binary(file.read()), media_type,)) fileid = cursor.fetchone()[0] return fileid, resource_hash
[ "def", "insert_file", "(", "file", ",", "media_type", ")", ":", "resource_hash", "=", "get_file_sha1", "(", "file", ")", "with", "db_connect", "(", ")", "as", "db_conn", ":", "with", "db_conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".",...
43.105263
12.473684
def corner_depots(self) -> Set[Point2]: """ Finds the 2 depot positions on the outside """ if len(self.upper2_for_ramp_wall) == 2: points = self.upper2_for_ramp_wall p1 = points.pop().offset((self.x_offset, self.y_offset)) # still an error with pixelmap? p2 = points.pop().offset((self.x_offset, self.y_offset)) center = p1.towards(p2, p1.distance_to(p2) / 2) depotPosition = self.depot_in_middle # Offset from middle depot to corner depots is (2, 1) intersects = center.circle_intersection(depotPosition, 5 ** 0.5) return intersects raise Exception("Not implemented. Trying to access a ramp that has a wrong amount of upper points.")
[ "def", "corner_depots", "(", "self", ")", "->", "Set", "[", "Point2", "]", ":", "if", "len", "(", "self", ".", "upper2_for_ramp_wall", ")", "==", "2", ":", "points", "=", "self", ".", "upper2_for_ramp_wall", "p1", "=", "points", ".", "pop", "(", ")", ...
62
22.5
def array(self): """ The underlying array of shape (N, L, I) """ return numpy.array([self[sid].array for sid in sorted(self)])
[ "def", "array", "(", "self", ")", ":", "return", "numpy", ".", "array", "(", "[", "self", "[", "sid", "]", ".", "array", "for", "sid", "in", "sorted", "(", "self", ")", "]", ")" ]
30.8
12
def get_by_index(self, index): """ Return a dataset by its index. Args: index (int): The index of the dataset that should be returned. Raises: DataInvalidIndex: If the index does not represent a valid dataset. """ if index >= len(self._datasets): raise DataInvalidIndex('A dataset with index {} does not exist'.format(index)) return self._datasets[index]
[ "def", "get_by_index", "(", "self", ",", "index", ")", ":", "if", "index", ">=", "len", "(", "self", ".", "_datasets", ")", ":", "raise", "DataInvalidIndex", "(", "'A dataset with index {} does not exist'", ".", "format", "(", "index", ")", ")", "return", "s...
33
23.692308
def get_preorder_burn_info( outputs ): """ Given the set of outputs, find the fee sent to our burn address. This is always the third output. Return the fee and burn address on success as {'op_fee': ..., 'burn_address': ...} Return None if not found """ if len(outputs) != 3: # not a well-formed preorder return None op_fee = outputs[2]['value'] burn_address = None try: burn_address = virtualchain.script_hex_to_address(outputs[2]['script']) assert burn_address except: log.error("Not a well-formed preorder burn: {}".format(outputs[2]['script'])) return None return {'op_fee': op_fee, 'burn_address': burn_address}
[ "def", "get_preorder_burn_info", "(", "outputs", ")", ":", "if", "len", "(", "outputs", ")", "!=", "3", ":", "# not a well-formed preorder ", "return", "None", "op_fee", "=", "outputs", "[", "2", "]", "[", "'value'", "]", "burn_address", "=", "None", "try", ...
29.5
22.333333
def run_tpm(tpm, time_scale): """Iterate a TPM by the specified number of time steps. Args: tpm (np.ndarray): A state-by-node tpm. time_scale (int): The number of steps to run the tpm. Returns: np.ndarray """ sbs_tpm = convert.state_by_node2state_by_state(tpm) if sparse(tpm): tpm = sparse_time(sbs_tpm, time_scale) else: tpm = dense_time(sbs_tpm, time_scale) return convert.state_by_state2state_by_node(tpm)
[ "def", "run_tpm", "(", "tpm", ",", "time_scale", ")", ":", "sbs_tpm", "=", "convert", ".", "state_by_node2state_by_state", "(", "tpm", ")", "if", "sparse", "(", "tpm", ")", ":", "tpm", "=", "sparse_time", "(", "sbs_tpm", ",", "time_scale", ")", "else", "...
29.1875
18.0625
def create_vm(client, name, compute_resource, datastore, disksize, nics, memory, num_cpus, guest_id, host=None): """Create a virtual machine using the specified values. :param name: The name of the VM to create. :type name: str :param compute_resource: The name of a ComputeResource in which to \ create the VM. :type compute_resource: str :param datastore: The name of the datastore on which to create the VM. :type datastore: str :param disksize: The size of the disk, specified in KB, MB or GB. e.g. \ 20971520KB, 20480MB, 20GB. :type disksize: str :param nics: The NICs to create, specified in a list of dict's which \ contain a "network_name" and "type" key. e.g. \ {"network_name": "VM Network", "type": "VirtualE1000"} :type nics: list of dict's :param memory: The amount of memory for the VM. Specified in KB, MB or \ GB. e.g. 2097152KB, 2048MB, 2GB. :type memory: str :param num_cpus: The number of CPUs the VM will have. :type num_cpus: int :param guest_id: The vSphere string of the VM guest you are creating. \ The list of VMs can be found at \ http://pubs.vmware.com/vsphere-50/index.jsp?topic=/com.vmware.wssdk.apiref.doc_50/right-pane.html :type guest_id: str :param host: The name of the host (default: None), if you want to \ provision the VM on a \ specific host. :type host: str """ print("Creating VM %s" % name) # If the host is not set, use the ComputeResource as the target if host is None: target = client.find_entity_view("ComputeResource", filter={"name": compute_resource}) resource_pool = target.resourcePool else: target = client.find_entity_view("HostSystem", filter={"name": host}) resource_pool = target.parent.resourcePool disksize_pattern = re.compile("^\d+[KMG]B") if disksize_pattern.match(disksize) is None: print("Disk size %s is invalid. 
Try \"12G\" or similar" % disksize) sys.exit(1) if disksize.endswith("GB"): disksize_kb = int(disksize[:-2]) * 1024 * 1024 elif disksize.endswith("MB"): disksize_kb = int(disksize[:-2]) * 1024 elif disksize.endswith("KB"): disksize_kb = int(disksize[:-2]) else: print("Disk size %s is invalid. Try \"12G\" or similar" % disksize) memory_pattern = re.compile("^\d+[KMG]B") if memory_pattern.match(memory) is None: print("Memory size %s is invalid. Try \"12G\" or similar" % memory) sys.exit(1) if memory.endswith("GB"): memory_mb = int(memory[:-2]) * 1024 elif memory.endswith("MB"): memory_mb = int(memory[:-2]) elif memory.endswith("KB"): memory_mb = int(memory[:-2]) / 1024 else: print("Memory size %s is invalid. Try \"12G\" or similar" % memory) # A list of devices to be assigned to the VM vm_devices = [] # Create a disk controller controller = create_controller(client, "VirtualLsiLogicController") vm_devices.append(controller) ds_to_use = None for ds in target.datastore: if ds.name == datastore: ds_to_use = ds break if ds_to_use is None: print("Could not find datastore on %s with name %s" % (target.name, datastore)) sys.exit(1) # Ensure the datastore is accessible and has enough space if ds_to_use.summary.accessible is not True: print("Datastore (%s) exists, but is not accessible" % ds_to_use.summary.name) sys.exit(1) if ds_to_use.summary.freeSpace < disksize_kb * 1024: print("Datastore (%s) exists, but does not have sufficient" " free space." 
% ds_to_use.summary.name) sys.exit(1) disk = create_disk(client, datastore=ds_to_use, disksize_kb=disksize_kb) vm_devices.append(disk) for nic in nics: nic_spec = create_nic(client, target, nic) if nic_spec is None: print("Could not create spec for NIC") sys.exit(1) # Append the nic spec to the vm_devices list vm_devices.append(nic_spec) vmfi = client.create("VirtualMachineFileInfo") vmfi.vmPathName = "[%s]" % ds_to_use.summary.name vm_config_spec = client.create("VirtualMachineConfigSpec") vm_config_spec.name = name vm_config_spec.memoryMB = memory_mb vm_config_spec.files = vmfi vm_config_spec.annotation = "Auto-provisioned by psphere" vm_config_spec.numCPUs = num_cpus vm_config_spec.guestId = guest_id vm_config_spec.deviceChange = vm_devices # Find the datacenter of the target if target.__class__.__name__ == "HostSystem": datacenter = target.parent.parent.parent else: datacenter = target.parent.parent try: task = datacenter.vmFolder.CreateVM_Task(config=vm_config_spec, pool=resource_pool) except VimFault as e: print("Failed to create %s: " % e) sys.exit() while task.info.state in ["queued", "running"]: time.sleep(5) task.update() print("Waiting 5 more seconds for VM creation") if task.info.state == "success": elapsed_time = task.info.completeTime - task.info.startTime print("Successfully created new VM %s. Server took %s seconds." % (name, elapsed_time.seconds)) elif task.info.state == "error": print("ERROR: The task for creating the VM has finished with" " an error. If an error was reported it will follow.") try: print("ERROR: %s" % task.info.error.localizedMessage) except AttributeError: print("ERROR: There is no error message available.") else: print("UNKNOWN: The task reports an unknown state %s" % task.info.state)
[ "def", "create_vm", "(", "client", ",", "name", ",", "compute_resource", ",", "datastore", ",", "disksize", ",", "nics", ",", "memory", ",", "num_cpus", ",", "guest_id", ",", "host", "=", "None", ")", ":", "print", "(", "\"Creating VM %s\"", "%", "name", ...
38
19.647059
def export_wavefront(mesh, include_normals=True, include_texture=True): """ Export a mesh as a Wavefront OBJ file Parameters ----------- mesh: Trimesh object Returns ----------- export: str, string of OBJ format output """ # store the multiple options for formatting # a vertex index for a face face_formats = {('v',): '{}', ('v', 'vn'): '{}//{}', ('v', 'vt'): '{}/{}', ('v', 'vn', 'vt'): '{}/{}/{}'} # we are going to reference face_formats with this face_type = ['v'] export = 'v ' export += util.array_to_string(mesh.vertices, col_delim=' ', row_delim='\nv ', digits=8) + '\n' if include_normals and 'vertex_normals' in mesh._cache: # if vertex normals are stored in cache export them # these will have been autogenerated if they have ever been called face_type.append('vn') export += 'vn ' export += util.array_to_string(mesh.vertex_normals, col_delim=' ', row_delim='\nvn ', digits=8) + '\n' if (include_texture and 'vertex_texture' in mesh.metadata and len(mesh.metadata['vertex_texture']) == len(mesh.vertices)): # if vertex texture exists and is the right shape export here face_type.append('vt') export += 'vt ' export += util.array_to_string(mesh.metadata['vertex_texture'], col_delim=' ', row_delim='\nvt ', digits=8) + '\n' # the format for a single vertex reference of a face face_format = face_formats[tuple(face_type)] faces = 'f ' + util.array_to_string(mesh.faces + 1, col_delim=' ', row_delim='\nf ', value_format=face_format) # add the exported faces to the export export += faces return export
[ "def", "export_wavefront", "(", "mesh", ",", "include_normals", "=", "True", ",", "include_texture", "=", "True", ")", ":", "# store the multiple options for formatting", "# a vertex index for a face", "face_formats", "=", "{", "(", "'v'", ",", ")", ":", "'{}'", ","...
36.833333
17.4
def _add_span_node_ids_to_token_nodes(self): """ Adds to every token node the list of spans (span node IDs) that it belongs to. TokenNode.spans - a list of `int` ids of `SpanNode`s """ span_dict = defaultdict(list) for span_edge in self._spanning_relation_ids: token_node_id = self.edges[span_edge].target span_node_id = self.edges[span_edge].source span_dict[token_node_id].append(span_node_id) for token_node_id in span_dict: self.nodes[token_node_id].spans = span_dict[token_node_id]
[ "def", "_add_span_node_ids_to_token_nodes", "(", "self", ")", ":", "span_dict", "=", "defaultdict", "(", "list", ")", "for", "span_edge", "in", "self", ".", "_spanning_relation_ids", ":", "token_node_id", "=", "self", ".", "edges", "[", "span_edge", "]", ".", ...
39.066667
16.933333
def userItem(self): """returns a reference to the UserItem class""" if self.ownerFolder is not None: url = "%s/users/%s/%s/items/%s" % (self.root.split('/items/')[0], self.owner,self.ownerFolder, self.id) else: url = "%s/users/%s/items/%s" % (self.root.split('/items/')[0], self.owner, self.id) return UserItem(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "userItem", "(", "self", ")", ":", "if", "self", ".", "ownerFolder", "is", "not", "None", ":", "url", "=", "\"%s/users/%s/%s/items/%s\"", "%", "(", "self", ".", "root", ".", "split", "(", "'/items/'", ")", "[", "0", "]", ",", "self", ".", "own...
53.3
23
def to_web(self, host=None, user=None, password=None): """Send the model to BEL Commons by wrapping :py:func:`pybel.to_web` The parameters ``host``, ``user``, and ``password`` all check the PyBEL configuration, which is located at ``~/.config/pybel/config.json`` by default Parameters ---------- host : Optional[str] The host name to use. If none, first checks the PyBEL configuration entry ``PYBEL_REMOTE_HOST``, then the environment variable ``PYBEL_REMOTE_HOST``. Finally, defaults to https://bel-commons.scai.fraunhofer.de. user : Optional[str] The username (email) to use. If none, first checks the PyBEL configuration entry ``PYBEL_REMOTE_USER``, then the environment variable ``PYBEL_REMOTE_USER``. password : Optional[str] The password to use. If none, first checks the PyBEL configuration entry ``PYBEL_REMOTE_PASSWORD``, then the environment variable ``PYBEL_REMOTE_PASSWORD``. Returns ------- response : requests.Response The response from the BEL Commons network upload endpoint. """ response = pybel.to_web(self.model, host=host, user=user, password=password) return response
[ "def", "to_web", "(", "self", ",", "host", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "response", "=", "pybel", ".", "to_web", "(", "self", ".", "model", ",", "host", "=", "host", ",", "user", "=", "user", ",",...
43.387097
20.516129
def playlist_songs(self, playlist): """Get a listing of songs from a playlist. Paramters: playlist (dict): A playlist dict. Returns: list: Playlist song dicts. """ playlist_type = playlist.get('type') playlist_song_list = [] if playlist_type in ('USER_GENERATED', None): start_token = None playlist_song_list = [] while True: response = self._call( mc_calls.PlaylistEntryFeed, max_results=49995, start_token=start_token ) items = response.body.get('data', {}).get('items', []) if items: playlist_song_list.extend(items) start_token = response.body.get('nextPageToken') if start_token is None: break elif playlist_type == 'SHARED': playlist_share_token = playlist['shareToken'] start_token = None playlist_song_list = [] while True: response = self._call( mc_calls.PlaylistEntriesShared, playlist_share_token, max_results=49995, start_token=start_token ) entry = response.body['entries'][0] items = entry.get('playlistEntry', []) if items: playlist_song_list.extend(items) start_token = entry.get('nextPageToken') if start_token is None: break playlist_song_list.sort(key=itemgetter('absolutePosition')) return playlist_song_list
[ "def", "playlist_songs", "(", "self", ",", "playlist", ")", ":", "playlist_type", "=", "playlist", ".", "get", "(", "'type'", ")", "playlist_song_list", "=", "[", "]", "if", "playlist_type", "in", "(", "'USER_GENERATED'", ",", "None", ")", ":", "start_token"...
22.527273
19.527273
def estimate_angle(coord, angle, new_frame, offset=1e-7): """ https://github.com/astropy/astropy/issues/3093 """ delta = delta_coord(coord, angle, offset) new_coord = coord.transform_to(new_frame) new_delta = delta.transform_to(new_frame) return new_coord.position_angle(new_delta).deg
[ "def", "estimate_angle", "(", "coord", ",", "angle", ",", "new_frame", ",", "offset", "=", "1e-7", ")", ":", "delta", "=", "delta_coord", "(", "coord", ",", "angle", ",", "offset", ")", "new_coord", "=", "coord", ".", "transform_to", "(", "new_frame", ")...
38.25
6.5
def _get_game_type_des(cls, game_type): """ get game type description :param game_type: game type :return: game type description """ if game_type == 'S': return 'Spring Training' elif game_type == 'R': return 'Regular Season' elif game_type == 'F': return 'Wild-card Game' elif game_type == 'D': return 'Divisional Series' elif game_type == 'L': return 'LCS' elif game_type == 'W': return 'World Series' return MlbamConst.UNKNOWN_FULL
[ "def", "_get_game_type_des", "(", "cls", ",", "game_type", ")", ":", "if", "game_type", "==", "'S'", ":", "return", "'Spring Training'", "elif", "game_type", "==", "'R'", ":", "return", "'Regular Season'", "elif", "game_type", "==", "'F'", ":", "return", "'Wil...
30.736842
6.210526
def create(self, data, **kwargs): """Create a new object. Args: data (dict): parameters to send to the server to create the resource **kwargs: Extra options to send to the server (e.g. sudo) Returns: RESTObject, RESTObject: The source and target issues Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request """ self._check_missing_create_attrs(data) server_data = self.gitlab.http_post(self.path, post_data=data, **kwargs) source_issue = ProjectIssue(self._parent.manager, server_data['source_issue']) target_issue = ProjectIssue(self._parent.manager, server_data['target_issue']) return source_issue, target_issue
[ "def", "create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_missing_create_attrs", "(", "data", ")", "server_data", "=", "self", ".", "gitlab", ".", "http_post", "(", "self", ".", "path", ",", "post_data", "=", "d...
41.304348
21.217391
def add_model(self, ic, N=1, index=0): """ Should only be able to do this to a leaf node. Either N and index both integers OR index is list of length=N """ if type(index) in [list,tuple]: if len(index) != N: raise ValueError('If a list, index must be of length N.') else: index = [index]*N for idx in index: existing = self.get_system(idx) tag = len(existing) self.add_child(ModelNode(ic, index=idx, tag=tag))
[ "def", "add_model", "(", "self", ",", "ic", ",", "N", "=", "1", ",", "index", "=", "0", ")", ":", "if", "type", "(", "index", ")", "in", "[", "list", ",", "tuple", "]", ":", "if", "len", "(", "index", ")", "!=", "N", ":", "raise", "ValueError...
31.470588
14.882353
def replace(self, infile): '''Replace: 任意の箇所のバイト列と 同サイズの任意のバイト列を入れ換える ''' gf = infile[31:] same_size_index = [] while len(same_size_index) <= 1: index = random.randint(0,len(gf)-1) index_len = len(gf[index]) same_size_index = [i for (i,g) in enumerate(gf) if len(g) == index_len] else: same_size_index = random.choice(same_size_index[:]) gf[index], gf[same_size_index] = gf[same_size_index], gf[index] return infile[:31] + gf
[ "def", "replace", "(", "self", ",", "infile", ")", ":", "gf", "=", "infile", "[", "31", ":", "]", "same_size_index", "=", "[", "]", "while", "len", "(", "same_size_index", ")", "<=", "1", ":", "index", "=", "random", ".", "randint", "(", "0", ",", ...
40.384615
17.153846
def raise_for_status(self): '''Raise Postmark-specific HTTP errors. If there isn't one, the standard HTTP error is raised. HTTP 401 raises :class:`UnauthorizedError` HTTP 422 raises :class:`UnprocessableEntityError` HTTP 500 raises :class:`InternalServerError` ''' if self.status_code == 401: raise UnauthorizedError(self._requests_response) elif self.status_code == 422: raise UnprocessableEntityError(self._requests_response) elif self.status_code == 500: raise InternalServerError(self._requests_response) return self._requests_response.raise_for_status()
[ "def", "raise_for_status", "(", "self", ")", ":", "if", "self", ".", "status_code", "==", "401", ":", "raise", "UnauthorizedError", "(", "self", ".", "_requests_response", ")", "elif", "self", ".", "status_code", "==", "422", ":", "raise", "UnprocessableEntity...
38.882353
19.470588
def get_object(self, resource_url): """Get remote resource information. Creates a local directory for the resource if this is the first access to the resource. Downloads the resource Json representation and writes it into a .json file in the cache directory. Raises ValueError if resource is not cached and does not exist. If the resource no longer exists on the server but in the local cache, a reference to the local copy is returned and the value of the is_active flag is False. Parameters ---------- cache_id : string Unique cache identifier resource_url : string Url of the resource Returns ------- (string, Json, Boolean, string) Returns a 4-tuple containing local resource directory, the Json object representing the resource, an active flag indicating if the resource still exists on the remote server or only in the local cache, and the resource unique cache identifier. """ # Check if resource is in local cache. If not, create a new cache # identifier and set is_cached flag to false if resource_url in self.cache: cache_id = self.cache[resource_url] else: cache_id = str(uuid.uuid4()) # The local cahce directory for resource is given by cache identifier obj_dir = os.path.join(self.directory, cache_id) # File for local copy of object's Json representation f_json = os.path.join(obj_dir, '.json') # Object active flag is_active = True # Read the remote resource representation try: obj_json = sco.JsonResource(resource_url).json # Save local copy of Json object. Create local resource directory if # it doesn't exist if not os.path.isdir(obj_dir): os.mkdir(obj_dir) with open(f_json, 'w') as f: json.dump(obj_json, f) except ValueError as ex: # If the resource does not exists but we have a local copy then read # object from local disk. Set is_active flag to false. 
Raise # ValueError if no local copy exists if os.path.isfile(f_json): with open(f_json, 'r') as f: obj_json = json.load(f) is_active = False else: raise ex # Return object directory, Json, active flag, and cache identifier return obj_dir, obj_json, is_active, cache_id
[ "def", "get_object", "(", "self", ",", "resource_url", ")", ":", "# Check if resource is in local cache. If not, create a new cache", "# identifier and set is_cached flag to false", "if", "resource_url", "in", "self", ".", "cache", ":", "cache_id", "=", "self", ".", "cache"...
43.338983
18.59322
def check_public_permissions(payload): """Raise ``PermissionDenied`` if public permissions are too open.""" allowed_public_permissions = ['view', 'add', 'download'] for perm_type in ['add', 'remove']: for perm in payload.get('public', {}).get(perm_type, []): if perm not in allowed_public_permissions: raise exceptions.PermissionDenied("Permissions for public users are too open")
[ "def", "check_public_permissions", "(", "payload", ")", ":", "allowed_public_permissions", "=", "[", "'view'", ",", "'add'", ",", "'download'", "]", "for", "perm_type", "in", "[", "'add'", ",", "'remove'", "]", ":", "for", "perm", "in", "payload", ".", "get"...
60.285714
16.571429
def write_genotypes(self, genotypes): """Write genotypes to binary file. Args: genotypes (numpy.ndarray): The genotypes to write in the BED file. """ if self._mode != "w": raise UnsupportedOperation("not available in 'r' mode") # Initializing the number of samples if required if self._nb_values is None: self._nb_values = len(genotypes) # Checking the expected number of samples if self._nb_values != len(genotypes): raise ValueError("{:,d} samples expected, got {:,d}".format( self._nb_values, len(genotypes), )) # Writing to file byte_array = [ g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6) for g in self._grouper((_byte_recode[geno] for geno in genotypes), 4) ] self._bed.write(bytearray(byte_array))
[ "def", "write_genotypes", "(", "self", ",", "genotypes", ")", ":", "if", "self", ".", "_mode", "!=", "\"w\"", ":", "raise", "UnsupportedOperation", "(", "\"not available in 'r' mode\"", ")", "# Initializing the number of samples if required", "if", "self", ".", "_nb_v...
33.185185
20.333333
def flairlist(self, r, limit=1000, after=None, before=None): """Login required. Gets flairlist for subreddit `r`. See https://github.com/reddit/reddit/wiki/API%3A-flairlist. However, the wiki docs are wrong (as of 2012/5/4). Returns :class:`things.ListBlob` of :class:`things.Blob` objects, each object being a mapping with `user`, `flair\_css\_class`, and `flair\_text` attributes. URL: ``http://www.reddit.com/r/<r>/api/flairlist`` :param r: name of subreddit :param limit: max number of items to return :param after: full id of user to return entries after :param before: full id of user to return entries *before* """ params = dict(limit=limit) if after: params['after'] = after elif before: params['before'] = before b = self.get('r', r, 'api', 'flairlist', params=params) return b.users
[ "def", "flairlist", "(", "self", ",", "r", ",", "limit", "=", "1000", ",", "after", "=", "None", ",", "before", "=", "None", ")", ":", "params", "=", "dict", "(", "limit", "=", "limit", ")", "if", "after", ":", "params", "[", "'after'", "]", "=",...
49.105263
24.842105
def lazy_import(path, pattern=None): """ Import a single file or collection of files. :param path: A path to a data file (remote or local). :param pattern: Character string containing a regular expression to match file(s) in the folder. :returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if importing multiple files. """ assert_is_type(path, str, [str]) assert_is_type(pattern, str, None) paths = [path] if is_type(path, str) else path return _import_multi(paths, pattern)
[ "def", "lazy_import", "(", "path", ",", "pattern", "=", "None", ")", ":", "assert_is_type", "(", "path", ",", "str", ",", "[", "str", "]", ")", "assert_is_type", "(", "pattern", ",", "str", ",", "None", ")", "paths", "=", "[", "path", "]", "if", "i...
43
16.846154
def show_bokehjs(bokehjs_action, develop=False): ''' Print a useful report after setuptools output describing where and how BokehJS is installed. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree develop (bool, optional) : whether the command was for "develop" mode (default: False) Returns: None ''' print() if develop: print("Installed Bokeh for DEVELOPMENT:") else: print("Installed Bokeh:") if bokehjs_action in ['built', 'installed']: print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if bokehjs_action=='built' else bright(yellow("PREVIOUSLY")))) else: print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED"))) print()
[ "def", "show_bokehjs", "(", "bokehjs_action", ",", "develop", "=", "False", ")", ":", "print", "(", ")", "if", "develop", ":", "print", "(", "\"Installed Bokeh for DEVELOPMENT:\"", ")", "else", ":", "print", "(", "\"Installed Bokeh:\"", ")", "if", "bokehjs_actio...
35.28
31.52
def persist(self, name, project=None, drop_model=False, **kwargs): """ Persist the execution into a new model. :param name: model name :param project: name of the project :param drop_model: drop model before creation """ return super(ODPSModelExpr, self).persist(name, project=project, drop_model=drop_model, **kwargs)
[ "def", "persist", "(", "self", ",", "name", ",", "project", "=", "None", ",", "drop_model", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "ODPSModelExpr", ",", "self", ")", ".", "persist", "(", "name", ",", "project", "=", ...
40.777778
18.111111
def viewAt(self, point): """ Looks up the view at the inputed point. :param point | <QtCore.QPoint> :return <projexui.widgets.xviewwidget.XView> || None """ widget = self.childAt(point) if widget: return projexui.ancestor(widget, XView) else: return None
[ "def", "viewAt", "(", "self", ",", "point", ")", ":", "widget", "=", "self", ".", "childAt", "(", "point", ")", "if", "widget", ":", "return", "projexui", ".", "ancestor", "(", "widget", ",", "XView", ")", "else", ":", "return", "None" ]
26.230769
16.230769
def read(path, corpus=True, index_by='wosid', streaming=False, parse_only=None, corpus_class=Corpus, **kwargs): """ Parse one or more WoS field-tagged data files. Examples -------- .. code-block:: python >>> from tethne.readers import wos >>> corpus = wos.read("/path/to/some/wos/data") >>> corpus <tethne.classes.corpus.Corpus object at 0x10057c2d0> Parameters ---------- path : str Path to WoS field-tagged data. Can be a path directly to a single data file, or to a directory containing several data files. corpus : bool If True (default), returns a :class:`.Corpus`\. If False, will return only a list of :class:`.Paper`\s. Returns ------- :class:`.Corpus` or :class:`.Paper` """ if not os.path.exists(path): raise ValueError('No such file or directory') # We need the primary index field in the parse results. if parse_only: parse_only.append(index_by) if streaming: return streaming_read(path, corpus=corpus, index_by=index_by, parse_only=parse_only, **kwargs) if os.path.isdir(path): # Directory containing 1+ WoS data files. papers = [] for sname in os.listdir(path): if sname.endswith('txt') and not sname.startswith('.'): papers += read(os.path.join(path, sname), corpus=False, parse_only=parse_only) else: # A single data file. papers = WoSParser(path).parse(parse_only=parse_only) if corpus: return corpus_class(papers, index_by=index_by, **kwargs) return papers
[ "def", "read", "(", "path", ",", "corpus", "=", "True", ",", "index_by", "=", "'wosid'", ",", "streaming", "=", "False", ",", "parse_only", "=", "None", ",", "corpus_class", "=", "Corpus", ",", "*", "*", "kwargs", ")", ":", "if", "not", "os", ".", ...
32.038462
22.153846
def set_status(self, status, msg): """ Set and return the status of the task. Args: status: Status object or string representation of the status msg: string with human-readable message used in the case of errors. """ # truncate string if it's long. msg will be logged in the object and we don't want to waste memory. if len(msg) > 2000: msg = msg[:2000] msg += "\n... snip ...\n" # Locked files must be explicitly unlocked if self.status == self.S_LOCKED or status == self.S_LOCKED: err_msg = ( "Locked files must be explicitly unlocked before calling set_status but\n" "task.status = %s, input status = %s" % (self.status, status)) raise RuntimeError(err_msg) status = Status.as_status(status) changed = True if hasattr(self, "_status"): changed = (status != self._status) self._status = status if status == self.S_RUN: # Set datetimes.start when the task enters S_RUN if self.datetimes.start is None: self.datetimes.start = datetime.datetime.now() # Add new entry to history only if the status has changed. if changed: if status == self.S_SUB: self.datetimes.submission = datetime.datetime.now() self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % ( self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg)) elif status == self.S_OK: self.history.info("Task completed %s", msg) elif status == self.S_ABICRITICAL: self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg) else: self.history.info("Status changed to %s. msg: %s", status, msg) ####################################################### # The section belows contains callbacks that should not # be executed if we are in spectator_mode ####################################################### if status == self.S_DONE: # Execute the callback self._on_done() if status == self.S_OK: # Finalize the task. if not self.finalized: self._on_ok() # here we remove the output files of the task and of its parents. 
if self.gc is not None and self.gc.policy == "task": self.clean_output_files() if self.status == self.S_OK: # Because _on_ok might have changed the status. self.send_signal(self.S_OK) return status
[ "def", "set_status", "(", "self", ",", "status", ",", "msg", ")", ":", "# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.", "if", "len", "(", "msg", ")", ">", "2000", ":", "msg", "=", "msg", "[", ":", "2000", "]", ...
37.901408
22.661972
def make_nameko_helper(config): """Create a fake module that provides some convenient access to nameko standalone functionality for interactive shell usage. """ module = ModuleType('nameko') module.__doc__ = """Nameko shell helper for making rpc calls and dispatching events. Usage: >>> n.rpc.service.method() "reply" >>> n.dispatch_event('service', 'event_type', 'event_data') """ proxy = ClusterRpcProxy(config) module.rpc = proxy.start() module.dispatch_event = event_dispatcher(config) module.config = config module.disconnect = proxy.stop return module
[ "def", "make_nameko_helper", "(", "config", ")", ":", "module", "=", "ModuleType", "(", "'nameko'", ")", "module", ".", "__doc__", "=", "\"\"\"Nameko shell helper for making rpc calls and dispatching\nevents.\n\nUsage:\n >>> n.rpc.service.method()\n \"reply\"\n\n >>> n.dispa...
29.8
15.6
def convert_tags_to_dict(item): """ Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs to a dict of {<key>: <value>} for easier querying. This returns a proxied object over given item to return a different tags format as the tags attribute is read-only and we cannot modify it directly. """ if hasattr(item, 'tags'): tags = item.tags if isinstance(tags, list): tags_dict = {} for kv_dict in tags: if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict: tags_dict[kv_dict['Key']] = kv_dict['Value'] return ObjectProxy(item, tags=tags_dict) return item
[ "def", "convert_tags_to_dict", "(", "item", ")", ":", "if", "hasattr", "(", "item", ",", "'tags'", ")", ":", "tags", "=", "item", ".", "tags", "if", "isinstance", "(", "tags", ",", "list", ")", ":", "tags_dict", "=", "{", "}", "for", "kv_dict", "in",...
41.764706
20.823529
def zones(self): """ Return a new raw REST interface to zone resources :rtype: :py:class:`ns1.rest.zones.Zones` """ import ns1.rest.zones return ns1.rest.zones.Zones(self.config)
[ "def", "zones", "(", "self", ")", ":", "import", "ns1", ".", "rest", ".", "zones", "return", "ns1", ".", "rest", ".", "zones", ".", "Zones", "(", "self", ".", "config", ")" ]
27.5
13.5
def make_posthook(self): """ Run the post hook into the project directory. """ print(id(self.posthook), self.posthook) print(id(super(self.__class__, self).posthook), super(self.__class__, self).posthook) import ipdb;ipdb.set_trace() if self.posthook: os.chdir(self.project_name) # enter the project main directory self.posthook()
[ "def", "make_posthook", "(", "self", ")", ":", "print", "(", "id", "(", "self", ".", "posthook", ")", ",", "self", ".", "posthook", ")", "print", "(", "id", "(", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "posthook", ")", ",", ...
48.375
17.75
def allow(self, ctx, ops): ''' Checks that the authorizer's request is authorized to perform all the given operations. Note that allow does not check first party caveats - if there is more than one macaroon that may authorize the request, it will choose the first one that does regardless. If all the operations are allowed, an AuthInfo is returned holding details of the decision and any first party caveats that must be checked before actually executing any operation. If operations include LOGIN_OP, the request should contain an authentication macaroon proving the client's identity. Once an authentication macaroon is chosen, it will be used for all other authorization requests. If an operation was not allowed, an exception will be raised which may be: - DischargeRequiredError holding the operations that remain to be authorized in order to allow authorization to proceed - PermissionDenied when no operations can be authorized and there's no third party to discharge macaroons for. @param ctx AuthContext @param ops an array of Op :return: an AuthInfo object. ''' auth_info, _ = self.allow_any(ctx, ops) return auth_info
[ "def", "allow", "(", "self", ",", "ctx", ",", "ops", ")", ":", "auth_info", ",", "_", "=", "self", ".", "allow_any", "(", "ctx", ",", "ops", ")", "return", "auth_info" ]
43.233333
25.833333
def gifs_trending_get(self, api_key, **kwargs): """ Trending GIFs Endpoint Fetch GIFs currently trending online. Hand curated by the GIPHY editorial team. The data returned mirrors the GIFs showcased on the <a href = \"http://www.giphy.com\">GIPHY homepage</a>. Returns 25 results by default. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.gifs_trending_get(api_key, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str api_key: Giphy API Key. (required) :param int limit: The maximum number of records to return. :param str rating: Filters results by specified rating. :param str fmt: Used to indicate the expected response format. Default is Json. :return: InlineResponse200 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.gifs_trending_get_with_http_info(api_key, **kwargs) else: (data) = self.gifs_trending_get_with_http_info(api_key, **kwargs) return data
[ "def", "gifs_trending_get", "(", "self", ",", "api_key", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "gifs_trending_get_with_h...
52.142857
24.428571
def configureLogging(self): """Configure logging for nose, or optionally other packages. Any logger name may be set with the debug option, and that logger will be set to debug level and be assigned the same handler as the nose loggers, unless it already has a handler. """ if self.loggingConfig: from logging.config import fileConfig fileConfig(self.loggingConfig) return format = logging.Formatter('%(name)s: %(levelname)s: %(message)s') if self.debugLog: handler = logging.FileHandler(self.debugLog) else: handler = logging.StreamHandler(self.logStream) handler.setFormatter(format) logger = logging.getLogger('nose') logger.propagate = 0 # only add our default handler if there isn't already one there # this avoids annoying duplicate log messages. if handler not in logger.handlers: logger.addHandler(handler) # default level lvl = logging.WARNING if self.verbosity >= 5: lvl = 0 elif self.verbosity >= 4: lvl = logging.DEBUG elif self.verbosity >= 3: lvl = logging.INFO logger.setLevel(lvl) # individual overrides if self.debug: # no blanks debug_loggers = [ name for name in self.debug.split(',') if name ] for logger_name in debug_loggers: l = logging.getLogger(logger_name) l.setLevel(logging.DEBUG) if not l.handlers and not logger_name.startswith('nose'): l.addHandler(handler)
[ "def", "configureLogging", "(", "self", ")", ":", "if", "self", ".", "loggingConfig", ":", "from", "logging", ".", "config", "import", "fileConfig", "fileConfig", "(", "self", ".", "loggingConfig", ")", "return", "format", "=", "logging", ".", "Formatter", "...
36.369565
16.195652
def get_crimes_location(self, location_id, date=None): """ Get crimes at a particular snap-point location. Uses the crimes-at-location_ API call. .. _crimes-at-location: https://data.police.uk/docs/method/crimes-at-location/ :rtype: list :param int location_id: The ID of the location to get crimes for. :param date: The month in which the crimes were reported in the format ``YYYY-MM`` (the latest date is used if ``None``). :type date: str or None :return: A ``list`` of :class:`Crime` objects which were snapped to the :class:`Location` with the specified ID in the given month. """ kwargs = { 'location_id': location_id, } crimes = [] if date is not None: kwargs['date'] = date for c in self.service.request('GET', 'crimes-at-location', **kwargs): crimes.append(Crime(self, data=c)) return crimes
[ "def", "get_crimes_location", "(", "self", ",", "location_id", ",", "date", "=", "None", ")", ":", "kwargs", "=", "{", "'location_id'", ":", "location_id", ",", "}", "crimes", "=", "[", "]", "if", "date", "is", "not", "None", ":", "kwargs", "[", "'date...
38.115385
21.423077
def GetColLabelValue(self, col): """ Get col label from dataframe """ if len(self.dataframe): return self.dataframe.columns[col] return ''
[ "def", "GetColLabelValue", "(", "self", ",", "col", ")", ":", "if", "len", "(", "self", ".", "dataframe", ")", ":", "return", "self", ".", "dataframe", ".", "columns", "[", "col", "]", "return", "''" ]
26.285714
7.142857
def _slice_cov(self, cov): """ Slice the correct dimensions for use in the kernel, as indicated by `self.active_dims` for covariance matrices. This requires slicing the rows *and* columns. This will also turn flattened diagonal matrices into a tensor of full diagonal matrices. :param cov: Tensor of covariance matrices (NxDxD or NxD). :return: N x self.input_dim x self.input_dim. """ cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov) if isinstance(self.active_dims, slice): cov = cov[..., self.active_dims, self.active_dims] else: cov_shape = tf.shape(cov) covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]]) gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims) gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims) cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]), tf.concat([cov_shape[:-2], [len(self.active_dims), len(self.active_dims)]], 0)) return cov
[ "def", "_slice_cov", "(", "self", ",", "cov", ")", ":", "cov", "=", "tf", ".", "cond", "(", "tf", ".", "equal", "(", "tf", ".", "rank", "(", "cov", ")", ",", "2", ")", ",", "lambda", ":", "tf", ".", "matrix_diag", "(", "cov", ")", ",", "lambd...
52.904762
25.761905
def export_translations(request, language): """ Export translations view. """ FieldTranslation.delete_orphan_translations() translations = FieldTranslation.objects.filter(lang=language) for trans in translations: trans.source_text = trans.source_text.replace("'","\'").replace("\"","\\\"") trans.translation = trans.translation.replace("'","\'").replace("\"","\\\"") replacements = {"translations":translations, "lang":language} if len(settings.ADMINS)>0: replacements["last_translator"] = settings.ADMINS[0][0] replacements["last_translator_email"] = settings.ADMINS[0][1] if settings.WEBSITE_NAME: replacements["website_name"] = settings.WEBSITE_NAME response = render(request=request, template_name='modeltranslation/admin/export_translations.po', dictionary=replacements, context_instance=RequestContext(request), content_type="text/x-gettext-translation") response['Content-Disposition'] = 'attachment; filename="{0}.po"'.format(language) return response
[ "def", "export_translations", "(", "request", ",", "language", ")", ":", "FieldTranslation", ".", "delete_orphan_translations", "(", ")", "translations", "=", "FieldTranslation", ".", "objects", ".", "filter", "(", "lang", "=", "language", ")", "for", "trans", "...
53.555556
26.222222
def init_distributed(cuda): """ Initializes distributed backend. :param cuda: (bool) if True initializes nccl backend, if False initializes gloo backend """ world_size = int(os.environ.get('WORLD_SIZE', 1)) distributed = (world_size > 1) if distributed: backend = 'nccl' if cuda else 'gloo' dist.init_process_group(backend=backend, init_method='env://') assert dist.is_initialized() return distributed
[ "def", "init_distributed", "(", "cuda", ")", ":", "world_size", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "'WORLD_SIZE'", ",", "1", ")", ")", "distributed", "=", "(", "world_size", ">", "1", ")", "if", "distributed", ":", "backend", "=", ...
32.266667
13.466667
def serialize_tag(tag, *, indent=None, compact=False, quote=None): """Serialize an nbt tag to its literal representation.""" serializer = Serializer(indent=indent, compact=compact, quote=quote) return serializer.serialize(tag)
[ "def", "serialize_tag", "(", "tag", ",", "*", ",", "indent", "=", "None", ",", "compact", "=", "False", ",", "quote", "=", "None", ")", ":", "serializer", "=", "Serializer", "(", "indent", "=", "indent", ",", "compact", "=", "compact", ",", "quote", ...
58.75
15.5
def peek(self, size=-1): """ Return bytes from the stream without advancing the position. Args: size (int): Number of bytes to read. -1 to read the full stream. Returns: bytes: bytes read """ if not self._readable: raise UnsupportedOperation('read') with self._seek_lock: self._raw.seek(self._seek) return self._raw._peek(size)
[ "def", "peek", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "not", "self", ".", "_readable", ":", "raise", "UnsupportedOperation", "(", "'read'", ")", "with", "self", ".", "_seek_lock", ":", "self", ".", "_raw", ".", "seek", "(", "self", ...
26.235294
17.647059
def from_jsons(graph_json_str: str, check_version: bool = True) -> BELGraph: """Read a BEL graph from a Node-Link JSON string.""" graph_json_dict = json.loads(graph_json_str) return from_json(graph_json_dict, check_version=check_version)
[ "def", "from_jsons", "(", "graph_json_str", ":", "str", ",", "check_version", ":", "bool", "=", "True", ")", "->", "BELGraph", ":", "graph_json_dict", "=", "json", ".", "loads", "(", "graph_json_str", ")", "return", "from_json", "(", "graph_json_dict", ",", ...
61.5
17.5
def format_help_text(self, ctx, formatter): """Writes the help text to the formatter if it exists.""" if self.help: formatter.write_paragraph() with formatter.indentation(): formatter.write_text(self.help)
[ "def", "format_help_text", "(", "self", ",", "ctx", ",", "formatter", ")", ":", "if", "self", ".", "help", ":", "formatter", ".", "write_paragraph", "(", ")", "with", "formatter", ".", "indentation", "(", ")", ":", "formatter", ".", "write_text", "(", "s...
42.666667
5.166667
def build(matrix): """Yield lines generated from given matrix""" max_x = max(matrix, key=lambda t: t[0])[0] min_x = min(matrix, key=lambda t: t[0])[0] max_y = max(matrix, key=lambda t: t[1])[1] min_y = min(matrix, key=lambda t: t[1])[1] yield from ( # '{}:'.format(j).ljust(4) + ''.join(matrix[i, j] for i in range(min_x, max_x+1)) ''.join(matrix[i, j] for i in range(min_x, max_x+1)) for j in range(min_y, max_y+1) )
[ "def", "build", "(", "matrix", ")", ":", "max_x", "=", "max", "(", "matrix", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", "[", "0", "]", "min_x", "=", "min", "(", "matrix", ",", "key", "=", "lambda", "t", ":", "t", "[", "0"...
41.727273
16
def _makes_clone(_func, *args, **kw): """ A decorator that returns a clone of the current object so that we can re-use the object for similar requests. """ self = args[0]._clone() _func(self, *args[1:], **kw) return self
[ "def", "_makes_clone", "(", "_func", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "self", "=", "args", "[", "0", "]", ".", "_clone", "(", ")", "_func", "(", "self", ",", "*", "args", "[", "1", ":", "]", ",", "*", "*", "kw", ")", "return...
30.125
10.625
def QuickAddEvent(self, event_text, reminders=None): """Wrapper around Google Calendar API's quickAdd""" if not event_text: raise GcalcliError('event_text is required for a quickAdd') if len(self.cals) != 1: # TODO: get a better name for this exception class # and use it elsewhere raise GcalcliError('You must only specify a single calendar\n') new_event = self._retry_with_backoff( self.get_cal_service() .events() .quickAdd( calendarId=self.cals[0]['id'], text=event_text ) ) if reminders or not self.options['default_reminders']: rem = {} rem['reminders'] = {'useDefault': False, 'overrides': []} for r in reminders: n, m = utils.parse_reminder(r) rem['reminders']['overrides'].append({'minutes': n, 'method': m}) new_event = self._retry_with_backoff( self.get_cal_service() .events() .patch( calendarId=self.cals[0]['id'], eventId=new_event['id'], body=rem ) ) if self.details.get('url'): hlink = new_event['htmlLink'] self.printer.msg('New event added: %s\n' % hlink, 'green') return new_event
[ "def", "QuickAddEvent", "(", "self", ",", "event_text", ",", "reminders", "=", "None", ")", ":", "if", "not", "event_text", ":", "raise", "GcalcliError", "(", "'event_text is required for a quickAdd'", ")", "if", "len", "(", "self", ".", "cals", ")", "!=", "...
37.697674
17.534884
def load_suite_from_stdin(self): """Load a test suite with test lines from the TAP stream on STDIN. :returns: A ``unittest.TestSuite`` instance """ suite = unittest.TestSuite() rules = Rules("stream", suite) line_generator = self._parser.parse_stdin() return self._load_lines("stream", line_generator, suite, rules)
[ "def", "load_suite_from_stdin", "(", "self", ")", ":", "suite", "=", "unittest", ".", "TestSuite", "(", ")", "rules", "=", "Rules", "(", "\"stream\"", ",", "suite", ")", "line_generator", "=", "self", ".", "_parser", ".", "parse_stdin", "(", ")", "return",...
40.444444
11.888889
def project_activities(self, project_id_or_key, extra_query_params={}): """ client = BacklogClient("your_space_name", "your_api_key") client.project_activities("YOUR_PROJECT") client.project_activities("YOUR_PROJECT", {"activityTypeId[]": [1, 2],}) """ return self.do("get", "projects/{project_id_or_key}/activities", url_params={"project_id_or_key": project_id_or_key}, query_params=extra_query_params, )
[ "def", "project_activities", "(", "self", ",", "project_id_or_key", ",", "extra_query_params", "=", "{", "}", ")", ":", "return", "self", ".", "do", "(", "\"get\"", ",", "\"projects/{project_id_or_key}/activities\"", ",", "url_params", "=", "{", "\"project_id_or_key...
51.3
20.3
def remove_bgp(self, **kwargs): """Remove BGP process completely. Args: vrf (str): The VRF for this BGP process. rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: None Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.local_asn(local_as='65535', ... rbridge_id='225') ... output = dev.bgp.remove_bgp(rbridge_id='225') """ vrf = kwargs.pop('vrf', 'default') rbridge_id = kwargs.pop('rbridge_id', '1') callback = kwargs.pop('callback', self._callback) bgp_args = dict(vrf_name=vrf, rbridge_id=rbridge_id) config = self._rbridge.rbridge_id_router_bgp_vrf_name(**bgp_args) config.find('.//*bgp').set('operation', 'delete') return callback(config)
[ "def", "remove_bgp", "(", "self", ",", "*", "*", "kwargs", ")", ":", "vrf", "=", "kwargs", ".", "pop", "(", "'vrf'", ",", "'default'", ")", "rbridge_id", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ",", "'1'", ")", "callback", "=", "kwargs", ".",...
38.088235
19.5
def tss(self, up=0, down=0): """ Return a start, end tuple of positions around the transcription-start site Parameters ---------- up : int if greature than 0, the strand is used to add this many upstream bases in the appropriate direction down : int if greature than 0, the strand is used to add this many downstream bases into the gene. """ if not self.is_gene_pred: return None tss = self.txEnd if self.strand == '-' else self.txStart start, end = tss, tss if self.strand == '+': start -= up end += down else: start += up end -= down start, end = end, start return max(0, start), max(end, start, 0)
[ "def", "tss", "(", "self", ",", "up", "=", "0", ",", "down", "=", "0", ")", ":", "if", "not", "self", ".", "is_gene_pred", ":", "return", "None", "tss", "=", "self", ".", "txEnd", "if", "self", ".", "strand", "==", "'-'", "else", "self", ".", "...
29.37037
19.666667
def token_permission_view(token): """Show permission garanted to authorized application token.""" scopes = [current_oauth2server.scopes[x] for x in token.scopes] return render_template( "invenio_oauth2server/settings/token_permission_view.html", token=token, scopes=scopes, )
[ "def", "token_permission_view", "(", "token", ")", ":", "scopes", "=", "[", "current_oauth2server", ".", "scopes", "[", "x", "]", "for", "x", "in", "token", ".", "scopes", "]", "return", "render_template", "(", "\"invenio_oauth2server/settings/token_permission_view....
38.5
18.375
async def update(self, _id=None, **new_data): """Updates fields values. Accepts id of sigle entry and fields with values. update(id, **kwargs) => {"success":200, "reason":"Updated"} (if success) update(id, **kwargs) => {"error":400, "reason":"Missed required fields"} (if error) """ if not _id or not new_data: return {"error":400, "reason":"Missed required fields"} document = await self.collection.find_one({"id":_id}) if not document: return {"error":404, "reason":"Not found"} for key in new_data: await self.collection.find_one_and_update( {"id": _id}, {"$set": {key: new_data[key]}} ) updated = await self.collection.find_one({"id":_id}) return {"success":200, "reason": "Updated", **updated}
[ "async", "def", "update", "(", "self", ",", "_id", "=", "None", ",", "*", "*", "new_data", ")", ":", "if", "not", "_id", "or", "not", "new_data", ":", "return", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Missed required fields\"", "}", "d...
31.041667
18.125
def security_rule_absent(name, security_group, resource_group, connection_auth=None): ''' .. versionadded:: 2019.2.0 Ensure a security rule does not exist in the network security group. :param name: Name of the security rule. :param security_group: The network security group containing the security rule. :param resource_group: The resource group assigned to the network security group. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. ''' ret = { 'name': name, 'result': False, 'comment': '', 'changes': {} } if not isinstance(connection_auth, dict): ret['comment'] = 'Connection information must be specified via connection_auth dictionary!' return ret rule = __salt__['azurearm_network.security_rule_get']( name, security_group, resource_group, azurearm_log_level='info', **connection_auth ) if 'error' in rule: ret['result'] = True ret['comment'] = 'Security rule {0} was not found.'.format(name) return ret elif __opts__['test']: ret['comment'] = 'Security rule {0} would be deleted.'.format(name) ret['result'] = None ret['changes'] = { 'old': rule, 'new': {}, } return ret deleted = __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth) if deleted: ret['result'] = True ret['comment'] = 'Security rule {0} has been deleted.'.format(name) ret['changes'] = { 'old': rule, 'new': {} } return ret ret['comment'] = 'Failed to delete security rule {0}!'.format(name) return ret
[ "def", "security_rule_absent", "(", "name", ",", "security_group", ",", "resource_group", ",", "connection_auth", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", "...
28.092308
26.553846
def without_extra_phrases(self): """Removes parenthethical and dashed phrases""" # the last parenthesis is optional, because sometimes they are truncated name = re.sub(r'\s*\([^)]*\)?\s*$', '', self.name) name = re.sub(r'(?i)\s* formerly.*$', '', name) name = re.sub(r'(?i)\s*and its affiliates$', '', name) name = re.sub(r'\bet al\b', '', name) # in some datasets, the name of an organization is followed by a hyphen and an abbreviated name, or a specific # department or geographic subdivision; we want to remove this extraneous stuff without breaking names like # Wal-Mart or Williams-Sonoma # if there's a hyphen at least four characters in, proceed if "-" in name: hyphen_parts = name.rsplit("-", 1) # if the part after the hyphen is shorter than the part before, # AND isn't either a number (often occurs in Union names) or a single letter (e.g., Tech-X), # AND the hyphen is preceded by either whitespace or at least four characters, # discard the hyphen and whatever follows if len(hyphen_parts[1]) < len(hyphen_parts[0]) and re.search(r'(\w{4,}|\s+)$', hyphen_parts[0]) and not re.match(r'^([a-zA-Z]|[0-9]+)$', hyphen_parts[1]): name = hyphen_parts[0].strip() return name
[ "def", "without_extra_phrases", "(", "self", ")", ":", "# the last parenthesis is optional, because sometimes they are truncated", "name", "=", "re", ".", "sub", "(", "r'\\s*\\([^)]*\\)?\\s*$'", ",", "''", ",", "self", ".", "name", ")", "name", "=", "re", ".", "sub"...
58.826087
32.173913
def build_input_pipeline(x_train, x_test, y_train, y_test, batch_size, valid_size): """Build an Iterator switching between train and heldout data.""" x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train /= 255 x_test /= 255 y_train = y_train.flatten() y_test = y_test.flatten() if FLAGS.subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print("x_train shape:" + str(x_train.shape)) print(str(x_train.shape[0]) + " train samples") print(str(x_test.shape[0]) + " test samples") # Build an iterator over training batches. training_dataset = tf.data.Dataset.from_tensor_slices( (x_train, np.int32(y_train))) training_batches = training_dataset.shuffle( 50000, reshuffle_each_iteration=True).repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) # Build a iterator over the heldout set with batch_size=heldout_size, # i.e., return the entire heldout set as a constant. heldout_dataset = tf.data.Dataset.from_tensor_slices( (x_test, np.int32(y_test))) heldout_batches = heldout_dataset.repeat().batch(valid_size) heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_batches) # Combine these into a feedable iterator that can switch between training # and validation inputs. handle = tf.compat.v1.placeholder(tf.string, shape=[]) feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle( handle, training_batches.output_types, training_batches.output_shapes) images, labels = feedable_iterator.get_next() return images, labels, handle, training_iterator, heldout_iterator
[ "def", "build_input_pipeline", "(", "x_train", ",", "x_test", ",", "y_train", ",", "y_test", ",", "batch_size", ",", "valid_size", ")", ":", "x_train", "=", "x_train", ".", "astype", "(", "\"float32\"", ")", "x_test", "=", "x_test", ".", "astype", "(", "\"...
38.681818
20.931818
def begin_update(self, x_data, drop=0.0):
    """Return the output of the wrapped PyTorch model for the given input,
    along with a callback to handle the backward pass.

    Note: ``drop`` is accepted for interface compatibility but is not
    used inside this method.
    """
    # Wrap the input so gradients w.r.t. the input are tracked.
    x_var = torch.autograd.Variable(xp2torch(x_data), requires_grad=True)
    # Make prediction
    y_var = self._model(x_var)

    def backward_pytorch(dy_data, sgd=None):
        # Backpropagate the incoming output-gradient through the model.
        dy_var = xp2torch(dy_data)
        torch.autograd.backward((y_var,), grad_tensors=(dy_var,))
        if sgd is not None:
            # Lazily build the torch optimizer from the caller's optimizer
            # object, then apply and clear the accumulated gradients.
            if self._optimizer is None:
                self._optimizer = self._create_optimizer(sgd)
            self._optimizer.step()
            self._optimizer.zero_grad()
        # Gradient with respect to the input, converted back from torch.
        return torch2xp(x_var.grad)

    return torch2xp(y_var), backward_pytorch
[ "def", "begin_update", "(", "self", ",", "x_data", ",", "drop", "=", "0.0", ")", ":", "x_var", "=", "torch", ".", "autograd", ".", "Variable", "(", "xp2torch", "(", "x_data", ")", ",", "requires_grad", "=", "True", ")", "# Make prediction", "y_var", "=",...
39.3
14.35
def get_start_time(self, file_path):
    """
    Look up when processing began for a given file.

    :param file_path: the path to the file that's being processed
    :type file_path: unicode
    :return: the start time of the process that's processing the
        specified file, or None if the file is not currently being
        processed
    :rtype: datetime
    """
    try:
        return self._processors[file_path].start_time
    except KeyError:
        return None
[ "def", "get_start_time", "(", "self", ",", "file_path", ")", ":", "if", "file_path", "in", "self", ".", "_processors", ":", "return", "self", ".", "_processors", "[", "file_path", "]", ".", "start_time", "return", "None" ]
40.636364
14.818182
def find_clique_embedding(k, m=None, target_graph=None):
    """Find an embedding of a k-sized clique on a Pegasus graph (target_graph).

    This clique is found by transforming the Pegasus graph into a K2,2 Chimera graph
    and then applying a Chimera clique finding algorithm. The results are then
    converted back in terms of Pegasus coordinates.

    Note:
        If target_graph is None, m will be used to generate a m-by-m Pegasus graph.
        Hence m and target_graph cannot both be None.

    Args:
        k (int/iterable/:obj:`networkx.Graph`): Number of members in the requested
            clique; list of nodes; a complete graph that you want to embed onto the
            target_graph
        m (int): Number of tiles in a row of a square Pegasus graph
        target_graph (:obj:`networkx.Graph`): A Pegasus graph

    Returns:
        dict: A dictionary representing target_graphs's clique embedding. Each
            dictionary key represents a node in said clique. Each corresponding
            dictionary value is a list of pegasus coordinates that should be chained
            together to represent said node.
    """
    # Organize parameter values
    if target_graph is None:
        if m is None:
            raise TypeError("m and target_graph cannot both be None.")
        target_graph = pegasus_graph(m)

    m = target_graph.graph['rows']     # We only support square Pegasus graphs

    # NOTE(review): unpacks k as a 2-tuple (label, node-list) -- presumably
    # normalized by a decorator/helper before this body runs; confirm.
    _, nodes = k

    # Deal with differences in ints vs coordinate target_graphs
    if target_graph.graph['labels'] == 'nice':
        # "nice" labels: convert to Pegasus coordinates for the search, and
        # convert each chain back to nice coordinates at the end.
        fwd_converter = get_nice_to_pegasus_fn(m = m)
        back_converter = get_pegasus_to_nice_fn(m = m)
        pegasus_coords = [fwd_converter(*p) for p in target_graph.nodes]
        back_translate = lambda embedding: {key: [back_converter(*p) for p in chain]
                                          for key, chain in embedding.items()}
    elif target_graph.graph['labels'] == 'int':
        # Convert nodes in terms of Pegasus coordinates
        coord_converter = pegasus_coordinates(m)
        pegasus_coords = map(coord_converter.tuple, target_graph.nodes)

        # A function to convert our final coordinate embedding to an ints embedding
        back_translate = lambda embedding: {key: list(coord_converter.ints(chain))
                                          for key, chain in embedding.items()}
    else:
        # Already coordinate-labelled; no translation needed.
        pegasus_coords = target_graph.nodes
        back_translate = lambda embedding: embedding

    # Break each Pegasus qubits into six Chimera fragments
    # Note: By breaking the graph in this way, you end up with a K2,2 Chimera graph
    fragment_tuple = get_tuple_fragmentation_fn(target_graph)
    fragments = fragment_tuple(pegasus_coords)

    # Create a K2,2 Chimera graph
    # Note: 6 * m because Pegasus qubits split into six pieces, so the number of rows
    # and columns get multiplied by six
    chim_m = 6 * m
    chim_graph = chimera_graph(chim_m, t=2, coordinates=True)

    # Determine valid fragment couplers in a K2,2 Chimera graph
    edges = chim_graph.subgraph(fragments).edges()

    # Find clique embedding in K2,2 Chimera graph
    embedding_processor = processor(edges, M=chim_m, N=chim_m, L=2, linear=False)
    chimera_clique_embedding = embedding_processor.tightestNativeClique(len(nodes))

    # Convert chimera fragment embedding in terms of Pegasus coordinates
    defragment_tuple = get_tuple_defragmentation_fn(target_graph)
    pegasus_clique_embedding = map(defragment_tuple, chimera_clique_embedding)
    pegasus_clique_embedding = dict(zip(nodes, pegasus_clique_embedding))
    pegasus_clique_embedding = back_translate(pegasus_clique_embedding)

    # An embedding with fewer chains than requested nodes means no clique fit.
    if len(pegasus_clique_embedding) != len(nodes):
        raise ValueError("No clique embedding found")

    return pegasus_clique_embedding
[ "def", "find_clique_embedding", "(", "k", ",", "m", "=", "None", ",", "target_graph", "=", "None", ")", ":", "# Organize parameter values", "if", "target_graph", "is", "None", ":", "if", "m", "is", "None", ":", "raise", "TypeError", "(", "\"m and target_graph ...
47.217949
28.141026
def delete_gauge(self, slug):
    """Removes all gauges with the given ``slug``."""
    gauge_key = self._gauge_key(slug)
    # Drop the stored gauge value, then unregister the slug itself.
    self.r.delete(gauge_key)
    self.r.srem(self._gauge_slugs_key, slug)
[ "def", "delete_gauge", "(", "self", ",", "slug", ")", ":", "key", "=", "self", ".", "_gauge_key", "(", "slug", ")", "self", ".", "r", ".", "delete", "(", "key", ")", "# Remove the Gauge", "self", ".", "r", ".", "srem", "(", "self", ".", "_gauge_slugs...
43
6
def asl_obctrl_encode(self, timestamp, uElev, uThrot, uThrot2, uAilL, uAilR, uRud, obctrl_status):
    '''
    Off-board controls/commands for ASLUAVs

    Builds (but does not send) a MAVLink_asl_obctrl_message from the
    given field values.

    timestamp                 : Time since system start [us] (uint64_t)
    uElev                     : Elevator command [~] (float)
    uThrot                    : Throttle command [~] (float)
    uThrot2                   : Throttle 2 command [~] (float)
    uAilL                     : Left aileron command [~] (float)
    uAilR                     : Right aileron command [~] (float)
    uRud                      : Rudder command [~] (float)
    obctrl_status             : Off-board computer status (uint8_t)
    '''
    # Pure constructor wrapper: field order must match the message class.
    return MAVLink_asl_obctrl_message(timestamp, uElev, uThrot, uThrot2, uAilL, uAilR, uRud, obctrl_status)
[ "def", "asl_obctrl_encode", "(", "self", ",", "timestamp", ",", "uElev", ",", "uThrot", ",", "uThrot2", ",", "uAilL", ",", "uAilR", ",", "uRud", ",", "obctrl_status", ")", ":", "return", "MAVLink_asl_obctrl_message", "(", "timestamp", ",", "uElev", ",", "uTh...
60.866667
37.133333
def get_default_config_help(self):
    """
    Return help text for collector configuration.
    """
    # Start from the parent collector's help entries and add ours.
    help_text = super(MemoryLxcCollector, self).get_default_config_help()
    help_text["sys_path"] = "Defaults to '/sys/fs/cgroup/lxc'"
    return help_text
[ "def", "get_default_config_help", "(", "self", ")", ":", "config_help", "=", "super", "(", "MemoryLxcCollector", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config_help", ".", "update", "(", "{", "\"sys_path\"", ":", "\"Defaults to '/sys/fs/cgroup/l...
34.555556
14.777778
def pformat(
    object,
    indent=_UNSET_SENTINEL,
    width=_UNSET_SENTINEL,
    depth=_UNSET_SENTINEL,
    *,
    ribbon_width=_UNSET_SENTINEL,
    max_seq_len=_UNSET_SENTINEL,
    compact=_UNSET_SENTINEL,
    sort_dict_keys=_UNSET_SENTINEL
):
    """
    Returns a pretty printed representation of the object as a ``str``.
    Accepts the same parameters as :func:`~prettyprinter.pprint`.
    The output is not colored.
    """
    # NOTE(review): ``compact`` is accepted above but never forwarded to
    # _merge_defaults below, so it is silently ignored -- confirm whether
    # it should be passed through.
    sdocs = python_to_sdocs(
        object,
        **_merge_defaults(
            indent=indent,
            width=width,
            depth=depth,
            ribbon_width=ribbon_width,
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys,
        )
    )
    # Render the intermediate sdoc stream into an in-memory text buffer.
    stream = StringIO()
    default_render_to_stream(stream, sdocs)
    return stream.getvalue()
[ "def", "pformat", "(", "object", ",", "indent", "=", "_UNSET_SENTINEL", ",", "width", "=", "_UNSET_SENTINEL", ",", "depth", "=", "_UNSET_SENTINEL", ",", "*", ",", "ribbon_width", "=", "_UNSET_SENTINEL", ",", "max_seq_len", "=", "_UNSET_SENTINEL", ",", "compact",...
26.133333
15.733333
def get_config(self, slot, config_id):
    """Get a config variable assignment previously set on this sensor graph.

    Args:
        slot (SlotIdentifier): The slot that we are setting this config variable
            on.
        config_id (int): The 16-bit config variable identifier.

    Returns:
        (str, str|int): Returns a tuple with the type of the config variable and
            the value that is being set.

    Raises:
        ArgumentError: If the config variable is not currently set on the
            specified slot.
    """
    if slot not in self.config_database:
        raise ArgumentError("No config variables have been set on specified slot", slot=slot)

    slot_settings = self.config_database[slot]
    if config_id not in slot_settings:
        raise ArgumentError("Config variable has not been set on specified slot", slot=slot, config_id=config_id)

    return slot_settings[config_id]
[ "def", "get_config", "(", "self", ",", "slot", ",", "config_id", ")", ":", "if", "slot", "not", "in", "self", ".", "config_database", ":", "raise", "ArgumentError", "(", "\"No config variables have been set on specified slot\"", ",", "slot", "=", "slot", ")", "i...
39.333333
28.708333
def transliterate(table, text):
    """
    Transliterate text according to one of the tables above.

    `table` chooses the table. It looks like a language code but comes from
    a very restricted set:

    - 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the
      Latin alphabet.
    - 'az-Latn' means the same for Azerbaijani Cyrillic to Latn.
    """
    # Guard-clause dispatch; unknown table names are an error.
    if table == 'az-Latn':
        return text.translate(AZ_LATN_TABLE)
    if table == 'sr-Latn':
        return text.translate(SR_LATN_TABLE)
    raise ValueError("Unknown transliteration table: {!r}".format(table))
[ "def", "transliterate", "(", "table", ",", "text", ")", ":", "if", "table", "==", "'sr-Latn'", ":", "return", "text", ".", "translate", "(", "SR_LATN_TABLE", ")", "elif", "table", "==", "'az-Latn'", ":", "return", "text", ".", "translate", "(", "AZ_LATN_TA...
35
20.176471
def _get_clause_words( sentence_text, clause_id ):
    '''
    Collects clause with index *clause_id* from given *sentence_text*.
    Returns a pair (clause, isEmbedded), where:
    *clause* is a list of word tokens in the clause;
    *isEmbedded* is a bool indicating whether the clause is embedded;
    '''
    clause_indices = sentence_text.clause_indices
    annotations = sentence_text.clause_annotations
    words = []
    embedded = False
    for word_id, word in enumerate(sentence_text[WORDS]):
        if clause_indices[word_id] != clause_id:
            continue
        # The first word of the clause determines whether it is embedded.
        if not words and annotations[word_id] == EMBEDDED_CLAUSE_START:
            embedded = True
        words.append((word_id, word))
    return words, embedded
[ "def", "_get_clause_words", "(", "sentence_text", ",", "clause_id", ")", ":", "clause", "=", "[", "]", "isEmbedded", "=", "False", "indices", "=", "sentence_text", ".", "clause_indices", "clause_anno", "=", "sentence_text", ".", "clause_annotations", "for", "wid",...
44.125
16.375
def check_render_pipe_str(pipestr, renderers, blacklist, whitelist):
    '''
    Check that all renderers specified in the pipe string are available.
    If so, return the list of render functions in the pipe as
    (render_func, arg_str) tuples; otherwise return [].
    '''
    if pipestr is None:
        return []
    segments = [seg.strip() for seg in pipestr.split('|')]
    # Note: currently, | is not allowed anywhere in the shebang line except
    # as pipes between renderers.
    pipeline = []
    try:
        if segments[0] == pipestr and pipestr in OLD_STYLE_RENDERERS:
            segments = OLD_STYLE_RENDERERS[pipestr].split('|')
        for segment in segments:
            name, argline = (segment + ' ').split(' ', 1)
            # A whitelist restricts to its members; a blacklist excludes.
            disallowed = (whitelist and name not in whitelist) or \
                (blacklist and name in blacklist)
            if disallowed:
                log.warning(
                    'The renderer "%s" is disallowed by configuration and '
                    'will be skipped.', name
                )
                continue
            pipeline.append((renderers[name], argline.strip()))
        return pipeline
    except KeyError:
        log.error('The renderer "%s" is not available', pipestr)
        return []
[ "def", "check_render_pipe_str", "(", "pipestr", ",", "renderers", ",", "blacklist", ",", "whitelist", ")", ":", "if", "pipestr", "is", "None", ":", "return", "[", "]", "parts", "=", "[", "r", ".", "strip", "(", ")", "for", "r", "in", "pipestr", ".", ...
39.466667
21.4
def for_category(self, category, live_only=False):
    """
    Returns queryset of EntryTag instances for specified category.

    :param category: the Category instance.
    :param live_only: flag to include only "live" entries.
    :rtype: django.db.models.query.QuerySet.
    """
    lookup = {'tag': category.tag}
    if live_only:
        lookup['entry__live'] = True
    return self.filter(**lookup)
[ "def", "for_category", "(", "self", ",", "category", ",", "live_only", "=", "False", ")", ":", "filters", "=", "{", "'tag'", ":", "category", ".", "tag", "}", "if", "live_only", ":", "filters", ".", "update", "(", "{", "'entry__live'", ":", "True", "}"...
31.785714
16.357143
def write_fits(self, data, outfile, extname="SKYMAP", clobber=True):
    """ Write input data to a FITS file

    data    : The data being stored
    outfile : The name of the output file
    extname : The HDU extension name
    clobber : True -> overwrite existing files
    """
    hdu_prim = fits.PrimaryHDU()
    hdu_hpx = self.make_hdu(data, extname=extname)
    hl = [hdu_prim, hdu_hpx]

    # Attach an energy HDU matching the file convention, if one applies.
    # Fix: previously hdu_energy was left unbound when self.conv.energy_hdu
    # was neither 'EBOUNDS' nor 'ENERGIES', raising NameError at the
    # `is not None` check below; default it to None instead.
    hdu_energy = None
    if self.conv.energy_hdu == 'EBOUNDS':
        hdu_energy = self.make_energy_bounds_hdu()
    elif self.conv.energy_hdu == 'ENERGIES':
        hdu_energy = self.make_energies_hdu()

    if hdu_energy is not None:
        hl.append(hdu_energy)

    hdulist = fits.HDUList(hl)
    hdulist.writeto(outfile, overwrite=clobber)
[ "def", "write_fits", "(", "self", ",", "data", ",", "outfile", ",", "extname", "=", "\"SKYMAP\"", ",", "clobber", "=", "True", ")", ":", "hdu_prim", "=", "fits", ".", "PrimaryHDU", "(", ")", "hdu_hpx", "=", "self", ".", "make_hdu", "(", "data", ",", ...
41.157895
10