text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def search_tags(self, *args, **kwargs):
    """
    Run an elasticsearch query against the tag endpoint.

    Args:
        query (string): elasticsearch string query
        order_by (optional[string]): property by which to order results
        offset (optional[int]): number of results to skip for pagination (default=0)
        limit (optional[int]): how many results to return (default=50)
        timeout (optional[int]): how long to wait for response (in seconds)
    Returns:
        result of query search on tags
    """
    # Delegate to the shared search helper with the tag-specific suffix.
    suffix = self._TAG_ENDPOINT_SUFFIX
    return self._search_metrics_and_metadata(suffix, *args, **kwargs)
[ "def", "search_tags", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_search_metrics_and_metadata", "(", "self", ".", "_TAG_ENDPOINT_SUFFIX", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
41.0625
22.8125
def wrap_line(line, limit=None, chars=80):
    """Wrap the specified line of text on whitespace so that none of the
    returned lines is longer than 'chars' characters (except for a single
    word that is itself longer than 'chars').

    Args:
        line: the text to wrap.
        limit: if given, only the first `limit` characters of `line` are
            considered.
        chars: maximum length of each output line.

    Returns:
        list of wrapped line strings. An empty/whitespace-only input
        yields [''].
    """
    if limit is not None:
        line = line[:limit]
    result = []
    builder = []
    length = 0
    for word in line.split():
        # Flush the current line BEFORE adding a word that would overflow.
        # (The previous version checked the length before accounting for
        # the new word and reset `length` to 0 after a flush, dropping the
        # pending word's length, so lines could exceed `chars`.)
        if builder and length + len(word) > chars:
            result.append(' '.join(builder))
            builder = []
            length = 0
        builder.append(word)
        length += len(word) + 1
    result.append(' '.join(builder))
    return result
[ "def", "wrap_line", "(", "line", ",", "limit", "=", "None", ",", "chars", "=", "80", ")", ":", "result", "=", "[", "]", "builder", "=", "[", "]", "length", "=", "0", "if", "limit", "is", "not", "None", ":", "sline", "=", "line", "[", "0", ":", ...
26
15.478261
def get_word_input(data, word_dict, embed, embed_dim):
    '''
    Build padded word-embedding input tensors for a batch.

    Args:
        data: list of samples; each sample is a list of dicts with a 'word' key.
        word_dict: mapping from lowercased word to embedding row index.
        embed: embedding matrix indexed by the values of `word_dict`.
        embed_dim: dimensionality of each embedding vector.

    Returns:
        word_input: float32 array of shape (max_len * batch, embed_dim).
        ids: int32 array (max_len, batch) of word indices (0 for OOV/padding).
        masks: float32 array (max_len, batch); 1 where a known word is present.
        lengths: int32 array (batch,) with each sample's length.
    '''
    batch_size = len(data)
    max_sequence_length = max(len(d) for d in data)
    sequence_length = max_sequence_length
    word_input = np.zeros((max_sequence_length, batch_size, embed_dim),
                          dtype=np.float32)
    ids = np.zeros((sequence_length, batch_size), dtype=np.int32)
    masks = np.zeros((sequence_length, batch_size), dtype=np.float32)
    lengths = np.zeros([batch_size], dtype=np.int32)
    for batch_idx in range(0, min(len(data), batch_size)):
        batch_data = data[batch_idx]
        lengths[batch_idx] = len(batch_data)
        for sample_idx in range(0, min(len(batch_data), sequence_length)):
            word = batch_data[sample_idx]['word'].lower()
            # Single lookup instead of `word in word_dict.keys()` followed
            # by three more `word_dict[word]` lookups.
            word_id = word_dict.get(word)
            if word_id is not None:
                word_input[sample_idx, batch_idx] = embed[word_id]
                ids[sample_idx, batch_idx] = word_id
                masks[sample_idx, batch_idx] = 1
    word_input = np.reshape(word_input, (-1, embed_dim))
    return word_input, ids, masks, lengths
[ "def", "get_word_input", "(", "data", ",", "word_dict", ",", "embed", ",", "embed_dim", ")", ":", "batch_size", "=", "len", "(", "data", ")", "max_sequence_length", "=", "max", "(", "len", "(", "d", ")", "for", "d", "in", "data", ")", "sequence_length", ...
40.407407
20.037037
def number(self):
    # type: () -> int
    """ Return this commits number.

    This is the same as the total number of commits in history up until
    this commit. This value can be useful in some CI scenarios as it
    allows to track progress on any given branch (although there can be
    two commits with the same number existing on different branches).

    Returns:
        int: The commit number/index.
    """
    # Count the one-line log entries up to (and including) this sha1.
    log_output = shell.run(
        'git log --oneline {}'.format(self.sha1),
        capture=True,
        never_pretend=True
    ).stdout.strip()
    return len(log_output.splitlines())
[ "def", "number", "(", "self", ")", ":", "# type: () -> int", "cmd", "=", "'git log --oneline {}'", ".", "format", "(", "self", ".", "sha1", ")", "out", "=", "shell", ".", "run", "(", "cmd", ",", "capture", "=", "True", ",", "never_pretend", "=", "True", ...
36.352941
22.352941
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):
    '''
    Summarize input document.

    Args:
        test_arr:               `np.ndarray` of observed data points.
        vectorizable_token:     is-a `VectorizableToken`.
        sentence_list:          `list` of all sentences.
        limit:                  The number of selected abstract sentences.

    Returns:
        `list` of `str` of abstract sentences.
    '''
    if isinstance(vectorizable_token, VectorizableToken) is False:
        raise TypeError()
    _ = self.inference(test_arr)
    _, loss_arr, _ = self.compute_retrospective_loss()
    abstract_list = []
    # Fix: walk the `limit` lowest-loss rows via a single argsort.
    # The previous pop-and-argmin loop removed entries from a copy of the
    # loss list but kept using the shrunken indices to address the original
    # `test_arr`, so every pick after the first selected the wrong row.
    for key in np.asarray(loss_arr).argsort()[:limit]:
        seq_arr = test_arr[key]
        token_arr = vectorizable_token.tokenize(seq_arr.tolist())
        s = " ".join(token_arr.tolist())
        _s = "".join(token_arr.tolist())
        for sentence in sentence_list:
            if s in sentence or _s in sentence:
                abstract_list.append(sentence)
        abstract_list = list(set(abstract_list))
        if len(abstract_list) >= limit:
            break
    return abstract_list
[ "def", "summarize", "(", "self", ",", "test_arr", ",", "vectorizable_token", ",", "sentence_list", ",", "limit", "=", "5", ")", ":", "if", "isinstance", "(", "vectorizable_token", ",", "VectorizableToken", ")", "is", "False", ":", "raise", "TypeError", "(", ...
34.439024
20.487805
def is_offsetlike(arr_or_obj):
    """
    Check if obj or all elements of list-like is DateOffset

    Parameters
    ----------
    arr_or_obj : object

    Returns
    -------
    boolean
        Whether the object is a DateOffset or listlike of DatetOffsets

    Examples
    --------
    >>> is_offsetlike(pd.DateOffset(days=1))
    True
    >>> is_offsetlike('offset')
    False
    >>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])
    True
    >>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))
    False
    """
    # A single DateOffset qualifies directly.
    if isinstance(arr_or_obj, ABCDateOffset):
        return True
    # Otherwise only a non-empty object-dtype list-like can qualify,
    # and then only if every element is a DateOffset.
    if (is_list_like(arr_or_obj) and len(arr_or_obj)
            and is_object_dtype(arr_or_obj)):
        return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)
    return False
[ "def", "is_offsetlike", "(", "arr_or_obj", ")", ":", "if", "isinstance", "(", "arr_or_obj", ",", "ABCDateOffset", ")", ":", "return", "True", "elif", "(", "is_list_like", "(", "arr_or_obj", ")", "and", "len", "(", "arr_or_obj", ")", "and", "is_object_dtype", ...
26.033333
23.1
def user_organisations(cls, user_id, state=None, include_deactivated=False):
    """
    Get organisations that the user has joined

    :param user_id: the user ID
    :param state: the user's "join" state
    :param include_deactivated: Include deactivated resources in response
    :returns: list of Organisation instances
    :raises: SocketError, CouchException
    """
    if state and state not in validators.VALID_STATES:
        raise exceptions.ValidationError('Invalid "state"')
    # Pick the view matching the deactivation filter, then query it once.
    if include_deactivated:
        view = views.joined_organisations
    else:
        view = views.active_joined_organisations
    organisations = yield view.get(key=[user_id, state], include_docs=True)
    raise Return([cls(**row['doc']) for row in organisations['rows']])
[ "def", "user_organisations", "(", "cls", ",", "user_id", ",", "state", "=", "None", ",", "include_deactivated", "=", "False", ")", ":", "if", "state", "and", "state", "not", "in", "validators", ".", "VALID_STATES", ":", "raise", "exceptions", ".", "Validatio...
42.142857
20.238095
def _parse_coverage(header_str): """Attempts to retrieve the coverage value from the header string. It splits the header by "_" and then screens the list backwards in search of the first float value. This will be interpreted as the coverage value. If it cannot find a float value, it returns None. This search methodology is based on the strings of assemblers like spades and skesa that put the mean kmer coverage for each contig in its corresponding fasta header. Parameters ---------- header_str : str String Returns ------- float or None The coverage value for the contig. None if it cannot find the value in the provide string. """ cov = None for i in header_str.split("_")[::-1]: try: cov = float(i) break except ValueError: continue return cov
[ "def", "_parse_coverage", "(", "header_str", ")", ":", "cov", "=", "None", "for", "i", "in", "header_str", ".", "split", "(", "\"_\"", ")", "[", ":", ":", "-", "1", "]", ":", "try", ":", "cov", "=", "float", "(", "i", ")", "break", "except", "Val...
31.225806
22.16129
def add_virtualip(self, lb, vip):
    """Adds the VirtualIP to the specified load balancer."""
    uri = "/loadbalancers/%s/virtualips" % lb.id
    resp, body = self.api.method_post(uri, body=vip.to_dict())
    return resp, body
[ "def", "add_virtualip", "(", "self", ",", "lb", ",", "vip", ")", ":", "resp", ",", "body", "=", "self", ".", "api", ".", "method_post", "(", "\"/loadbalancers/%s/virtualips\"", "%", "lb", ".", "id", ",", "body", "=", "vip", ".", "to_dict", "(", ")", ...
47.6
13.6
def execute(self):
    """ Entry point to the execution of the program. """
    # Refuse to run if nothing has been registered.
    if not self.commands or self._parser is None:
        raise NotImplementedError(
            "No commands registered with this program!"
        )
    # Parse the command-line arguments.
    args = self.parser.parse_args()
    try:
        handle_default_args(args)
        if not hasattr(args, 'func'):
            # No subcommand supplied: show usage and exit cleanly.
            self.parser.print_help()
            self.exit(0)
        # Dispatch to the selected subcommand and exit with its message.
        msg = args.func(args)
        msg = "{}\n".format(msg) if msg else ''
        self.exit(0, msg)
    except Exception as e:
        # Optionally dump the traceback, then exit non-zero with the
        # error rendered in red.
        if hasattr(args, 'traceback') and args.traceback:
            traceback.print_exc()
        self.exit(1, color.format(str(e), color.RED))
[ "def", "execute", "(", "self", ")", ":", "# Ensure that we have commands registered", "if", "not", "self", ".", "commands", "or", "self", ".", "_parser", "is", "None", ":", "raise", "NotImplementedError", "(", "\"No commands registered with this program!\"", ")", "# H...
35.709677
23.064516
def get_container_instance_group(access_token, subscription_id, resource_group,
                                 container_group_name):
    '''Get the JSON definition of a container group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.

    Returns:
        HTTP response. JSON body of container group.
    '''
    endpoint = (
        '{}/subscriptions/{}/resourcegroups/{}'
        '/providers/Microsoft.ContainerInstance/ContainerGroups/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group,
             container_group_name, CONTAINER_API)
    return do_get(endpoint, access_token)
[ "def", "get_container_instance_group", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "container_group_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_...
44.55
21.75
def cosine_similarity(evaluated_model, reference_model):
    """
    Computes cosine similarity of two text documents. Each document
    has to be represented as TF model of non-empty document.

    :returns float:
        0 <= cos <= 1, where 0 means independence and 1 means
        exactly the same.
    """
    if not (isinstance(evaluated_model, TfModel)
            and isinstance(reference_model, TfModel)):
        raise ValueError(
            "Arguments has to be instances of 'sumy.models.TfDocumentModel'")

    # Dot product over the union of both vocabularies.
    terms = frozenset(evaluated_model.terms) | frozenset(reference_model.terms)
    numerator = sum(
        (evaluated_model.term_frequency(term)
         * reference_model.term_frequency(term) for term in terms),
        0.0)
    denominator = evaluated_model.magnitude * reference_model.magnitude
    if denominator == 0.0:
        raise ValueError("Document model can't be empty. Given %r & %r" % (
            evaluated_model, reference_model))
    return numerator / denominator
[ "def", "cosine_similarity", "(", "evaluated_model", ",", "reference_model", ")", ":", "if", "not", "(", "isinstance", "(", "evaluated_model", ",", "TfModel", ")", "and", "isinstance", "(", "reference_model", ",", "TfModel", ")", ")", ":", "raise", "ValueError", ...
38.52
25.96
def add_task(self, fn, inputs=None, outputs=None):
    """
    Adds a task to the workflow.

    :param fn: the callable the task executes
    :param inputs: optional inputs consumed by the task
    :param outputs: optional outputs produced by the task
    :returns: self, to facilitate chaining method calls
    """
    # Removed the commented-out dict-based predecessor of this line;
    # Task instances are the canonical representation now.
    self.tasks.append(Task(fn, inputs, outputs))
    return self
[ "def", "add_task", "(", "self", ",", "fn", ",", "inputs", "=", "None", ",", "outputs", "=", "None", ")", ":", "# self.tasks.append({'task': task, 'inputs': inputs, 'outputs': outputs})", "self", ".", "tasks", ".", "append", "(", "Task", "(", "fn", ",", "inputs",...
35.111111
16
def shutdown(self):
    """ Shut down the entire application. """
    logging.info("Shutting down")
    # Tear down UI and background components in order before quitting
    # the application loop.
    self.closeAllWindows()
    self.notifier.hide()
    self.service.shutdown()
    self.monitor.stop()
    self.quit()
    # TODO: maybe use atexit to remove the lock/pid file?
    os.remove(common.LOCK_FILE)
    logging.debug("All shutdown tasks complete... quitting")
[ "def", "shutdown", "(", "self", ")", ":", "logging", ".", "info", "(", "\"Shutting down\"", ")", "self", ".", "closeAllWindows", "(", ")", "self", ".", "notifier", ".", "hide", "(", ")", "self", ".", "service", ".", "shutdown", "(", ")", "self", ".", ...
34
13.666667
def plot(self, hist=False, show=False, **kwargs):
    """
    Plot the distribution of the UncertainFunction. By default, the
    distribution is shown with a kernel density estimate (kde).

    Optional
    --------
    hist : bool
        If true, a density histogram is displayed (histtype='stepfilled')
    show : bool
        If ``True``, the figure will be displayed after plotting the
        distribution. If ``False``, an explicit call to ``plt.show()`` is
        required to display the figure.

    kwargs : any valid matplotlib.pyplot.plot or .hist kwarg
    """
    import matplotlib.pyplot as plt

    vals = self._mcpts
    low = min(vals)
    high = max(vals)
    if hist:
        counts, _, _ = plt.hist(
            vals,
            bins=int(np.sqrt(len(vals)) + 0.5),
            histtype="stepfilled",
            # 'normed' was removed in matplotlib >= 3.1; 'density' is the
            # drop-in replacement with the same semantics.
            density=True,
            **kwargs
        )
        plt.ylim(0, 1.1 * counts.max())
    else:
        # Build the KDE only when it is actually plotted (the previous
        # version computed it even on the histogram path). gaussian_kde
        # is exposed at the top of scipy.stats; the ss.kde submodule
        # path is deprecated/removed in recent scipy releases.
        kde = ss.gaussian_kde(vals)
        xp = np.linspace(low, high, 100)
        plt.plot(xp, kde.evaluate(xp), **kwargs)
    plt.xlim(low - (high - low) * 0.1, high + (high - low) * 0.1)
    if show:
        self.show()
[ "def", "plot", "(", "self", ",", "hist", "=", "False", ",", "show", "=", "False", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "vals", "=", "self", ".", "_mcpts", "low", "=", "min", "(", "vals", ")", "hig...
30.390244
20.487805
def phmmer(**kwargs):
    """Search a protein sequence against a HMMER sequence database.

    Arguments:
    seq -- The sequence to search -- a Fasta string.
    seqdb -- Sequence database to search against.
    range -- A string range of results to return (ie. 1,10 for the first ten)
    output -- The output format (defaults to JSON).
    """
    logging.debug(kwargs)
    # Split the kwargs into the query payload and the presentation options.
    query_args = {
        'seq': kwargs.get('seq'),
        'seqdb': kwargs.get('seqdb'),
    }
    format_args = {
        'output': kwargs.get('output', 'json'),
        'range': kwargs.get('range'),
    }
    return _hmmer("http://hmmer.janelia.org/search/phmmer",
                  query_args, format_args)
[ "def", "phmmer", "(", "*", "*", "kwargs", ")", ":", "logging", ".", "debug", "(", "kwargs", ")", "args", "=", "{", "'seq'", ":", "kwargs", ".", "get", "(", "'seq'", ")", ",", "'seqdb'", ":", "kwargs", ".", "get", "(", "'seqdb'", ")", "}", "args2"...
41.2
15.2
def _wrap(text, wrap_max=80, indent=4): """Wrap text at given width using textwrap module. text (unicode): Text to wrap. If it's a Path, it's converted to string. wrap_max (int): Maximum line length (indent is deducted). indent (int): Number of spaces for indentation. RETURNS (unicode): Wrapped text. """ indent = indent * ' ' wrap_width = wrap_max - len(indent) if isinstance(text, Path): text = path2str(text) return textwrap.fill(text, width=wrap_width, initial_indent=indent, subsequent_indent=indent, break_long_words=False, break_on_hyphens=False)
[ "def", "_wrap", "(", "text", ",", "wrap_max", "=", "80", ",", "indent", "=", "4", ")", ":", "indent", "=", "indent", "*", "' '", "wrap_width", "=", "wrap_max", "-", "len", "(", "indent", ")", "if", "isinstance", "(", "text", ",", "Path", ")", ":", ...
42.6
14.8
def write_float_matrices(scp_path, ark_path, matrices):
    """ Write the given dict matrices (utt-id/float ndarray) to the given
    scp and ark files (Kaldi binary float-matrix format).
    """
    scp_entries = []
    with open(ark_path, 'wb') as ark_file:
        for utt_id in sorted(matrices):
            matrix = matrices[utt_id]
            assert (matrix.dtype == np.float32)
            # "<utt-id> " header, then remember where the binary record
            # starts so the scp entry can point at it.
            ark_file.write(('{} '.format(utt_id)).encode('utf-8'))
            offset = ark_file.tell()
            # Binary-mode marker, type tag, then int32-prefixed dimensions.
            ark_file.write(b'\x00B')
            ark_file.write(b'FM ')
            ark_file.write(b'\x04')
            ark_file.write(struct.pack('<i', np.size(matrix, 0)))
            ark_file.write(b'\x04')
            ark_file.write(struct.pack('<i', np.size(matrix, 1)))
            # Raw row-major float32 payload.
            flat = matrix.reshape(np.size(matrix, 0) * np.size(matrix, 1))
            flat.tofile(ark_file, sep='')
            scp_entries.append('{} {}:{}'.format(utt_id, ark_path, offset))
    with open(scp_path, 'w') as scp_file:
        scp_file.write('\n'.join(scp_entries))
[ "def", "write_float_matrices", "(", "scp_path", ",", "ark_path", ",", "matrices", ")", ":", "scp_entries", "=", "[", "]", "with", "open", "(", "ark_path", ",", "'wb'", ")", "as", "f", ":", "for", "utterance_id", "in", "sorted", "(", "list", "(", "matrice...
35.310345
22.068966
def subdevicenames(self) -> Tuple[str, ...]:
    """A |tuple| containing the (sub)device names.

    Clarifies which row of |NetCDFVariableAgg.array| contains which time
    series. For 0-dimensional series the plain device names are returned;
    for higher-dimensional sequences each device name gets an additional
    underscore-joined index suffix per subdevice
    (e.g. ``'element2_0'``, ``'element2_1'``).
    """
    names = []
    for devicename, seq in self.sequences.items():
        if seq.NDIM:
            # One entry per subdevice: "<device>_<i>_<j>...".
            prefix = devicename + '_'
            for prod in self._product(seq.shape):
                names.append(prefix + '_'.join(str(idx) for idx in prod))
        else:
            names.append(devicename)
    return tuple(names)
[ "def", "subdevicenames", "(", "self", ")", "->", "Tuple", "[", "str", ",", "...", "]", ":", "stats", ":", "List", "[", "str", "]", "=", "collections", ".", "deque", "(", ")", "for", "devicename", ",", "seq", "in", "self", ".", "sequences", ".", "it...
44.875
18.25
def update_params_for_auth(self, headers, querys, auth_settings):
    """Updates header and query params based on authentication setting.

    :param headers: Header parameters dict to be updated.
    :param querys: Query parameters tuple list to be updated.
    :param auth_settings: Authentication setting identifiers list.
    """
    token = self.auth_token_holder.token
    if token is None:
        # No session token yet: fall back to HTTP basic auth.
        headers['Authorization'] = self.configuration.get_basic_auth_token()
    else:
        headers[Configuration.AUTH_TOKEN_HEADER_NAME] = token
[ "def", "update_params_for_auth", "(", "self", ",", "headers", ",", "querys", ",", "auth_settings", ")", ":", "if", "self", ".", "auth_token_holder", ".", "token", "is", "not", "None", ":", "headers", "[", "Configuration", ".", "AUTH_TOKEN_HEADER_NAME", "]", "=...
52.727273
24.363636
def delete(cls, schedule_id, schedule_instance_id,
           note_attachment_schedule_instance_id, monetary_account_id=None,
           custom_headers=None):
    """
    :type user_id: int
    :type monetary_account_id: int
    :type schedule_id: int
    :type schedule_instance_id: int
    :type note_attachment_schedule_instance_id: int
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseNone
    """
    custom_headers = {} if custom_headers is None else custom_headers
    # Build the DELETE endpoint from the resolved user/account context.
    endpoint_url = cls._ENDPOINT_URL_DELETE.format(
        cls._determine_user_id(),
        cls._determine_monetary_account_id(monetary_account_id),
        schedule_id,
        schedule_instance_id,
        note_attachment_schedule_instance_id)
    api_client = client.ApiClient(cls._get_api_context())
    response_raw = api_client.delete(endpoint_url, custom_headers)
    return BunqResponseNone.cast_from_bunq_response(
        client.BunqResponse(None, response_raw.headers))
[ "def", "delete", "(", "cls", ",", "schedule_id", ",", "schedule_instance_id", ",", "note_attachment_schedule_instance_id", ",", "monetary_account_id", "=", "None", ",", "custom_headers", "=", "None", ")", ":", "if", "custom_headers", "is", "None", ":", "custom_heade...
43.448276
22.344828
def create(self, name, ip_address):
    """
    Creates a new domain

    Parameters
    ----------
    name: str
        new domain name
    ip_address: str
        IP address for the new domain
    """
    response = self.post(name=name, ip_address=ip_address)
    return response.get(self.singular, None)
[ "def", "create", "(", "self", ",", "name", ",", "ip_address", ")", ":", "return", "(", "self", ".", "post", "(", "name", "=", "name", ",", "ip_address", "=", "ip_address", ")", ".", "get", "(", "self", ".", "singular", ",", "None", ")", ")" ]
25.384615
13.538462
def scaled_array_2d_with_sub_dimensions_from_sub_array_1d(self, sub_array_1d):
    """ Map a 1D sub-array the same dimension as the sub-grid to its
    original masked 2D sub-array and return it as a scaled array.

    Parameters
    -----------
    sub_array_1d : ndarray
        The 1D sub-array of which is mapped to a 2D scaled sub-array
        the dimensions.
    """
    sub_array_2d = self.sub_array_2d_from_sub_array_1d(
        sub_array_1d=sub_array_1d)
    # Sub-pixels are a fraction of a mask pixel, so shrink the scale.
    sub_pixel_scale = self.mask.pixel_scale / self.sub_grid_size
    return scaled_array.ScaledSquarePixelArray(
        array=sub_array_2d,
        pixel_scale=sub_pixel_scale,
        origin=self.mask.origin)
[ "def", "scaled_array_2d_with_sub_dimensions_from_sub_array_1d", "(", "self", ",", "sub_array_1d", ")", ":", "return", "scaled_array", ".", "ScaledSquarePixelArray", "(", "array", "=", "self", ".", "sub_array_2d_from_sub_array_1d", "(", "sub_array_1d", "=", "sub_array_1d", ...
57.166667
31.416667
def community_user_comments(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/post_comments#list-comments"
    path = "/api/v2/community/users/{id}/comments.json".format(id=id)
    return self.call(path, **kwargs)
[ "def", "community_user_comments", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/community/users/{id}/comments.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call"...
57.8
17.8
def gradient_local(self, f, index):
    """
    Return the gradient at a specified node.

    This routine employs a local method, in which values depend only on
    nearby data points, to compute an estimated gradient at a node.

    gradient_local() is more efficient than gradient() only if it is
    unnecessary to compute gradients at all of the nodes. Both routines
    have similar accuracy.

    Parameters
    ----------
    """
    if f.size != self.npoints:
        raise ValueError('f should be the same size as mesh')

    # Reorder the field into the mesh's internal node ordering before
    # handing it to the Fortran routine (1-based node index).
    shuffled = self._shuffle_field(f)
    gradX, gradY, _ = _srfpack.gradl(
        index + 1, self._x, self._y, shuffled,
        self.lst, self.lptr, self.lend)
    return gradX, gradY
[ "def", "gradient_local", "(", "self", ",", "f", ",", "index", ")", ":", "if", "f", ".", "size", "!=", "self", ".", "npoints", ":", "raise", "ValueError", "(", "'f should be the same size as mesh'", ")", "f", "=", "self", ".", "_shuffle_field", "(", "f", ...
33.130435
26.434783
def stats(self):
    """
    Get the stats for the current :class:`Milestone`
    """
    response = self.requester.get(
        '/{endpoint}/{id}/stats',
        endpoint=self.endpoint,
        id=self.id,
    )
    return response.json()
[ "def", "stats", "(", "self", ")", ":", "response", "=", "self", ".", "requester", ".", "get", "(", "'/{endpoint}/{id}/stats'", ",", "endpoint", "=", "self", ".", "endpoint", ",", "id", "=", "self", ".", "id", ")", "return", "response", ".", "json", "("...
28.222222
10.222222
def line_evaluate(t, p0, a, b, c):
    """Evaluate the orthogonal polynomial defined by its recurrence
    coefficients a, b, and c at the point(s) t.
    """
    # The order is important here; see
    # <https://github.com/sympy/sympy/issues/13637>.
    prev = numpy.zeros_like(t, dtype=int)
    cur = numpy.ones_like(t) * p0
    # Three-term recurrence: p_{k+1} = (a_k*t - b_k)*p_k - c_k*p_{k-1}.
    for a_k, b_k, c_k in zip(a, b, c):
        prev, cur = cur, cur * (t * a_k - b_k) - prev * c_k
    return cur
[ "def", "line_evaluate", "(", "t", ",", "p0", ",", "a", ",", "b", ",", "c", ")", ":", "vals1", "=", "numpy", ".", "zeros_like", "(", "t", ",", "dtype", "=", "int", ")", "# The order is important here; see", "# <https://github.com/sympy/sympy/issues/13637>.", "v...
35.692308
9
def cookie_decode(data, key, digestmod=None):
    """ Verify and decode an encoded string. Return an object or None."""
    depr(0, 13, "cookie_decode() will be removed soon.",
         "Do not use this API directly.")
    data = tob(data)
    if not cookie_is_encoded(data):
        return None
    sig, msg = data.split(tob('?'), 1)
    digestmod = digestmod or hashlib.sha256
    hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
    # NOTE: pickle.loads on cookie data is only acceptable because the
    # HMAC signature is constant-time verified first; never bypass it.
    if _lscmp(sig[1:], base64.b64encode(hashed)):
        return pickle.loads(base64.b64decode(msg))
    return None
[ "def", "cookie_decode", "(", "data", ",", "key", ",", "digestmod", "=", "None", ")", ":", "depr", "(", "0", ",", "13", ",", "\"cookie_decode() will be removed soon.\"", ",", "\"Do not use this API directly.\"", ")", "data", "=", "tob", "(", "data", ")", "if", ...
46.166667
12.416667
def _bss_decomp_mtifilt_images(reference_sources, estimated_source, j, flen,
                               Gj=None, G=None):
    """Decomposition of an estimated source image into four components
    representing respectively the true source image, spatial (or filtering)
    distortion, interference and artifacts, derived from the true source
    images using multichannel time-invariant filters.
    Adapted version to work with multichannel sources.
    Improved performance can be gained by passing Gj and G parameters
    initially as all zeros. These parameters store the results from the
    computation of the G matrix in _project_images and then return them for
    subsequent calls to this function. This only works when not computing
    permutations.
    """
    nsampl, nchan = np.shape(estimated_source)
    # are we saving the Gj and G parameters?
    saveg = Gj is not None and G is not None
    # true source image, zero-padded for the filter length
    s_true = np.hstack((
        np.reshape(reference_sources[j],
                   (nsampl, nchan),
                   order="F").transpose(),
        np.zeros((nchan, flen - 1))))
    # spatial (or filtering) distortion
    if saveg:
        e_spat, Gj = _project_images(reference_sources[j, np.newaxis, :],
                                     estimated_source, flen, Gj)
    else:
        e_spat = _project_images(reference_sources[j, np.newaxis, :],
                                 estimated_source, flen)
    e_spat = e_spat - s_true
    # interference
    if saveg:
        e_interf, G = _project_images(reference_sources, estimated_source,
                                      flen, G)
    else:
        e_interf = _project_images(reference_sources, estimated_source, flen)
    e_interf = e_interf - s_true - e_spat
    # artifacts
    e_artif = -s_true - e_spat - e_interf
    e_artif[:, :nsampl] += estimated_source.transpose()
    # return Gj and G only if they were passed in
    if saveg:
        return (s_true, e_spat, e_interf, e_artif, Gj, G)
    return (s_true, e_spat, e_interf, e_artif)
[ "def", "_bss_decomp_mtifilt_images", "(", "reference_sources", ",", "estimated_source", ",", "j", ",", "flen", ",", "Gj", "=", "None", ",", "G", "=", "None", ")", ":", "nsampl", "=", "np", ".", "shape", "(", "estimated_source", ")", "[", "0", "]", "nchan...
46.5
18.391304
def acquire(self) -> Connection:
    '''Register and return a connection.

    Coroutine.
    '''
    assert not self._closed
    yield from self._condition.acquire()
    while True:
        # Prefer an idle connection; otherwise create one if under the
        # cap; otherwise wait for a release.
        if self.ready:
            conn = self.ready.pop()
            break
        if len(self.busy) < self.max_connections:
            conn = self._connection_factory()
            break
        yield from self._condition.wait()
    self.busy.add(conn)
    self._condition.release()
    return conn
[ "def", "acquire", "(", "self", ")", "->", "Connection", ":", "assert", "not", "self", ".", "_closed", "yield", "from", "self", ".", "_condition", ".", "acquire", "(", ")", "while", "True", ":", "if", "self", ".", "ready", ":", "connection", "=", "self"...
25.173913
19.347826
def _run_purecn_dx(out, paired):
    """Extract signatures and mutational burdens from PureCN rds file.
    """
    out_base, out, all_files = _get_purecn_dx_files(paired, out)
    if not utils.file_uptodate(out["mutation_burden"], out["rds"]):
        with file_transaction(paired.tumor_data, out_base) as tx_out_base:
            cmd = ["PureCN_Dx.R", "--rds", out["rds"],
                   "--callable", dd.get_sample_callable(paired.tumor_data),
                   "--signatures", "--out", tx_out_base]
            do.run(cmd, "PureCN Dx mutational burden and signatures")
            # Move every produced file from the transaction dir into place.
            tx_dir = os.path.dirname(tx_out_base)
            out_dir = os.path.dirname(out_base)
            for fname in all_files:
                src = os.path.join(tx_dir, fname)
                if os.path.exists(src):
                    shutil.move(src, os.path.join(out_dir, fname))
    return out
[ "def", "_run_purecn_dx", "(", "out", ",", "paired", ")", ":", "out_base", ",", "out", ",", "all_files", "=", "_get_purecn_dx_files", "(", "paired", ",", "out", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out", "[", "\"mutation_burden\"", "]", ",...
59.214286
25.571429
def dtdQElementDesc(self, name, prefix):
    """Search the DTD for the description of this element """
    ret = libxml2mod.xmlGetDtdQElementDesc(self._o, name, prefix)
    if ret is None:
        raise treeError('xmlGetDtdQElementDesc() failed')
    # Wrap the raw C pointer in the Python element class.
    return xmlElement(_obj=ret)
[ "def", "dtdQElementDesc", "(", "self", ",", "name", ",", "prefix", ")", ":", "ret", "=", "libxml2mod", ".", "xmlGetDtdQElementDesc", "(", "self", ".", "_o", ",", "name", ",", "prefix", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'x...
50.333333
14.166667
def wait_for(self, condition, timeout=None, interval=0.1, errmsg=None):
    '''Wait for a condition to be True.

    Wait for condition, a callable, to return True. If timeout is
    nonzero, raise a TimeoutError(errmsg) if the condition is not True
    after timeout seconds. Check the condition every interval seconds.
    '''
    start = time.time()
    while not condition():
        if timeout and (time.time() - start) >= timeout:
            raise TimeoutError(errmsg)
        time.sleep(interval)
[ "def", "wait_for", "(", "self", ",", "condition", ",", "timeout", "=", "None", ",", "interval", "=", "0.1", ",", "errmsg", "=", "None", ")", ":", "t0", "=", "time", ".", "time", "(", ")", "while", "not", "condition", "(", ")", ":", "t1", "=", "ti...
34.75
21
def plotted_data(self):
    """The data that is shown to the user"""
    # Pair each array with its (cycled) value and keep only those whose
    # value is set.
    values = cycle(slist(self.value))
    return InteractiveList(
        [arr for arr, val in zip(self.iter_data, values)
         if val is not None])
[ "def", "plotted_data", "(", "self", ")", ":", "return", "InteractiveList", "(", "[", "arr", "for", "arr", ",", "val", "in", "zip", "(", "self", ".", "iter_data", ",", "cycle", "(", "slist", "(", "self", ".", "value", ")", ")", ")", "if", "val", "is...
41.5
11.166667
def add_unqualified_edge(self, u: BaseEntity, v: BaseEntity, relation: str) -> str:
    """Add a unique edge that has no annotations.

    :param u: The source node
    :param v: The target node
    :param relation: A relationship label from :mod:`pybel.constants`
    :return: The key for this edge (a unique hash)
    """
    return self._help_add_edge(u, v, {RELATION: relation})
[ "def", "add_unqualified_edge", "(", "self", ",", "u", ":", "BaseEntity", ",", "v", ":", "BaseEntity", ",", "relation", ":", "str", ")", "->", "str", ":", "attr", "=", "{", "RELATION", ":", "relation", "}", "return", "self", ".", "_help_add_edge", "(", ...
42.1
15.5
def greedy_replace(self, seq): ''' Greedily matches strings in ``seq``, and replaces them with their node values. Arguments: - `seq`: an iterable of characters to perform search-and-replace on ''' if not self._suffix_links_set: self._set_suffix_links() # start at the root current = self.root buffered = '' outstr = '' for char in seq: while char not in current: if current.has_dict_suffix: current = current.dict_suffix outstr += buffered[:-current.depth] outstr += current.value buffered = '' current = self.root break elif current.has_suffix: current = current.suffix if current.depth: outstr += buffered[:-current.depth] buffered = buffered[-current.depth:] else: outstr += buffered buffered = '' break else: current = self.root outstr += buffered buffered = '' break if char in current: buffered += char current = current[char] if current.has_value: outstr += buffered[:-current.depth] outstr += current.value buffered = '' current = self.root else: assert current is self.root outstr += buffered + char buffered = '' if current.has_dict_suffix: current = current.dict_suffix outstr += buffered[:-current.depth] outstr += current.value else: outstr += buffered return outstr
[ "def", "greedy_replace", "(", "self", ",", "seq", ")", ":", "if", "not", "self", ".", "_suffix_links_set", ":", "self", ".", "_set_suffix_links", "(", ")", "# start at the root", "current", "=", "self", ".", "root", "buffered", "=", "''", "outstr", "=", "'...
34.732143
11.446429
def canonical_url(configs, vip_setting='vip'): ''' Returns the correct HTTP URL to this host given the state of HTTPS configuration and hacluster. :configs : OSTemplateRenderer: A config tempating object to inspect for a complete https context. :vip_setting: str: Setting in charm config that specifies VIP address. ''' scheme = 'http' if 'https' in configs.complete_contexts(): scheme = 'https' if is_clustered(): addr = config_get(vip_setting) else: addr = unit_get('private-address') return '%s://%s' % (scheme, addr)
[ "def", "canonical_url", "(", "configs", ",", "vip_setting", "=", "'vip'", ")", ":", "scheme", "=", "'http'", "if", "'https'", "in", "configs", ".", "complete_contexts", "(", ")", ":", "scheme", "=", "'https'", "if", "is_clustered", "(", ")", ":", "addr", ...
35.052632
20.842105
def create(self, price_estimate): """ Take configuration from previous month, it it exists. Set last_update_time equals to the beginning of the month. """ kwargs = {} try: previous_price_estimate = price_estimate.get_previous() except ObjectDoesNotExist: pass else: configuration = previous_price_estimate.consumption_details.configuration kwargs['configuration'] = configuration month_start = core_utils.month_start(datetime.date(price_estimate.year, price_estimate.month, 1)) kwargs['last_update_time'] = month_start return super(ConsumptionDetailsQuerySet, self).create(price_estimate=price_estimate, **kwargs)
[ "def", "create", "(", "self", ",", "price_estimate", ")", ":", "kwargs", "=", "{", "}", "try", ":", "previous_price_estimate", "=", "price_estimate", ".", "get_previous", "(", ")", "except", "ObjectDoesNotExist", ":", "pass", "else", ":", "configuration", "=",...
48.733333
24.066667
def start(self, io_handler, bundle_id, *bundles_ids): """ Starts the bundles with the given IDs. Stops on first failure. """ for bid in (bundle_id,) + bundles_ids: try: # Got an int => it's a bundle ID bid = int(bid) except ValueError: # Got something else, we will try to install it first bid = self.install(io_handler, bid) bundle = self.__get_bundle(io_handler, bid) if bundle is not None: io_handler.write_line( "Starting bundle {0} ({1})...", bid, bundle.get_symbolic_name(), ) bundle.start() else: return False return None
[ "def", "start", "(", "self", ",", "io_handler", ",", "bundle_id", ",", "*", "bundles_ids", ")", ":", "for", "bid", "in", "(", "bundle_id", ",", ")", "+", "bundles_ids", ":", "try", ":", "# Got an int => it's a bundle ID", "bid", "=", "int", "(", "bid", "...
33.125
15.291667
def extrude(self, height, **kwargs): """ Extrude the current 2D path into a 3D mesh. Parameters ---------- height: float, how far to extrude the profile kwargs: passed directly to meshpy.triangle.build: triangle.build(mesh_info, verbose=False, refinement_func=None, attributes=False, volume_constraints=True, max_volume=None, allow_boundary_steiner=True, allow_volume_steiner=True, quality_meshing=True, generate_edges=None, generate_faces=False, min_angle=None) Returns -------- mesh: trimesh object representing extruded polygon """ from ..primitives import Extrusion result = [Extrusion(polygon=i, height=height, **kwargs) for i in self.polygons_full] if len(result) == 1: return result[0] return result
[ "def", "extrude", "(", "self", ",", "height", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", "primitives", "import", "Extrusion", "result", "=", "[", "Extrusion", "(", "polygon", "=", "i", ",", "height", "=", "height", ",", "*", "*", "kwargs", ...
39.233333
13.166667
def flag_length_outliers(df, columnname): """Return index of records with length-outliers above 3 standard deviations from the median.""" return df[columnname] > (np.median(df[columnname]) + 3 * np.std(df[columnname]))
[ "def", "flag_length_outliers", "(", "df", ",", "columnname", ")", ":", "return", "df", "[", "columnname", "]", ">", "(", "np", ".", "median", "(", "df", "[", "columnname", "]", ")", "+", "3", "*", "np", ".", "std", "(", "df", "[", "columnname", "]"...
74.666667
15
def hook_setup(parent, hook_fpath): """Setup hook""" hook = copy.deepcopy(HOOK) hook["name"] = os.path.splitext(os.path.basename(hook_fpath))[0] hook["name"] = hook["name"].replace("_enter", "").replace("_exit", "") hook["res_root"] = parent["res_root"] hook["fpath_orig"] = hook_fpath hook["fname"] = "hook_%s" % os.path.basename(hook["fpath_orig"]) hook["fpath"] = os.sep.join([hook["res_root"], hook["fname"]]) hook["log_fpath"] = os.sep.join([ hook["res_root"], "%s.log" % hook["fname"] ]) hook["evars"].update(copy.deepcopy(parent["evars"])) shutil.copyfile(hook["fpath_orig"], hook["fpath"]) return hook
[ "def", "hook_setup", "(", "parent", ",", "hook_fpath", ")", ":", "hook", "=", "copy", ".", "deepcopy", "(", "HOOK", ")", "hook", "[", "\"name\"", "]", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "hook_fpath"...
33.1
20.6
def contains(self, name: str) -> List[str]: """Return a list of all keywords containing the given string. >>> from hydpy.core.devicetools import Keywords >>> keywords = Keywords('first_keyword', 'second_keyword', ... 'keyword_3', 'keyword_4', ... 'keyboard') >>> keywords.contains('keyword') ['first_keyword', 'keyword_3', 'keyword_4', 'second_keyword'] """ return sorted(keyword for keyword in self if name in keyword)
[ "def", "contains", "(", "self", ",", "name", ":", "str", ")", "->", "List", "[", "str", "]", ":", "return", "sorted", "(", "keyword", "for", "keyword", "in", "self", "if", "name", "in", "keyword", ")" ]
47.454545
14.727273
def log_exception(exc_info=None, stream=None): """Log the 'exc_info' tuple in the server log.""" exc_info = exc_info or sys.exc_info() stream = stream or sys.stderr try: from traceback import print_exception print_exception(exc_info[0], exc_info[1], exc_info[2], None, stream) stream.flush() finally: exc_info = None
[ "def", "log_exception", "(", "exc_info", "=", "None", ",", "stream", "=", "None", ")", ":", "exc_info", "=", "exc_info", "or", "sys", ".", "exc_info", "(", ")", "stream", "=", "stream", "or", "sys", ".", "stderr", "try", ":", "from", "traceback", "impo...
35.9
15
def _mwi(self): """ Apply moving wave integration (mwi) with a ricker (Mexican hat) wavelet onto the filtered signal, and save the square of the integrated signal. The width of the hat is equal to the qrs width After integration, find all local peaks in the mwi signal. """ wavelet_filter = signal.ricker(self.qrs_width, 4) self.sig_i = signal.filtfilt(wavelet_filter, [1], self.sig_f, axis=0) ** 2 # Save the mwi gain (x2 due to double filtering) and the total # gain from raw to mwi self.mwi_gain = get_filter_gain(wavelet_filter, [1], np.mean([self.fc_low, self.fc_high]), self.fs) * 2 self.transform_gain = self.filter_gain * self.mwi_gain self.peak_inds_i = find_local_peaks(self.sig_i, radius=self.qrs_radius) self.n_peaks_i = len(self.peak_inds_i)
[ "def", "_mwi", "(", "self", ")", ":", "wavelet_filter", "=", "signal", ".", "ricker", "(", "self", ".", "qrs_width", ",", "4", ")", "self", ".", "sig_i", "=", "signal", ".", "filtfilt", "(", "wavelet_filter", ",", "[", "1", "]", ",", "self", ".", "...
41.772727
23.409091
def show_message(device, msg, y_offset=0, fill=None, font=None, scroll_delay=0.03): """ Scrolls a message right-to-left across the devices display. :param device: The device to scroll across. :param msg: The text message to display (must be ASCII only). :type msg: str :param y_offset: The row to use to display the text. :type y_offset: int :param fill: The fill color to use (standard Pillow color name or RGB tuple). :param font: The font (from :py:mod:`luma.core.legacy.font`) to use. :param scroll_delay: The number of seconds to delay between scrolling. :type scroll_delay: float """ fps = 0 if scroll_delay == 0 else 1.0 / scroll_delay regulator = framerate_regulator(fps) font = font or DEFAULT_FONT with canvas(device) as draw: w, h = textsize(msg, font) x = device.width virtual = viewport(device, width=w + x + x, height=device.height) with canvas(virtual) as draw: text(draw, (x, y_offset), msg, font=font, fill=fill) i = 0 while i <= w + x: with regulator: virtual.set_position((i, 0)) i += 1
[ "def", "show_message", "(", "device", ",", "msg", ",", "y_offset", "=", "0", ",", "fill", "=", "None", ",", "font", "=", "None", ",", "scroll_delay", "=", "0.03", ")", ":", "fps", "=", "0", "if", "scroll_delay", "==", "0", "else", "1.0", "/", "scro...
34.363636
19.272727
def generate_confirmation_token(self, user): """ Generates a unique confirmation token for the specified user. :param user: The user to work with """ data = [str(user.id), self.hash_data(user.email)] return self.security.confirm_serializer.dumps(data)
[ "def", "generate_confirmation_token", "(", "self", ",", "user", ")", ":", "data", "=", "[", "str", "(", "user", ".", "id", ")", ",", "self", ".", "hash_data", "(", "user", ".", "email", ")", "]", "return", "self", ".", "security", ".", "confirm_seriali...
36.625
13.875
def _parse_args(): """ Parses the command line arguments. :return: Namespace with arguments. :rtype: Namespace """ parser = argparse.ArgumentParser(description='rain - a new sort of automated builder.') parser.add_argument('action', help='what shall we do?', default='build', nargs='?', choices=['build', 'ls', 'keep'] + removal_cmds) parser.add_argument('-c', '--count', type=int, default=1, help='a count of items on which to operate. [default: %(default)s]') parser.add_argument('--keep', type=int, default=-1, help='how many builds should we keep around? [default: %(default)s]') parser.add_argument('-v', '--verbose', action='count', default=0, help='Be more verbose. (can be repeated)') parser.add_argument('--version', default=False, action='store_true', help='print version number and exit. [default: %(default)s]') return parser.parse_args()
[ "def", "_parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'rain - a new sort of automated builder.'", ")", "parser", ".", "add_argument", "(", "'action'", ",", "help", "=", "'what shall we do?'", ",", "default",...
40.230769
28.384615
def extern_store_dict(self, context_handle, vals_ptr, vals_len): """Given storage and an array of Handles, return a new Handle to represent the dict. Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...). It is assumed that an even number of values were passed. """ c = self._ffi.from_handle(context_handle) tup = tuple(c.from_value(val[0]) for val in self._ffi.unpack(vals_ptr, vals_len)) d = dict() for i in range(0, len(tup), 2): d[tup[i]] = tup[i + 1] return c.to_value(d)
[ "def", "extern_store_dict", "(", "self", ",", "context_handle", ",", "vals_ptr", ",", "vals_len", ")", ":", "c", "=", "self", ".", "_ffi", ".", "from_handle", "(", "context_handle", ")", "tup", "=", "tuple", "(", "c", ".", "from_value", "(", "val", "[", ...
41.307692
21.538462
def stack(self, new_column_name=None, drop_na=False, new_column_type=None): """ Convert a "wide" SArray to one or two "tall" columns in an SFrame by stacking all values. The stack works only for columns of dict, list, or array type. If the column is dict type, two new columns are created as a result of stacking: one column holds the key and another column holds the value. The rest of the columns are repeated for each key/value pair. If the column is array or list type, one new column is created as a result of stacking. With each row holds one element of the array or list value, and the rest columns from the same original row repeated. The returned SFrame includes the newly created column(s). Parameters -------------- new_column_name : str | list of str, optional The new column name(s). If original column is list/array type, new_column_name must a string. If original column is dict type, new_column_name must be a list of two strings. If not given, column names are generated automatically. drop_na : boolean, optional If True, missing values and empty list/array/dict are all dropped from the resulting column(s). If False, missing values are maintained in stacked column(s). new_column_type : type | list of types, optional The new column types. If original column is a list/array type new_column_type must be a single type, or a list of one type. If original column is of dict type, new_column_type must be a list of two types. If not provided, the types are automatically inferred from the first 100 values of the SFrame. Returns ------- out : SFrame A new SFrame that contains the newly stacked column(s). Examples --------- Suppose 'sa' is an SArray of dict type: >>> sa = turicreate.SArray([{'a':3, 'cat':2}, ... {'a':1, 'the':2}, ... {'the':1, 'dog':3}, ... 
{}]) [{'a': 3, 'cat': 2}, {'a': 1, 'the': 2}, {'the': 1, 'dog': 3}, {}] Stack would stack all keys in one column and all values in another column: >>> sa.stack(new_column_name=['word', 'count']) +------+-------+ | word | count | +------+-------+ | a | 3 | | cat | 2 | | a | 1 | | the | 2 | | the | 1 | | dog | 3 | | None | None | +------+-------+ [7 rows x 2 columns] Observe that since topic 4 had no words, an empty row is inserted. To drop that row, set drop_na=True in the parameters to stack. """ from .sframe import SFrame as _SFrame return _SFrame({'SArray': self}).stack('SArray', new_column_name=new_column_name, drop_na=drop_na, new_column_type=new_column_type)
[ "def", "stack", "(", "self", ",", "new_column_name", "=", "None", ",", "drop_na", "=", "False", ",", "new_column_type", "=", "None", ")", ":", "from", ".", "sframe", "import", "SFrame", "as", "_SFrame", "return", "_SFrame", "(", "{", "'SArray'", ":", "se...
41.671053
25.25
def _get_thumbnail_src_from_file(dir_path, image_file, force_no_processing=False): """ Get base-64 encoded data as a string for the given image file's thumbnail, for use directly in HTML <img> tags, or a path to the original if image scaling is not supported. @param {String} dir_path - The directory containing the image file @param {String} image_file - The filename of the image file within dir_path @param {Boolean=False} force_no_processing - If True, do not attempt to actually process a thumbnail, PIL image or anything. Simply return the image filename as src. @return {String} The base-64 encoded image data string, or path to the file itself if not supported. """ # If we've specified to force no processing, just return the image filename if force_no_processing: if image_file.endswith('tif') or image_file.endswith('tiff'): return UNSUPPORTED_IMAGE_TYPE_DATA return image_file # First try to get a thumbnail image img = _get_thumbnail_image_from_file(dir_path, image_file) return _get_src_from_image(img, image_file)
[ "def", "_get_thumbnail_src_from_file", "(", "dir_path", ",", "image_file", ",", "force_no_processing", "=", "False", ")", ":", "# If we've specified to force no processing, just return the image filename", "if", "force_no_processing", ":", "if", "image_file", ".", "endswith", ...
53.142857
21.714286
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning, ignore_duplicate_updates=True, return_untouched=False): """ Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT) INSERT INTO table_name (field1, field2) VALUES (1, 'two') ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2; """ model = queryset.model # Use all fields except pk unless the uniqueness constraint is the pk field all_fields = [ field for field in model._meta.fields if field.column != model._meta.pk.name or not field.auto_created ] all_field_names = [field.column for field in all_fields] returning = returning if returning is not True else [f.column for f in model._meta.fields] all_field_names_sql = ', '.join([_quote(field) for field in all_field_names]) # Convert field names to db column names unique_fields = [ model._meta.get_field(unique_field) for unique_field in unique_fields ] update_fields = [ model._meta.get_field(update_field) for update_field in update_fields ] unique_field_names_sql = ', '.join([ _quote(field.column) for field in unique_fields ]) update_fields_sql = ', '.join([ '{0} = EXCLUDED.{0}'.format(_quote(field.column)) for field in update_fields ]) row_values, sql_args = _get_values_for_rows(model_objs, all_fields) return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else '' ignore_duplicates_sql = '' if ignore_duplicate_updates: ignore_duplicates_sql = ( ' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) ' ).format( update_fields_sql=', '.join( '{0}.{1}'.format(model._meta.db_table, _quote(field.column)) for field in update_fields ), excluded_update_fields_sql=', '.join( 'EXCLUDED.' 
+ _quote(field.column) for field in update_fields ) ) on_conflict = ( 'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING' ) if return_untouched: row_values_sql = ', '.join([ '(\'{0}\', {1})'.format(i, row_value[1:-1]) for i, row_value in enumerate(row_values) ]) sql = ( ' WITH input_rows("temp_id_", {all_field_names_sql}) AS (' ' VALUES {row_values_sql}' ' ), ins AS ( ' ' INSERT INTO {table_name} ({all_field_names_sql})' ' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_' ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}' ' )' ' SELECT DISTINCT ON ({table_pk_name}) * FROM (' ' SELECT status_, {return_fields_sql}' ' FROM ins' ' UNION ALL' ' SELECT \'n\' AS status_, {aliased_return_fields_sql}' ' FROM input_rows' ' JOIN {table_name} c USING ({unique_field_names_sql})' ' ) as results' ' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;' ).format( all_field_names_sql=all_field_names_sql, row_values_sql=row_values_sql, table_name=model._meta.db_table, unique_field_names_sql=unique_field_names_sql, on_conflict=on_conflict, return_sql=return_sql, table_pk_name=model._meta.pk.name, return_fields_sql=_get_return_fields_sql(returning), aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c') ) else: row_values_sql = ', '.join(row_values) sql = ( ' INSERT INTO {table_name} ({all_field_names_sql})' ' VALUES {row_values_sql}' ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}' ).format( table_name=model._meta.db_table, all_field_names_sql=all_field_names_sql, row_values_sql=row_values_sql, unique_field_names_sql=unique_field_names_sql, on_conflict=on_conflict, return_sql=return_sql ) return sql, sql_args
[ "def", "_get_upsert_sql", "(", "queryset", ",", "model_objs", ",", "unique_fields", ",", "update_fields", ",", "returning", ",", "ignore_duplicate_updates", "=", "True", ",", "return_untouched", "=", "False", ")", ":", "model", "=", "queryset", ".", "model", "# ...
39.642202
21.917431
def categorical2transactions(x): # type: (np.ndarray) -> List """ Convert a 2D int array into a transaction list: [ ['x0=1', 'x1=0', ...], ... ] :param x: :return: """ assert len(x.shape) == 2 transactions = [] for entry in x: transactions.append(['x%d=%d' % (i, val) for i, val in enumerate(entry)]) return transactions
[ "def", "categorical2transactions", "(", "x", ")", ":", "# type: (np.ndarray) -> List", "assert", "len", "(", "x", ".", "shape", ")", "==", "2", "transactions", "=", "[", "]", "for", "entry", "in", "x", ":", "transactions", ".", "append", "(", "[", "'x%d=%d...
21.833333
20.277778
def separate_reach_logs(log_str): """Get the list of reach logs from the overall logs.""" log_lines = log_str.splitlines() reach_logs = [] reach_lines = [] adding_reach_lines = False for l in log_lines[:]: if not adding_reach_lines and 'Beginning reach' in l: adding_reach_lines = True elif adding_reach_lines and 'Reach finished' in l: adding_reach_lines = False reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines))) reach_lines = [] elif adding_reach_lines: reach_lines.append(l.split('readers - ')[1]) log_lines.remove(l) if adding_reach_lines: reach_logs.append(('FAILURE', '\n'.join(reach_lines))) return '\n'.join(log_lines), reach_logs
[ "def", "separate_reach_logs", "(", "log_str", ")", ":", "log_lines", "=", "log_str", ".", "splitlines", "(", ")", "reach_logs", "=", "[", "]", "reach_lines", "=", "[", "]", "adding_reach_lines", "=", "False", "for", "l", "in", "log_lines", "[", ":", "]", ...
40.157895
12.210526
async def _set_persistent_menu(self): """ Define the persistent menu for all pages """ page = self.settings() if 'menu' in page: await self._send_to_messenger_profile(page, { 'persistent_menu': page['menu'], }) logger.info('Set menu for page %s', page['page_id'])
[ "async", "def", "_set_persistent_menu", "(", "self", ")", ":", "page", "=", "self", ".", "settings", "(", ")", "if", "'menu'", "in", "page", ":", "await", "self", ".", "_send_to_messenger_profile", "(", "page", ",", "{", "'persistent_menu'", ":", "page", "...
26.615385
17.692308
def write_single_file(args, base_dir, crawler): """Write to a single output file and/or subdirectory.""" if args['urls'] and args['html']: # Create a directory to save PART.html files in domain = utils.get_domain(args['urls'][0]) if not args['quiet']: print('Storing html files in {0}/'.format(domain)) utils.mkdir_and_cd(domain) infilenames = [] for query in args['query']: if query in args['files']: infilenames.append(query) elif query.strip('/') in args['urls']: if args['crawl'] or args['crawl_all']: # Crawl and save HTML files/image files to disk infilenames += crawler.crawl_links(query) else: raw_resp = utils.get_raw_resp(query) if raw_resp is None: return False prev_part_num = utils.get_num_part_files() utils.write_part_file(args, query, raw_resp) curr_part_num = prev_part_num + 1 infilenames += utils.get_part_filenames(curr_part_num, prev_part_num) # Convert output or leave as PART.html files if args['html']: # HTML files have been written already, so return to base directory os.chdir(base_dir) else: # Write files to text or pdf if infilenames: if args['out']: outfilename = args['out'][0] else: outfilename = utils.get_single_outfilename(args) if outfilename: write_files(args, infilenames, outfilename) else: utils.remove_part_files() return True
[ "def", "write_single_file", "(", "args", ",", "base_dir", ",", "crawler", ")", ":", "if", "args", "[", "'urls'", "]", "and", "args", "[", "'html'", "]", ":", "# Create a directory to save PART.html files in", "domain", "=", "utils", ".", "get_domain", "(", "ar...
38.162791
16.44186
def time_to_jump( self ): """ The timestep until the next jump. Args: None Returns: (Float): The timestep until the next jump. """ k_tot = rate_prefactor * np.sum( self.p ) return -( 1.0 / k_tot ) * math.log( random.random() )
[ "def", "time_to_jump", "(", "self", ")", ":", "k_tot", "=", "rate_prefactor", "*", "np", ".", "sum", "(", "self", ".", "p", ")", "return", "-", "(", "1.0", "/", "k_tot", ")", "*", "math", ".", "log", "(", "random", ".", "random", "(", ")", ")" ]
24.833333
17.916667
def main(codelabel, submit): """Command line interface for testing and submitting calculations. This script extends submit.py, adding flexibility in the selected code/computer. Run './cli.py --help' to see options. """ code = Code.get_from_string(codelabel) # set up calculation calc = code.new_calc() calc.label = "compute rips from distance matrix" calc.set_max_wallclock_seconds(1 * 60) calc.set_withmpi(False) calc.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1}) # Prepare input parameters from aiida.orm import DataFactory Parameters = DataFactory('gudhi.rdm') parameters = Parameters(dict={'max-edge-length': 4.2}) calc.use_parameters(parameters) SinglefileData = DataFactory('singlefile') distance_matrix = SinglefileData( file=os.path.join(gt.TEST_DIR, 'sample_distance.matrix')) calc.use_distance_matrix(distance_matrix) if submit: calc.store_all() calc.submit() print("submitted calculation; calc=Calculation(uuid='{}') # ID={}"\ .format(calc.uuid,calc.dbnode.pk)) else: subfolder, script_filename = calc.submit_test() path = os.path.relpath(subfolder.abspath) print("submission test successful") print("Find remote folder in {}".format(path)) print("In order to actually submit, add '--submit'")
[ "def", "main", "(", "codelabel", ",", "submit", ")", ":", "code", "=", "Code", ".", "get_from_string", "(", "codelabel", ")", "# set up calculation", "calc", "=", "code", ".", "new_calc", "(", ")", "calc", ".", "label", "=", "\"compute rips from distance matri...
35.921053
17.473684
def start_watcher(self): """ Start the watcher thread that tries to upload usage statistics. """ if self._watcher and self._watcher.is_alive: self._watcher_enabled = True else: logger.debug('Starting watcher.') self._watcher = threading.Thread(target=self._watcher_thread, name='usage_tracker') self._watcher.setDaemon(True) self._watcher_enabled = True self._watcher.start()
[ "def", "start_watcher", "(", "self", ")", ":", "if", "self", ".", "_watcher", "and", "self", ".", "_watcher", ".", "is_alive", ":", "self", ".", "_watcher_enabled", "=", "True", "else", ":", "logger", ".", "debug", "(", "'Starting watcher.'", ")", "self", ...
39.666667
12.833333
def move_grid_to_radial_minimum(func): """ Checks whether any coordinates in the grid are radially near (0.0, 0.0), which can lead to numerical faults in \ the evaluation of a light or mass profiles. If any coordinates are radially within the the radial minimum \ threshold, their (y,x) coordinates are shifted to that value to ensure they are evaluated correctly. By default this radial minimum is not used, and users should be certain they use a value that does not impact \ results. Parameters ---------- func : (profile, *args, **kwargs) -> Object A function that takes a grid of coordinates which may have a singularity as (0.0, 0.0) Returns ------- A function that can except cartesian or transformed coordinates """ @wraps(func) def wrapper(profile, grid, *args, **kwargs): """ Parameters ---------- profile : SphericalProfile The profiles that owns the function grid : ndarray PlaneCoordinates in either cartesian or profiles coordinate system args kwargs Returns ------- A value or coordinate in the same coordinate system as those passed in. """ radial_minimum_config = conf.NamedConfig(f"{conf.instance.config_path}/radial_minimum.ini") grid_radial_minimum = radial_minimum_config.get("radial_minimum", profile.__class__.__name__, float) with np.errstate(all='ignore'): # Division by zero fixed via isnan grid_radii = profile.grid_to_grid_radii(grid=grid) grid_radial_scale = np.where(grid_radii < grid_radial_minimum, grid_radial_minimum / grid_radii, 1.0) grid = np.multiply(grid, grid_radial_scale[:, None]) grid[np.isnan(grid)] = grid_radial_minimum return func(profile, grid, *args, **kwargs) return wrapper
[ "def", "move_grid_to_radial_minimum", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "profile", ",", "grid", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n\n Parameters\n ----------\n profile : Sp...
41.133333
29.733333
def get_user_orders(self, id, **data): """ GET /users/:id/orders/ Returns a :ref:`paginated <pagination>` response of :format:`orders <order>`, under the key ``orders``, of all orders the user has placed (i.e. where the user was the person buying the tickets). :param int id: The id assigned to a user. :param datetime changed_since: (optional) Only return attendees changed on or after the time given. .. note:: A datetime represented as a string in ISO8601 combined date and time format, always in UTC. """ return self.get("/users/{0}/orders/".format(id), data=data)
[ "def", "get_user_orders", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "get", "(", "\"/users/{0}/orders/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
63.2
37.8
def parse_from_args(synonyms): ''' Parse an array of string from argparser to SynonymSet ''' syns_str = ''.join(synonyms) syns_str = syns_str.replace(' ', '') syn_set = SynonymSet() # to check if we are parsing inside the parenthesis inside_set = False current_syn = '' current_syn_set = set() for char in syns_str: if char == '{': inside_set = True current_syn_set = set() continue if char == '}': inside_set = False current_syn_set.add(current_syn) syn_set.add_set(current_syn_set) current_syn = '' continue if not inside_set: raise Exception("Incorrect synonyms {}".format(syns_str)) if char == ',': if current_syn == '': raise Exception("Incorrect synonyms {}".format(syns_str)) current_syn_set.add(current_syn) current_syn = '' continue current_syn += char return syn_set
[ "def", "parse_from_args", "(", "synonyms", ")", ":", "syns_str", "=", "''", ".", "join", "(", "synonyms", ")", "syns_str", "=", "syns_str", ".", "replace", "(", "' '", ",", "''", ")", "syn_set", "=", "SynonymSet", "(", ")", "# to check if we are parsing insi...
21.083333
22.75
def session_rollback(self, session): """Send session_rollback signal in sqlalchemy ``after_rollback``. This marks the failure of session so the session may enter commit phase. """ # this may happen when there's nothing to rollback if not hasattr(session, 'meepo_unique_id'): self.logger.debug("skipped - session_rollback") return # del session meepo id after rollback self.logger.debug("%s - after_rollback" % session.meepo_unique_id) signal("session_rollback").send(session) self._session_del(session)
[ "def", "session_rollback", "(", "self", ",", "session", ")", ":", "# this may happen when there's nothing to rollback", "if", "not", "hasattr", "(", "session", ",", "'meepo_unique_id'", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"skipped - session_rollback\...
39.6
17.733333
def plot(self, columns=None, **errorbar_kwargs): """ Produces a visual representation of the coefficients, including their standard errors and magnitudes. Parameters ---------- columns : list, optional specify a subset of the columns to plot errorbar_kwargs: pass in additional plotting commands to matplotlib errorbar command Returns ------- ax: matplotlib axis the matplotlib axis that be edited. """ from matplotlib import pyplot as plt ax = errorbar_kwargs.pop("ax", None) or plt.figure().add_subplot(111) errorbar_kwargs.setdefault("c", "k") errorbar_kwargs.setdefault("fmt", "s") errorbar_kwargs.setdefault("markerfacecolor", "white") errorbar_kwargs.setdefault("markeredgewidth", 1.25) errorbar_kwargs.setdefault("elinewidth", 1.25) errorbar_kwargs.setdefault("capsize", 3) z = inv_normal_cdf(1 - self.alpha / 2) if columns is None: columns = self.hazards_.index yaxis_locations = list(range(len(columns))) symmetric_errors = z * self.standard_errors_[columns].to_frame().squeeze(axis=1).values.copy() hazards = self.hazards_[columns].values.copy() order = np.argsort(hazards) ax.errorbar(hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs) best_ylim = ax.get_ylim() ax.vlines(0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65) ax.set_ylim(best_ylim) tick_labels = [columns[i] for i in order] ax.set_yticks(yaxis_locations) ax.set_yticklabels(tick_labels) ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100)) return ax
[ "def", "plot", "(", "self", ",", "columns", "=", "None", ",", "*", "*", "errorbar_kwargs", ")", ":", "from", "matplotlib", "import", "pyplot", "as", "plt", "ax", "=", "errorbar_kwargs", ".", "pop", "(", "\"ax\"", ",", "None", ")", "or", "plt", ".", "...
34.509804
23.568627
def line(*args, **kwargs): """This function creates a line chart. Specifcally it creates an :py:class:`.AxisChart` and then adds a :py:class:`.LineSeries` to it. :param \*data: The data for the line series as either (x,y) values or two\ big tuples/lists of x and y values respectively. :param str name: The name to be associated with the series. :param str color: The hex colour of the line. :param str linestyle: The line pattern. See\ `OmniCanvas docs <https://omnicanvas.readthedocs.io/en/latest/api/graphics.\ html#omnicanvas.graphics.ShapeGraphic.line_style>`_ for acceptable values. :raises ValueError: if the size and length of the data doesn't match either\ format. :param Number linewidth: The width in pixels of the data points' edge. :param str title: The chart's title. This will be displayed at the top of\ the chart. :param width: The width in pixels of the chart. :param height: The height in pixels of the chart. :param str x_label: The label for the x-axis. :param str y_label: The label for the y-axis. :rtype: :py:class:`.AxisChart`""" line_series_kwargs = {} for kwarg in ("name", "color", "linestyle", "linewidth"): if kwarg in kwargs: line_series_kwargs[kwarg] = kwargs[kwarg] del kwargs[kwarg] if "color" not in line_series_kwargs: line_series_kwargs["color"] = colors[0] series = LineSeries(*args, **line_series_kwargs) chart = AxisChart(series, **kwargs) return chart
[ "def", "line", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "line_series_kwargs", "=", "{", "}", "for", "kwarg", "in", "(", "\"name\"", ",", "\"color\"", ",", "\"linestyle\"", ",", "\"linewidth\"", ")", ":", "if", "kwarg", "in", "kwargs", ":", ...
46.96875
19.375
def unmerged_blobs(self): """ :return: Iterator yielding dict(path : list( tuple( stage, Blob, ...))), being a dictionary associating a path in the index with a list containing sorted stage/blob pairs :note: Blobs that have been removed in one side simply do not exist in the given stage. I.e. a file removed on the 'other' branch whose entries are at stage 3 will not have a stage 3 entry. """ is_unmerged_blob = lambda t: t[0] != 0 path_map = {} for stage, blob in self.iter_blobs(is_unmerged_blob): path_map.setdefault(blob.path, []).append((stage, blob)) # END for each unmerged blob for l in mviter(path_map): l.sort() return path_map
[ "def", "unmerged_blobs", "(", "self", ")", ":", "is_unmerged_blob", "=", "lambda", "t", ":", "t", "[", "0", "]", "!=", "0", "path_map", "=", "{", "}", "for", "stage", ",", "blob", "in", "self", ".", "iter_blobs", "(", "is_unmerged_blob", ")", ":", "p...
39.85
20.35
def DbPutProperty(self, argin): """ Create / Update free object property(ies) :param argin: Str[0] = Object name Str[1] = Property number Str[2] = Property name Str[3] = Property value number Str[4] = Property value 1 Str[n] = Property value n .... :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid """ self._log.debug("In DbPutProperty()") object_name = argin[0] nb_properties = int(argin[1]) self.db.put_property(object_name, properties, argin[2:])
[ "def", "DbPutProperty", "(", "self", ",", "argin", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"In DbPutProperty()\"", ")", "object_name", "=", "argin", "[", "0", "]", "nb_properties", "=", "int", "(", "argin", "[", "1", "]", ")", "self", ".", ...
33.352941
10.647059
def dew_point_from_db_enth(db_temp, enthlpy, b_press=101325): """Dew Point Temperature (C) at Temperature db_temp (C), enthalpy (kJ/kg) and Pressure b_press (Pa). """ rh = rel_humid_from_db_enth(db_temp, enthlpy, b_press) td = dew_point_from_db_rh(db_temp, rh) return td
[ "def", "dew_point_from_db_enth", "(", "db_temp", ",", "enthlpy", ",", "b_press", "=", "101325", ")", ":", "rh", "=", "rel_humid_from_db_enth", "(", "db_temp", ",", "enthlpy", ",", "b_press", ")", "td", "=", "dew_point_from_db_rh", "(", "db_temp", ",", "rh", ...
41.142857
11.142857
def extract_protocol(self): """extract 802.11 protocol from radiotap.channel.flags :return: str protocol name one of below in success [.11a, .11b, .11g, .11n, .11ac] None in fail """ if self.present.mcs: return '.11n' if self.present.vht: return '.11ac' if self.present.channel and hasattr(self, 'chan'): if self.chan.five_g: if self.chan.ofdm: return '.11a' elif self.chan.two_g: if self.chan.cck: return '.11b' elif self.chan.ofdm or self.chan.dynamic: return '.11g' return 'None'
[ "def", "extract_protocol", "(", "self", ")", ":", "if", "self", ".", "present", ".", "mcs", ":", "return", "'.11n'", "if", "self", ".", "present", ".", "vht", ":", "return", "'.11ac'", "if", "self", ".", "present", ".", "channel", "and", "hasattr", "("...
30.041667
12.833333
def _get(self, url, params=None): """ Wrapper method for GET calls. """ self._call(self.GET, url, params, None)
[ "def", "_get", "(", "self", ",", "url", ",", "params", "=", "None", ")", ":", "self", ".", "_call", "(", "self", ".", "GET", ",", "url", ",", "params", ",", "None", ")" ]
41.666667
4.666667
def transfer_project(self, to_namespace, **kwargs): """Transfer a project to the given namespace ID Args: to_namespace (str): ID or path of the namespace to transfer the project to **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTransferProjectError: If the project could not be transfered """ path = '/projects/%s/transfer' % (self.id,) self.manager.gitlab.http_put(path, post_data={"namespace": to_namespace}, **kwargs)
[ "def", "transfer_project", "(", "self", ",", "to_namespace", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'/projects/%s/transfer'", "%", "(", "self", ".", "id", ",", ")", "self", ".", "manager", ".", "gitlab", ".", "http_put", "(", "path", ",", "po...
42.125
21.75
def find_target_container(portal_type, record): """Locates a target container for the given portal_type and record :param record: The dictionary representation of a content object :type record: dict :returns: folder which contains the object :rtype: object """ portal_type = portal_type or record.get("portal_type") container = get_container_for(portal_type) if container: return container parent_uid = record.pop("parent_uid", None) parent_path = record.pop("parent_path", None) target = None # Try to find the target object if parent_uid: target = get_object_by_uid(parent_uid) elif parent_path: target = get_object_by_path(parent_path) else: fail(404, "No target UID/PATH information found") if not target: fail(404, "No target container found") return target
[ "def", "find_target_container", "(", "portal_type", ",", "record", ")", ":", "portal_type", "=", "portal_type", "or", "record", ".", "get", "(", "\"portal_type\"", ")", "container", "=", "get_container_for", "(", "portal_type", ")", "if", "container", ":", "retu...
28.366667
19.4
def _buildTemplates(self): """ OVERRIDING THIS METHOD from Factory """ jsontree_classes = build_D3treeStandard( 0, 99, 1, self.ontospy_graph.toplayer_classes) c_total = len(self.ontospy_graph.all_classes) JSON_DATA_CLASSES = json.dumps({ 'children': jsontree_classes, 'name': 'owl:Thing', }) extra_context = { "ontograph": self.ontospy_graph, "TOTAL_CLASSES": c_total, 'JSON_DATA_CLASSES': JSON_DATA_CLASSES, } # Ontology - MAIN PAGE contents = self._renderTemplate( "d3/d3_rotatingCluster.html", extraContext=extra_context) FILE_NAME = "index.html" main_url = self._save2File(contents, FILE_NAME, self.output_path) return main_url
[ "def", "_buildTemplates", "(", "self", ")", ":", "jsontree_classes", "=", "build_D3treeStandard", "(", "0", ",", "99", ",", "1", ",", "self", ".", "ontospy_graph", ".", "toplayer_classes", ")", "c_total", "=", "len", "(", "self", ".", "ontospy_graph", ".", ...
29.851852
16.888889
def add_accent_char(char, accent): """ Add accent to a single char. Parameter accent is member of class Accent """ if char == "": return "" case = char.isupper() char = char.lower() index = utils.VOWELS.find(char) if (index != -1): index = index - index % 6 + 5 char = utils.VOWELS[index - accent] return utils.change_case(char, case)
[ "def", "add_accent_char", "(", "char", ",", "accent", ")", ":", "if", "char", "==", "\"\"", ":", "return", "\"\"", "case", "=", "char", ".", "isupper", "(", ")", "char", "=", "char", ".", "lower", "(", ")", "index", "=", "utils", ".", "VOWELS", "."...
27.571429
12.285714
def name_backbone(name, rank=None, kingdom=None, phylum=None, clazz=None, order=None, family=None, genus=None, strict=False, verbose=False, offset=None, limit=100, **kwargs): ''' Lookup names in the GBIF backbone taxonomy. :param name: [str] Full scientific name potentially with authorship (required) :param rank: [str] The rank given as our rank enum. (optional) :param kingdom: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param phylum: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param class: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param order: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param family: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param genus: [str] If provided default matching will also try to match against this if no direct match is found for the name alone. (optional) :param strict: [bool] If True it (fuzzy) matches only the given name, but never a taxon in the upper classification (optional) :param verbose: [bool] If True show alternative matches considered which had been rejected. :param offset: [int] Record to start at. Default: ``0`` :param limit: [int] Number of results to return. Default: ``100`` A list for a single taxon with many slots (with ``verbose=False`` - default), or a list of length two, first element for the suggested taxon match, and a data.frame with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``). If you don't get a match GBIF gives back a list of length 3 with slots synonym, confidence, and ``matchType='NONE'``. 
reference: http://www.gbif.org/developer/species#searching Usage:: from pygbif import species species.name_backbone(name='Helianthus annuus', kingdom='plants') species.name_backbone(name='Helianthus', rank='genus', kingdom='plants') species.name_backbone(name='Poa', rank='genus', family='Poaceae') # Verbose - gives back alternatives species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True) # Strictness species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False) species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True) # Non-existent name species.name_backbone(name='Aso') # Multiple equal matches species.name_backbone(name='Oenante') ''' url = gbif_baseurl + 'species/match' args = {'name': name, 'rank': rank, 'kingdom': kingdom, 'phylum': phylum, 'class': clazz, 'order': order, 'family': family, 'genus': genus, 'strict': strict, 'verbose': verbose, 'offset': offset, 'limit': limit} tt = gbif_GET(url, args, **kwargs) return tt
[ "def", "name_backbone", "(", "name", ",", "rank", "=", "None", ",", "kingdom", "=", "None", ",", "phylum", "=", "None", ",", "clazz", "=", "None", ",", "order", "=", "None", ",", "family", "=", "None", ",", "genus", "=", "None", ",", "strict", "=",...
50.786885
30.360656
def _check_authorization(self, properties, stream): """Check authorization id and other properties returned by the authentication mechanism. [receiving entity only] Allow only no authzid or authzid equal to current username@domain FIXME: other rules in s2s :Parameters: - `properties`: data obtained during authentication :Types: - `properties`: mapping :return: `True` if user is authorized to use a provided authzid :returntype: `bool` """ authzid = properties.get("authzid") if not authzid: return True try: jid = JID(authzid) except ValueError: return False if "username" not in properties: result = False elif jid.local != properties["username"]: result = False elif jid.domain != stream.me.domain: result = False elif jid.resource: result = False else: result = True return result
[ "def", "_check_authorization", "(", "self", ",", "properties", ",", "stream", ")", ":", "authzid", "=", "properties", ".", "get", "(", "\"authzid\"", ")", "if", "not", "authzid", ":", "return", "True", "try", ":", "jid", "=", "JID", "(", "authzid", ")", ...
28
18.189189
def bytes_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): """ Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string """ if include_text: block.text = bytes_cast(block.text, **kwargs) if include_link_tokens: block.link_tokens = bytes_list_cast(block.link_tokens, **kwargs) if include_css: block.css = bytes_dict_cast(block.css, **kwargs) if include_features: block.features = bytes_dict_cast(block.features, **kwargs) return block
[ "def", "bytes_block_cast", "(", "block", ",", "include_text", "=", "True", ",", "include_link_tokens", "=", "True", ",", "include_css", "=", "True", ",", "include_features", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "include_text", ":", "block", ...
36.444444
14.611111
def get_subport_statistics(self, id_or_uri, port_name, subport_number): """ Gets the subport statistics on an interconnect. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. port_name (str): A specific port name of an interconnect. subport_number (int): The subport. Returns: dict: The statistics for the interconnect that matches id, port_name, and subport_number. """ uri = self._client.build_uri(id_or_uri) + "/statistics/{0}/subport/{1}".format(port_name, subport_number) return self._client.get(uri)
[ "def", "get_subport_statistics", "(", "self", ",", "id_or_uri", ",", "port_name", ",", "subport_number", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/statistics/{0}/subport/{1}\"", ".", "format", "(", "port_name...
44.642857
28.071429
def RFC3339(self): """RFC3339. `Link to RFC3339.`__ __ https://www.ietf.org/rfc/rfc3339.txt """ # get timezone offset delta_sec = time.timezone m, s = divmod(delta_sec, 60) h, m = divmod(m, 60) # timestamp format_string = "%Y-%m-%dT%H:%M:%S.%f" out = self.datetime.strftime(format_string) # timezone if delta_sec == 0.: out += "Z" else: if delta_sec > 0: sign = "+" elif delta_sec < 0: sign = "-" def as_string(num): return str(np.abs(int(num))).zfill(2) out += sign + as_string(h) + ":" + as_string(m) return out
[ "def", "RFC3339", "(", "self", ")", ":", "# get timezone offset", "delta_sec", "=", "time", ".", "timezone", "m", ",", "s", "=", "divmod", "(", "delta_sec", ",", "60", ")", "h", ",", "m", "=", "divmod", "(", "m", ",", "60", ")", "# timestamp", "forma...
25.642857
16.571429
def getParameter(self, name, index=-1): """ Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`. """ if name == "trainRecords": return self.trainRecords elif name == "anomalyThreshold": return self.anomalyThreshold elif name == "activeColumnCount": return self._activeColumnCount elif name == "classificationMaxDist": return self._classificationMaxDist else: # If any spec parameter name is the same as an attribute, this call # will get it automatically, e.g. self.learningMode return PyRegion.getParameter(self, name, index)
[ "def", "getParameter", "(", "self", ",", "name", ",", "index", "=", "-", "1", ")", ":", "if", "name", "==", "\"trainRecords\"", ":", "return", "self", ".", "trainRecords", "elif", "name", "==", "\"anomalyThreshold\"", ":", "return", "self", ".", "anomalyTh...
37.8125
10.5625
def xlsx_to_strio(xlsx_wb): """ convert xlwt Workbook instance to a BytesIO instance """ _xlrd_required() fh = BytesIO() xlsx_wb.filename = fh xlsx_wb.close() # prep for reading fh.seek(0) return fh
[ "def", "xlsx_to_strio", "(", "xlsx_wb", ")", ":", "_xlrd_required", "(", ")", "fh", "=", "BytesIO", "(", ")", "xlsx_wb", ".", "filename", "=", "fh", "xlsx_wb", ".", "close", "(", ")", "# prep for reading", "fh", ".", "seek", "(", "0", ")", "return", "f...
21.090909
16.545455
def lacp_timeout(self, **kwargs): """Set lacp timeout. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, etc) timeout (str): Timeout length. (short, long) name (str): Name of interface. (1/0/5, 1/0/10, etc) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type`, `name`, or `timeout` is not specified. ValueError: if `int_type`, `name`, or `timeout is not valid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> int_type = 'tengigabitethernet' >>> name = '225/0/39' >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.channel_group(name=name, ... int_type=int_type, port_int='1', ... channel_type='standard', mode='active') ... output = dev.interface.lacp_timeout(name=name, ... int_type=int_type, timeout='long') ... dev.interface.lacp_timeout() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError """ int_type = kwargs.pop('int_type').lower() name = kwargs.pop('name') timeout = kwargs.pop('timeout') callback = kwargs.pop('callback', self._callback) int_types = [ 'gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet' ] if int_type not in int_types: raise ValueError("Incorrect int_type value.") valid_timeouts = ['long', 'short'] if timeout not in valid_timeouts: raise ValueError("Incorrect timeout value") timeout_args = dict(name=name, timeout=timeout) if not pynos.utilities.valid_interface(int_type, name): raise ValueError("Incorrect name value.") config = getattr( self._interface, 'interface_%s_lacp_timeout' % int_type )(**timeout_args) return callback(config)
[ "def", "lacp_timeout", "(", "self", ",", "*", "*", "kwargs", ")", ":", "int_type", "=", "kwargs", ".", "pop", "(", "'int_type'", ")", ".", "lower", "(", ")", "name", "=", "kwargs", ".", "pop", "(", "'name'", ")", "timeout", "=", "kwargs", ".", "pop...
37.41791
18.268657
def create_report(self, **params): """https://developers.coinbase.com/api/v2#generate-a-new-report""" if 'type' not in params and 'email' not in params: raise ValueError("Missing required parameter: 'type' or 'email'") response = self._post('v2', 'reports', data=params) return self._make_api_object(response, Report)
[ "def", "create_report", "(", "self", ",", "*", "*", "params", ")", ":", "if", "'type'", "not", "in", "params", "and", "'email'", "not", "in", "params", ":", "raise", "ValueError", "(", "\"Missing required parameter: 'type' or 'email'\"", ")", "response", "=", ...
59.333333
15.666667
def rpc_refactor(self, filename, method, args): """Return a list of changes from the refactoring action. A change is a dictionary describing the change. See elpy.refactor.translate_changes for a description. """ try: from elpy import refactor except: raise ImportError("Rope not installed, refactorings unavailable") if args is None: args = () ref = refactor.Refactor(self.project_root, filename) return ref.get_changes(method, *args)
[ "def", "rpc_refactor", "(", "self", ",", "filename", ",", "method", ",", "args", ")", ":", "try", ":", "from", "elpy", "import", "refactor", "except", ":", "raise", "ImportError", "(", "\"Rope not installed, refactorings unavailable\"", ")", "if", "args", "is", ...
35.333333
18.466667
def get_all_roles(resource_root, service_name, cluster_name="default", view=None): """ Get all roles @param resource_root: The root Resource object. @param service_name: Service name @param cluster_name: Cluster name @return: A list of ApiRole objects. """ return call(resource_root.get, _get_roles_path(cluster_name, service_name), ApiRole, True, params=view and dict(view=view) or None)
[ "def", "get_all_roles", "(", "resource_root", ",", "service_name", ",", "cluster_name", "=", "\"default\"", ",", "view", "=", "None", ")", ":", "return", "call", "(", "resource_root", ".", "get", ",", "_get_roles_path", "(", "cluster_name", ",", "service_name", ...
36.909091
11.636364
def get_commensurate_points_in_integers(supercell_matrix): """Commensurate q-points in integer representation are returned. A set of integer representation of lattice points is transformed to the equivalent set of lattice points in fractional coordinates with respect to supercell basis vectors by integer_lattice_points / det(supercell_matrix) Parameters ---------- supercell_matrix : array_like Supercell matrix with respect to primitive cell basis vectors. shape=(3, 3) dtype=intc Returns ------- lattice_points : ndarray Integer representation of lattice points in supercell. shape=(N, 3) """ smat = np.array(supercell_matrix, dtype=int) snf = SNF3x3(smat.T) snf.run() D = snf.A.diagonal() b, c, a = np.meshgrid(range(D[1]), range(D[2]), range(D[0])) lattice_points = np.dot(np.c_[a.ravel() * D[1] * D[2], b.ravel() * D[0] * D[2], c.ravel() * D[0] * D[1]], snf.Q.T) lattice_points = np.array(lattice_points % np.prod(D), dtype='intc', order='C') return lattice_points
[ "def", "get_commensurate_points_in_integers", "(", "supercell_matrix", ")", ":", "smat", "=", "np", ".", "array", "(", "supercell_matrix", ",", "dtype", "=", "int", ")", "snf", "=", "SNF3x3", "(", "smat", ".", "T", ")", "snf", ".", "run", "(", ")", "D", ...
35.30303
21.212121
def _dT_h_delta(T_in_kK, eta, k, threenk, c_v): """ internal function for calculation of temperature along a Hugoniot :param T_in_kK: temperature in kK scale, see Jamieson for detail :param eta: = 1 - rho0/rho :param k: = [rho0, c0, s, gamma0, q, theta0] :param threenk: see the definition in Jamieson 1983, it is a correction term mostly for Jamieson gold scale :param c_v: manual input of Cv value, if 0 calculated through Debye function :return: eta derivative of temperature """ rho0 = k[0] # g/m^3 gamma0 = k[3] # no unit q = k[4] # no unit theta0_in_kK = k[5] # K, see Jamieson 1983 for detail rho = rho0 / (1. - eta) c0 = k[1] # km/s s = k[2] # no unit dPhdelta_H = rho0 * c0 * c0 * (1. + s * eta) / \ np.power((1. - s * eta), 3.) # [g/cm^3][km/s]^2 = 1e9[kg m^2/s^2] = [GPa] Ph = hugoniot_p(rho, rho0, c0, s) # in [GPa] # calculate Cv gamma = gamma0 * np.power((1. - eta), q) theta_in_kK = theta0_in_kK * np.exp((gamma0 - gamma) / q) x = theta_in_kK / T_in_kK debye3 = debye_E(x) if c_v == 0.: c_v = threenk * (4. * debye3 - 3. * x / (np.exp(x) - 1.)) # [J/g/K] # calculate dYdX dYdX = (gamma / (1. - eta) * T_in_kK) + (dPhdelta_H * eta - Ph) / \ (2. * c_v * rho0) # print('dYdX', dYdX) return dYdX
[ "def", "_dT_h_delta", "(", "T_in_kK", ",", "eta", ",", "k", ",", "threenk", ",", "c_v", ")", ":", "rho0", "=", "k", "[", "0", "]", "# g/m^3", "gamma0", "=", "k", "[", "3", "]", "# no unit", "q", "=", "k", "[", "4", "]", "# no unit", "theta0_in_kK...
37.111111
15.388889
def peek(self, size=-1): """ Return bytes from the stream without advancing the position. Args: size (int): Number of bytes to read. -1 to read the full stream. Returns: bytes: bytes read """ if not self._readable: raise UnsupportedOperation('read') with self._seek_lock: self._raw.seek(self._seek) return self._raw._peek(size)
[ "def", "peek", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "not", "self", ".", "_readable", ":", "raise", "UnsupportedOperation", "(", "'read'", ")", "with", "self", ".", "_seek_lock", ":", "self", ".", "_raw", ".", "seek", "(", "self", ...
26.235294
17.647059
def export_mesh(mesh, file_obj, file_type=None, **kwargs): """ Export a Trimesh object to a file- like object, or to a filename Parameters --------- file_obj : str, file-like Where should mesh be exported to file_type : str or None Represents file type (eg: 'stl') Returns ---------- exported : bytes or str Result of exporter """ # if we opened a file object in this function # we will want to close it when we're done was_opened = False if util.is_string(file_obj): if file_type is None: file_type = (str(file_obj).split('.')[-1]).lower() if file_type in _mesh_exporters: was_opened = True file_obj = open(file_obj, 'wb') file_type = str(file_type).lower() if not (file_type in _mesh_exporters): raise ValueError('%s exporter not available!', file_type) if isinstance(mesh, (list, tuple, set, np.ndarray)): faces = 0 for m in mesh: faces += len(m.faces) log.debug('Exporting %d meshes with a total of %d faces as %s', len(mesh), faces, file_type.upper()) else: log.debug('Exporting %d faces as %s', len(mesh.faces), file_type.upper()) export = _mesh_exporters[file_type](mesh, **kwargs) if hasattr(file_obj, 'write'): result = util.write_encoded(file_obj, export) else: result = export if was_opened: file_obj.close() return result
[ "def", "export_mesh", "(", "mesh", ",", "file_obj", ",", "file_type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# if we opened a file object in this function", "# we will want to close it when we're done", "was_opened", "=", "False", "if", "util", ".", "is_stri...
28.72549
18.764706
def last_child(self): """ Get the latest PID as pointed by the Head PID. If the 'pid' is a Head PID, return the latest of its children. If the 'pid' is a Version PID, return the latest of its siblings. Return None for the non-versioned PIDs. """ return self.children.filter( PIDRelation.index.isnot(None)).ordered().first()
[ "def", "last_child", "(", "self", ")", ":", "return", "self", ".", "children", ".", "filter", "(", "PIDRelation", ".", "index", ".", "isnot", "(", "None", ")", ")", ".", "ordered", "(", ")", ".", "first", "(", ")" ]
38.3
16.7
def get_backend(self, backend_name): """ returns the given backend instance """ if backend_name == 'twitter': from social_friends_finder.backends.twitter_backend import TwitterFriendsProvider friends_provider = TwitterFriendsProvider() elif backend_name == 'facebook': from social_friends_finder.backends.facebook_backend import FacebookFriendsProvider friends_provider = FacebookFriendsProvider() elif backend_name == 'vkontakte-oauth2': from social_friends_finder.backends.vkontakte_backend import VKontakteFriendsProvider friends_provider = VKontakteFriendsProvider() else: raise NotImplementedError("provider: %s is not implemented") return friends_provider
[ "def", "get_backend", "(", "self", ",", "backend_name", ")", ":", "if", "backend_name", "==", "'twitter'", ":", "from", "social_friends_finder", ".", "backends", ".", "twitter_backend", "import", "TwitterFriendsProvider", "friends_provider", "=", "TwitterFriendsProvider...
46.705882
19.882353
def _walk_req_to_install(self, handler): """Call handler for all pending reqs. :param handler: Handle a single requirement. Should take a requirement to install. Can optionally return an iterable of additional InstallRequirements to cover. """ # The list() here is to avoid potential mutate-while-iterating bugs. discovered_reqs = [] reqs = itertools.chain( list(self.unnamed_requirements), list(self.requirements.values()), discovered_reqs) for req_to_install in reqs: more_reqs = handler(req_to_install) if more_reqs: discovered_reqs.extend(more_reqs)
[ "def", "_walk_req_to_install", "(", "self", ",", "handler", ")", ":", "# The list() here is to avoid potential mutate-while-iterating bugs.", "discovered_reqs", "=", "[", "]", "reqs", "=", "itertools", ".", "chain", "(", "list", "(", "self", ".", "unnamed_requirements",...
42.6875
15.8125
def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to turn of month. """ start_date = self._align(finite_start) aligned_stop = self._align(finite_stop) dates = [] for m in itertools.count(): t = start_date + relativedelta(months=m) if t >= aligned_stop: return dates if t >= finite_start: dates.append(t)
[ "def", "finite_datetimes", "(", "self", ",", "finite_start", ",", "finite_stop", ")", ":", "start_date", "=", "self", ".", "_align", "(", "finite_start", ")", "aligned_stop", "=", "self", ".", "_align", "(", "finite_stop", ")", "dates", "=", "[", "]", "for...
36.461538
10.461538
def set_cpu_property(self, property_p, value): """Sets the virtual CPU boolean value of the specified property. in property_p of type :class:`CPUPropertyType` Property type to query. in value of type bool Property value. raises :class:`OleErrorInvalidarg` Invalid property. """ if not isinstance(property_p, CPUPropertyType): raise TypeError("property_p can only be an instance of type CPUPropertyType") if not isinstance(value, bool): raise TypeError("value can only be an instance of type bool") self._call("setCPUProperty", in_p=[property_p, value])
[ "def", "set_cpu_property", "(", "self", ",", "property_p", ",", "value", ")", ":", "if", "not", "isinstance", "(", "property_p", ",", "CPUPropertyType", ")", ":", "raise", "TypeError", "(", "\"property_p can only be an instance of type CPUPropertyType\"", ")", "if", ...
36.368421
16.947368
def get_cached_image(self, width, height, zoom, parameters=None, clear=False):
    """Get ImageSurface object, if possible, cached

    The method checks whether the image was already rendered. This is done by comparing the passed size and
    parameters with those of the last image. If they are equal, the cached image is returned. Otherwise, a new
    ImageSurface with the specified dimensions is created and returned.

    :param width: The width of the image
    :param height: The height of the image
    :param zoom: The current scale/zoom factor
    :param parameters: The parameters used for the image
    :param clear: If True, the cache is emptied, thus the image won't be retrieved from cache
    :returns: The flag is True when the image is retrieved from the cache, otherwise False; The cached image
      surface or a blank one with the desired size; The zoom parameter when the image was stored
    :rtype: bool, ImageSurface, float
    """
    # Module-level budget for the maximum surface area; shrunk below when
    # surface allocation fails.
    global MAX_ALLOWED_AREA
    if not parameters:
        parameters = {}

    # Cache hit: same size/zoom/parameters as the last render and no forced
    # clear -> reuse the stored surface and the zoom it was rendered at.
    if self.__compare_parameters(width, height, zoom, parameters) and not clear:
        return True, self.__image, self.__zoom

    # Restrict image surface size to prevent excessive use of memory
    while True:
        try:
            self.__limiting_multiplicator = 1
            # Requested area at the current zoom; if it exceeds the budget,
            # scale both dimensions down uniformly via sqrt so that
            # width * height lands back on MAX_ALLOWED_AREA.
            area = width * zoom * self.__zoom_multiplicator * height * zoom * self.__zoom_multiplicator
            if area > MAX_ALLOWED_AREA:
                self.__limiting_multiplicator = sqrt(MAX_ALLOWED_AREA / area)
            # NOTE(review): presumably cairo.ImageSurface / cairo.Error --
            # allocation can fail for overly large surfaces; confirm imports.
            image = ImageSurface(self.__format, int(ceil(width * zoom * self.multiplicator)),
                                 int(ceil(height * zoom * self.multiplicator)))
            break  # If we reach this point, the area was successfully allocated and we can break the loop
        except Error:
            # Allocation failed: shrink the global budget by 20% and retry.
            MAX_ALLOWED_AREA *= 0.8

    self.__set_cached_image(image, width, height, zoom, parameters)
    return False, self.__image, zoom
[ "def", "get_cached_image", "(", "self", ",", "width", ",", "height", ",", "zoom", ",", "parameters", "=", "None", ",", "clear", "=", "False", ")", ":", "global", "MAX_ALLOWED_AREA", "if", "not", "parameters", ":", "parameters", "=", "{", "}", "if", "self...
54.315789
30.315789
def has_no_dangling_branch(neuron):
    '''Check if the neuron has dangling neurites

    A neurite is considered dangling when its first point lies well outside
    the soma (see ``is_dangling`` below). Returns a CheckResult whose status
    is True when no dangling neurite was found, and whose info lists the
    (root node id, [first point]) pairs of the offenders.
    '''
    # Soma geometry: center of the soma points and the largest distance of
    # any soma point from that center.
    soma_center = neuron.soma.points[:, COLS.XYZ].mean(axis=0)
    recentered_soma = neuron.soma.points[:, COLS.XYZ] - soma_center
    radius = np.linalg.norm(recentered_soma, axis=1)
    soma_max_radius = radius.max()

    def is_dangling(neurite):
        '''Is the neurite dangling ?'''
        starting_point = neurite.points[1][COLS.XYZ]

        # Close enough to the soma surface (within 12 length units -- units
        # presumably microns, TODO confirm) -> not dangling.
        if np.linalg.norm(starting_point - soma_center) - soma_max_radius <= 12.:
            return False

        # Any non-axon neurite that starts far from the soma is dangling.
        if neurite.type != NeuriteType.axon:
            return True

        # Axons get a second chance: they may legitimately start on another
        # (non-axon) neurite rather than on the soma.
        # NOTE(review): this iterates iter_neurites(neurite) but filters for
        # non-axon types; since `neurite` is an axon here, the filter looks
        # like it could yield nothing -- verify whether `neuron` was intended.
        all_points = list(chain.from_iterable(n.points[1:]
                                              for n in iter_neurites(neurite)
                                              if n.type != NeuriteType.axon))
        # Dangling if the axon start is farther than (2 * radius + 2) from
        # every candidate attachment point.
        res = [np.linalg.norm(starting_point - p[COLS.XYZ]) >= 2 * p[COLS.R] + 2
               for p in all_points]
        return all(res)

    # Collect the root node id and first point of each dangling neurite.
    bad_ids = [(n.root_node.id, [n.root_node.points[1]])
               for n in iter_neurites(neuron) if is_dangling(n)]
    return CheckResult(len(bad_ids) == 0, bad_ids)
[ "def", "has_no_dangling_branch", "(", "neuron", ")", ":", "soma_center", "=", "neuron", ".", "soma", ".", "points", "[", ":", ",", "COLS", ".", "XYZ", "]", ".", "mean", "(", "axis", "=", "0", ")", "recentered_soma", "=", "neuron", ".", "soma", ".", "...
41.222222
21.814815
def _process_non_parallel(self, X: Union[pd.DataFrame, np.ndarray], n_refs: int, cluster_array: np.ndarray): """ Process calling of .calculate_gap() method using no parallel backend; simple for loop generator """ for gap_value, n_clusters in [self._calculate_gap(X, n_refs, n_clusters) for n_clusters in cluster_array]: yield (gap_value, n_clusters)
[ "def", "_process_non_parallel", "(", "self", ",", "X", ":", "Union", "[", "pd", ".", "DataFrame", ",", "np", ".", "ndarray", "]", ",", "n_refs", ":", "int", ",", "cluster_array", ":", "np", ".", "ndarray", ")", ":", "for", "gap_value", ",", "n_clusters...
60.714286
29
def weekday_to_str(
    weekday: Union[int, str], *, inverse: bool = False
) -> Union[int, str, None]:
    """
    Given a weekday number (integer in the range 0, 1, ..., 6), return its
    corresponding weekday name as a lowercase string.
    Here 0 -> 'monday', 1 -> 'tuesday', and so on.
    If ``inverse``, then perform the inverse operation.
    Return None when the lookup fails.
    """
    names = [
        "monday",
        "tuesday",
        "wednesday",
        "thursday",
        "friday",
        "saturday",
        "sunday",
    ]
    if inverse:
        # Name -> number.
        try:
            return names.index(weekday)
        except ValueError:
            # Unknown weekday name; preserve the original "return None" contract.
            # (The original used a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit -- narrowed here.)
            return None
    # Number -> name. Negative indices wrap (e.g. -1 -> 'sunday'), matching
    # the original list-indexing behavior.
    try:
        return names[weekday]
    except (IndexError, TypeError):
        # Out-of-range index or non-integer input -> None, as before.
        return None
[ "def", "weekday_to_str", "(", "weekday", ":", "Union", "[", "int", ",", "str", "]", ",", "*", ",", "inverse", ":", "bool", "=", "False", ")", "->", "Union", "[", "int", ",", "str", "]", ":", "s", "=", "[", "\"monday\"", ",", "\"tuesday\"", ",", "...
23.678571
20.107143
def gen_colors(img):
    """Generate a colorscheme using Haishoku.

    :param img: image reference passed straight through to
        ``Haishoku.getPalette``.
    :returns: list of hex color strings, one per palette entry.
    """
    # NOTE(review): the previous docstring said "using Colorz", but the
    # implementation clearly uses Haishoku.
    palette = Haishoku.getPalette(img)
    # col[1] is presumably the (r, g, b) component of each palette entry --
    # confirm against Haishoku's getPalette return format.
    return [util.rgb_to_hex(col[1]) for col in palette]
[ "def", "gen_colors", "(", "img", ")", ":", "palette", "=", "Haishoku", ".", "getPalette", "(", "img", ")", "return", "[", "util", ".", "rgb_to_hex", "(", "col", "[", "1", "]", ")", "for", "col", "in", "palette", "]" ]
39.75
9.25
def same_notebook_code(nb1, nb2):
    """Return True when the code cells of notebooks `nb1` and `nb2` match.

    Two notebooks match when they contain the same number of cells, every
    pair of corresponding cells shares a cell type, and every pair of code
    cells has identical source. Non-code cell contents are ignored.
    """
    cells1 = nb1['cells']
    cells2 = nb2['cells']
    # Different cell counts can never match.
    if len(cells1) != len(cells2):
        return False
    for cell1, cell2 in zip(cells1, cells2):
        # Corresponding cells must be of the same type.
        if cell1['cell_type'] != cell2['cell_type']:
            return False
        # Only code cells are compared by source.
        if cell1['cell_type'] == 'code' and cell1['source'] != cell2['source']:
            return False
    return True
[ "def", "same_notebook_code", "(", "nb1", ",", "nb2", ")", ":", "# Notebooks do not match of the number of cells differ", "if", "len", "(", "nb1", "[", "'cells'", "]", ")", "!=", "len", "(", "nb2", "[", "'cells'", "]", ")", ":", "return", "False", "# Iterate ov...
32.956522
20.782609