Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
373,300
def edit_inputs(client, workflow): types = { : int, : str, : lambda x: File(path=Path(x).resolve()), } for input_ in workflow.inputs: convert = types.get(input_.type, str) input_.default = convert( click.prompt( .format(input_), default=_format_default(client, input_.default), ) ) return workflow
Edit workflow inputs.
373,301
def emit(self, action, payload=None, retry=0): payload = payload or {} if retry: _retry = self.transport.retry(retry) emit = _retry(self.transport.emit) else: emit = self.transport.emit return emit(action, payload)
Emit action with payload. :param action: an action slug :param payload: data, default {} :param retry: integer, default 0. :return: information in form of dict.
373,302
def _load_image_labels(self): temp = [] for idx in self.image_set_index: label_file = self._label_path_from_index(idx) tree = ET.parse(label_file) root = tree.getroot() size = root.find() width = float(size.find().text) height = float(size.find().text) label = [] for obj in root.iter(): difficult = int(obj.find().text) cls_name = obj.find().text if cls_name not in self.classes: continue cls_id = self.classes.index(cls_name) xml_box = obj.find() xmin = float(xml_box.find().text) / width ymin = float(xml_box.find().text) / height xmax = float(xml_box.find().text) / width ymax = float(xml_box.find().text) / height label.append([cls_id, xmin, ymin, xmax, ymax, difficult]) temp.append(np.array(label)) return temp
preprocess all ground-truths Returns: ---------- labels packed in [num_images x max_num_objects x 5] tensor
373,303
def hierarchy_spectrum(mg, filter=True, plot=False): real_table = [[, , , , , ]] imag_table = [[, , , , , ]] for i in range(len(mg.levels)): A = mg.levels[i].A.tocsr() if filter is True: A.eliminate_zeros() nnz_per_row = A.indptr[0:-1] - A.indptr[1:] nonzero_rows = (nnz_per_row != 0).nonzero()[0] A = A.tocsc() nnz_per_col = A.indptr[0:-1] - A.indptr[1:] nonzero_cols = (nnz_per_col != 0).nonzero()[0] nonzero_rowcols = sp.union1d(nonzero_rows, nonzero_cols) A = np.mat(A.todense()) A = A[nonzero_rowcols, :][:, nonzero_rowcols] else: A = np.mat(A.todense()) e = eigvals(A) c = cond(A) lambda_min = min(sp.real(e)) lambda_max = max(sp.real(e)) num_neg = max(e[sp.real(e) < 0.0].shape) num_pos = max(e[sp.real(e) > 0.0].shape) real_table.append([str(i), ( % lambda_min), ( % lambda_max), str(num_neg), str(num_pos), ( % c)]) lambda_min = min(sp.imag(e)) lambda_max = max(sp.imag(e)) num_neg = max(e[sp.imag(e) < 0.0].shape) num_pos = max(e[sp.imag(e) > 0.0].shape) imag_table.append([str(i), ( % lambda_min), ( % lambda_max), str(num_neg), str(num_pos), ( % c)]) if plot: import pylab pylab.figure(i+1) pylab.plot(sp.real(e), sp.imag(e), ) handle = pylab.title( % i) handle.set_fontsize(19) handle = pylab.xlabel() handle.set_fontsize(17) handle = pylab.ylabel() handle.set_fontsize(17) print(print_table(real_table)) print(print_table(imag_table)) if plot: pylab.show()
Examine a multilevel hierarchy's spectrum. Parameters ---------- mg { pyamg multilevel hierarchy } e.g. generated with smoothed_aggregation_solver(...) or ruge_stuben_solver(...) Returns ------- (1) table to standard out detailing the spectrum of each level in mg (2) if plot==True, a sequence of plots in the complex plane of the spectrum at each level Notes ----- This can be useful for troubleshooting and when examining how your problem's nature changes from level to level Examples -------- >>> from pyamg import smoothed_aggregation_solver >>> from pyamg.gallery import poisson >>> from pyamg.util.utils import hierarchy_spectrum >>> A = poisson( (1,), format='csr' ) >>> ml = smoothed_aggregation_solver(A) >>> hierarchy_spectrum(ml) <BLANKLINE> Level min(re(eig)) max(re(eig)) num re(eig) < 0 num re(eig) > 0 cond_2(A) --------------------------------------------------------------------------- 0 2.000 2.000 0 1 1.00e+00 <BLANKLINE> <BLANKLINE> Level min(im(eig)) max(im(eig)) num im(eig) < 0 num im(eig) > 0 cond_2(A) --------------------------------------------------------------------------- 0 0.000 0.000 0 0 1.00e+00 <BLANKLINE>
373,304
def assert_image_exists(self, pattern, timeout=20.0, **kwargs): pattern = self.d.pattern_open(pattern) match_kwargs = kwargs.copy() match_kwargs.pop(, None) match_kwargs.update({ : timeout, : True, }) res = self.d.wait(pattern, **match_kwargs) is_success = res is not None message = if res: x, y = res.pos kwargs[] = {: x, : y} message = % (res.pos, res.confidence, res.method) else: res = self.d.match(pattern) if res is None: message = else: th = kwargs.get() or pattern.threshold or self.image_match_threshold message = % ( res.matched, res.pos, res.confidence, th) kwargs[] = self._save_screenshot(pattern, name_prefix=) kwargs[] = self.last_screenshot kwargs.update({ : , : message, : is_success, }) self._add_assert(**kwargs)
Assert if image exists Args: - pattern: image filename # not support pattern for now - timeout (float): seconds - safe (bool): not raise assert error even throung failed.
373,305
def validate_unit_process_ids(self, expected, actual): self.log.debug() self.log.debug(.format(expected)) self.log.debug(.format(actual)) if len(actual) != len(expected): return ( .format(len(expected), len(actual))) for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info[] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] else: return ( .format(e_sentry_name, e_sentry)) if len(e_proc_names.keys()) != len(a_proc_names.keys()): return ( .format(len(expected), len(actual))) for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ zip(e_proc_names.items(), a_proc_names.items()): if e_proc_name != a_proc_name: return ( .format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) fail_msg = ( .format(e_sentry_name, e_proc_name, e_pids, a_pids_length, a_pids)) if isinstance(e_pids, list) and \ a_pids_length not in e_pids: return fail_msg elif not isinstance(e_pids, bool) and \ not isinstance(e_pids, list) and \ a_pids_length != e_pids: return fail_msg elif isinstance(e_pids, bool) and \ e_pids is True and a_pids_length < 1: return fail_msg elif isinstance(e_pids, bool) and \ e_pids is False and a_pids_length != 0: return fail_msg else: self.log.debug( .format(e_sentry_name, e_proc_name, e_pids, a_pids)) return None
Validate process id quantities for services on units.
373,306
def snmp_server_community_ipv4_acl(self, **kwargs): config = ET.Element("config") snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") community = ET.SubElement(snmp_server, "community") community_key = ET.SubElement(community, "community") community_key.text = kwargs.pop() ipv4_acl = ET.SubElement(community, "ipv4-acl") ipv4_acl.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
373,307
def convert_cifar10(directory, output_directory, output_filename=): output_path = os.path.join(output_directory, output_filename) h5file = h5py.File(output_path, mode=) input_file = os.path.join(directory, DISTRIBUTION_FILE) tar_file = tarfile.open(input_file, ) train_batches = [] for batch in range(1, 6): file = tar_file.extractfile( % batch) try: if six.PY3: array = cPickle.load(file, encoding=) else: array = cPickle.load(file) train_batches.append(array) finally: file.close() train_features = numpy.concatenate( [batch[].reshape(batch[].shape[0], 3, 32, 32) for batch in train_batches]) train_labels = numpy.concatenate( [numpy.array(batch[], dtype=numpy.uint8) for batch in train_batches]) train_labels = numpy.expand_dims(train_labels, 1) file = tar_file.extractfile() try: if six.PY3: test = cPickle.load(file, encoding=) else: test = cPickle.load(file) finally: file.close() test_features = test[].reshape(test[].shape[0], 3, 32, 32) test_labels = numpy.array(test[], dtype=numpy.uint8) test_labels = numpy.expand_dims(test_labels, 1) data = ((, , train_features), (, , train_labels), (, , test_features), (, , test_labels)) fill_hdf5_file(h5file, data) h5file[].dims[0].label = h5file[].dims[1].label = h5file[].dims[2].label = h5file[].dims[3].label = h5file[].dims[0].label = h5file[].dims[1].label = h5file.flush() h5file.close() return (output_path,)
Converts the CIFAR-10 dataset to HDF5. Converts the CIFAR-10 dataset to an HDF5 dataset compatible with :class:`fuel.datasets.CIFAR10`. The converted dataset is saved as 'cifar10.hdf5'. It assumes the existence of the following file: * `cifar-10-python.tar.gz` Parameters ---------- directory : str Directory in which input files reside. output_directory : str Directory in which to save the converted dataset. output_filename : str, optional Name of the saved dataset. Defaults to 'cifar10.hdf5'. Returns ------- output_paths : tuple of str Single-element tuple containing the path to the converted dataset.
373,308
def functions(self): out = {} for key in self._func_names: out[key[len(self._prefix):]] = getattr(self, key) return out
Returns a dictionary containing the functions defined in this object. The keys are function names (as exposed in templates) and the values are Python functions.
373,309
def delete(self, request, key): request.DELETE = http.QueryDict(request.body) email_addr = request.DELETE.get() user_id = request.DELETE.get() if not email_addr: return http.HttpResponseBadRequest() try: email = EmailAddressValidation.objects.get(address=email_addr, user_id=user_id) except EmailAddressValidation.DoesNotExist: pass else: email.delete() return http.HttpResponse(status=204) try: email = EmailAddress.objects.get(address=email_addr, user_id=user_id) except EmailAddress.DoesNotExist: raise http.Http404 email.user = None email.save() return http.HttpResponse(status=204)
Remove an email address, validated or not.
373,310
def make_owner(user): tutor_group, owner_group = _get_user_groups() user.is_staff = True user.is_superuser = False user.save() owner_group.user_set.add(user) owner_group.save() tutor_group.user_set.add(user) tutor_group.save()
Makes the given user a owner and tutor.
373,311
def keystoneclient(request, admin=False): client_version = VERSIONS.get_active_version() user = request.user token_id = user.token.id if is_multi_domain_enabled(): if is_domain_admin(request): domain_token = request.session.get() if domain_token: token_id = getattr(domain_token, , None) if admin: if not policy.check((("identity", "admin_required"),), request): raise exceptions.NotAuthorized endpoint_type = else: endpoint_type = getattr(settings, , ) cache_attr = "_keystoneclient_admin" if admin \ else backend.KEYSTONE_CLIENT_ATTR if (hasattr(request, cache_attr) and (not user.token.id or getattr(request, cache_attr).auth_token == user.token.id)): conn = getattr(request, cache_attr) else: endpoint = _get_endpoint_url(request, endpoint_type) verify = not getattr(settings, , False) cacert = getattr(settings, , None) verify = verify and cacert LOG.debug("Creating a new keystoneclient connection to %s.", endpoint) remote_addr = request.environ.get(, ) token_auth = token_endpoint.Token(endpoint=endpoint, token=token_id) keystone_session = session.Session(auth=token_auth, original_ip=remote_addr, verify=verify) conn = client_version[].Client(session=keystone_session, debug=settings.DEBUG) setattr(request, cache_attr, conn) return conn
Returns a client connected to the Keystone backend. Several forms of authentication are supported: * Username + password -> Unscoped authentication * Username + password + tenant id -> Scoped authentication * Unscoped token -> Unscoped authentication * Unscoped token + tenant id -> Scoped authentication * Scoped token -> Scoped authentication Available services and data from the backend will vary depending on whether the authentication was scoped or unscoped. Lazy authentication if an ``endpoint`` parameter is provided. Calls requiring the admin endpoint should have ``admin=True`` passed in as a keyword argument. The client is cached so that subsequent API calls during the same request/response cycle don't have to be re-authenticated.
373,312
def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, dep_predicate=None): walk = self._walk_factory(dep_predicate) ordered_closure = OrderedSet() to_walk = deque((0, addr) for addr in addresses) while len(to_walk) > 0: level, address = to_walk.popleft() if not walk.expand_once(address, level): continue target = self._target_by_address[address] if predicate and not predicate(target): continue if walk.do_work_once(address): ordered_closure.add(target) for dep_address in self._target_dependencies_by_address[address]: if walk.expanded_or_worked(dep_address): continue if walk.dep_predicate(target, self._target_by_address[dep_address], level): to_walk.append((level + 1, dep_address)) return ordered_closure
Returns the transitive dependency closure of `addresses` using BFS. :API: public :param list<Address> addresses: The closure of `addresses` will be walked. :param function predicate: If this parameter is not given, no Targets will be filtered out of the closure. If it is given, any Target which fails the predicate will not be walked, nor will its dependencies. Thus predicate effectively trims out any subgraph that would only be reachable through Targets that fail the predicate. :param function dep_predicate: Takes two parameters, the current target and the dependency of the current target. If this parameter is not given, no dependencies will be filtered when traversing the closure. If it is given, when the predicate fails, the edge to the dependency will not be expanded.
373,313
def lookup(self): if self.domain: if self.subdomain: self.domain_unsplit = % (self.subdomain, self.domain) else: self.domain_unsplit = self.domain self.domain_requested = self.domain_unsplit cache_key = % self.domain_unsplit site_id = cache.get(cache_key) if site_id: SITE_ID.value = site_id try: self.site = Site.objects.get(id=site_id) except Site.DoesNotExist: cache.delete(cache_key) else: return None try: self.site = Site.objects.get(domain=self.domain) except Site.DoesNotExist: return False if not self.site: return False SITE_ID.value = self.site.pk cache.set(cache_key, SITE_ID.value, 5*60) return None
The meat of this middleware. Returns None and sets settings.SITE_ID if able to find a Site object by domain and its subdomain is valid. Returns an HttpResponsePermanentRedirect to the Site's default subdomain if a site is found but the requested subdomain is not supported, or if domain_unsplit is defined in settings.HOSTNAME_REDIRECTS Otherwise, returns False.
373,314
def kvlclient(self): if self._kvlclient is None: self._kvlclient = kvlayer.client() return self._kvlclient
Return a thread local ``kvlayer`` client.
373,315
def connection_lost(self, exc=None): if self._loop.get_debug(): self.producer.logger.debug(, self) self.event().fire(exc=exc)
Fires the ``connection_lost`` event.
373,316
def _iteratively_analyze_function_features(self, all_funcs_completed=False): changes = { : set(), : set() } while True: new_changes = self._analyze_function_features(all_funcs_completed=all_funcs_completed) changes[] |= set(new_changes[]) changes[] |= set(new_changes[]) if not new_changes[] and not new_changes[]: break return changes
Iteratively analyze function features until a fixed point is reached. :return: the "changes" dict :rtype: dict
373,317
def read(self, size=None): if not self._is_open: raise IOError() if self._current_offset < 0: raise IOError( .format( self._current_offset)) if self._decrypted_stream_size is None: self._decrypted_stream_size = self._GetDecryptedStreamSize() if self._decrypted_stream_size < 0: raise IOError() if self._current_offset >= self._decrypted_stream_size: return b if self._realign_offset: self._AlignDecryptedDataOffset(self._current_offset) self._realign_offset = False if size is None: size = self._decrypted_stream_size if self._current_offset + size > self._decrypted_stream_size: size = self._decrypted_stream_size - self._current_offset decrypted_data = b if size == 0: return decrypted_data while size > self._decrypted_data_size: decrypted_data = b.join([ decrypted_data, self._decrypted_data[self._decrypted_data_offset:]]) remaining_decrypted_data_size = ( self._decrypted_data_size - self._decrypted_data_offset) self._current_offset += remaining_decrypted_data_size size -= remaining_decrypted_data_size if self._current_offset >= self._decrypted_stream_size: break read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE) self._decrypted_data_offset = 0 if read_count == 0: break if size > 0: slice_start_offset = self._decrypted_data_offset slice_end_offset = slice_start_offset + size decrypted_data = b.join([ decrypted_data, self._decrypted_data[slice_start_offset:slice_end_offset]]) self._decrypted_data_offset += size self._current_offset += size return decrypted_data
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
373,318
def ping(): try: response = salt.utils.http.query( "{0}/ping".format(CONFIG[CONFIG_BASE_URL]), decode_type=, decode=True, ) log.debug( , response, ) if in response and response[].strip() == : return True except Exception as ex: log.error( , CONFIG[CONFIG_BASE_URL], ex, ) return False
Is the marathon api responding?
373,319
def corrcoef(time, crossf, integration_window=0.): N = len(crossf) cc = np.zeros(np.shape(crossf)[:-1]) tbin = abs(time[1] - time[0]) lim = int(integration_window / tbin) if len(time)%2 == 0: mid = len(time)/2-1 else: mid = np.floor(len(time)/2.) for i in range(N): ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1]) offset_autoi = np.mean(crossf[i,i][:mid-1]) for j in range(N): cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1]) offset_cross = np.mean(crossf[i,j][:mid-1]) aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1]) offset_autoj = np.mean(crossf[j,j][:mid-1]) if ai > 0. and aj > 0.: cc[i, j] = (cij-offset_cross) / np.sqrt((ai-offset_autoi) * \ (aj-offset_autoj)) else: cc[i, j] = 0. return cc
Calculate the correlation coefficient for given auto- and crosscorrelation functions. Standard settings yield the zero lag correlation coefficient. Setting integration_window > 0 yields the correlation coefficient of integrated auto- and crosscorrelation functions. The correlation coefficient between a zero signal with any other signal is defined as 0. Parameters ---------- time : numpy.ndarray 1 dim array of times corresponding to signal. crossf : numpy.ndarray Crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times. integration_window: float Size of the integration window. Returns ------- cc : numpy.ndarray 2 dim array of correlation coefficient between two units.
373,320
def delete(handler, item_id, id_name): data = {: , : item_id, : id_name} handler.invoke(data)
Delete an item
373,321
def _createStructure(self, linkResult, replaceParamFile): WEIRS = (, ) CULVERTS = (, ) CURVES = (, , ) header = linkResult[] link = StreamLink(linkNumber=header[], type=linkResult[], numElements=header[]) link.channelInputFile = self for s in linkResult[]: structType = s[] if structType in WEIRS: weir = Weir(type=structType, crestLength=vrp(s[], replaceParamFile), crestLowElevation=vrp(s[], replaceParamFile), dischargeCoeffForward=vrp(s[], replaceParamFile), dischargeCoeffReverse=vrp(s[], replaceParamFile), crestLowLocation=vrp(s[], replaceParamFile), steepSlope=vrp(s[], replaceParamFile), shallowSlope=vrp(s[], replaceParamFile)) weir.streamLink = link elif structType in CULVERTS: culvert = Culvert(type=structType, upstreamInvert=vrp(s[], replaceParamFile), downstreamInvert=vrp(s[], replaceParamFile), inletDischargeCoeff=vrp(s[], replaceParamFile), reverseFlowDischargeCoeff=vrp(s[], replaceParamFile), slope=vrp(s[], replaceParamFile), length=vrp(s[], replaceParamFile), roughness=vrp(s[], replaceParamFile), diameter=vrp(s[], replaceParamFile), width=vrp(s[], replaceParamFile), height=vrp(s[], replaceParamFile)) culvert.streamLink = link elif structType in CURVES: pass return link
Create GSSHAPY Structure Objects Method
373,322
def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df): (row_type, row_ids) = check_id_idx_exclusivity(rid, ridx) (col_type, col_ids) = check_id_idx_exclusivity(cid, cidx) row_ids = check_and_convert_ids(row_type, row_ids, row_meta_df) ordered_ridx = get_ordered_idx(row_type, row_ids, row_meta_df) col_ids = check_and_convert_ids(col_type, col_ids, col_meta_df) ordered_cidx = get_ordered_idx(col_type, col_ids, col_meta_df) return (ordered_ridx, ordered_cidx)
Makes sure that (if entered) id inputs entered are of one type (string id or index) Input: - rid (list or None): if not None, a list of rids - ridx (list or None): if not None, a list of indexes - cid (list or None): if not None, a list of cids - cidx (list or None): if not None, a list of indexes Output: - a tuple of the ordered ridx and cidx
373,323
def median_date(dt_list): idx = len(dt_list)/2 if len(dt_list) % 2 == 0: md = mean_date([dt_list[idx-1], dt_list[idx]]) else: md = dt_list[idx] return md
Calcuate median datetime from datetime list
373,324
def set_datastore_policy(self, func): if func is None: func = self.default_datastore_policy elif isinstance(func, bool): func = lambda unused_key, flag=func: flag self._datastore_policy = func
Set the context datastore policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should use the datastore. May be None.
373,325
def _decode_png(self, encoded_observation): return self._session.obj.run( self._decoded_image_t.obj, feed_dict={self._encoded_image_p.obj: encoded_observation} )
Decodes a single observation from PNG.
373,326
def postprocess(options): resdir = options.resdir out_file = options.outfile tol = options.tol print() file_name = os.path.join(resdir,,) files = glob.glob(file_name) LLR0 = [] for _file in files: print(_file) LLR0.append(NP.loadtxt(_file,usecols=[6])) LLR0 = NP.concatenate(LLR0) print() t0 = time.time() c2m = C2M.Chi2mixture(tol=4e-3) c2m.estimate_chi2mixture(LLR0) pv0 = c2m.sf(LLR0) t1 = time.time() print((%(t1-t0))) print() perm_file = out_file+ RV = NP.array([LLR0,pv0]).T NP.savetxt(perm_file,RV,delimiter=,fmt=) print() file_name = os.path.join(resdir,,) files = glob.glob(file_name) RV_test = [] for _file in files: print(_file) RV_test.append(NP.loadtxt(_file)) RV_test = NP.concatenate(RV_test) print() pv = c2m.sf(RV_test[:,-1])[:,NP.newaxis] print() perm_file = out_file+ RV_test = NP.hstack([RV_test,pv]) NP.savetxt(perm_file,RV_test,delimiter=,fmt=) if options.manhattan: manhattan_file = out_file+ plot_manhattan(pv,manhattan_file)
perform parametric fit of the test statistics and provide permutation and test pvalues
373,327
def fig_height(self): return ( 4 + len(self.data) * len(self.var_names) - 1 + 0.1 * sum(1 for j in self.plotters.values() for _ in j.iterator()) )
Figure out the height of this plot.
373,328
def abort(self): if (self.reply and self.reply.isRunning()): self.on_abort = True self.reply.abort()
Handle request to cancel HTTP call
373,329
async def read(cls, id: int): data = await cls._handler.read(id=id) return cls(data)
Get `BootResource` by `id`.
373,330
def _gen_glob_data(dir, pattern, child_table): dir = pathlib.Path(dir) matched = False used_names = set() for filepath in sorted(dir.glob(pattern)): if filepath.is_dir(): continue else: matched = True node_table = {} if child_table is None else child_table.copy() filepath = filepath.relative_to(dir) node_table[RESERVED[]] = str(filepath) node_name = to_nodename(filepath.stem, invalid=used_names) used_names.add(node_name) print("Matched with {!r}: {!r} from {!r}".format(pattern, node_name, str(filepath))) yield node_name, node_table if not matched: print("Warning: {!r} matched no files.".format(pattern)) return
Generates node data by globbing a directory for a pattern
373,331
def getCell(self, row, width=None): cellval = wrapply(self.getValue, row) typedval = wrapply(self.type, cellval) if isinstance(typedval, TypedWrapper): if isinstance(cellval, TypedExceptionWrapper): exc = cellval.exception if cellval.forwarded: dispval = str(cellval) else: dispval = options.disp_error_val return DisplayWrapper(cellval.val, error=exc.stacktrace, display=dispval, note=options.note_getter_exc, notecolor=) elif typedval.val is None: return DisplayWrapper(None, display=, note=options.disp_note_none, notecolor=) elif isinstance(typedval, TypedExceptionWrapper): return DisplayWrapper(typedval.val, display=str(cellval), error=typedval.exception.stacktrace, note=options.note_type_exc, notecolor=) else: return DisplayWrapper(typedval.val, display=str(typedval.val), note=options.note_type_exc, notecolor=) elif isinstance(typedval, threading.Thread): return DisplayWrapper(None, display=options.disp_pending, note=options.note_pending, notecolor=) dw = DisplayWrapper(cellval) try: dw.display = self.format(typedval) or if width and isNumeric(self): dw.display = dw.display.rjust(width-1) if self.type is anytype and type(cellval) is not str: typedesc = typemap.get(type(cellval), None) dw.note = typedesc.icon if typedesc else options.note_unknown_type dw.notecolor = except Exception as e: e.stacktrace = stacktrace() dw.error = e try: dw.display = str(cellval) except Exception as e: dw.display = str(e) dw.note = options.note_format_exc dw.notecolor = return dw
Return DisplayWrapper for displayable cell value.
373,332
def write_document(document, out, validate=True): if validate: messages = [] messages = document.validate(messages) if messages: raise InvalidDocumentError(messages) writer = Writer(document, out) writer.write()
Write an SPDX RDF document. - document - spdx.document instance. - out - file like object that will be written to. Optionally `validate` the document before writing and raise InvalidDocumentError if document.validate returns False.
373,333
def get_file_size(filename): if os.path.isfile(filename): return convert_size(os.path.getsize(filename)) return None
Get the file size of a given file :param filename: string: pathname of a file :return: human readable filesize
373,334
def ladder_length(begin_word, end_word, word_list): if len(begin_word) != len(end_word): return -1 if begin_word == end_word: return 0 if sum(c1 != c2 for c1, c2 in zip(begin_word, end_word)) == 1: return 1 begin_set = set() end_set = set() begin_set.add(begin_word) end_set.add(end_word) result = 2 while begin_set and end_set: if len(begin_set) > len(end_set): begin_set, end_set = end_set, begin_set next_begin_set = set() for word in begin_set: for ladder_word in word_range(word): if ladder_word in end_set: return result if ladder_word in word_list: next_begin_set.add(ladder_word) word_list.remove(ladder_word) begin_set = next_begin_set result += 1 return -1
Bidirectional BFS!!! :type begin_word: str :type end_word: str :type word_list: Set[str] :rtype: int
373,335
def get_queryset(self, request): if not request.user.has_perm(): queryset = self.model.objects.filter(authors__pk=request.user.pk) else: queryset = super(EntryAdmin, self).get_queryset(request) return queryset.prefetch_related(, , )
Make special filtering by user's permissions.
373,336
def odata_converter(data, str_type): if not str_type: return _str(data) if str_type in ["Edm.Single", "Edm.Double"]: return float(data) elif "Edm.Int" in str_type: return int(data) else: return _str(data)
Convert odata type http://www.odata.org/documentation/odata-version-2-0/overview#AbstractTypeSystem To be completed
373,337
def action_log_create(sender, instance, created, **kwargs): if created: changes = model_instance_diff(None, instance) log_entry = LogAction.objects.create_log_action( instance=instance, action=LogAction.CREATE, changes=json.dumps(changes), )
Signal receiver that creates a log entry when a model instance is first saved to the database. Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.
373,338
def press_button(self, value): button = find_button(world.browser, value) if not button: raise AssertionError( "Cannot find a button named .".format(value)) button.click()
Click the button with the given label.
373,339
def validateAuthCode(code, redirect_uri, client_id, state=None, validationEndpoint=, headers={}): payload = {: code, : redirect_uri, : client_id, } if state is not None: payload[] = state authURL = None authEndpoints = discoverAuthEndpoints(client_id, headers=headers) for url in authEndpoints[]: authURL = url break if authURL is not None: validationEndpoint = ParseResult(authURL.scheme, authURL.netloc, authURL.path, , , ).geturl() r = requests.post(validationEndpoint, verify=True, data=payload, headers=headers) result = { : r.status_code, : r.headers } if in r.headers.get(, ): result[] = r.text else: result[] = r.content if r.status_code == requests.codes.ok: result[] = parse_qs(result[]) return result
Call authorization endpoint to validate given auth code. :param code: the auth code to validate :param redirect_uri: redirect_uri for the given auth code :param client_id: where to find the auth endpoint for the given auth code :param state: state for the given auth code :param validationEndpoint: URL to make the validation request at :param headers: optional headers to send with any request :rtype: True if auth code is valid
373,340
def terminate(self): logger.info(__( "Terminating Resolwe listener on channel .", state.MANAGER_EXECUTOR_CHANNELS.queue )) self._should_stop = True
Stop the standalone manager.
373,341
def get(self, name_or_klass): if not isinstance(name_or_klass, str): name_or_klass = name_or_klass.__name__ return self._modes[name_or_klass]
Gets a mode by name (or class) :param name_or_klass: The name or the class of the mode to get :type name_or_klass: str or type :rtype: pyqode.core.api.Mode
373,342
def reactToAMQPMessage(message, send_back): _hnas_protection() if _instanceof(message, SaveRequest): if _instanceof(message.record, Tree): tree_handler().add_tree(message.record) return TreeInfo( path=message.record.path, url_by_path=_compose_tree_url(message.record), url_by_issn=_compose_tree_url(message.record, issn_url=True), ) save_fn = save_publication class_ref = DBPublication if _instanceof(message.record, Archive): save_fn = save_archive class_ref = DBArchive return save_fn( class_ref.from_comm(message.record) ) elif _instanceof(message, SearchRequest): search_fn = search_publications class_ref = DBPublication if _instanceof(message.query, Archive): search_fn = search_archives class_ref = DBArchive results = search_fn( class_ref.from_comm(message.query) ) return SearchResult( records=[ record.to_comm(light_request=message.light_request) for record in results ] ) raise ValueError(" is unknown type of request!" % str(type(message)))
React to given (AMQP) message. `message` is expected to be :py:func:`collections.namedtuple` structure from :mod:`.structures` filled with all necessary data. Args: message (object): One of the request objects defined in :mod:`.structures`. send_back (fn reference): Reference to function for responding. This is useful for progress monitoring for example. Function takes one parameter, which may be response structure/namedtuple, or string or whatever would be normally returned. Returns: object: Response class from :mod:`structures`. Raises: ValueError: if bad type of `message` structure is given.
373,343
def stats_enabled(self, value): if value: self.statistics.enable() else: self.statistics.disable()
Setter method; for a description see the getter method.
373,344
def get_endpoint_server_root(self): parsed = urlparse(self._endpoint) root = parsed.scheme + "://" + parsed.hostname if parsed.port is not None: root += ":" + unicode(parsed.port) return root
Parses RemoteLRS object's endpoint and returns its root :return: Root of the RemoteLRS object endpoint :rtype: unicode
373,345
def construct_txt_file(self): textlines = [ % self.mol.pymol_name.upper(), ] textlines.append("=" * len(textlines[0])) textlines.append( % (time.strftime("%Y/%m/%d"), __version__)) textlines.append() textlines.append() textlines.append() if len(self.excluded) != 0: textlines.append( % .join([lig for lig in self.excluded])) if config.DNARECEPTOR: textlines.append() return textlines
Construct the header of the txt file
373,346
def matrixplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7, figsize=None, dendrogram=False, gene_symbols=None, var_group_positions=None, var_group_labels=None, var_group_rotation=None, layer=None, standard_scale=None, swap_axes=False, show=None, save=None, **kwds): if use_raw is None and adata.raw is not None: use_raw = True if isinstance(var_names, str): var_names = [var_names] categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories, gene_symbols=gene_symbols, layer=layer) if groupby is None or len(categories) <= 1: dendrogram = False mean_obs = obs_tidy.groupby(level=0).mean() if standard_scale == : mean_obs = mean_obs.sub(mean_obs.min(1), axis=0) mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0) elif standard_scale == : mean_obs -= mean_obs.min(0) mean_obs = (mean_obs / mean_obs.max(0)).fillna(0) elif standard_scale is None: pass else: logg.warn() if dendrogram: dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram, var_names=var_names, var_group_labels=var_group_labels, var_group_positions=var_group_positions) var_group_labels = dendro_data[] var_group_positions = dendro_data[] if dendro_data[] is not None: mean_obs = mean_obs.iloc[:,dendro_data[]] mean_obs = mean_obs.iloc[dendro_data[], :] colorbar_width = 0.2 if not swap_axes: dendro_width = 0.8 if dendrogram else 0 if figsize is None: height = len(categories) * 0.2 + 1 heatmap_width = len(var_names) * 0.32 width = heatmap_width + dendro_width + colorbar_width else: width, height = figsize heatmap_width = width - (dendro_width + colorbar_width) if var_group_positions is not None and len(var_group_positions) > 0: height_ratios = [0.5, 10] height += 0.5 else: height_ratios = [0, 10.5] fig = pl.figure(figsize=(width, height)) axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.02, hspace=0.04, width_ratios=[heatmap_width, dendro_width, colorbar_width], height_ratios=height_ratios) matrix_ax = 
fig.add_subplot(axs[1, 0]) y_ticks = np.arange(mean_obs.shape[0]) + 0.5 matrix_ax.set_yticks(y_ticks) matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])]) if dendrogram: dendro_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax) _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks) pc = matrix_ax.pcolor(mean_obs, edgecolor=, **kwds) matrix_ax.set_ylim(mean_obs.shape[0], 0) x_ticks = np.arange(mean_obs.shape[1]) + 0.5 matrix_ax.set_xticks(x_ticks) matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90) matrix_ax.tick_params(axis=, labelsize=) matrix_ax.grid(False) matrix_ax.set_xlim(-0.5, len(var_names) + 0.5) matrix_ax.set_ylabel(groupby) matrix_ax.set_xlim(0, mean_obs.shape[1]) if var_group_positions is not None and len(var_group_positions) > 0: gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax) _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions, group_labels=var_group_labels, rotation=var_group_rotation, left_adjustment=0.2, right_adjustment=0.8) _plot_colorbar(pc, fig, axs[1, 2]) else: dendro_height = 0.5 if dendrogram else 0 if var_group_positions is not None and len(var_group_positions) > 0: vargroups_width = 0.4 else: vargroups_width = 0 if figsize is None: heatmap_height = len(var_names) * 0.2 height = dendro_height + heatmap_height + 1 heatmap_width = len(categories) * 0.3 width = heatmap_width + vargroups_width + colorbar_width else: width, height = figsize heatmap_width = width - (vargroups_width + colorbar_width) heatmap_height = height - dendro_height fig = pl.figure(figsize=(width, height)) axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.05, hspace=0.005, width_ratios=[heatmap_width, vargroups_width, colorbar_width], height_ratios=[dendro_height, heatmap_height]) mean_obs = mean_obs.T matrix_ax = fig.add_subplot(axs[1, 0]) pc = matrix_ax.pcolor(mean_obs, edgecolor=, **kwds) y_ticks = 
np.arange(mean_obs.shape[0]) + 0.5 matrix_ax.set_yticks(y_ticks) matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])]) x_ticks = np.arange(mean_obs.shape[1]) + 0.5 matrix_ax.set_xticks(x_ticks) matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90) matrix_ax.tick_params(axis=, labelsize=) matrix_ax.grid(False) matrix_ax.set_xlim(0, len(categories)) matrix_ax.set_xlabel(groupby) matrix_ax.set_ylim(mean_obs.shape[0], 0) if dendrogram: dendro_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax) _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=x_ticks, orientation=) if var_group_positions is not None and len(var_group_positions) > 0: gene_groups_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax) _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions, group_labels=var_group_labels, rotation=var_group_rotation, left_adjustment=0.2, right_adjustment=0.8, orientation=) _plot_colorbar(pc, fig, axs[1, 2]) utils.savefig_or_show(, show=show, save=save) return axs
\ Creates a heatmap of the mean expression values per cluster of each var_names If groupby is not given, the matrixplot assumes that all data belongs to a single category. Parameters ---------- {common_plot_args} standard_scale : {{'var', 'group'}}, optional (default: None) Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group, subtract the minimum and divide each by its maximum. {show_save_ax} **kwds : keyword arguments Are passed to `matplotlib.pyplot.pcolor`. Returns ------- List of :class:`~matplotlib.axes.Axes` Examples -------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.pl.matrixplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'], ... groupby='bulk_labels', dendrogram=True)
373,347
def get_forms(self): forms = {} objects = self.get_objects() initial = self.get_initial() form_kwargs = self.get_form_kwargs() for key, form_class in six.iteritems(self.form_classes): forms[key] = form_class(instance=objects[key], initial=initial[key], **form_kwargs[key]) return forms
Initializes the forms defined in `form_classes` with initial data from `get_initial()`, kwargs from get_form_kwargs() and form instance object from `get_objects()`.
373,348
def draw_bars(out_value, features, feature_type, width_separators, width_bar): rectangle_list = [] separator_list = [] pre_val = out_value for index, features in zip(range(len(features)), features): if feature_type == : left_bound = float(features[0]) right_bound = pre_val pre_val = left_bound separator_indent = np.abs(width_separators) separator_pos = left_bound colors = [, ] else: left_bound = pre_val right_bound = float(features[0]) pre_val = right_bound separator_indent = - np.abs(width_separators) separator_pos = right_bound colors = [, ] if index == 0: if feature_type == : points_rectangle = [[left_bound, 0], [right_bound, 0], [right_bound, width_bar], [left_bound, width_bar], [left_bound + separator_indent, (width_bar / 2)] ] else: points_rectangle = [[right_bound, 0], [left_bound, 0], [left_bound, width_bar], [right_bound, width_bar], [right_bound + separator_indent, (width_bar / 2)] ] else: points_rectangle = [[left_bound, 0], [right_bound, 0], [right_bound + separator_indent * 0.90, (width_bar / 2)], [right_bound, width_bar], [left_bound, width_bar], [left_bound + separator_indent * 0.90, (width_bar / 2)]] line = plt.Polygon(points_rectangle, closed=True, fill=True, facecolor=colors[0], linewidth=0) rectangle_list += [line] points_separator = [[separator_pos, 0], [separator_pos + separator_indent, (width_bar / 2)], [separator_pos, width_bar]] line = plt.Polygon(points_separator, closed=None, fill=None, edgecolor=colors[1], lw=3) separator_list += [line] return rectangle_list, separator_list
Draw the bars and separators.
373,349
def reduce_object_file_names(self, dirn): py_so_files = shprint(sh.find, dirn, , ) filens = py_so_files.stdout.decode().split()[:-1] for filen in filens: file_dirname, file_basename = split(filen) parts = file_basename.split() if len(parts) <= 2: continue shprint(sh.mv, filen, join(file_dirname, parts[0] + ))
Recursively renames all files named XXX.cpython-...-linux-gnu.so" to "XXX.so", i.e. removing the erroneous architecture name coming from the local system.
373,350
def restore(ctx, filename): filename = filename or background_path if not os.path.isfile(filename): LOG.warning("File {} does not exist. Please point to a valid file".format(filename)) ctx.abort() call = [, , , , .format(filename)] LOG.info(, filename) start_time = datetime.now() try: completed = subprocess.run(call, check=True) except subprocess.CalledProcessError as err: LOG.warning(err) ctx.abort() LOG.info() LOG.info(.format(datetime.now()-start_time))
Restore the database from a zipped file. Default is to restore from db dump in loqusdb/resources/
373,351
def get_bearing(origin_point, destination_point): if not (isinstance(origin_point, tuple) and isinstance(destination_point, tuple)): raise TypeError() lat1 = math.radians(origin_point[0]) lat2 = math.radians(destination_point[0]) diff_lng = math.radians(destination_point[1] - origin_point[1]) x = math.sin(diff_lng) * math.cos(lat2) y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(diff_lng)) initial_bearing = math.atan2(x, y) initial_bearing = math.degrees(initial_bearing) bearing = (initial_bearing + 360) % 360 return bearing
Calculate the bearing between two lat-long points. Each tuple should represent (lat, lng) as decimal degrees. Parameters ---------- origin_point : tuple destination_point : tuple Returns ------- bearing : float the compass bearing in decimal degrees from the origin point to the destination point
373,352
def split_by_rand_pct(self, valid_pct:float=0.2, seed:int=None)->: "Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed." if valid_pct==0.: return self.split_none() if seed is not None: np.random.seed(seed) rand_idx = np.random.permutation(range_of(self)) cut = int(valid_pct * len(self)) return self.split_by_idx(rand_idx[:cut])
Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed.
373,353
def _proxy(self): if self._context is None: self._context = SyncListContext( self._version, service_sid=self._solution[], sid=self._solution[], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncListContext for this SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
373,354
def scan_keys(self, match=None, count=None): cursor = 0 while True: cursor, keys = self.connection.scan(cursor, match=match, count=count) for key in keys: yield key if not cursor or cursor == : break
Take a pattern expected by the redis `scan` command and iter on all matching keys Parameters ---------- match: str The pattern of keys to look for count: int, default to None (redis uses 10) Hint for redis about the number of expected result Yields ------- str All keys found by the scan, one by one. A key can be returned multiple times, it's related to the way the SCAN command works in redis.
373,355
def _quote(data): data = data.replace(b, b) data = data.replace(b, b) return data
Prepare a string for quoting for DIGEST-MD5 challenge or response. Don't add the quotes, only escape '"' and "\\" with backslashes. :Parameters: - `data`: a raw string. :Types: - `data`: `bytes` :return: `data` with '"' and "\\" escaped using "\\". :returntype: `bytes`
373,356
def ipv6_range_to_list(start_packed, end_packed): new_list = list() start = int(binascii.hexlify(start_packed), 16) end = int(binascii.hexlify(end_packed), 16) for value in range(start, end + 1): high = value >> 64 low = value & ((1 << 64) - 1) new_ip = inet_ntop(socket.AF_INET6, struct.pack(, high, low)) new_list.append(new_ip) return new_list
Return a list of IPv6 entries from start_packed to end_packed.
373,357
def ReleaseSW(self): while self.ReadStatusBit(2) == 1: spi.SPI_write(self.CS, [0x92, 0x92] | (~self.Dir & 1)) while self.IsBusy(): pass self.MoveWait(10)
Go away from Limit Switch
373,358
def fix_e112(self, result): line_index = result[] - 1 target = self.source[line_index] if not target.lstrip().startswith(): return [] self.source[line_index] = self.indent_word + target
Fix under-indented comments.
373,359
def status(url="http://127.0.0.1/status"): resp = _urlopen(url) status_data = resp.read() resp.close() lines = status_data.splitlines() if not len(lines) == 4: return active_connections = lines[0].split()[2] accepted, handled, requests = lines[2].split() _, reading, _, writing, _, waiting = lines[3].split() return { : int(active_connections), : int(accepted), : int(handled), : int(requests), : int(reading), : int(writing), : int(waiting), }
Return the data from an Nginx status page as a dictionary. http://wiki.nginx.org/HttpStubStatusModule url The URL of the status page. Defaults to 'http://127.0.0.1/status' CLI Example: .. code-block:: bash salt '*' nginx.status
373,360
def fit(self, inputs=None, wait=True, logs=True, job_name=None): self._prepare_for_training(job_name=job_name) self.latest_training_job = _TrainingJob.start_new(self, inputs) if wait: self.latest_training_job.wait(logs=logs)
Train a model using the input training dataset. The API calls the Amazon SageMaker CreateTrainingJob API to start model training. The API uses configuration you provided to create the estimator and the specified input training data to send the CreatingTrainingJob request to Amazon SageMaker. This is a synchronous operation. After the model training successfully completes, you can call the ``deploy()`` method to host the model using the Amazon SageMaker hosting services. Args: inputs (str or dict or sagemaker.session.s3_input): Information about the training data. This can be one of three types: * (str) the S3 location where training data is saved. * (dict[str, str] or dict[str, sagemaker.session.s3_input]) If using multiple channels for training data, you can specify a dict mapping channel names to strings or :func:`~sagemaker.session.s3_input` objects. * (sagemaker.session.s3_input) - channel configuration for S3 data sources that can provide additional information as well as the path to the training dataset. See :func:`sagemaker.session.s3_input` for full details. wait (bool): Whether the call should wait until the job completes (default: True). logs (bool): Whether to show the logs produced by the job. Only meaningful when wait is True (default: True). job_name (str): Training job name. If not specified, the estimator generates a default job name, based on the training image name and current timestamp.
373,361
def dcshift(self, shift=0.0): if not is_number(shift) or shift < -2 or shift > 2: raise ValueError() effect_args = [, .format(shift)] self.effects.extend(effect_args) self.effects_log.append() return self
Apply a DC shift to the audio. Parameters ---------- shift : float Amount to shift audio between -2 and 2. (Audio is between -1 and 1) See Also -------- highpass
373,362
def options(self, parser, env): parser.add_option(, action=, dest=self.enableOpt, default=env.get(), help="Enable collect-only: %s [COLLECT_ONLY]" % (self.help()))
Register commandline options.
373,363
def gridmake(*arrays): if all([i.ndim == 1 for i in arrays]): d = len(arrays) if d == 2: out = _gridmake2(*arrays) else: out = _gridmake2(arrays[0], arrays[1]) for arr in arrays[2:]: out = _gridmake2(out, arr) return out else: raise NotImplementedError("Come back here")
Expands one or more vectors (or matrices) into a matrix where rows span the cartesian product of combinations of the input arrays. Each column of the input arrays will correspond to one column of the output matrix. Parameters ---------- *arrays : tuple/list of np.ndarray Tuple/list of vectors to be expanded. Returns ------- out : np.ndarray The cartesian product of combinations of the input arrays. Notes ----- Based of original function ``gridmake`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002.
373,364
def _process_dependencies(self, anexec, contents, mode="insert"): for dmatch in self.RE_DEPEND.finditer(contents): isSubroutine = dmatch.group("sub") is not None if "!" in dmatch.group("exec"): execline = self._depend_exec_clean(dmatch.group("exec")) else: execline = "(" + dmatch.group("exec").split("!")[0].replace(",", ", ") + ")" if not "::" in execline: try: dependent = self.nester.parseString(execline).asList()[0] except: msg.err("parsing executable dependency call {}".format(anexec.name)) msg.gen("\t" + execline) self._process_dependlist(dependent, anexec, isSubroutine, mode)
Extracts a list of subroutines and functions that are called from within this executable. :arg mode: specifies whether the matches should be added, removed or merged into the specified executable.
373,365
def stop(self): with self.synclock: if self.syncthread is not None: self.syncthread.cancel() self.syncthread = None
Stops the background synchronization thread
373,366
def addLogicalInterfaceToDeviceType(self, typeId, logicalInterfaceId): req = ApiClient.allDeviceTypeLogicalInterfacesUrl % (self.host, "/draft", typeId) body = {"id" : logicalInterfaceId} resp = requests.post(req, auth=self.credentials, headers={"Content-Type":"application/json"}, data=json.dumps(body), verify=self.verify) if resp.status_code == 201: self.logger.debug("Logical interface added to a device type") else: raise ibmiotf.APIException(resp.status_code, "HTTP error adding logical interface to a device type", resp) return resp.json()
Adds a logical interface to a device type. Parameters: - typeId (string) - the device type - logicalInterfaceId (string) - the id returned by the platform on creation of the logical interface - description (string) - optional (not used) Throws APIException on failure.
373,367
def create_tag(self, tags): if isinstance(tags, str): tags = [tags] evt = self._client._request_point_tag_update(self._type, self.__lid, self.__pid, tags, delete=False) self._client._wait_and_except_if_failed(evt)
Create tags for a Point in the language you specify. Tags can only contain alphanumeric (unicode) characters and the underscore. Tags will be stored lower-cased. Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure tags (mandatory) (list) - the list of tags you want to add to your Point, e.g. ["garden", "soil"]
373,368
def joinCommissioned(self, strPSKd=, waitTime=20): print % self.port self.__sendCommand() cmd = %(strPSKd, self.provisioningUrl) print cmd if self.__sendCommand(cmd)[0] == "Done": maxDuration = 150 self.joinCommissionedStatus = self.joinStatus[] if self.logThreadStatus == self.logStatus[]: self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,)) t_end = time.time() + maxDuration while time.time() < t_end: if self.joinCommissionedStatus == self.joinStatus[]: break elif self.joinCommissionedStatus == self.joinStatus[]: return False time.sleep(1) self.__sendCommand() time.sleep(30) return True else: return False
start joiner Args: strPSKd: Joiner's PSKd Returns: True: successful to start joiner False: fail to start joiner
373,369
def python_2_unicode_compatible(klass): if sys.version_info[0] == 2: if not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesnutf-8') return klass
A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. From django.utils.encoding.py in 1.4.2+, minus the dependency on Six. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class.
373,370
def __check(self, decorated_function, *args, **kwargs): if len(args) >= 1: obj = args[0] function_name = decorated_function.__name__ if hasattr(obj, function_name) is True: fn = getattr(obj, function_name) if callable(fn) and fn.__self__ == obj: return raise RuntimeError()
Check whether function is a bounded method or not. If check fails then exception is raised :param decorated_function: called function (original) :param args: args with which function is called :param kwargs: kwargs with which function is called :return: None
373,371
def tokenizer(text): stream = deque(text.split("\n")) while len(stream) > 0: line = stream.popleft() if line.startswith(" yield KeyValue(" yield KeyValue("HEADER", line) for identifier in line.split(" "): if ":" in identifier: key, value = identifier.split(":") yield KeyValue(key, value) elif line.startswith(" yield KeyValue("HEADER", line) elif line.startswith(" yield KeyValue(" yield KeyValue(" elif line.startswith(" yield KeyValue(" yield KeyValue(line.strip(), "\n") elif line.startswith("SUBJECT_SAMPLE_FACTORS"): key, subject_type, local_sample_id, factors, additional_sample_data = line.split("\t") yield SubjectSampleFactors(key.strip(), subject_type, local_sample_id, factors, additional_sample_data) elif line.endswith("_START"): yield KeyValue(line, "\n") while not line.endswith("_END"): line = stream.popleft() if line.endswith("_END"): yield KeyValue(line.strip(), "\n") else: data = line.split("\t") yield KeyValue(data[0], tuple(data)) else: if line: if line.startswith("MS:MS_RESULTS_FILE") or line.startswith("NM:NMR_RESULTS_FILE"): try: key, value, extra = line.split("\t") extra_key, extra_value = extra.strip().split(":") yield KeyValueExtra(key.strip()[3:], value, extra_key, extra_value) except ValueError: key, value = line.split("\t") yield KeyValue(key.strip()[3:], value) else: try: key, value = line.split("\t") if ":" in key: if key.startswith("MS_METABOLITE_DATA:UNITS"): yield KeyValue(key.strip(), value) else: yield KeyValue(key.strip()[3:], value) else: yield KeyValue(key.strip(), value) except ValueError: print("LINE WITH ERROR:\n\t", repr(line)) raise yield KeyValue(" yield KeyValue("!
A lexical analyzer for the `mwtab` formatted files. :param str text: `mwtab` formatted text. :return: Tuples of data. :rtype: py:class:`~collections.namedtuple`
373,372
def ReadHashes(self): len = self.ReadVarInt() items = [] for i in range(0, len): ba = bytearray(self.ReadBytes(32)) ba.reverse() items.append(ba.hex()) return items
Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type.
373,373
def verify_leaf_hash_inclusion(self, leaf_hash: bytes, leaf_index: int, proof: List[bytes], sth: STH): leaf_index = int(leaf_index) tree_size = int(sth.tree_size) if tree_size <= leaf_index: raise ValueError("Provided STH is for a tree that is smaller " "than the leaf index. Tree size: %d Leaf " "index: %d" % (tree_size, leaf_index)) if tree_size < 0 or leaf_index < 0: raise ValueError("Negative tree size or leaf index: " "Tree size: %d Leaf index: %d" % (tree_size, leaf_index)) calculated_root_hash = self._calculate_root_hash_from_audit_path( leaf_hash, leaf_index, proof[:], tree_size) if calculated_root_hash == sth.sha256_root_hash: return True raise error.ProofError("Constructed root hash differs from provided " "root hash. Constructed: %s Expected: %s" % (hexlify(calculated_root_hash).strip(), hexlify(sth.sha256_root_hash).strip()))
Verify a Merkle Audit Path. See section 2.1.1 of RFC6962 for the exact path description. Args: leaf_hash: The hash of the leaf for which the proof was provided. leaf_index: Index of the leaf in the tree. proof: A list of SHA-256 hashes representing the Merkle audit path. sth: STH with the same tree size as the one used to fetch the proof. The sha256_root_hash from this STH will be compared against the root hash produced from the proof. Returns: True. The return value is enforced by a decorator and need not be checked by the caller. Raises: ProofError: the proof is invalid.
373,374
def upsert_many(col, data): ready_to_insert = list() for doc in data: res = col.update({"_id": doc["_id"]}, {"$set": doc}, upsert=False) if res["nModified"] == 0 and res["updatedExisting"] is False: ready_to_insert.append(doc) col.insert(ready_to_insert)
Only used when having "_id" field. **中文文档** 要求 ``data`` 中的每一个 ``document`` 都必须有 ``_id`` 项。这样才能进行 ``upsert`` 操作。
373,375
def zero_pad(m, n=1): return np.pad(m, (n, n), mode=, constant_values=[0])
Pad a matrix with zeros, on all sides.
373,376
def lock(self): url = self.reddit_session.config[] data = {: self.fullname} return self.reddit_session.request_json(url, data=data)
Lock thread. Requires that the currently authenticated user has the modposts oauth scope or has user/password authentication as a mod of the subreddit. :returns: The json response from the server.
373,377
def parallel(fsms, test): alphabet = set().union(*[fsm.alphabet for fsm in fsms]) initial = dict([(i, fsm.initial) for (i, fsm) in enumerate(fsms)]) def follow(current, symbol): next = {} for i in range(len(fsms)): if symbol not in fsms[i].alphabet and anything_else in fsms[i].alphabet: actual_symbol = anything_else else: actual_symbol = symbol if i in current \ and current[i] in fsms[i].map \ and actual_symbol in fsms[i].map[current[i]]: next[i] = fsms[i].map[current[i]][actual_symbol] if len(next.keys()) == 0: raise OblivionError return next def final(state): accepts = [i in state and state[i] in fsm.finals for (i, fsm) in enumerate(fsms)] return test(accepts) return crawl(alphabet, initial, final, follow).reduce()
Crawl several FSMs in parallel, mapping the states of a larger meta-FSM. To determine whether a state in the larger FSM is final, pass all of the finality statuses (e.g. [True, False, False] to `test`.
373,378
def theme(self, value): if value is not None: assert type(value) is dict, " attribute: type is not !".format("theme", value) self.__theme = value
Setter for **self.__theme** attribute. :param value: Attribute value. :type value: dict
373,379
def delete_job(self, job_id): if hasattr(job_id, ): job_id = job_id.job_id with self._sock_ctx() as socket: self._send_message(.format(job_id), socket) self._receive_word(socket, b)
Delete the given job id. The job must have been previously reserved by this connection
373,380
def Popup(*args, **_3to2kwargs): if in _3to2kwargs: location = _3to2kwargs[]; del _3to2kwargs[] else: location = (None, None) if in _3to2kwargs: keep_on_top = _3to2kwargs[]; del _3to2kwargs[] else: keep_on_top = False if in _3to2kwargs: grab_anywhere = _3to2kwargs[]; del _3to2kwargs[] else: grab_anywhere = False if in _3to2kwargs: no_titlebar = _3to2kwargs[]; del _3to2kwargs[] else: no_titlebar = False if in _3to2kwargs: font = _3to2kwargs[]; del _3to2kwargs[] else: font = None if in _3to2kwargs: line_width = _3to2kwargs[]; del _3to2kwargs[] else: line_width = None if in _3to2kwargs: icon = _3to2kwargs[]; del _3to2kwargs[] else: icon = DEFAULT_WINDOW_ICON if in _3to2kwargs: non_blocking = _3to2kwargs[]; del _3to2kwargs[] else: non_blocking = False if in _3to2kwargs: custom_text = _3to2kwargs[]; del _3to2kwargs[] else: custom_text = (None, None) if in _3to2kwargs: auto_close_duration = _3to2kwargs[]; del _3to2kwargs[] else: auto_close_duration = None if in _3to2kwargs: auto_close = _3to2kwargs[]; del _3to2kwargs[] else: auto_close = False if in _3to2kwargs: button_type = _3to2kwargs[]; del _3to2kwargs[] else: button_type = POPUP_BUTTONS_OK if in _3to2kwargs: text_color = _3to2kwargs[]; del _3to2kwargs[] else: text_color = None if in _3to2kwargs: background_color = _3to2kwargs[]; del _3to2kwargs[] else: background_color = None if in _3to2kwargs: button_color = _3to2kwargs[]; del _3to2kwargs[] else: button_color = None if in _3to2kwargs: title = _3to2kwargs[]; del _3to2kwargs[] else: title = None if not args: args_to_print = [] else: args_to_print = args if line_width != None: local_line_width = line_width else: local_line_width = MESSAGE_BOX_LINE_WIDTH _title = title if title is not None else args_to_print[0] window = Window(_title, auto_size_text=True, background_color=background_color, button_color=button_color, auto_close=auto_close, auto_close_duration=auto_close_duration, icon=icon, font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, 
keep_on_top=keep_on_top, location=location) max_line_total, total_lines = 0, 0 for message in args_to_print: message = str(message) if message.count(): message_wrapped = message else: message_wrapped = textwrap.fill(message, local_line_width) message_wrapped_lines = message_wrapped.count() + 1 longest_line_len = max([len(l) for l in message.split()]) width_used = min(longest_line_len, local_line_width) max_line_total = max(max_line_total, width_used) height = message_wrapped_lines window.AddRow( Text(message_wrapped, auto_size_text=True, text_color=text_color, background_color=background_color)) total_lines += height if non_blocking: PopupButton = DummyButton else: PopupButton = CloseButton if custom_text != (None, None): if type(custom_text) is not tuple: window.AddRow(PopupButton(custom_text,size=(len(custom_text),1), button_color=button_color, focus=True, bind_return_key=True)) elif custom_text[1] is None: window.AddRow(PopupButton(custom_text[0],size=(len(custom_text[0]),1), button_color=button_color, focus=True, bind_return_key=True)) else: window.AddRow(PopupButton(custom_text[0], button_color=button_color, focus=True, bind_return_key=True, size=(len(custom_text[0]), 1)), PopupButton(custom_text[1], button_color=button_color, size=(len(custom_text[0]), 1))) elif button_type is POPUP_BUTTONS_YES_NO: window.AddRow(PopupButton(, button_color=button_color, focus=True, bind_return_key=True, pad=((20, 5), 3), size=(5, 1)), PopupButton(, button_color=button_color, size=(5, 1))) elif button_type is POPUP_BUTTONS_CANCELLED: window.AddRow( PopupButton(, button_color=button_color, focus=True, bind_return_key=True, pad=((20, 0), 3))) elif button_type is POPUP_BUTTONS_ERROR: window.AddRow(PopupButton(, size=(6, 1), button_color=button_color, focus=True, bind_return_key=True, pad=((20, 0), 3))) elif button_type is POPUP_BUTTONS_OK_CANCEL: window.AddRow(PopupButton(, size=(6, 1), button_color=button_color, focus=True, bind_return_key=True), PopupButton(, size=(6, 1), 
button_color=button_color)) elif button_type is POPUP_BUTTONS_NO_BUTTONS: pass else: window.AddRow(PopupButton(, size=(5, 1), button_color=button_color, focus=True, bind_return_key=True, pad=((20, 0), 3))) if non_blocking: button, values = window.Read(timeout=0) else: button, values = window.Read() return button
Popup - Display a popup box with as many parms as you wish to include :param args: :param button_color: :param background_color: :param text_color: :param button_type: :param auto_close: :param auto_close_duration: :param non_blocking: :param icon: :param line_width: :param font: :param no_titlebar: :param grab_anywhere: :param keep_on_top: :param location: :return:
373,381
def to_xdr_object(self):
    """Create an XDR object for this :class:`Asset`.

    The native asset maps to ``ASSET_TYPE_NATIVE``; issued assets are
    encoded as ALPHANUM4 or ALPHANUM12 depending on code length.

    :return: An XDR Asset object
    """
    if self.is_native():
        # The native asset (lumens) carries no code or issuer.
        xdr_type = Xdr.const.ASSET_TYPE_NATIVE
        return Xdr.types.Asset(type=xdr_type)
    else:
        x = Xdr.nullclass()
        length = len(self.code)
        # Asset codes are fixed-width XDR fields: 4 bytes for short codes,
        # 12 bytes otherwise; compute the padding to fill the field.
        pad_length = 4 - length if length <= 4 else 12 - length
        # NOTE(review): the encoding argument and the padding byte literal
        # were stripped during extraction -- presumably an ASCII encoding
        # and a zero byte (b'\x00'); confirm against the original source.
        x.assetCode = bytearray(self.code, ) + b * pad_length
        x.issuer = account_xdr_object(self.issuer)
        if length <= 4:
            xdr_type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4
            return Xdr.types.Asset(type=xdr_type, alphaNum4=x)
        else:
            xdr_type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12
            return Xdr.types.Asset(type=xdr_type, alphaNum12=x)
Create an XDR object for this :class:`Asset`. :return: An XDR Asset object
373,382
def rest_put(url, data, timeout):
    """Call REST PUT method.

    Sends *data* as a JSON payload to *url* with the given *timeout*
    (seconds). Returns the ``requests`` response object, or None when the
    request fails for any reason (the error is printed, not raised).

    NOTE(review): the header values and the error-message format string
    were stripped during extraction and have been reconstructed here as a
    standard JSON REST PUT; confirm against the original source.

    :param url: target URL
    :param data: request body (JSON string)
    :param timeout: request timeout in seconds
    :return: ``requests.Response`` on success, None on failure
    """
    try:
        response = requests.put(
            url,
            headers={'Accept': 'application/json',
                     'Content-Type': 'application/json'},
            data=data,
            timeout=timeout)
        return response
    except Exception as e:
        # Best-effort API: report and swallow the error.
        print('Error: {}, url: {}'.format(str(e), url))
        return None
Call rest put method
373,383
def get_listening(self, listen=[]):
    """Return the list of addresses SSH can listen on.

    Each entry of *listen* may be a CIDR/network, a literal IP address,
    or an interface name; entries that cannot be resolved to an address
    are silently dropped.

    :param listen: list of IPs, CIDRs and/or interface names
    :returns: list of IP addresses available on this host
    """
    if listen == []:
        return listen
    resolved = []
    for entry in listen:
        try:
            # Treat the entry as a network/CIDR first.
            addr = get_address_in_network(network=entry, fatal=True)
        except ValueError:
            if is_ip(entry):
                # Already a literal IP address.
                addr = entry
            else:
                # Fall back to resolving it as an interface name.
                addresses = get_iface_addr(iface=entry, fatal=False)
                if not addresses:
                    # Interface has no address; skip it.
                    continue
                addr = addresses[0]
        resolved.append(addr)
    return resolved if resolved else []
Returns a list of addresses SSH can list on Turns input into a sensible list of IPs SSH can listen on. Input must be a python list of interface names, IPs and/or CIDRs. :param listen: list of IPs, CIDRs, interface names :returns: list of IPs available on the host
373,384
def combineblocks(blks, imgsz, stpsz=None, fn=np.median):
    """Combine blocks from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
      nd array of blocks of a signal, block index on the last axis
    imgsz : tuple
      tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
      tuple of step sizes between neighboring blocks
    fn : function, optional (default np.median)
      the function used to resolve multivalued cells

    Returns
    -------
    imgs : ndarray
      reconstructed signal; unknown pixels are returned as np.nan
    """
    # Elementwise append of a value into a per-cell accumulator list.
    def listapp(x, y):
        x.append(y)
    veclistapp = np.vectorize(listapp, otypes=[np.object_])

    blksz = blks.shape[:-1]
    if stpsz is None:
        stpsz = tuple(1 for _ in blksz)

    # Number of blocks along each signal axis; fillvalue=1 pads the
    # shorter tuples so trailing axes behave as size/step 1.
    numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in
                      zip_longest(imgsz, blksz, stpsz, fillvalue=1))

    # Unfold the flat block index into a per-axis block grid.
    new_shape = blksz + numblocks
    blks = np.reshape(blks, new_shape)

    # Object array holding one accumulator list per output cell;
    # frompyfunc(list, ...) copies the shared [] so each cell gets its
    # own independent list.
    imgs = np.empty(imgsz, dtype=np.object_)
    imgs.fill([])
    imgs = np.frompyfunc(list, 1, 1)(imgs)

    # Scatter each block's values into the accumulator lists of the
    # cells it covers (veclistapp mutates the lists in place).
    for pos in np.ndindex(numblocks):
        slices = tuple(slice(a*c, a*c + b) for a, b, c in
                       zip_longest(pos, blksz, stpsz, fillvalue=1))
        veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze())

    # Reduce each accumulator list to a single value with fn.
    return np.vectorize(fn, otypes=[blks.dtype])(imgs)
Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan
373,385
def conditional(self, condition, requirements):
    """Calculate conditional requirements for multiple requirements at once.

    This is a shorthand to reduce duplication and keep an inline
    declarative syntax. For example::

        lib x : x.cpp : [ conditional <toolset>gcc <variant>debug :
            <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;

    :param condition: list of property strings forming the condition
    :param requirements: list of requirement strings to make conditional
    :return: list of conditional requirement strings
    """
    # NOTE: Python 2 idioms (basestring, string.join) -- this module
    # targets Python 2.
    assert is_iterable_typed(condition, basestring)
    assert is_iterable_typed(requirements, basestring)
    # Join the condition properties into one comma-separated condition.
    c = string.join(condition, ",")
    if c.find(":") != -1:
        # Condition already carries a ':' separator; append requirements
        # directly.
        return [c + r for r in requirements]
    else:
        return [c + ":" + r for r in requirements]
Calculates conditional requirements for multiple requirements at once. This is a shorthand to be reduce duplication and to keep an inline declarative syntax. For example: lib x : x.cpp : [ conditional <toolset>gcc <variant>debug : <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;
373,386
def t_KEYWORD_AS_TAG(self, t):
    # PLY lexer token rule.
    # NOTE(review): the raw-string regex docstring was stripped during
    # extraction; companion metadata suggests it was r'[a-zA-Z]+ '
    # (letters followed by a space, matching the .strip() below) --
    # confirm against the original source. The bare `r` below is the
    # residue of that literal and is not valid at runtime as written.
    r
    # Reclassify the lexeme as a reserved keyword when known.
    # NOTE(review): the fallback token type passed to .get() was stripped;
    # as written this returns None for non-reserved words -- confirm the
    # intended default token type.
    t.type = self.reserved.get(t.value, )
    # Drop the trailing whitespace matched by the rule.
    t.value = t.value.strip()
    return t
r'[a-zA-Z]+
373,387
def BROKER_TYPE(self):
    """Custom setting allowing a switch between broker backends
    (rabbitmq, redis).

    Falls back to DEFAULT_BROKER_TYPE (with a warning) when the
    configured value is not in SUPPORTED_BROKER_TYPES.
    """
    # NOTE(review): the settings key passed to get() was stripped during
    # extraction (presumably 'BROKER_TYPE'); as written this line is not
    # valid Python -- restore it from the original source before use.
    broker_type = get(, DEFAULT_BROKER_TYPE)
    if broker_type not in SUPPORTED_BROKER_TYPES:
        log.warn("Specified BROKER_TYPE {} not supported. Backing to default {}".format(
            broker_type, DEFAULT_BROKER_TYPE))
        return DEFAULT_BROKER_TYPE
    else:
        return broker_type
Custom setting allowing switch between rabbitmq, redis
373,388
def asDictionary(self):
    """Return this join-table data source serialized as a dictionary."""
    result = {"type": "joinTable"}
    result["leftTableSource"] = self._leftTableSource
    result["rightTableSource"] = self._rightTableSource
    result["leftTableKey"] = self._leftTableKey
    result["rightTableKey"] = self._rightTableKey
    result["joinType"] = self._joinType
    return result
returns the data source as a dictionary
373,389
def build_import_keychain( cls, keychain_dir, namespace_id, pubkey_hex ):
    """
    Generate all possible NAME_IMPORT addresses from the NAMESPACE_REVEAL
    public key.

    Results are cached on disk (one address per line); a readable cache
    is returned directly instead of re-deriving the keychain.

    :param keychain_dir: directory holding the cached keychain files
    :param namespace_id: namespace whose import keychain is wanted
    :param pubkey_hex: hex-encoded NAMESPACE_REVEAL public key
    :return: list of derived child addresses
    """
    pubkey_addr = virtualchain.BitcoinPublicKey(str(pubkey_hex)).address()

    # Try the on-disk cache first.
    cached_keychain = cls.get_import_keychain_path(keychain_dir, namespace_id)
    if os.path.exists( cached_keychain ):
        try:
            lines = []
            with open(cached_keychain, "r") as f:
                lines = f.readlines()

            child_addrs = [l.strip() for l in lines]
            # BUG FIX: the format string previously had a single %s for a
            # two-element tuple, which raised TypeError here and silently
            # disabled the cache path.
            log.debug("Loaded cached import keychain for %s (%s)" % (pubkey_hex, pubkey_addr))
            return child_addrs
        except Exception as e:
            # Unreadable cache: log and fall through to re-derivation.
            log.exception(e)
            pass

    # Cache miss (or unreadable cache): derive the child addresses.
    pubkey_hex = str(pubkey_hex)
    public_keychain = keychain.PublicKeychain.from_public_key( pubkey_hex )
    child_addrs = []

    for i in xrange(0, NAME_IMPORT_KEYRING_SIZE):
        public_child = public_keychain.child(i)
        public_child_address = public_child.address()
        # BUG FIX: the derived address was previously discarded, so an
        # empty list was always returned on a cache miss.
        child_addrs.append( public_child_address )

    return child_addrs
Generate all possible NAME_IMPORT addresses from the NAMESPACE_REVEAL public key
373,390
def loaders(self):
    """Return the list of available loader modules.

    Loader modules named in ``LOADERS_FOR_DYNACONF`` are imported lazily
    on first access and memoized in ``self._loaders``. When loaders are
    explicitly disabled (None, 0, "0", "false" or False) an empty list
    is returned.
    """
    disabled_values = (None, 0, "0", "false", False)
    if self.LOADERS_FOR_DYNACONF in disabled_values:
        self.logger.info("No loader defined")
        return []
    if not self._loaders:
        # First access: import every configured loader module in place.
        self._loaders.extend(
            importlib.import_module(name)
            for name in self.LOADERS_FOR_DYNACONF
        )
    return self._loaders
Return available loaders
373,391
def AUC_analysis(AUC):
    """Analyze AUC with the interpretation table.

    :param AUC: area under the ROC curve
    :type AUC: float
    :return: interpretation result as str ("None" when the value is
        missing or not comparable to a float)
    """
    try:
        if AUC == "None":
            return "None"
        # Interpretation table: each verdict applies strictly below its
        # upper bound; anything at or above 0.9 is "Excellent".
        table = ((0.6, "Poor"), (0.7, "Fair"), (0.8, "Good"),
                 (0.9, "Very Good"))
        for upper_bound, verdict in table:
            if AUC < upper_bound:
                return verdict
        return "Excellent"
    except Exception:
        # Non-numeric input (e.g. None) cannot be compared; report "None".
        return "None"
Analyze AUC with the interpretation table. :param AUC: area under the ROC curve :type AUC: float :return: interpretation result as str
373,392
def resample_ann(resampled_t, ann_sample):
    """Compute the new annotation indices after resampling.

    Parameters
    ----------
    resampled_t : numpy array
        Array of signal locations as returned by scipy.signal.resample
    ann_sample : numpy array
        Array of annotation locations

    Returns
    -------
    resampled_ann_sample : numpy array
        Array of resampled annotation locations

    NOTE(review): the dtype literals below were stripped during
    extraction, so this block is not valid Python as written -- the
    original presumably used integer dtypes; restore them from the
    source before use.
    """
    # Per-output-sample counter: tmp[j] counts annotations mapped to
    # resampled_t[j].
    tmp = np.zeros(len(resampled_t), dtype=)
    j = 0
    tprec = resampled_t[j]
    for i, v in enumerate(ann_sample):
        # Walk the resampled grid until v falls inside [tprec, tnow],
        # assigning v to the nearer of the two bracketing grid points.
        while True:
            d = False
            if v < tprec:
                # Annotation lies before the current grid point: step back.
                j -= 1
                tprec = resampled_t[j]
            if j+1 == len(resampled_t):
                # Ran off the end of the grid: assign to the last sample.
                tmp[j] += 1
                break
            tnow = resampled_t[j+1]
            if tprec <= v and v <= tnow:
                # v is bracketed; attribute it to the closer neighbor.
                if v-tprec < tnow-v:
                    tmp[j] += 1
                else:
                    tmp[j+1] += 1
                d = True
            j += 1
            tprec = tnow
            if d:
                break
    # Expand the counts back into one index entry per annotation.
    idx = np.where(tmp>0)[0].astype()
    res = []
    for i in idx:
        for j in range(tmp[i]):
            res.append(i)
    # Every annotation must have been assigned exactly once.
    assert len(res) == len(ann_sample)
    return np.asarray(res, dtype=)
Compute the new annotation indices Parameters ---------- resampled_t : numpy array Array of signal locations as returned by scipy.signal.resample ann_sample : numpy array Array of annotation locations Returns ------- resampled_ann_sample : numpy array Array of resampled annotation locations
373,393
def _safe_run_theta(input_file, out_dir, output_ext, args, data):
    """Run THetA, catching and continuing on any errors.

    Returns the path to the THetA output file, or None when the sample
    was skipped -- either on a previous run (marker file present) or
    because THetA reported it is not a suitable candidate.
    """
    result_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext)
    marker = result_file + ".skipped"
    # A marker file from an earlier run means THetA already decided to skip.
    if utils.file_exists(marker):
        return None
    if utils.file_exists(result_file):
        return result_file
    with file_transaction(data, out_dir) as tx_out_dir:
        utils.safe_makedir(tx_out_dir)
        cmd = _get_cmd("RunTHetA.py") + args + \
              [input_file, "--NUM_PROCESSES", dd.get_cores(data), "--FORCE",
               "-d", tx_out_dir]
        try:
            do.run(cmd, "Run THetA to calculate purity", log_error=False)
        except subprocess.CalledProcessError as msg:
            # THetA refuses some inputs by design; record a skip marker
            # for those and re-raise anything unexpected.
            expected_failures = ("Number of intervals must be greater than 1",
                                 "This sample isn't a good candidate for THetA analysis")
            if any(text in str(msg) for text in expected_failures):
                with open(os.path.join(tx_out_dir, os.path.basename(marker)), "w") as out_handle:
                    out_handle.write("Expected TheTA failure, skipping")
                return None
            else:
                raise
    return result_file
Run THetA, catching and continuing on any errors.
373,394
def search(self, query, indices=None, doc_types=None, model=None, scan=False,
           headers=None, **query_params):
    """Execute a search against one or more indices and return the
    result set.

    *query* must be a Search object, a Query object, or a dict of search
    parameters using the query DSL; anything else raises InvalidQuery.
    When *scan* is set, scroll-mode defaults are applied.
    """
    if isinstance(query, Search):
        prepared = query
    elif isinstance(query, (Query, dict)):
        # Wrap raw queries/dicts into a Search object.
        prepared = Search(query)
    else:
        raise InvalidQuery("search() must be supplied with a Search or Query object, or a dict")
    if scan:
        # Scan searches default to scroll mode with a 10 minute window.
        for key, value in (("search_type", "scan"), ("scroll", "10m")):
            query_params.setdefault(key, value)
    return ResultSet(self, prepared, indices=indices, doc_types=doc_types,
                     model=model, query_params=query_params, headers=headers)
Execute a search against one or more indices to get the resultset. `query` must be a Search object, a Query object, or a custom dictionary of search parameters using the query DSL to be passed directly.
373,395
def junos_call(fun, *args, **kwargs):
    """Execute an arbitrary function from the junos execution module.

    :param fun: name of the junos function to invoke (e.g. ``cli``)
    :param args: positional arguments forwarded to the junos function
    :param kwargs: keyword arguments forwarded to the junos function
    :return: the invoked function's result, or the failed preparation
        result / an error-shaped dict.

    NOTE(review): several string literals (dict keys, the module-name
    prefix and its format string) were stripped during extraction, so
    this block is not valid Python as written -- restore them from the
    original source before use.
    """
    # NOTE(review): residue of the stripped docstring (a CLI example).
    *show system commit
    # Make sure the NAPALM device is ready; bail out with the
    # preparation result when it is not.
    prep = _junos_prep_fun(napalm_device)
    if not prep[]:
        return prep
    # Qualify the function name with the junos module prefix when needed.
    if not in fun:
        mod_fun = .format(fun)
    else:
        mod_fun = fun
    # Unknown function: return an error-shaped result dict.
    if mod_fun not in __salt__:
        return {
            : None,
            : False,
            : .format(fun)
        }
    return __salt__[mod_fun](*args, **kwargs)
.. versionadded:: 2019.2.0 Execute an arbitrary function from the :mod:`junos execution module <salt.module.junos>`. To check what ``args`` and ``kwargs`` you must send to the function, please consult the appropriate documentation. fun The name of the function. E.g., ``set_hostname``. args List of arguments to send to the ``junos`` function invoked. kwargs Dictionary of key-value arguments to send to the ``juno`` function invoked. CLI Example: .. code-block:: bash salt '*' napalm.junos_fun cli 'show system commit'
373,396
def getTransitionUsers(obj, action_id, last_user=False):
    """Return the user ids that triggered the given workflow transition.

    :param obj: content object whose review history is inspected
    :param action_id: a string with the transition id to look for
    :param last_user: when True, return only the most recent user who
        triggered the transition instead of all of them
    :returns: a list of user ids (most recent first)

    NOTE(review): several string literals (the workflow tool name and the
    event dict keys) were stripped during extraction, so this block is
    not valid Python as written -- restore them from the original source
    before use.
    """
    workflow = getToolByName(obj, )
    users = []
    try:
        # Full workflow history for the object; may be missing entirely.
        review_history = list(workflow.getInfoFor(obj, ))
    except WorkflowException:
        logger.error(
            "workflow history is inexplicably missing."
            " https://jira.bikalabs.com/browse/LIMS-2242")
        return users
    # Most recent events first.
    review_history.reverse()
    for event in review_history:
        if event.get(, ) == action_id:
            value = event.get(, )
            users.append(value)
            if last_user:
                # Only the most recent matching user was requested.
                return users
    return users
This function returns a list with the users who have done the transition. :action_id: a string as the transition id. :last_user: a boolean to return only the last user triggering the transition or all of them. :returns: a list of user ids.
373,397
def get_file(path=None, content=None):
    """Return metadata (and optionally content) for a file or directory.

    :param path: relative web path, or None to read it from the request
        args
    :param content: file content to echo back in the attributes (used
        for editfile)
    :return: dict with the path, the item type and an attributes dict

    NOTE(review): many string literals (the request arg name, extension
    lists, separators and dict keys) were stripped during extraction, so
    this block is not valid Python as written -- restore them from the
    original source before use.
    """
    if path is None:
        path = request.args.get()
    if path is None:
        return error()
    # Last path component and its extension.
    filename = os.path.split(path.rstrip())[-1]
    extension = filename.rsplit(, 1)[-1]
    os_file_path = web_path_to_os_path(path)
    if os.path.isdir(os_file_path):
        file_type = 
        # Directories are normalized to end with a separator.
        if path[-1] != :
            path += 
    else:
        file_type = 
    ctime = int(os.path.getctime(os_file_path))
    mtime = int(os.path.getmtime(os_file_path))
    height = 0
    width = 0
    # Probe image dimensions for known image extensions.
    if extension in [, , , ]:
        try:
            im = PIL.Image.open(os_file_path)
            # NOTE(review): PIL's Image.size is (width, height); unpacking
            # it as `height, width` looks swapped -- verify against callers.
            height, width = im.size
        except OSError:
            log.exception(.format(os_file_path))
    attributes = {
        : filename,
        : get_url_path(path),
        # Readable / writable flags as 0/1 ints.
        : 1 if os.access(os_file_path, os.R_OK) else 0,
        : 1 if os.access(os_file_path, os.W_OK) else 0,
        : datetime.datetime.fromtimestamp(ctime).ctime(),
        : datetime.datetime.fromtimestamp(mtime).ctime(),
        : mtime,
        : width,
        : height,
        : os.path.getsize(os_file_path)
    }
    if content:
        attributes[] = content
    return {
        : path,
        : file_type,
        : attributes
    }
:param path: relative path, or None to get from request :param content: file content, output in data. Used for editfile
373,398
def register_link(self, link):
    """Resolve the target of *link* against the registered hooks and
    index the link for later lookup.

    The link's source record and source index must already have been set.

    Candidate keys pair each of the link's hook references with its
    initial hook value. Record-level hooks are tried first; table-level
    hooks are the fallback. When no hook matches any key, a
    FieldValidationError is raised with field location context.
    """
    keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)
    # First try record-level hooks. The for/else idiom: the else branch
    # runs only when the loop finished without a break (no key matched).
    for k in keys:
        if k in self._record_hooks:
            link.set_target(target_record=self._record_hooks[k].target_record)
            break
    else:
        # Fall back to table-level hooks.
        for k in keys:
            if k in self._table_hooks:
                link.set_target(target_table=self._table_hooks[k])
                break
        else:
            # No hook matched any candidate key: report with field context.
            field_descriptor = link.source_record.get_field_descriptor(link.source_index)
            raise FieldValidationError(
                f"No object found with any of given references : {keys}. "
                f"{field_descriptor.get_error_location_message(link.initial_hook_value)}"
            )
    # Index the link by its source record ...
    if link.source_record not in self._links_by_source:
        self._links_by_source[link.source_record] = set()
    self._links_by_source[link.source_record].add(link)
    # ... and by its resolved target, for reverse lookups.
    if link.target not in self._links_by_target:
        self._links_by_target[link.target] = set()
    self._links_by_target[link.target].add(link)
source record and index must have been set
373,399
def split(table, field, pattern, newfields=None, include_original=False,
          maxsplit=0, flags=0):
    """Add one or more new fields with values generated by splitting an
    existing field's value around occurrences of a regular expression.

    :param table: source table
    :param field: name of the field whose values are split
    :param pattern: regular expression to split around
    :param newfields: names for the generated fields
    :param include_original: keep the original field in the output
        (omitted by default)
    :param maxsplit: maximum number of splits (0 means no limit)
    :param flags: regular-expression flags
    :returns: a lazy view over the split table
    """
    view = SplitView(table, field, pattern, newfields, include_original,
                     maxsplit, flags)
    return view
Add one or more new fields with values generated by splitting an existing value around occurrences of a regular expression. E.g.:: >>> import petl as etl >>> table1 = [['id', 'variable', 'value'], ... ['1', 'parad1', '12'], ... ['2', 'parad2', '15'], ... ['3', 'tempd1', '18'], ... ['4', 'tempd2', '19']] >>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day']) >>> table2 +-----+-------+----------+-----+ | id | value | variable | day | +=====+=======+==========+=====+ | '1' | '12' | 'para' | '1' | +-----+-------+----------+-----+ | '2' | '15' | 'para' | '2' | +-----+-------+----------+-----+ | '3' | '18' | 'temp' | '1' | +-----+-------+----------+-----+ | '4' | '19' | 'temp' | '2' | +-----+-------+----------+-----+ By default the field on which the split is performed is omitted. It can be included using the `include_original` argument.