positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def get_input(context, conf):
    """Fetch a user parameter from the console or from an outer submodule/system.

    `conf` is expected to provide 'name', 'default', 'prompt' and 'debug'
    entries, each a dict with a 'value' key.
    """
    name = conf['name']['value']
    prompt = conf['prompt']['value']
    fallback = conf['default']['value'] or conf['debug']['value']

    if context.submodule or context.inputs:
        # Driven by an outer submodule/system: look the value up there.
        return context.inputs.get(name, fallback)
    if context.test:
        # We skip user interaction during tests.
        return fallback
    # Interactive console prompt (Python 2 raw_input).
    entered = raw_input("%s (default=%s) " % (encode(prompt), encode(fallback)))
    return entered or fallback
Gets a user parameter, either from the console or from an outer submodule/system Assumes conf has name, default, prompt and debug
def _scroll(clicks, x=None, y=None):
    """Send a vertical mouse-wheel event to Windows via the mouse_event() win32 call.

    Args:
      clicks (int): The amount of scrolling to do. A positive value is the
        mouse wheel moving forward (scrolling up), a negative value is
        backwards (down).
      x (int): The x position of the mouse event; defaults to the current
        cursor position, otherwise clamped to the screen.
      y (int): The y position of the mouse event; same defaulting/clamping.

    Returns:
      None
    """
    cur_x, cur_y = _position()
    screen_w, screen_h = _size()
    # Default to the current cursor position, otherwise clamp into
    # [0, dimension - 1].
    if x is None:
        x = cur_x
    else:
        x = max(0, min(x, screen_w - 1))
    if y is None:
        y = cur_y
    else:
        y = max(0, min(y, screen_h - 1))
    try:
        _sendMouseEvent(MOUSEEVENTF_WHEEL, x, y, dwData=clicks)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors, see
        # https://github.com/asweigart/pyautogui/issues/60
        pass
Send the mouse vertical scroll event to Windows by calling the mouse_event() win32 function. Args: clicks (int): The amount of scrolling to do. A positive value is the mouse wheel moving forward (scrolling up), a negative value is backwards (down). x (int): The x position of the mouse event. y (int): The y position of the mouse event. Returns: None
def tcp_client(tcp_addr):
    """Connect to the tcp server, and return the settings."""
    is_ipv6 = ":" in tcp_addr.ip
    sock = socket.socket(socket.AF_INET6 if is_ipv6 else socket.AF_INET,
                         socket.SOCK_STREAM, socket.IPPROTO_TCP)
    # Retry for up to 300 attempts, one second apart.
    for attempt in range(300):
        logging.info("Connecting to: %s, attempt %d", tcp_addr, attempt)
        try:
            sock.connect(tcp_addr)
            break
        except socket.error:
            time.sleep(1)
    else:
        sock.connect(tcp_addr)  # One last try, but don't catch this error.
    logging.info("Connected.")

    map_data = read_tcp(sock)
    settings_str = read_tcp(sock)
    if not settings_str:
        raise socket.error("Failed to read")
    settings = json.loads(settings_str.decode())
    logging.info("Got settings. map_name: %s.", settings["map_name"])
    logging.debug("settings: %s", settings)
    settings["map_data"] = map_data
    return sock, settings
Connect to the tcp server, and return the settings.
def init(envVarName, enableColorOutput=False):
    """
    Initialize the logging system and parse the environment variable
    of the given name.
    Needs to be called before starting the actual application.
    """
    global _initialized
    global _ENV_VAR_NAME
    if _initialized:
        return

    _ENV_VAR_NAME = envVarName
    # Pre-format the level strings, optionally honoring a NO_COLOR override.
    _preformatLevels((envVarName + "_NO_COLOR") if enableColorOutput else None)

    if envVarName in os.environ:
        # Install a log handler that uses the value of the environment var.
        setDebug(os.environ[envVarName])
    addLimitedLogHandler(stderrHandler)

    _initialized = True
Initialize the logging system and parse the environment variable of the given name. Needs to be called before starting the actual application.
def make_named_stemmer(stem=None, min_len=3):
    """Construct a callable object and a string sufficient to reconstruct it later (unpickling)

    >>> make_named_stemmer('str_lower')
    ('str_lower', <function str_lower at ...>)
    >>> make_named_stemmer('Lancaster')
    ('lancaster', <Stemmer object at ...>)
    """
    name = stringify(stem)
    stemmer = make_stemmer(stem=stem, min_len=min_len)
    # Prefer, in order: the callable's own name, a known stemmer-type key,
    # a regex pattern, and finally the stringified object itself.
    if hasattr(stemmer, '__name__'):
        return stemmer.__name__, stemmer
    normalized = name.strip().lower()
    if normalized in STEMMER_TYPES:
        return normalized, stemmer
    if hasattr(stemmer, 'pattern'):
        return stemmer.pattern, stemmer
    return stringify(stemmer), stemmer
Construct a callable object and a string sufficient to reconstruct it later (unpickling) >>> make_named_stemmer('str_lower') ('str_lower', <function str_lower at ...>) >>> make_named_stemmer('Lancaster') ('lancaster', <Stemmer object at ...>)
def data(self):
    """Return Indicator data."""
    out = self._indicator_data

    # Attributes (only those flagged valid).
    if self._attributes:
        out['attribute'] = [a.data for a in self._attributes if a.valid]

    # File actions, nested under a 'children' list.
    if self._file_actions:
        out.setdefault('fileAction', {})
        out['fileAction'].setdefault('children', [])
        for action in self._file_actions:
            out['fileAction']['children'].append(action.data)

    # File occurrences.
    if self._occurrences:
        out.setdefault('fileOccurrence', [])
        for occurrence in self._occurrences:
            out['fileOccurrence'].append(occurrence.data)

    # Security labels.
    if self._labels:
        out['securityLabel'] = [label.data for label in self._labels]

    # Tags (only those flagged valid).
    if self._tags:
        out['tag'] = [t.data for t in self._tags if t.valid]

    return out
Return Indicator data.
def print_err(*args, **kwargs):
    """ A wrapper for print() that uses stderr by default. """
    if kwargs.get('file', None) is None:
        kwargs['file'] = sys.stderr

    use_color = dict_pop_or(kwargs, 'color', True)
    sep = kwargs.get('sep', ' ')
    # Use color if asked, but only if the file is a tty.
    if use_color and kwargs['file'].isatty():
        # Keep any Colr args passed, convert plain strings into red Colrs.
        pieces = [
            str(a) if isinstance(a, C) else str(C(a, 'red'))
            for a in args
        ]
    else:
        # The file is not a tty anyway, no escape codes.
        pieces = [
            str(a.stripped() if isinstance(a, C) else a)
            for a in args
        ]
    msg = sep.join(pieces)
    if dict_pop_or(kwargs, 'newline', False):
        msg = '\n{}'.format(msg)
    print(msg, **kwargs)
A wrapper for print() that uses stderr by default.
def angle_single(self, g_num, at_1, at_2, at_3):
    """ Spanning angle among three atoms.

    The indices `at_1` and `at_3` can be the same (yielding a trivial
    zero angle), but `at_2` must be different from both `at_1` and `at_3`.

    Parameters
    ----------
    g_num  |int| -- Index of the desired geometry
    at_1   |int| -- Index of the first atom
    at_2   |int| -- Index of the second atom
    at_3   |int| -- Index of the third atom

    Returns
    -------
    angle  |npfloat_| -- Spanning angle in degrees between
        `at_1`-`at_2`-`at_3`, from geometry `g_num`

    Raises
    ------
    ~exceptions.IndexError
        If an invalid (out-of-range) `g_num` or `at_#` is provided
    ~exceptions.ValueError
        If `at_2` is equal to either `at_1` or `at_3`
    """
    # Imports
    import numpy as np
    from .utils import safe_cast as scast
    from .utils.vector import vec_angle

    # Explicit range checks: these indices are multiplied by three when
    # used for indexing later on, which would otherwise produce
    # non-intuitive errors deeper in the code.
    for label, at_x in (('at_1', at_1), ('at_2', at_2), ('at_3', at_3)):
        if not -self.num_atoms <= at_x < self.num_atoms:
            raise IndexError("Invalid index for '{0}' ({1})"
                             .format(label, at_x))

    # Coerce the indices to their floor() values; fractional values would
    # misbehave once multiplied by three in the index expressions.
    at_1 = scast(np.floor(at_1), np.int_)
    at_2 = scast(np.floor(at_2), np.int_)
    at_3 = scast(np.floor(at_3), np.int_)

    # at_2 must differ from both neighbors; modulo arithmetic accounts
    # for negative (wrap-around) indexing.
    if (at_2 % self.num_atoms) == (at_1 % self.num_atoms):
        raise ValueError("'at_1' and 'at_2' must be different")
    if (at_2 % self.num_atoms) == (at_3 % self.num_atoms):
        raise ValueError("'at_2' and 'at_3' must be different")

    if (at_1 % self.num_atoms) == (at_3 % self.num_atoms):
        # Angle between an atom and itself is identically zero.
        return 0.0

    # Displacement vectors from at_2 toward each end atom. The np.float64
    # type should be retained through the displ_single call.
    vec_2_1 = self.displ_single(g_num, at_2, at_1)
    vec_2_3 = self.displ_single(g_num, at_2, at_3)

    # v1 {dot} v2 == |v1||v2| * cos(theta)
    return vec_angle(vec_2_1, vec_2_3)
Spanning angle among three atoms. The indices `at_1` and `at_3` can be the same (yielding a trivial zero angle), but `at_2` must be different from both `at_1` and `at_3`. Parameters ---------- g_num |int| -- Index of the desired geometry at_1 |int| -- Index of the first atom at_2 |int| -- Index of the second atom at_3 |int| -- Index of the third atom Returns ------- angle |npfloat_| -- Spanning angle in degrees between `at_1`-`at_2`-`at_3`, from geometry `g_num` Raises ------ ~exceptions.IndexError If an invalid (out-of-range) `g_num` or `at_#` is provided ~exceptions.ValueError If `at_2` is equal to either `at_1` or `at_3`
def load(fname: str) -> 'ParallelDataSet':
    """
    Loads a dataset from a binary .npy file.
    """
    arrays = mx.nd.load(fname)
    # The file holds three equal-length sections: source, target, label.
    third = len(arrays) // 3
    source = arrays[:third]
    target = arrays[third:2 * third]
    label = arrays[2 * third:]
    assert len(source) == len(target) == len(label)
    return ParallelDataSet(source, target, label)
Loads a dataset from a binary .npy file.
def delete_field(self, field_name_or_ref_name, project=None):
    """DeleteField.
    [Preview API] Deletes the field.
    :param str field_name_or_ref_name: Field simple name or reference name
    :param str project: Project ID or project name
    """
    serialize_url = self._serialize.url
    route_values = {}
    if project is not None:
        route_values['project'] = serialize_url('project', project, 'str')
    if field_name_or_ref_name is not None:
        route_values['fieldNameOrRefName'] = serialize_url(
            'field_name_or_ref_name', field_name_or_ref_name, 'str')
    self._send(http_method='DELETE',
               location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
               version='5.1-preview.2',
               route_values=route_values)
DeleteField. [Preview API] Deletes the field. :param str field_name_or_ref_name: Field simple name or reference name :param str project: Project ID or project name
def auth(self):
    """Authenticate with the miner and obtain a JSON web token (JWT)."""
    credentials = {'username': self.username, 'password': self.password}
    response = requests.post(parse.urljoin(self.base_url, '/api/auth'),
                             timeout=self.timeout,
                             data=credentials)
    response.raise_for_status()
    payload = response.json()
    if 'jwt' not in payload:
        raise ValueError("Not authorized: didn't receive token, check username or password.")
    self.jwt = payload['jwt']
    return payload
Authenticate with the miner and obtain a JSON web token (JWT).
def walk_cgroups(cgroup, action, opaque):
    """
    Apply `action(cgroup, opaque)` to the given control group and,
    recursively, to every control group beneath it (pre-order).
    """
    action(cgroup, opaque)
    for subgroup in cgroup.childs:
        walk_cgroups(subgroup, action, opaque)
The function applies the action function with the opaque object to each control group under the cgroup recursively.
def write(self, text='', wrap=True):
    """Write text and scroll

    Parameters
    ----------
    text : str
        Text to write. ``''`` can be used for a blank line, as a newline
        is automatically added to the end of each line.
    wrap : bool
        If True, long messages will be wrapped to span multiple lines.
    """
    if not isinstance(text, string_types):
        raise TypeError('text must be a string')
    # Ensure we only have ASCII chars; anything else is replaced.
    sanitized = text.encode('utf-8').decode('ascii', errors='replace')
    self._pending_writes.append((sanitized, wrap))
    self.update()
Write text and scroll Parameters ---------- text : str Text to write. ``''`` can be used for a blank line, as a newline is automatically added to the end of each line. wrap : str If True, long messages will be wrapped to span multiple lines.
def _logger_api(self):
    """Add API logging handler."""
    from .tcex_logger import TcExLogHandler, TcExLogFormatter
    handler = TcExLogHandler(self.session)
    handler.set_name('api')
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(TcExLogFormatter())
    self.log.addHandler(handler)
Add API logging handler.
def _getFromTime(self, atDate=None):
    """
    What was the time of this event?  Due to time zones that depends what
    day we are talking about.  If no day is given, assume today.
    """
    day = atDate if atDate is not None else timezone.localdate(timezone=self.tz)
    return getLocalTime(day, self.time_from, self.tz)
What was the time of this event? Due to time zones that depends what day we are talking about. If no day is given, assume today.
def find_by_type_or_id(type_or_id, prs):
    """
    :param type_or_id: Type of the data to process or ID of the processor class
    :param prs: A list of :class:`anyconfig.models.processor.Processor` classes
    :return: A list of processor classes to process files of given data type
        or processor 'type_or_id' found by its ID
    :raises: UnknownProcessorTypeError
    """
    matches = findall_with_pred(
        lambda pcls: pcls.cid() == type_or_id or pcls.type() == type_or_id,
        prs)
    if not matches:
        raise UnknownProcessorTypeError(type_or_id)
    return matches
:param type_or_id: Type of the data to process or ID of the processor class :param prs: A list of :class:`anyconfig.models.processor.Processor` classes :return: A list of processor classes to process files of given data type or processor 'type_or_id' found by its ID :raises: UnknownProcessorTypeError
def _resolve_child(self, path):
    'Return a member generator by a dot-delimited path'
    obj = self
    for component in path.split('.'):
        parent = obj
        if not isinstance(parent, Permuter):
            raise self.MessageNotFound("Bad element path [wrong type]")
        # pylint: disable=protected-access
        obj = next((g for g in parent._generators if g.name() == component),
                   None)
        if not obj:
            raise self.MessageNotFound(
                "Path '{}' unresolved to member.".format(path))
    return parent, obj
Return a member generator by a dot-delimited path
def process(self, uid, add_footer=False, no_exec=False, disable_cache=False,
            ignore_cache=False):
    """
    Execute notebook
    :return: self
    """
    self.exec_begin = time.perf_counter()
    self.exec_begin_dt = datetime.datetime.now()

    preprocessor = CachedExecutePreprocessor(timeout=None,
                                             kernel_name='python3')
    preprocessor.disable_cache = disable_cache
    preprocessor.ignore_cache = ignore_cache
    preprocessor.uid = uid

    # Execute the notebook
    if not no_exec:
        with warnings.catch_warnings():
            # On MacOS, annoying warning "RuntimeWarning: Failed to set
            # sticky bit on".  Let's suppress it.
            warnings.simplefilter("ignore")
            preprocessor.preprocess(self.nb, {'metadata': {'path': '.'}})

    self.exec_time = time.perf_counter() - self.exec_begin
    if add_footer:
        self.add_cell_footer()
    if not no_exec:
        logging.info('Execution time: {0:.2f}s'.format(self.exec_time))
    return self
Execute notebook :return: self
def accuracy(self, X=None, y=None, mu=None):
    """
    computes the accuracy of the LogisticGAM

    Parameters
    ----------
    note: X or mu must be defined. defaults to mu

    X : array-like of shape (n_samples, m_features), optional (default=None)
        containing input data
    y : array-like of shape (n,)
        containing target data
    mu : array-like of shape (n_samples,), optional (default=None)
        expected value of the targets given the model and inputs

    Returns
    -------
    float in [0, 1]
    """
    if not self._is_fitted:
        raise AttributeError('GAM has not been fitted. Call fit first.')

    y = check_y(y, self.link, self.distribution, verbose=self.verbose)
    if X is not None:
        X = check_X(X, n_feats=self.statistics_['m_features'],
                    edge_knots=self.edge_knots_, dtypes=self.dtype,
                    features=self.feature, verbose=self.verbose)

    if mu is None:
        mu = self.predict_mu(X)
    check_X_y(mu, y)
    # Threshold the predicted probabilities at 0.5 and compare to labels.
    predictions = (mu > 0.5).astype(int)
    return (predictions == y).mean()
computes the accuracy of the LogisticGAM Parameters ---------- note: X or mu must be defined. defaults to mu X : array-like of shape (n_samples, m_features), optional (default=None) containing input data y : array-like of shape (n,) containing target data mu : array-like of shape (n_samples,), optional (default=None expected value of the targets given the model and inputs Returns ------- float in [0, 1]
def _compute_iso_line(self):
    """ compute LineVisual vertices, connects and color-index """
    level_index = []
    connects = []
    verts = []

    # Keep only the levels that lie within the data range.  This works for
    # now and the existing examples, but should be tested thoroughly, also
    # with the data-sanity check in the set_data function.
    # NOTE(review): mixes `self.levels` and `self._levels` in one
    # expression, exactly as the original did -- presumably aliases of the
    # same array; confirm against the class definition.
    choice = np.nonzero((self.levels > self._data.min()) &
                        (self._levels < self._data.max()))
    levels_to_calc = np.array(self.levels)[choice]

    # save minimum level index
    self._level_min = choice[0][0]

    for level in levels_to_calc:
        if _HAS_MPL:
            # matplotlib's isoline algorithm aligns isolines to pixel
            # centers, so add half a pixel in both (x, y) dimensions.
            nlist = self._iso.trace(level, level, 0)
            paths = nlist[:len(nlist) // 2]
            v, c = self._get_verts_and_connect(paths)
            v += np.array([0.5, 0.5])
        else:
            paths = isocurve(self._data.astype(float).T, level,
                             extend_to_edge=True, connected=True)
            v, c = self._get_verts_and_connect(paths)
        level_index.append(v.shape[0])
        connects.append(np.hstack((c, [False])))
        verts.append(v)

    self._li = np.hstack(level_index)
    self._connect = np.hstack(connects)
    self._verts = np.vstack(verts)
compute LineVisual vertices, connects and color-index
def prt_report_grp1(self, prt=sys.stdout, **kws_grp):
    """Print full GO/gene report with grouping."""
    summaryline = self.str_summaryline()

    # Section 1: grouped GO IDs
    prt.write("{SUMMARY}\n".format(SUMMARY=summaryline))
    self.prt_gos_grouped(prt, **kws_grp)

    # Section 2: gene AART table (genes sorted by name)
    genes = sorted(self.gene2gos.keys())
    prt.write("\n\n{SUMMARY}\n\n".format(SUMMARY=summaryline))
    self.prt_section_key(prt)
    self.prt_gene_aart(genes, prt)

    # Section 3: per-gene AART details
    prt.write("\n\n{SUMMARY}\n\n".format(SUMMARY=summaryline))
    self.prt_gene_aart_details(genes, prt)
    return (self.name, self.get_section_marks())
Print full GO/gene report with grouping.
def fill_nan(self, val: str, *cols):
    """
    Fill NaN values with new values in the main dataframe

    :param val: new value
    :type val: str
    :param \*cols: names of the columns
    :type \*cols: str, at least one

    :example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
    """
    filled = self._fill_nan(val, *cols)
    if filled is None:
        # Delegate error reporting; the dataframe is left untouched.
        self.err("Can not fill nan values")
        return
    self.df = filled
Fill NaN values with new values in the main dataframe :param val: new value :type val: str :param \*cols: names of the columns :type \*cols: str, at least one :example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
def get_connection_state(self, connection: str) -> Dict[str, Any]:
    """
    For an already established connection return its state.

    :raises ConnectionNotOpen: if the connection is unknown.
    """
    if connection not in self.connections:
        raise ConnectionNotOpen(connection)
    conn = self.connections[connection]
    return conn.state
For an already established connection return its state.
def invert(self, output_directory=None, catch_output=True, **kwargs):
    """Invert this instance, and import the result files

    No directories/files will be overwritten. Raise an IOError if the
    output directory exists.

    Parameters
    ----------
    output_directory: string, optional
        use this directory as output directory for the generated tomodir.
        If None, then a temporary directory will be used that is deleted
        after import.
    catch_output: bool, optional
        Do not show CRTomo output
    cores: int, optional
        how many cores to use for CRTomo

    Returns
    -------
    return_code: bool
        Return 0 if the inversion completed successfully. Return 1 if no
        measurements are present.
    """
    self._check_state()
    if not self.can_invert:
        print('Sorry, no measurements present, cannot model yet')
        return 1

    if output_directory is None:
        # Work in a throwaway directory that is removed after import.
        with tempfile.TemporaryDirectory(dir=self.tempdir) as tempdir:
            self._invert(tempdir, catch_output, **kwargs)
    elif not os.path.isdir(output_directory):
        os.makedirs(output_directory)
        self._invert(output_directory, catch_output, **kwargs)
    else:
        # Refuse to overwrite an existing output directory.
        raise IOError(
            'output directory already exists: {0}'.format(output_directory))
    return 0
Invert this instance, and import the result files No directories/files will be overwritten. Raise an IOError if the output directory exists. Parameters ---------- output_directory: string, optional use this directory as output directory for the generated tomodir. If None, then a temporary directory will be used that is deleted after import. catch_output: bool, optional Do not show CRTomo output cores: int, optional how many cores to use for CRTomo Returns ------- return_code: bool Return 0 if the inversion completed successfully. Return 1 if no measurements are present.
def ParseConfigCommandLine():
    """Parse all the command line options which control the config system."""
    # The user may specify the primary config file on the command line.
    if not flags.FLAGS.config:
        raise RuntimeError("A config file is not specified.")
    _CONFIG.Initialize(filename=flags.FLAGS.config, must_exist=True)

    # Allow secondary configuration files to be specified.
    if flags.FLAGS.secondary_configs:
        for config_file in flags.FLAGS.secondary_configs:
            _CONFIG.LoadSecondaryConfig(config_file)

    # Individual "name=value" options become global overrides.
    for statement in flags.FLAGS.parameter:
        if "=" not in statement:
            raise RuntimeError("statement %s on command line not valid." %
                               statement)
        name, value = statement.split("=", 1)
        _CONFIG.global_override[name] = value

    # Load additional contexts from the command line.
    for context in flags.FLAGS.context:
        if context:
            _CONFIG.AddContext(context)

    if _CONFIG["Config.writeback"]:
        _CONFIG.SetWriteBack(_CONFIG["Config.writeback"])

    # Dump help only after the config system is initialized, so the user
    # can examine the effective value of all the parameters.
    if flags.FLAGS.config_help:
        print("Configuration overview.")
        _CONFIG.PrintHelp()
        sys.exit(0)
Parse all the command line options which control the config system.
def parse_TSVline(self, line):
    """ Parses result lines """
    split_row = [token.strip() for token in line.split('\t')]
    _results = {'DefaultResult': 'Conc.'}

    # Header row, e.g.: "ID# 1"
    if split_row[0] == 'ID#':
        return 0
    # Analysis keyword row, e.g.: "Name CBDV - cannabidivarin"
    elif split_row[0] == 'Name':
        if split_row[1]:
            self._currentanalysiskw = split_row[1]
            return 0
        else:
            self.warn("Analysis Keyword not found or empty",
                      numline=self._numline, line=line)
    # Column-header row: "Data Filename  Sample Name  Sample ID  Sample Type  Level#"
    elif 'Sample ID' in split_row:
        split_row.insert(0, '#')
        self._currentresultsheader = split_row
        return 0
    # Data row, e.g.: "1  QC PREP A_QC PREP A_009.lcd  QC PREP"
    elif split_row[0].isdigit():
        _results.update(dict(zip(self._currentresultsheader, split_row)))
        # Acquisition timestamp, e.g. "10/17/2016 7:55:06 PM"
        try:
            acquired = datetime.strptime(_results['Date Acquired'],
                                         "%m/%d/%Y %I:%M:%S %p")
            self._header['Output Date'] = acquired
            self._header['Output Time'] = acquired
        except ValueError:
            self.err("Invalid Output Time format",
                     numline=self._numline, line=line)
        column_name = _results['DefaultResult']
        _results[column_name] = self.zeroValueDefaultInstrumentResults(
            column_name, _results[column_name], line)
        self._addRawResult(_results['Sample ID'],
                           values={self._currentanalysiskw: _results},
                           override=False)
Parses result lines
def subscriptgroup_handle(tokens):
    """Process subscriptgroups."""
    internal_assert(0 < len(tokens) <= 3, "invalid slice args", tokens)
    # Empty slice components become explicit "None"s.
    args = [tok if tok else "None" for tok in tokens]
    if len(args) == 1:
        return args[0]
    return "_coconut.slice(" + ", ".join(args) + ")"
Process subscriptgroups.
# NOTE(review): signature validation, float coercion, and degree->radian conversion (original text preserved verbatim below).
def invariant_image_similarity(image1, image2, local_search_iterations=0, metric='MI', thetas=np.linspace(0,360,5), thetas2=np.linspace(0,360,5), thetas3=np.linspace(0,360,5), scale_image=1, do_reflection=False, txfn=None, transform='Affine'): """ Similarity metrics between two images as a function of geometry Compute similarity metric between two images as image is rotated about its center w/ or w/o optimization ANTsR function: `invariantImageSimilarity` Arguments --------- image1 : ANTsImage reference image image2 : ANTsImage moving image local_search_iterations : integer integer controlling local search in multistart metric : string which metric to use MI GC thetas : 1D-ndarray/list/tuple numeric vector of search angles in degrees thetas2 : 1D-ndarray/list/tuple numeric vector of search angles in degrees around principal axis 2 (3D) thetas3 : 1D-ndarray/list/tuple numeric vector of search angles in degrees around principal axis 3 (3D) scale_image : scalar global scale do_reflection : boolean whether to reflect image about principal axis txfn : string (optional) if present, write optimal tx to .mat file transform : string type of transform to use Rigid Similarity Affine Returns ------- pd.DataFrame dataframe with metric values and transformation parameters Example ------- >>> import ants >>> img1 = ants.image_read(ants.get_ants_data('r16')) >>> img2 = ants.image_read(ants.get_ants_data('r64')) >>> metric = ants.invariant_image_similarity(img1,img2) """ if transform not in {'Rigid', 'Similarity', 'Affine'}: raise ValueError('transform must be one of Rigid/Similarity/Affine') if image1.pixeltype != 'float': image1 = image1.clone('float') if image2.pixeltype != 'float': image2 = image2.clone('float') if txfn is None: txfn = mktemp(suffix='.mat') # convert thetas to radians thetain = (thetas * math.pi) / 180. thetain2 = (thetas2 * math.pi) / 180. thetain3 = (thetas3 * math.pi) / 180. 
# NOTE(review): normalization, then either a single native call (do_reflection=False) returning (DataFrame, txfn),
# or four native calls -- one per reflection mode 0..3 -- keeping the one with the smallest metric value.
image1 = utils.iMath(image1, 'Normalize') image2 = utils.iMath(image2, 'Normalize') idim = image1.dimension fpname = ['FixedParam%i'%i for i in range(1,idim+1)] if not do_reflection: libfn = utils.get_lib_fn('invariantImageSimilarity_%s%iD' % (transform, idim)) r1 = libfn(image1.pointer, image2.pointer, list(thetain), list(thetain2), list(thetain3), local_search_iterations, metric, scale_image, int(do_reflection), txfn) r1 = np.asarray(r1) pnames = ['Param%i'%i for i in range(1,r1.shape[1])] pnames[(len(pnames)-idim):len(pnames)] = fpname r1 = pd.DataFrame(r1, columns=['MetricValue']+pnames) return r1, txfn else: txfn1 = mktemp(suffix='.mat') txfn2 = mktemp(suffix='.mat') txfn3 = mktemp(suffix='.mat') txfn4 = mktemp(suffix='.mat') libfn = utils.get_lib_fn('invariantImageSimilarity_%s%iD' % (transform, idim)) ## R1 ## r1 = libfn(image1.pointer, image2.pointer, list(thetain), list(thetain2), list(thetain3), local_search_iterations, metric, scale_image, 0, txfn1) r1 = np.asarray(r1) pnames = ['Param%i'%i for i in range(1,r1.shape[1])] pnames[(len(pnames)-idim):len(pnames)] = fpname r1 = pd.DataFrame(r1, columns=['MetricValue']+pnames) ## R2 ## r2 = libfn(image1.pointer, image2.pointer, list(thetain), list(thetain2), list(thetain3), local_search_iterations, metric, scale_image, 1, txfn2) r2 = np.asarray(r2) r2 = pd.DataFrame(r2, columns=['MetricValue']+pnames) ## R3 ## r3 = libfn(image1.pointer, image2.pointer, list(thetain), list(thetain2), list(thetain3), local_search_iterations, metric, scale_image, 2, txfn3) r3 = np.asarray(r3) r3 = pd.DataFrame(r3, columns=['MetricValue']+pnames) ## R4 ## r4 = libfn(image1.pointer, image2.pointer, list(thetain), list(thetain2), list(thetain3), local_search_iterations, metric, scale_image, 3, txfn4) r4 = np.asarray(r4) r4 = pd.DataFrame(r4, columns=['MetricValue']+pnames) rmins = [np.min(r1.iloc[:,0]), np.min(r2.iloc[:,0]), np.min(r3.iloc[:,0]), np.min(r4.iloc[:,0])] ww = np.argmin(rmins) if ww == 0: return r1, txfn1 elif ww == 1: 
# NOTE(review): remaining branches of the best-reflection selection (returns (DataFrame, txfn) for modes 1..3).
return r2, txfn2 elif ww == 2: return r3, txfn3 elif ww == 3: return r4, txfn4
Similarity metrics between two images as a function of geometry Compute similarity metric between two images as image is rotated about its center w/ or w/o optimization ANTsR function: `invariantImageSimilarity` Arguments --------- image1 : ANTsImage reference image image2 : ANTsImage moving image local_search_iterations : integer integer controlling local search in multistart metric : string which metric to use MI GC thetas : 1D-ndarray/list/tuple numeric vector of search angles in degrees thetas2 : 1D-ndarray/list/tuple numeric vector of search angles in degrees around principal axis 2 (3D) thetas3 : 1D-ndarray/list/tuple numeric vector of search angles in degrees around principal axis 3 (3D) scale_image : scalar global scale do_reflection : boolean whether to reflect image about principal axis txfn : string (optional) if present, write optimal tx to .mat file transform : string type of transform to use Rigid Similarity Affine Returns ------- pd.DataFrame dataframe with metric values and transformation parameters Example ------- >>> import ants >>> img1 = ants.image_read(ants.get_ants_data('r16')) >>> img2 = ants.image_read(ants.get_ants_data('r64')) >>> metric = ants.invariant_image_similarity(img1,img2)
def prepare(*, operation='CREATE', signers=None, recipients=None, asset=None,
            metadata=None, inputs=None):
    """Prepares a transaction payload, ready to be fulfilled.

    Args:
        operation (str): ``'CREATE'`` or ``'TRANSFER'`` (case insensitive).
            Defaults to ``'CREATE'``.
        signers (:obj:`list` | :obj:`tuple` | :obj:`str`, optional): Public
            key(s) of the issuer(s) of the asset being created. Only applies
            to ``'CREATE'`` operations. Defaults to ``None``.
        recipients (:obj:`list` | :obj:`tuple` | :obj:`str`, optional):
            Public key(s) of the new recipient(s) of the asset being created
            or transferred. Defaults to ``None``.
        asset (:obj:`dict`, optional): The asset to be created or
            transferred. MUST be supplied for ``'TRANSFER'`` operations.
            Defaults to ``None``.
        metadata (:obj:`dict`, optional): Metadata associated with the
            transaction. Defaults to ``None``.
        inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`, optional): One or
            more inputs holding the condition(s) this transaction intends to
            fulfill, each a :obj:`dict`. Only applies to, and MUST be
            supplied for, ``'TRANSFER'`` operations.

    Returns:
        dict: The prepared transaction.

    Raises:
        :class:`~.exceptions.BigchaindbException`: If ``operation`` is not
            ``'CREATE'`` or ``'TRANSFER'``.

    .. important::

        **CREATE operations**: ``signers`` MUST be set; ``recipients``,
        ``asset`` and ``metadata`` MAY be set; ``asset`` (if given) must be
        ``{'data': {...}}``; ``inputs`` is ignored; when ``recipients`` is
        falsy it defaults to ``signers``.

        **TRANSFER operations**: ``recipients``, ``asset`` and ``inputs``
        MUST be set; ``asset`` must be ``{'id': '<CREATE tx id>'}``;
        ``metadata`` MAY be set; ``signers`` is ignored.
    """
    # Pure pass-through: all validation happens in prepare_transaction.
    return prepare_transaction(operation=operation,
                               signers=signers,
                               recipients=recipients,
                               asset=asset,
                               metadata=metadata,
                               inputs=inputs)
Prepares a transaction payload, ready to be fulfilled. Args: operation (str): The operation to perform. Must be ``'CREATE'`` or ``'TRANSFER'``. Case insensitive. Defaults to ``'CREATE'``. signers (:obj:`list` | :obj:`tuple` | :obj:`str`, optional): One or more public keys representing the issuer(s) of the asset being created. Only applies for ``'CREATE'`` operations. Defaults to ``None``. recipients (:obj:`list` | :obj:`tuple` | :obj:`str`, optional): One or more public keys representing the new recipients(s) of the asset being created or transferred. Defaults to ``None``. asset (:obj:`dict`, optional): The asset to be created or transferred. MUST be supplied for ``'TRANSFER'`` operations. Defaults to ``None``. metadata (:obj:`dict`, optional): Metadata associated with the transaction. Defaults to ``None``. inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`, optional): One or more inputs holding the condition(s) that this transaction intends to fulfill. Each input is expected to be a :obj:`dict`. Only applies to, and MUST be supplied for, ``'TRANSFER'`` operations. Returns: dict: The prepared transaction. Raises: :class:`~.exceptions.BigchaindbException`: If ``operation`` is not ``'CREATE'`` or ``'TRANSFER'``. .. important:: **CREATE operations** * ``signers`` MUST be set. * ``recipients``, ``asset``, and ``metadata`` MAY be set. * If ``asset`` is set, it MUST be in the form of:: { 'data': { ... } } * The argument ``inputs`` is ignored. * If ``recipients`` is not given, or evaluates to ``False``, it will be set equal to ``signers``:: if not recipients: recipients = signers **TRANSFER operations** * ``recipients``, ``asset``, and ``inputs`` MUST be set. * ``asset`` MUST be in the form of:: { 'id': '<Asset ID (i.e. TX ID of its CREATE transaction)>' } * ``metadata`` MAY be set. * The argument ``signers`` is ignored.
def usersettings(request):
    """Context processor exposing the current ``UserSettings``.

    Returns the ``UserSettings`` attached to *request* when present;
    otherwise falls back to looking them up via
    ``usersettings.shortcuts.get_current_usersettings`` (based on the
    project's SITE_ID).
    """
    try:
        current = request.usersettings
    except AttributeError:
        from .shortcuts import get_current_usersettings
        current = get_current_usersettings()
    return {'usersettings': current}
Returns the current ``UserSettings`` based on the SITE_ID in the project's settings as context variables If there is no 'usersettings' attribute in the request, fetches the current UserSettings (from usersettings.shortcuts.get_current_usersettings).
def term_regex(term):
    """Return a compiled, case-insensitive pattern matching exactly *term*."""
    escaped = re.escape(term)  # treat the term literally, not as a pattern
    return re.compile('^%s$' % escaped, re.IGNORECASE)
Returns a case-insensitive regex for searching terms
def leaf(self, node, elem, module, path):
    """Create a sample leaf element.

    Leaves without a default are always emitted (optionally annotated with
    their type); leaves with a default are emitted only when ``self.defaults``
    is set, with the default as element text.
    """
    has_default = node.i_default is not None
    if has_default and not self.defaults:
        return
    new_elem, _new_module, path = self.sample_element(node, elem, module, path)
    if path is None:
        return
    if has_default:
        new_elem.text = str(node.i_default_str)
    elif self.annots:
        new_elem.append(etree.Comment(
            " type: %s " % node.search_one("type").arg))
Create a sample leaf element.
def pdfFromPOST(self):
    """Return the PDF for the printed sampling rounds.

    Builds a minimal HTML document from the ``html`` and ``style`` fields
    of the POSTed form and renders it via ``printFromHTML``.
    """
    form = self.request.form
    body = form.get('html')
    css = form.get('style')
    report = "<html><head>%s</head><body><div id='report'>%s</body></html>" % (css, body)
    return self.printFromHTML(safe_unicode(report).encode('utf-8'))
It returns the pdf for the sampling rounds printed
def bin2str(b):
    """Convert a string of binary digits (8 bits per char) to a string."""
    return ''.join(chr(int(b[i:i + 8], 2)) for i in range(0, len(b), 8))
Binary to string.
def add(self, model):
    """Append *model* to the store.

    Raises:
        ValueError: if a model with the same name already exists.
    """
    def check_unique(store, _path, row_iter):
        # Abort the add if any existing row holds a model with this name.
        if store[row_iter][0].name == model.name:
            raise ValueError("Model already exists")

    self.foreach(check_unique)
    self.append((model,))
raises an exception if the model cannot be added
def validate_deprecation_semver(version_string, version_description):
    """Validate that *version_string* is a valid ``x.y.z`` dev semver.

    :param str version_string: A pantsbuild.pants version which affects some
      deprecated entity.
    :param str version_description: A string used in exception messages to
      describe what the `version_string` represents.
    :rtype: `packaging.version.Version`
    :raises DeprecationApplicationError: if the version_string parameter is
      invalid.
    """
    if version_string is None:
        raise MissingSemanticVersionError('The {} must be provided.'.format(version_description))
    if not isinstance(version_string, six.string_types):
        raise BadSemanticVersionError('The {} must be a version string.'.format(version_description))

    try:
        # NB: packaging will see versions like 1.a.0 as 1a0, and are "valid"
        # We explicitly want our versions to be of the form x.y.z.
        parsed = Version(version_string)
    except InvalidVersion as exc:
        raise BadSemanticVersionError('The given {} {} is not a valid version: '
                                      '{}'.format(version_description, version_string, exc))

    if len(parsed.base_version.split('.')) != 3:
        raise BadSemanticVersionError('The given {} is not a valid version: '
                                      '{}'.format(version_description, version_string))
    if not parsed.is_prerelease:
        raise NonDevSemanticVersionError('The given {} is not a dev version: {}\n'
                                         'Features should generally be removed in the first `dev` release '
                                         'of a release cycle.'.format(version_description, version_string))
    return parsed
Validates that version_string is a valid semver. If so, returns that semver. Raises an error otherwise. :param str version_string: A pantsbuild.pants version which affects some deprecated entity. :param str version_description: A string used in exception messages to describe what the `version_string` represents. :rtype: `packaging.version.Version` :raises DeprecationApplicationError: if the version_string parameter is invalid.
def process_ssh(self, data, name):
    """Process a blob of SSH public keys, one key per line.

    :param data: raw bytes containing newline-separated SSH public keys
    :param name: source name, used for logging and passed to the line parser
    :return: list of per-line parse results, or None for empty input
    """
    if data is None or len(data) == 0:
        return

    results = []
    try:
        stripped = [raw.strip() for raw in data.split(b'\n')]
        for line_no, line in enumerate(stripped):
            results.append(self.process_ssh_line(line, name, line_no))
    except Exception as e:
        # Best-effort: log and return whatever was parsed so far.
        logger.debug('Exception in processing SSH public key %s : %s' % (name, e))
        self.trace_logger.log(e)
    return results
Processes SSH keys :param data: :param name: :return:
def groupby_field(records, field_name, skip_none=True):
    """Group *records* into a dict keyed by the value of attribute *field_name*."""
    def key_of(record):
        return getattr(record, field_name)

    return apply_groupby(records, key_of, skip_none=skip_none)
Given a list of objects, group them into a dictionary by the unique values of a given field name.
def parse(response):
    """Parse a postdata-style (``a=1&b=2&c=3``) API response into usable data.

    :param str response: the raw ``key=value&...`` response body.
    :return: a dict of ``{key: value}`` pairs, or — when key names end in
        digits, indicating multiple result sets — a list of dicts, one per
        result set, ordered by the numeric suffix.
    """
    # Split "a=1&b=2" into a dict of pairs. maxsplit=1 keeps values that
    # themselves contain '=' intact (the original split truncated them).
    tokens = dict(pair.split('=', 1) for pair in response.split('&'))

    # The odd dummy parameter is of no use to us.
    tokens.pop('dummy', None)

    # If key names end in digits, the response holds multiple result sets:
    # e.g. "planet0=Hoth&planet1=Naboo" is data for two planets. Keys like
    # tag0_1 carry a separating underscore before the set index, which we
    # strip from the base key.
    first_key = next(iter(tokens), '')
    if re.search(r'\D\d+$', first_key):
        grouped = {}
        for key, value in tokens.items():
            match = re.match(r'^(.+?\D)(\d+)$', key)
            if match is None:
                continue  # failsafe: skip keys that don't fit the pattern
            set_index = int(match.group(2))
            base_key = match.group(1).rstrip('_')
            grouped.setdefault(set_index, {})[base_key] = value
        # Produce a list of dictionaries, ordered by set index.
        return [grouped[i] for i in sorted(grouped)]

    return tokens
Parse a postdata-style response format from the API into usable data
def _createMagConversionDict():
    """Load magnitude_conversion.dat (table from 1995ApJS..101..117K) into a dict
    mapping star class to its row of table values."""
    stream = resource_stream(__name__, 'data/magnitude_conversion.dat')
    table = np.loadtxt(stream, '|S5')

    conversions = {}
    on_py3 = sys.hexversion >= 0x03000000
    for row in table:
        if on_py3:
            # loadtxt yields bytes on Python 3; decode so keys/values are
            # plain str rather than b'...' artifacts of 2to3.
            star_class = row[1].decode("utf-8")
            values = [cell.decode("utf-8") for cell in row[3:]]
        else:
            star_class = row[1]
            values = row[3:]
        conversions[star_class] = values
    return conversions
loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K
def set_backend_for_mayavi(self, command):
    """Switch to the Qt backend when *command* appears to use Mayavi.

    Mayavi plots require the Qt backend, so we scan the (non-comment) lines
    of the command for a Mayavi import and change backends if one is found.
    """
    uses_mayavi = any(
        'import mayavi' in line or 'from mayavi' in line
        for line in command.splitlines()
        if not line.startswith('#')
    )
    if uses_mayavi:
        message = _("Changing backend to Qt for Mayavi")
        self._append_plain_text(message + '\n')
        self.silent_execute("%gui inline\n%gui qt")
Mayavi plots require the Qt backend, so we try to detect if one is generated to change backends
def _constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value(lb, ub):
    """Translate (lb, ub) bounds into a gurobi (sense, rhs, range_value) triple.

    Helper function used by Constraint and Model.
    """
    if lb is None and ub is None:
        raise Exception("Free constraint ...")
    if lb is None:
        return '<', float(ub), 0.      # upper-bounded only
    if ub is None:
        return '>', float(lb), 0.      # lower-bounded only
    if lb == ub:
        return '=', float(lb), 0.      # equality
    if lb > ub:
        raise ValueError("Lower bound is larger than upper bound.")
    # Ranged constraint: gurobi encodes it as '=' with a range value.
    return '=', float(lb), float(ub - lb)
Helper function used by Constraint and Model
def call(node: Node, key: str, value: Any):
    """Calls node or node instance method.

    *value* is normalized to a list whose last element is the kwargs dict;
    everything before it is passed as positional arguments.
    """
    parts = _to_list(value)
    if not parts or not isinstance(parts[-1], dict):
        parts.append({})  # ensure a (possibly empty) kwargs dict is present
    *args, kwargs = parts
    node.__dict__[key](*args, **kwargs)
Calls node or node instance method
def second_order_score(y, mean, scale, shape, skewness):
    """GAS t update term potentially using second-order information — native
    Python function.

    Parameters
    ----------
    y : float
        datapoint for the time series
    mean : float
        location parameter for the t distribution
    scale : float
        scale parameter for the t distribution
    shape : float
        tail thickness parameter for the t distribution
    skewness : float
        skewness parameter for the t distribution (unused here)

    Returns
    ----------
    - Adjusted score of the t family
    """
    resid = y - mean
    var = np.power(scale, 2)
    # First-order score of the t distribution.
    score = ((shape + 1) / shape) * resid / (var + np.power(resid, 2) / shape)
    # Second-order scaling term.
    scaling = ((shape + 1) * (var * shape - np.power(resid, 2))
               / np.power(var * shape + np.power(resid, 2), 2))
    return score / scaling
GAS t Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the t distribution scale : float scale parameter for the t distribution shape : float tail thickness parameter for the t distribution skewness : float skewness parameter for the t distribution Returns ---------- - Adjusted score of the t family
def _deserialize_v1(self, deserialized):
    '''Deserialize a JSON macaroon in v1 format.

    @param deserialized the macaroon as a dict in v1 JSON format.
    @return the macaroon object.
    '''
    from pymacaroons.macaroon import Macaroon, MACAROON_V1
    from pymacaroons.caveat import Caveat

    def build_caveat(c):
        vid = c.get('vid')
        return Caveat(
            caveat_id=c['cid'],
            verification_key_id=utils.raw_b64decode(vid) if vid else None,
            location=c.get('cl') or None,
            version=MACAROON_V1,
        )

    caveats = [build_caveat(c) for c in deserialized.get('caveats', [])]
    return Macaroon(
        location=deserialized.get('location'),
        identifier=deserialized['identifier'],
        caveats=caveats,
        signature=deserialized['signature'],
        version=MACAROON_V1,
    )
Deserialize a JSON macaroon in v1 format. @param serialized the macaroon in v1 JSON format. @return the macaroon object.
def angle(self, other):
    """Return the angle (in radians) to the vector *other*."""
    cos_theta = self.dot(other) / (self.magnitude() * other.magnitude())
    return math.acos(cos_theta)
Return the angle to the vector other
def mat_to_laplacian(mat, normalized):
    """
    Convert a sparse or dense adjacency matrix to a Laplacian.

    Parameters
    ----------
    mat : obj
        Input adjacency matrix. If it is a Laplacian matrix already, return it.
    normalized : bool
        Whether to use normalized Laplacian.
        Normalized and unnormalized Laplacians capture different properties of
        graphs, e.g. normalized Laplacian spectrum can determine whether a
        graph is bipartite, but not the number of its edges. We recommend
        using normalized Laplacian.

    Returns
    -------
    obj
        Laplacian of the input adjacency matrix

    Examples
    --------
    >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)
    [[ 2, -1, -1], [-1,  2, -1], [-1, -1,  2]]
    """
    # If `mat` already looks like a Laplacian (non-negative diagonal,
    # non-positive off-diagonal), return it unchanged.
    if sps.issparse(mat):
        if np.all(mat.diagonal() >= 0):
            if np.all((mat - sps.diags(mat.diagonal())).data <= 0):
                return mat
    else:
        if np.all(np.diag(mat) >= 0):
            if np.all(mat - np.diag(mat) <= 0):
                return mat

    degrees = np.squeeze(np.asarray(mat.sum(axis=1)))
    if sps.issparse(mat):
        laplacian = sps.diags(degrees) - mat
    else:
        laplacian = np.diag(degrees) - mat
    if not normalized:
        return laplacian

    # Symmetric normalization: D^{-1/2} L D^{-1/2}, with isolated vertices
    # (degree 0) mapped to 0 instead of inf.
    with np.errstate(divide='ignore'):
        inv_sqrt_deg = 1.0 / np.sqrt(degrees)
    inv_sqrt_deg[inv_sqrt_deg == np.inf] = 0
    diag_ctor = sps.diags if sps.issparse(mat) else np.diag
    d_half = diag_ctor(inv_sqrt_deg)
    return d_half.dot(laplacian).dot(d_half)
Converts a sparse or dense adjacency matrix to Laplacian. Parameters ---------- mat : obj Input adjacency matrix. If it is a Laplacian matrix already, return it. normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- obj Laplacian of the input adjacency matrix Examples -------- >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False) [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
def _break_line(self, line_source, point_size):
    """
    Return a (line, remainder) pair where *line* is the longest line in
    *line_source* that fits in this fitter's width at *point_size*, and
    *remainder* is a |_LineSource| containing the text after the break.
    """
    fits = self._fits_in_width_predicate(point_size)
    candidates = _BinarySearchTree.from_ordered_sequence(line_source)
    return candidates.find_max(fits)
Return a (line, remainder) pair where *line* is the longest line in *line_source* that will fit in this fitter's width and *remainder* is a |_LineSource| object containing the text following the break point.
def emailclients(self, tag=None, fromdate=None, todate=None):
    """Get an overview of the email clients used to open your emails.

    This is only recorded when open tracking is enabled for that email.
    """
    endpoint = "/stats/outbound/opens/emailclients"
    return self.call("GET", endpoint,
                     tag=tag, fromdate=fromdate, todate=todate)
Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email.
def __insert_frond_LF(d_w, d_u, dfs_data):
    """Insert the frond uw into the left-side frond group, updating bookkeeping."""
    frond = (d_w, d_u)
    # --Add the frond to the left side and track the insertion
    dfs_data['LF'].append(frond)
    dfs_data['FG']['l'] += 1
    dfs_data['last_inserted_side'] = 'LF'
Encapsulates the process of inserting a frond uw into the left side frond group.
async def read(self, n=None):
    """Read and concatenate the full content.

    Returns ``b''`` immediately when the body has already been streamed;
    the *n* argument is accepted for interface compatibility but ignored.
    """
    if self._streamed:
        return b''
    chunks = []
    async for chunk in self:
        chunks.append(chunk)
    return b''.join(chunks)
Read all content
def dftphotom(cfg):
    """Run the discrete-Fourier-transform photometry algorithm.

    See the module-level documentation and the output of ``casatask dftphotom
    --help`` for help. All of the algorithm configuration is specified in the
    *cfg* argument, which is an instance of :class:`Config`.

    """
    tb = util.tools.table()
    ms = util.tools.ms()
    me = util.tools.measures()

    # Read stuff in. Even if the weight values don't have their
    # absolute scale set correctly, we can still use them to set the
    # relative weighting of the data points.
    #
    # datacol is (ncorr, nchan, nchunk)
    # flag is (ncorr, nchan, nchunk)
    # weight is (ncorr, nchunk)
    # uvw is (3, nchunk)
    # time is (nchunk)
    # axis_info.corr_axis is (ncorr)
    # axis_info.freq_axis.chan_freq is (nchan, 1) [for now?]
    #
    # Note that we apply msselect() again when reading the data because
    # selectinit() is broken, but the invocation here is good because it
    # affects the results from ms.range() and friends.

    if ':' in (cfg.spw or ''):
        warn('it looks like you are attempting to select channels within one or more spws')
        warn('this is NOT IMPLEMENTED; I will average over the whole spw instead')

    ms.open(b(cfg.vis))
    totrows = ms.nrow()
    ms_sels = dict((n, cfg.get(n)) for n in util.msselect_keys
                   if cfg.get(n) is not None)
    ms.msselect(b(ms_sels))

    rangeinfo = ms.range(b'data_desc_id field_id'.split())
    ddids = rangeinfo['data_desc_id']
    fields = rangeinfo['field_id']
    colnames = [cfg.datacol] + 'flag weight time axis_info'.split()
    rephase = (cfg.rephase is not None)

    if fields.size != 1:
        # I feel comfortable making this a fatal error, even if we're
        # not rephasing.
        die('selected data should contain precisely one field; got %d', fields.size)

    if rephase:
        # Compute the (l, m, n) direction cosines of the new phase center
        # relative to the original one, so per-record phase factors can be
        # applied below.
        fieldid = fields[0]
        tb.open(b(os.path.join(cfg.vis, 'FIELD')))
        phdirinfo = tb.getcell(b'PHASE_DIR', fieldid)
        tb.close()

        if phdirinfo.shape[1] != 1:
            die('trying to rephase but target field (#%d) has a '
                'time-variable phase center, which I can\'t handle', fieldid)
        ra0, dec0 = phdirinfo[:,0] # in radians.

        # based on intflib/pwflux.py, which was copied from
        # hex/hex-lib-calcgainerr:
        dra = cfg.rephase[0] - ra0
        dec = cfg.rephase[1]
        l = np.sin(dra) * np.cos(dec)
        m = np.sin(dec) * np.cos(dec0) - np.cos(dra) * np.cos(dec) * np.sin(dec0)
        n = np.sin(dec) * np.sin(dec0) + np.cos(dra) * np.cos(dec) * np.cos(dec0)
        n -= 1 # makes the work below easier
        lmn = np.asarray([l, m, n])
        colnames.append('uvw')

    # Also need this although 99% of the time `ddid` and `spwid` are the same
    tb.open(b(os.path.join(cfg.vis, 'DATA_DESCRIPTION')))
    ddspws = np.asarray(tb.getcol(b'SPECTRAL_WINDOW_ID'))
    tb.close()

    # tbins maps TT MJD -> [sum(w*d), sum(w*d^2), sum(w), sum(w^2), nrecords]
    tbins = {}
    colnames = b(colnames)

    for ddindex, ddid in enumerate(ddids):
        # Starting in CASA 4.6, selectinit(ddid) stopped actually filtering
        # your data to match the specified DDID! What garbage. Work around
        # with our own filtering.
        ms_sels['taql'] = 'DATA_DESC_ID == %d' % ddid
        ms.msselect(b(ms_sels))

        ms.selectinit(ddid)
        if cfg.polarization is not None:
            ms.selectpolarization(b(cfg.polarization.split(',')))
        ms.iterinit(maxrows=4096)
        ms.iterorigin()

        while True:
            cols = ms.getdata(items=colnames)

            if rephase:
                # With appropriate spw/DDID selection, `freqs` has shape
                # (nchan, 1). Convert to m^-1 so we can multiply against UVW
                # directly.
                freqs = cols['axis_info']['freq_axis']['chan_freq']
                assert freqs.shape[1] == 1, 'internal inconsistency, chan_freq??'
                freqs = freqs[:,0] * util.INVERSE_C_MS

            for i in range(cols['time'].size): # all records
                time = cols['time'][i]
                # get out of UTC as fast as we can! For some reason
                # giving 'unit=s' below doesn't do what one might hope it would.
                # CASA can convert to a variety of timescales; TAI is probably
                # the safest conversion in terms of being helpful while remaining
                # close to the fundamental data, but TT is possible and should
                # be perfectly precise for standard applications.
                mq = me.epoch(b'utc', b({'value': time / 86400., 'unit': 'd'}))
                mjdtt = me.measure(b(mq), b'tt')['m0']['value']

                tdata = tbins.get(mjdtt, None)
                if tdata is None:
                    tdata = tbins[mjdtt] = [0., 0., 0., 0., 0]

                if rephase:
                    uvw = cols['uvw'][:,i]
                    ph = np.exp((0-2j) * np.pi * np.dot(lmn, uvw) * freqs)

                for j in range(cols['flag'].shape[0]): # all polns
                    # We just average together all polarizations right now!
                    # (Not actively, but passively by just iterating over them.)
                    data = cols[cfg.datacol][j,:,i]
                    flags = cols['flag'][j,:,i]

                    # XXXXX casacore is currently (ca. 2012) broken and
                    # returns the raw weights from the dataset rather than
                    # applying the polarization selection. Fortunately all of
                    # our weights are the same, and you can never fetch more
                    # pol types than the dataset has, so this bit works
                    # despite the bug.
                    w = np.where(~flags)[0]
                    if not w.size:
                        continue # all flagged

                    if rephase:
                        data *= ph

                    d = data[w].mean()
                    # account for flagged parts. 90% sure this is the
                    # right thing to do:
                    wt = cols['weight'][j,i] * w.size / data.size
                    wd = wt * d
                    # note a little bit of a hack here to encode real^2 and
                    # imag^2 separately:
                    wd2 = wt * (d.real**2 + (1j) * d.imag**2)

                    tdata[0] += wd
                    tdata[1] += wd2
                    tdata[2] += wt
                    tdata[3] += wt**2
                    tdata[4] += 1

            if not ms.iternext():
                break

        ms.reset() # reset selection filter so we can get next DDID

    ms.close()

    # Could gain some efficiency by using a better data structure than a dict().
    smjd = sorted(six.iterkeys(tbins))
    cfg.format.header(cfg)

    for mjd in smjd:
        wd, wd2, wt, wt2, n = tbins[mjd]
        if n < 3: # not enough data for meaningful statistics
            continue

        dtmin = 1440 * (mjd - smjd[0])
        r_sc = wd.real / wt * cfg.datascale
        i_sc = wd.imag / wt * cfg.datascale
        r2_sc = wd2.real / wt * cfg.datascale**2
        i2_sc = wd2.imag / wt * cfg.datascale**2

        if cfg.believeweights:
            # Trust the dataset's absolute weight scale for uncertainties.
            ru_sc = wt**-0.5 * cfg.datascale
            iu_sc = wt**-0.5 * cfg.datascale
        else:
            rv_sc = r2_sc - r_sc**2 # variance among real/imag msmts
            iv_sc = i2_sc - i_sc**2
            ru_sc = np.sqrt(rv_sc * wt2) / wt # uncert in mean real/img values
            iu_sc = np.sqrt(iv_sc * wt2) / wt

        mag = np.sqrt(r_sc**2 + i_sc**2)
        umag = np.sqrt(r_sc**2 * ru_sc**2 + i_sc**2 * iu_sc**2) / mag
        cfg.format.row(cfg, mjd, dtmin, r_sc, ru_sc, i_sc, iu_sc, mag, umag, n)
Run the discrete-Fourier-transform photometry algorithm. See the module-level documentation and the output of ``casatask dftphotom --help`` for help. All of the algorithm configuration is specified in the *cfg* argument, which is an instance of :class:`Config`.
def on_bus(self, bus_idx):
    """
    Return the indices of elements on the given buses for shunt-connected
    elements.

    :param bus_idx: idx of the buses to which the elements are connected;
        a scalar (int/float/str) or a list of them
    :return: idx of elements connected to bus_idx — a scalar for a single
        match, a list for multiple matches, None for no match; results for
        multiple requested buses are returned as a list
    """
    assert hasattr(self, 'bus')

    if isinstance(bus_idx, (int, float, str)):
        bus_idx = [bus_idx]

    results = []
    for wanted in bus_idx:
        matches = [self.idx[pos] for pos, bus in enumerate(self.bus)
                   if bus == wanted]
        if len(matches) == 1:
            results.append(matches[0])
        elif not matches:
            results.append(None)
        else:
            results.append(matches)

    return results[0] if len(results) == 1 else results
def fifo(rst, clk, full, we, din, empty, re, dout, afull=None, aempty=None,
         afull_th=None, aempty_th=None, ovf=None, udf=None, count=None,
         count_max=None, depth=None, width=None):
    """ Synchronous FIFO

    Input interface: full, we, din

    Output interface: empty, re, dout

    It is possible to set din and dout to None. Then the fifo width will be 0
    and the fifo will contain no storage.

    Extra interface:
        afull  (o) - almost full flag, asserted when the number of empty cells <= afull_th
        aempty (o) - almost empty flag, asserted when the number of full cells <= aempty_th
        afull_th (i) - almost full threshold, in terms of fifo cells; signal or constant; Optional, default depth/2
        aempty_th (i) - almost empty threshold, in terms of fifo cells; signal or constant; Optional, default depth/2
        count (o) - number of occupied fifo cells
        count_max (o) - max number of occupied fifo cells reached since the last reset
        ovf (o) - overflow flag, set at the first write in a full fifo, cleared at reset
        udf (o) - underflow flag, set at the first read from an empty fifo, cleared at reset

    Parameters:
        depth - fifo depth, must be >= 1; if not set or set to `None` default value 2 is used
        width - data width in bits, must be >= 0; if not set or set to `None` the `din` width is used
    """
    # Derive the data width from `din` when not given explicitly.
    if (width == None):
        width = 0
        if din is not None:
            width = len(din)
    if (depth == None):
        depth = 2

    full_flg = Signal(bool(1))
    empty_flg = Signal(bool(1))
    we_safe = Signal(bool(0))   # write enable gated by the full flag
    re_safe = Signal(bool(0))   # read enable gated by the empty flag

    rd_ptr = Signal(intbv(0, min=0, max=depth))
    rd_ptr_new = Signal(intbv(0, min=0, max=depth))
    wr_ptr = Signal(intbv(0, min=0, max=depth))
    wr_ptr_new = Signal(intbv(0, min=0, max=depth))

    @always_comb
    def safe_read_write():
        full.next = full_flg
        empty.next = empty_flg
        we_safe.next = we and not full_flg
        re_safe.next = re and not empty_flg

    #===========================================================================
    # Write, Read, Full, Empty
    #===========================================================================

    @always_comb
    def ptrs_new():
        # Next pointer values, wrapping at `depth`.
        rd_ptr_new.next = ((rd_ptr + 1) % depth)
        wr_ptr_new.next = ((wr_ptr + 1) % depth)

    @always(clk.posedge)
    def state_main():
        if (rst):
            wr_ptr.next = 0
            rd_ptr.next = 0
            full_flg.next = 0
            empty_flg.next = 1
        else:
            # Write pointer
            if (we_safe):
                wr_ptr.next = wr_ptr_new
            # Read pointer
            if (re_safe):
                rd_ptr.next = rd_ptr_new
            # Empty flag
            if (we_safe):
                empty_flg.next = 0
            elif (re_safe and (rd_ptr_new == wr_ptr)):
                empty_flg.next = 1
            # Full flag
            if (re_safe):
                full_flg.next = 0
            elif (we_safe and (wr_ptr_new == rd_ptr)):
                full_flg.next = 1

    #===========================================================================
    # Count, CountMax
    #===========================================================================
    ''' Count '''
    # The occupancy counter is only generated when some consumer needs it.
    if (count != None) or (count_max != None) or (afull != None) or (aempty != None):
        count_r = Signal(intbv(0, min=0, max=depth+1))
        count_new = Signal(intbv(0, min=-1, max=depth+2))

        if (count != None):
            assert count.max > depth
            @always_comb
            def count_out():
                count.next = count_r

        @always_comb
        def count_comb():
            if (we_safe and not re_safe):
                count_new.next = count_r + 1
            elif (not we_safe and re_safe):
                count_new.next = count_r - 1
            else:
                count_new.next = count_r

        @always(clk.posedge)
        def count_proc():
            if (rst):
                count_r.next = 0
            else:
                count_r.next = count_new

    ''' Count max '''
    if (count_max != None):
        assert count_max.max > depth
        count_max_r = Signal(intbv(0, min=0, max=count_max.max))

        @always(clk.posedge)
        def count_max_proc():
            if (rst):
                count_max_r.next = 0
            else:
                # Track the high-water mark of the occupancy counter.
                if (count_max_r < count_new):
                    count_max_r.next = count_new

        @always_comb
        def count_max_out():
            count_max.next = count_max_r

    #===========================================================================
    # AlmostFull, AlmostEmpty
    #===========================================================================
    ''' AlmostFull flag '''
    if (afull != None):
        if (afull_th == None):
            afull_th = depth//2
        @always(clk.posedge)
        def afull_proc():
            if (rst):
                afull.next = 0
            else:
                afull.next = (count_new >= depth-afull_th)

    ''' AlmostEmpty flag '''
    if (aempty != None):
        if (aempty_th == None):
            aempty_th = depth//2
        @always(clk.posedge)
        def aempty_proc():
            if (rst):
                aempty.next = 1
            else:
                aempty.next = (count_new <= aempty_th)

    #===========================================================================
    # Overflow, Underflow
    #===========================================================================
    ''' Overflow flag '''
    if (ovf != None):
        @always(clk.posedge)
        def ovf_proc():
            if (rst):
                ovf.next = 0
            else:
                # Sticky: set on a write into a full fifo, cleared only at reset.
                if (we and full_flg):
                    ovf.next = 1

    ''' Underflow flag '''
    if (udf != None):
        @always(clk.posedge)
        def udf_proc():
            if (rst):
                udf.next = 0
            else:
                # Sticky: set on a read from an empty fifo, cleared only at reset.
                if (re and empty_flg):
                    udf.next = 1

    if width>0:
        #===========================================================================
        # Memory instance
        #===========================================================================
        mem_we = Signal(bool(0))
        mem_addrw = Signal(intbv(0, min=0, max=depth))
        mem_addrr = Signal(intbv(0, min=0, max=depth))
        mem_di = Signal(intbv(0)[width:0])
        mem_do = Signal(intbv(0)[width:0])

        # RAM: Simple-Dual-Port, Asynchronous read
        mem = ram_sdp_ar(
            clk = clk,
            we = mem_we,
            addrw = mem_addrw,
            addrr = mem_addrr,
            di = mem_di,
            do = mem_do
        )

        @always_comb
        def mem_connect():
            mem_we.next = we_safe
            mem_addrw.next = wr_ptr
            mem_addrr.next = rd_ptr
            mem_di.next = din
            dout.next = mem_do

    return instances()
def change_color(color):
    """Change the color of the currently selected objects.

    *color* may be a color name string, an rgba tuple of values between
    0 and 255, or ``None`` to reset the color.

    You can call this function interactively by using::

        change_color.interactive()

    A new dialog will popup with a color chooser.

    Raises:
        ValueError: if *color* is not a string, tuple, or None.
    """
    rep = current_representation()

    # Parse the color argument into an rgba value (or None to reset).
    if color is None:
        col = None
    elif isinstance(color, str):
        col = color_from_string(color)
    elif isinstance(color, tuple):
        col = color  # Color array
    else:
        # Previously an unsupported type left `col` unbound and crashed
        # later with a NameError; fail early with a clear message instead.
        raise ValueError("color must be a string, an rgba tuple, or None")

    rep.change_color(rep.selection_state, col)
def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':
    '''Returns a copy of the input fluent with stop_gradient at tensor level.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent that stops backpropagation of gradient computations.
    '''
    blocked = tf.stop_gradient(x.tensor)
    return TensorFluent(blocked, x.scope.as_list(), x.batch)
def select_inputs(self, amount):
    '''Maximize transaction priority.

    Select the oldest inputs that are sufficient to cover the spent
    amount, then remove any unneeded inputs, starting with the smallest
    in value. Stores the selection in ``self.ins`` and returns the sum of
    the amounts of the inputs selected.
    '''
    # Oldest first: most confirmations at the front.
    by_age = sorted(self.ins, key=lambda txin: -txin['input']['confirmations'])

    total = 0
    for cutoff, txin in enumerate(by_age):
        total += txin['input']['amount']
        if total >= amount:
            break

    # Among the chosen inputs, drop the smallest ones we can spare.
    chosen = sorted(by_age[:cutoff + 1], key=lambda txin: txin['input']['amount'])
    for drop, txin in enumerate(chosen):
        value = txin['input']['amount']
        if total - value < amount:
            break
        total -= value

    self.ins = chosen[drop:]
    return total
def acquire(self, *, raise_on_failure=True):
    """Attempt to acquire a slot under this rate limiter.

    Parameters:
      raise_on_failure(bool): Whether or not failures should raise an
        exception. If this is false, the context manager will instead
        return a boolean value representing whether or not the rate
        limit slot was acquired.

    Returns:
      bool: Whether or not the slot could be acquired.
    """
    got_slot = False
    try:
        got_slot = self._acquire()
        if not got_slot and raise_on_failure:
            raise RateLimitExceeded("rate limit exceeded for key %(key)r" % vars(self))
        yield got_slot
    finally:
        # Only release what we actually acquired.
        if got_slot:
            self._release()
def pr_num(self):
    """Return the PR number, or None if not on a PR.

    Checks the repository first, then falls back to the Travis environment.
    """
    from_repo = get_pr_num(repo=self.repo)
    if from_repo is not None:
        return from_repo
    return get_travis_pr_num()
def data64_send(self, type, len, data, force_mavlink1=False):
    '''
    Data packet, size 64

    type              : data type (uint8_t)
    len               : data length (uint8_t)
    data              : raw data (uint8_t)

    '''
    packet = self.data64_encode(type, len, data)
    return self.send(packet, force_mavlink1=force_mavlink1)
Follows the path and expand all nodes along the way. Returns (item, index) tuple of the last node in the path (the leaf node). This can be reused e.g. to select it.
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """
    This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
    time, to finalize predictions.  This is (confusingly) a separate notion from the "decoder"
    in "encoder/decoder", where that decoder logic lives in the ``TransitionFunction``.

    This method trims the output predictions to the first end symbol, replaces indices with
    corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
    """
    # action_mapping: (batch_index, action_index) -> human-readable rule string.
    action_mapping = output_dict['action_mapping']
    best_actions = output_dict["best_action_sequence"]
    debug_infos = output_dict['debug_info']
    batch_action_info = []
    for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)):
        instance_action_info = []
        for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
            action_info = {}
            action_info['predicted_action'] = predicted_action
            considered_actions = action_debug_info['considered_actions']
            probabilities = action_debug_info['probabilities']
            actions = []
            for action, probability in zip(considered_actions, probabilities):
                # -1 marks padding entries for actions that were not
                # actually available at this decoding step.
                if action != -1:
                    actions.append((action_mapping[(batch_index, action)], probability))
            # Sort alphabetically by rule string for a stable presentation.
            actions.sort()
            considered_actions, probabilities = zip(*actions)
            action_info['considered_actions'] = considered_actions
            action_info['action_probabilities'] = probabilities
            action_info['question_attention'] = action_debug_info.get('question_attention', [])
            instance_action_info.append(action_info)
        batch_action_info.append(instance_action_info)
    output_dict["predicted_actions"] = batch_action_info
    return output_dict
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. This is (confusingly) a separate notion from the "decoder" in "encoder/decoder", where that decoder logic lives in the ``TransitionFunction``. This method trims the output predictions to the first end symbol, replaces indices with corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
def register(self, es, append=None, modulo=None):
    """register a `CMAEvolutionStrategy` instance for logging,
    ``append=True`` appends to previous data logged under the same name,
    by default previous data are overwritten.
    """
    # Reject anything that is not a genuine CMAEvolutionStrategy.
    if not isinstance(es, CMAEvolutionStrategy):
        raise TypeError("only class CMAEvolutionStrategy can be " +
                        "registered for logging")
    self.es = es
    # Only overwrite the logger settings that were explicitly supplied.
    for option_name, option_value in (('append', append), ('modulo', modulo)):
        if option_value is not None:
            setattr(self, option_name, option_value)
    self.registered = True
    return self
register a `CMAEvolutionStrategy` instance for logging, ``append=True`` appends to previous data logged under the same name, by default previous data are overwritten.
def cleanup_bundle():
    """Deletes files used for creating bundle.

    * vendored/*
    * bundle.zip
    """
    for target in ('./vendored', './bundle.zip'):
        if not os.path.exists(target):
            continue
        log.debug("Deleting %s..." % target)
        # Directories need a recursive delete; plain files a simple unlink.
        remover = shutil.rmtree if os.path.isdir(target) else os.remove
        remover(target)
Deletes files used for creating bundle. * vendored/* * bundle.zip
def export_for_schema(self):
    """
    Returns a string version of these replication options which are
    suitable for use in a CREATE KEYSPACE statement.
    """
    # Emit datacenters in sorted order so the output is deterministic.
    parts = ["'class': 'NetworkTopologyStrategy'"]
    parts.extend("'%s': '%d'" % (dc, repl_factor)
                 for dc, repl_factor in sorted(self.dc_replication_factors.items()))
    return "{" + ", ".join(parts) + "}"
Returns a string version of these replication options which are suitable for use in a CREATE KEYSPACE statement.
def task(__decorated__=None, **Config):
    r"""A decorator to make tasks out of functions.

    Config:
            * name (str): The name of the task. Defaults to __decorated__.__name__.
            * desc (str): The description of the task (optional).
            * alias (str): The alias for the task (optional).
    """
    if isinstance(__decorated__, tuple):  # the task has some args
        # NOTE(review): Config is passed by keyword here but positionally
        # in the branch below — confirm Task's signature accepts both.
        _Task = Task(__decorated__[0], __decorated__[1], Config=Config)
    else:
        _Task = Task(__decorated__, [], Config)
    # Register the new task at the front of the active module's member queue.
    state.ActiveModuleMemberQ.insert(0, _Task)
    # Hand back the wrapped callable so the decorated name stays usable.
    return _Task.Underlying
r"""A decorator to make tasks out of functions. Config: * name (str): The name of the task. Defaults to __decorated__.__name__. * desc (str): The description of the task (optional). * alias (str): The alias for the task (optional).
def to_internal_value(self, value):
    """Convert to integer id."""
    # `value` looks like "<app_label>_<model>"; split it into the
    # ContentType natural-key parts and return the matching primary key.
    natural_key_parts = value.split("_")
    return ContentType.objects.get_by_natural_key(*natural_key_parts).id
Convert to integer id.
def prepare_headers(table, bound_columns):
    """
    Annotate each sortable column with its current sort state and a URL
    that toggles sorting on it, based on the table request's GET params.

    :type bound_columns: list of BoundColumn
    """
    # Nothing to compute outside of a request cycle.
    if table.request is None:
        return
    for column in bound_columns:
        if column.sortable:
            params = table.request.GET.copy()
            param_path = _with_path_prefix(table, 'order')
            order = table.request.GET.get(param_path, None)
            # Default click direction when the column is not yet sorted.
            start_sort_desc = column.sort_default_desc
            params[param_path] = column.name if not start_sort_desc else '-' + column.name
            column.is_sorting = False
            if order is not None:
                # A leading '-' in the order param means descending.
                is_desc = order.startswith('-')
                order_field = order if not is_desc else order[1:]
                if order_field == column.name:
                    # Column is currently sorted: the link flips direction.
                    new_order = order_field if is_desc else ('-' + order_field)
                    params[param_path] = new_order
                    column.sort_direction = DESCENDING if is_desc else ASCENDING
                    column.is_sorting = True
            column.url = "?" + params.urlencode()
        else:
            column.is_sorting = False
:type bound_columns: list of BoundColumn
def mimeType(self):
    """The official MIME type for this document, guessed from the
    extensions of the :py:attr:`openxmllib.document.Document.filename`
    attribute, as opposed to the
    :py:attr:`openxmllib.document.Document.mime_type` attribute.

    :return: ``application/xxx`` for this file
    """
    # An explicit MIME type (supposed validated by the factory) wins.
    if self.mime_type:
        return self.mime_type
    # Otherwise match the filename against the known extension patterns;
    # yields None when nothing matches.
    candidates = (mime for pattern, mime in self._extpattern_to_mime.items()
                  if fnmatch.fnmatch(self.filename, pattern))
    return next(candidates, None)
The official MIME type for this document, guessed from the extensions of the :py:attr:`openxmllib.document.Document.filename` attribute, as opposed to the :py:attr:`openxmllib.document.Document.mime_type` attribute. :return: ``application/xxx`` for this file
def _make_dir(self, client_kwargs):
    """
    Make a directory.

    args:
        client_kwargs (dict): Client arguments.
    """
    # Translate Azure SDK errors into this library's exceptions.
    with _handle_azure_exception():
        # Directory: a directory_name key means "directory inside a share".
        if 'directory_name' in client_kwargs:
            return self.client.create_directory(
                share_name=client_kwargs['share_name'],
                directory_name=client_kwargs['directory_name'])
        # Share: no directory_name, so create the share itself.
        return self.client.create_share(**client_kwargs)
Make a directory. args: client_kwargs (dict): Client arguments.
def _bpe_to_words(sentence, delimiter='@@'): """Convert a sequence of bpe words into sentence.""" words = [] word = '' delimiter_len = len(delimiter) for subwords in sentence: if len(subwords) >= delimiter_len and subwords[-delimiter_len:] == delimiter: word += subwords[:-delimiter_len] else: word += subwords words.append(word) word = '' return words
Convert a sequence of bpe words into sentence.
def _set_stp(self, v, load=False):
    """
    Setter method for stp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/stp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_stp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_stp() directly.
    """
    # Auto-generated (pyangbind-style) setter: coerce `v` into the typed
    # YANG container class, raising a descriptive ValueError on mismatch.
    if hasattr(v, "_utype"):
        # Unwrap an already-typed value back to its base representation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=stp.stp, is_container='container', presence=False, yang_name="stp", rest_name="stp", parent=self, choice=(u'spanning-tree-mode', u'stp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """stp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=stp.stp, is_container='container', presence=False, yang_name="stp", rest_name="stp", parent=self, choice=(u'spanning-tree-mode', u'stp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='container', is_config=True)""",
        })
    self.__stp = t
    # Notify the enclosing object of the change when it supports it.
    if hasattr(self, '_set'):
        self._set()
Setter method for stp, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info/output/spanning_tree_info/stp (container) If this variable is read-only (config: false) in the source YANG file, then _set_stp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_stp() directly.
async def handle_player_update(self, state: "node.PlayerState"):
    """
    Handles player updates from lavalink.

    Parameters
    ----------
    state : websocket.PlayerState
    """
    # Position only advances while a track is actually playing, so a
    # position ahead of the last one we saw implies active playback.
    if state.position > self.position:
        self._is_playing = True
    # Track the latest reported position regardless.
    self.position = state.position
Handles player updates from lavalink. Parameters ---------- state : websocket.PlayerState
def _get_local_fields(self, model): "Return the names of all locally defined fields on the model class." local = [f for f in model._meta.fields] m2m = [f for f in model._meta.many_to_many] fields = local + m2m names = tuple([x.name for x in fields]) return { ':local': dict(list(zip(names, fields))), }
Return the names of all locally defined fields on the model class.
def _fit_full(self, X, n_components=6):
    """Fit the model by computing full SVD on X.

    Fixes the broken default arguments ``self=self, X=X`` (which can only
    evaluate if same-named globals happen to exist at definition time) and
    removes leftover debug ``print`` calls.

    Parameters:
        X: 2-D array of shape (n_samples, n_features); centered in place.
        n_components: int, 'mle', or a float in (0, 1) selecting the
            number of components (or the explained-variance threshold).

    Returns:
        (U, S, V) from the (economy) SVD of the centered data.
    """
    n_samples, n_features = X.shape

    # Center data (NOTE: mutates the caller's array, as before).
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_

    U, S, V = linalg.svd(X, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)

    components_ = V

    # Get variance explained by singular values
    explained_variance_ = (S ** 2) / (n_samples - 1)
    total_var = explained_variance_.sum()
    explained_variance_ratio_ = explained_variance_ / total_var
    singular_values_ = S.copy()  # Store the singular values.

    # Postprocess the number of components required
    if n_components == 'mle':
        n_components = \
            _infer_dimension_(explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # number of components for which the cumulated explained
        # variance percentage is superior to the desired threshold
        ratio_cumsum = stable_cumsum(explained_variance_ratio_)
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1

    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.

    self.n_samples_, self.n_features_ = n_samples, n_features
    self.components_ = components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = explained_variance_[:n_components]
    self.explained_variance_ratio_ = \
        explained_variance_ratio_[:n_components]
    self.singular_values_ = singular_values_[:n_components]

    return U, S, V
Fit the model by computing full SVD on X
def best(cls):
    """
    Select the best ScriptWriter for this environment.
    """
    # Jython reports os.name == 'java'; os._name then carries the real OS.
    on_windows = sys.platform == 'win32' or (
        os.name == 'java' and os._name == 'nt')
    return WindowsScriptWriter.best() if on_windows else cls
Select the best ScriptWriter for this environment.
def write(domain, key, value, type='string', user=None):
    '''
    Write a default to the system

    CLI Example:

    .. code-block:: bash

        salt '*' macdefaults.write com.apple.CrashReporter DialogType Server

        salt '*' macdefaults.write NSGlobalDomain ApplePersistence True type=bool

    domain
        The name of the domain to write to

    key
        The key of the given domain to write to

    value
        The value to write to the given key

    type
        The type of value to be written, valid types are string, data, int[eger],
        float, bool[ean], date, array, array-add, dict, dict-add

    user
        The user to write the defaults to
    '''
    # `defaults` expects literal TRUE/FALSE strings for boolean values;
    # Python booleans are converted here, string 'True'/'False' pass through.
    if type == 'bool' or type == 'boolean':
        if value is True:
            value = 'TRUE'
        elif value is False:
            value = 'FALSE'
    # NOTE(review): the command is built by string formatting; values
    # containing double quotes could break quoting — confirm inputs are
    # trusted (minion-local salt call).
    cmd = 'defaults write "{0}" "{1}" -{2} "{3}"'.format(domain, key, type, value)
    return __salt__['cmd.run_all'](cmd, runas=user)
Write a default to the system CLI Example: .. code-block:: bash salt '*' macdefaults.write com.apple.CrashReporter DialogType Server salt '*' macdefaults.write NSGlobalDomain ApplePersistence True type=bool domain The name of the domain to write to key The key of the given domain to write to value The value to write to the given key type The type of value to be written, valid types are string, data, int[eger], float, bool[ean], date, array, array-add, dict, dict-add user The user to write the defaults to
def post(self, url, postParameters=None, urlParameters=None):
    """
    Convenience method for requesting to google with proper cookies/params.

    Appends urlParameters to the query string, form-encodes
    postParameters, attaches the GoogleLogin auth header, and returns
    the response body text.
    """
    if urlParameters:
        url = url + "?" + self.getParameters(urlParameters)
    headers = {'Authorization': 'GoogleLogin auth=%s' % self.auth_token,
               'Content-Type': 'application/x-www-form-urlencoded'}
    # NOTE(review): self.postParameters is a method that shares its name
    # with the `postParameters` argument — confirm it form-encodes the dict.
    postString = self.postParameters(postParameters)
    req = requests.post(url, data=postString, headers=headers)
    return req.text
Convenience method for requesting to google with proper cookies/params.
def _add_history(self, redo_func, redo_kwargs, undo_func, undo_kwargs, **kwargs):
    """
    Add a new log (undo/redoable) to this history context.

    :parameter str redo_func: function to redo the action, must be a
        method of :class:`Bundle`
    :parameter dict redo_kwargs: kwargs to pass to the redo_func.  Each
        item must be serializable (float or str, not objects)
    :parameter str undo_func: function to undo the action, must be a
        method of :class:`Bundle`
    :parameter dict undo_kwargs: kwargs to pass to the undo_func.  Each
        item must be serializable (float or str, not objects)
    :parameter str history: label of the history parameter
    :raises ValueError: if the label for this history item is forbidden
        or already exists
    """
    # Silently skip logging when history tracking is disabled.
    if not self.history_enabled:
        return
    param = HistoryParameter(self, redo_func, redo_kwargs,
                             undo_func, undo_kwargs)
    # Use the provided label or generate a default 'hist##' one.
    metawargs = {'context': 'history',
                 'history': kwargs.get('history', self._default_label('hist', **{'context': 'history'}))}
    self._check_label(metawargs['history'])
    self._attach_params([param], **metawargs)
Add a new log (undo/redoable) to this history context :parameter str redo_func: function to redo the action, must be a method of :class:`Bundle` :parameter dict redo_kwargs: kwargs to pass to the redo_func. Each item must be serializable (float or str, not objects) :parameter str undo_func: function to undo the action, must be a method of :class:`Bundle` :parameter dict undo_kwargs: kwargs to pass to the undo_func. Each item must be serializable (float or str, not objects) :parameter str history: label of the history parameter :raises ValueError: if the label for this history item is forbidden or already exists
def is_docstring(tokens, previous_logical):
    """Return found docstring

    'A docstring is a string literal that occurs as the first
    statement in a module, function, class,'
    http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
    """
    # Scan for the first STRING token; anything other than INDENT before
    # it means this logical line is not a bare string literal.
    for token_type, text, start, _, _ in tokens:
        if token_type == tokenize.STRING:
            break
        elif token_type != tokenize.INDENT:
            return False
    else:
        # Loop exhausted without finding a STRING token.
        return False
    line = text.lstrip()
    # NOTE(review): `start` from the loop is overwritten here and
    # `start_triple` is unused — confirm both are intentional.
    start, start_triple = _find_first_of(line, START_DOCSTRING_TRIPLE)
    # Only a string that opens the body of a def/class (at offset 0 after
    # stripping) is considered a docstring; returns the raw token text.
    if (previous_logical.startswith("def ") or
            previous_logical.startswith("class ")):
        if start == 0:
            return text
Return found docstring 'A docstring is a string literal that occurs as the first statement in a module, function, class,' http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
def _query_by_installer(self, table_name):
    """
    Query for download data broken down by installer, for one day.

    :param table_name: table name to query against
    :type table_name: str
    :return: dict of download information by installer; keys are project
      name, values are a dict of installer names to dicts of installer
      version to download count.
    :rtype: dict
    """
    logger.info('Querying for downloads by installer in table %s',
                table_name)
    # Aggregate download counts per (project, installer name, version).
    q = "SELECT file.project, details.installer.name, " \
        "details.installer.version, COUNT(*) as dl_count " \
        "%s " \
        "%s " \
        "GROUP BY file.project, details.installer.name, " \
        "details.installer.version;" % (
            self._from_for_table(table_name),
            self._where_for_projects
        )
    res = self._run_query(q)
    # Pre-populated with one empty dict per tracked project.
    result = self._dict_for_projects()
    # iterate through results
    for row in res:
        # pointer to the per-project result dict
        proj = result[row['file_project']]
        # grab the name and version; change None to 'unknown'
        iname = row['details_installer_name']
        iver = row['details_installer_version']
        if iname not in proj:
            proj[iname] = {}
        if iver not in proj[iname]:
            proj[iname][iver] = 0
        proj[iname][iver] += int(row['dl_count'])
    return result
Query for download data broken down by installer, for one day. :param table_name: table name to query against :type table_name: str :return: dict of download information by installer; keys are project name, values are a dict of installer names to dicts of installer version to download count. :rtype: dict
def set_entry(key, value):
    """
    Set a configuration entry

    :param key: key name
    :param value: value for this key
    :raises KeyError: if key is not str
    """
    # isinstance instead of exact-type comparison so str subclasses are
    # accepted too.  KeyError is kept (not TypeError) because callers
    # catch it per the documented contract.
    if not isinstance(key, str):
        raise KeyError('key must be str')
    _config[key] = value
Set a configuration entry :param key: key name :param value: value for this key :raises KeyError: if key is not str
def _is_control(self, char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char in ['\t', '\n', '\r']: return False cat = unicodedata.category(char) if cat.startswith('C'): return True return False
Checks whether `chars` is a control character.
async def items(self, *, dc=None, watch=None, consistency=None):
    """Provides a listing of all prepared queries

    Parameters:
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
        watch (Blocking): Do a blocking query
        consistency (Consistency): Force consistency
    Returns:
        Collection: List of prepared queries

    This returns a list of prepared queries, which looks like::

        [
            {
                "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
                "Name": "my-query",
                "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
                "Token": "<hidden>",
                "Service": {
                    "Service": "redis",
                    "Failover": {
                        "NearestN": 3,
                        "Datacenters": ["dc1", "dc2"]
                    },
                    "OnlyPassing": False,
                    "Tags": ["master", "!experimental"]
                },
                "DNS": {
                    "TTL": timedelta(seconds=10)
                },
                "RaftIndex": {
                    "CreateIndex": 23,
                    "ModifyIndex": 42
                }
            }
        ]
    """
    # NOTE(review): `watch` and `consistency` are accepted but not passed
    # to the request here — confirm whether that is intentional.
    response = await self._api.get("/v1/query", params={"dc": dc})
    return response.body
Provides a listing of all prepared queries Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: Collection: List of prepared queries This returns a list of prepared queries, which looks like:: [ { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05", "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "<hidden>", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) }, "RaftIndex": { "CreateIndex": 23, "ModifyIndex": 42 } } ]
def decrypt(ciphertext_blob, encryption_context=None, grant_tokens=None,
            region=None, key=None, keyid=None, profile=None):
    '''
    Decrypt ciphertext.

    CLI example::

        salt myminion boto_kms.decrypt encrypted_ciphertext
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # Salt convention: return a result dict with either 'plaintext' on
    # success or 'error' on failure, never raise to the caller.
    r = {}
    try:
        plaintext = conn.decrypt(
            ciphertext_blob,
            encryption_context=encryption_context,
            grant_tokens=grant_tokens
        )
        r['plaintext'] = plaintext['Plaintext']
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
Decrypt ciphertext. CLI example:: salt myminion boto_kms.decrypt encrypted_ciphertext
def setup(app):
    """
    Setup for Sphinx extension.

    :param app: Sphinx application context.
    """
    app.info('adding remote-include directive...', nonl=True)
    app.add_directive('remote-include', RemoteInclude)
    app.info(' done')
    # Declare parallel-safety so Sphinx may process sources concurrently.
    metadata = {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
Setup for Sphinx extension. :param app: Sphinx application context.
def compute_rewards(self, scores):
    """
    Compute the "velocity" of (average distance between) the k+1 best
    scores. Return a list with those k velocities padded out with zeros
    so that the count remains the same.
    """
    # Top k+1 scores, highest first.
    top = sorted(scores, reverse=True)[:self.k + 1]
    # Pairwise gaps between consecutive best scores.
    velocities = [higher - lower for higher, lower in zip(top, top[1:])]
    # Zero-pad so the output length matches the input length.
    return velocities + [0] * (len(scores) - self.k)
Compute the "velocity" of (average distance between) the k+1 best scores. Return a list with those k velocities padded out with zeros so that the count remains the same.
def update(self, id=None, new_data={}, **kwargs):
    """Update an object on the server.

    Args:
        id: ID of the object to update (can be None if not required)
        new_data: the update data for the object
        **kwargs: Extra options to send to the server (e.g. sudo)

    Returns:
        dict: The new object data (*not* a RESTObject)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabUpdateError: If the server cannot perform the request
    """
    # NOTE: mutable default `new_data={}` is part of the published
    # signature; the dict is never mutated in place (a copy is made below
    # when transformation is needed), so the shared default is safe here.
    if id is None:
        path = self.path
    else:
        path = '%s/%s' % (self.path, id)

    self._check_missing_update_attrs(new_data)
    files = {}

    # We get the attributes that need some special transformation
    types = getattr(self, '_types', {})
    if types:
        # Duplicate data to avoid messing with what the user sent us
        new_data = new_data.copy()
        for attr_name, type_cls in types.items():
            if attr_name in new_data.keys():
                type_obj = type_cls(new_data[attr_name])

                # if the type if FileAttribute we need to pass the data as
                # file
                if issubclass(type_cls, g_types.FileAttribute):
                    k = type_obj.get_file_name(attr_name)
                    files[attr_name] = (k, new_data.pop(attr_name))
                else:
                    new_data[attr_name] = type_obj.get_for_api()

    # PUT or POST depending on the resource's configured update method.
    http_method = self._get_update_method()
    return http_method(path, post_data=new_data, files=files, **kwargs)
Update an object on the server. Args: id: ID of the object to update (can be None if not required) new_data: the update data for the object **kwargs: Extra options to send to the server (e.g. sudo) Returns: dict: The new object data (*not* a RESTObject) Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server cannot perform the request
def to_FIB(self, other):
    """ Creates a ForwardInfluenceBlanket object representing the
    intersection of this model with the other input model.

    Args:
        other: The GroundedFunctionNetwork object to compare this model to.

    Returns:
        A ForwardInfluenceBlanket object to use for model comparison.
    """
    if not isinstance(other, GroundedFunctionNetwork):
        raise TypeError(
            f"Expected GroundedFunctionNetwork, but got {type(other)}"
        )

    def shortname(var):
        # Strip the namespace prefix (up to '::') and the trailing
        # '_<index>' suffix from a full variable node name.
        return var[var.find("::") + 2 : var.rfind("_")]

    def shortname_vars(graph, shortname):
        # All full node names in `graph` containing this short name.
        return [v for v in graph.nodes() if shortname in v]

    # Short names of the variable nodes in each network.
    this_var_nodes = [
        shortname(n)
        for (n, d) in self.nodes(data=True)
        if d["type"] == "variable"
    ]
    other_var_nodes = [
        shortname(n)
        for (n, d) in other.nodes(data=True)
        if d["type"] == "variable"
    ]

    # Variables common to both models, expanded back to full node names.
    shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))
    full_shared_vars = {
        full_var
        for shared_var in shared_vars
        for full_var in shortname_vars(self, shared_var)
    }

    return ForwardInfluenceBlanket(self, full_shared_vars)
Creates a ForwardInfluenceBlanket object representing the intersection of this model with the other input model. Args: other: The GroundedFunctionNetwork object to compare this model to. Returns: A ForwardInfluenceBlanket object to use for model comparison.
def valueof(records, key):
    """Extract the value corresponding to the given key in all the dictionaries

    >>> bands = [{'name': 'Led Zeppelin', 'singer': 'Robert Plant', 'guitarist': 'Jimmy Page'},
    ...          {'name': 'Metallica', 'singer': 'James Hetfield', 'guitarist': 'Kirk Hammet'}]
    >>> valueof(bands, 'singer')
    ['Robert Plant', 'James Hetfield']
    """
    # A single dict is treated as a one-element sequence.
    if isinstance(records, dict):
        records = [records]
    # Return a real list (not a lazy `map` object) so the doctest above
    # holds on Python 3 as well as Python 2.
    return [record[key] for record in records]
Extract the value corresponding to the given key in all the dictionaries >>> bands = [{'name': 'Led Zeppelin', 'singer': 'Robert Plant', 'guitarist': 'Jimmy Page'}, ... {'name': 'Metallica', 'singer': 'James Hetfield', 'guitarist': 'Kirk Hammet'}] >>> valueof(bands, 'singer') ['Robert Plant', 'James Hetfield']
def _StartProfiling(self, configuration):
    """Starts profiling.

    Creates and starts one profiler per enabled profiling option in the
    configuration; each profiler is stored on the instance so
    the matching stop routine can later close it.

    Args:
      configuration (ProfilingConfiguration): profiling configuration.
    """
    # No configuration means profiling is disabled entirely.
    if not configuration:
        return

    if configuration.HaveProfileMemoryGuppy():
        self._guppy_memory_profiler = profilers.GuppyMemoryProfiler(
            self._name, configuration)
        self._guppy_memory_profiler.Start()

    if configuration.HaveProfileMemory():
        self._memory_profiler = profilers.MemoryProfiler(
            self._name, configuration)
        self._memory_profiler.Start()

    if configuration.HaveProfileProcessing():
        identifier = '{0:s}-processing'.format(self._name)
        self._processing_profiler = profilers.ProcessingProfiler(
            identifier, configuration)
        self._processing_profiler.Start()

    if configuration.HaveProfileSerializers():
        identifier = '{0:s}-serializers'.format(self._name)
        self._serializers_profiler = profilers.SerializersProfiler(
            identifier, configuration)
        self._serializers_profiler.Start()

    if configuration.HaveProfileStorage():
        self._storage_profiler = profilers.StorageProfiler(
            self._name, configuration)
        self._storage_profiler.Start()

    if configuration.HaveProfileTasks():
        self._tasks_profiler = profilers.TasksProfiler(self._name, configuration)
        self._tasks_profiler.Start()
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration.
def detect(self, filename, offset, standalone=False):
    """Verifies NTFS filesystem signature.

    Returns:
        bool: True if filesystem signature at offset 0x03 \
        matches 'NTFS    ', False otherwise.
    """
    # Read just the OEM-ID field of the candidate boot sector.
    sig = RawStruct(
        filename=filename,
        offset=offset + SIG_OFFSET,
        length=SIG_SIZE).data
    return sig == b"NTFS    "
Verifies NTFS filesystem signature. Returns: bool: True if filesystem signature at offset 0x03 \ matches 'NTFS ', False otherwise.
def build_from_issue_comment(gh_token, body):
    """Create a WebhookMetadata from a comment added to an issue.
    """
    # Only newly created or edited comments are actionable.
    if body["action"] not in ["created", "edited"]:
        return None
    github_con = Github(gh_token)
    repo = github_con.get_repo(body['repository']['full_name'])
    issue = repo.get_issue(body['issue']['number'])
    text = body['comment']['body']
    try:
        comment = issue.get_comment(body['comment']['id'])
    except UnknownObjectException:
        # If the comment has already disapeared, skip the command
        return None
    return WebhookMetadata(repo, issue, text, comment)
Create a WebhookMetadata from a comment added to an issue.
def upsert(self, dataset_identifier, payload, content_type="json"):
    '''
    Insert, update or delete data to/from an existing dataset. Currently
    supports json and csv file objects. See here for the upsert
    documentation:
    http://dev.socrata.com/publishers/upsert.html
    '''
    # Build the new-API resource path, then POST the payload to it.
    return self._perform_update(
        "post",
        _format_new_api_request(dataid=dataset_identifier,
                                content_type=content_type),
        payload)
Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html
def derived_sequence(graph):
    """ Compute the derived sequence of the graph G
    The intervals of G are collapsed into nodes, intervals of these nodes
    are built, and the process is repeated iteratively until we obtain a
    single node (if the graph is not irreducible)

    Returns a pair: the list of derived graphs (starting with `graph`
    itself) and the corresponding list of interval-head lists.
    """
    deriv_seq = [graph]
    deriv_interv = []
    single_node = False
    while not single_node:
        # Collapse each interval of the current graph into a single node.
        interv_graph, interv_heads = intervals(graph)
        deriv_interv.append(interv_heads)
        # Fixed point: the derived graph has shrunk to one node.
        single_node = len(interv_graph) == 1
        if not single_node:
            deriv_seq.append(interv_graph)
        graph = interv_graph
        # Recompute reverse post-order for the newly derived graph.
        graph.compute_rpo()
    return deriv_seq, deriv_interv
Compute the derived sequence of the graph G The intervals of G are collapsed into nodes, intervals of these nodes are built, and the process is repeated iteratively until we obtain a single node (if the graph is not irreducible)
def configure(self, *args, **kwargs):
    """Load configuration into the Application from many source kinds.

    Every positional argument is treated as a configuration source and
    applied in order, so later sources overwrite keys set by earlier
    ones.  For example:

    .. code:: python

        from application import default_config
        app.configure(default_config, os.environ, '.secrets')

    Sources are dispatched on their type:

    * ``str`` -- treated as a file or import path.  ``.json`` files go
      through :meth:`flask.Config.from_json`, ``.py``/``.cfg`` files
      through :meth:`flask.Config.from_pyfile`, and anything else is
      assumed to be an import path and handed to
      :meth:`flask.Config.from_object`.  A leading ``.`` makes the
      import relative to the current package; any other path is taken
      as absolute, which lets you source configuration stored in a
      module of your own package or of another package.
    * ``module`` or uninstantiated ``class`` -- passed to
      :meth:`flask.Config.from_object`.
    * dict-like (anything implementing ``items``) -- passed to
      :meth:`flask.Config.from_mapping`.

    Just like Flask's standard configuration, only uppercased keys are
    loaded into the config.

    Args:
        *args (object): Any object you want us to try to configure from,
            applied left to right.

    Keyword Args:
        whitelist_keys_from_mappings (bool): Should keys pulled from
            dict-like sources be filtered through a whitelist?  Very
            useful when passing in an entire OS ``environ`` where
            things like ``LESSPIPE`` should be omitted.  When no
            explicit whitelist is provided, the pre-existing config
            keys act as the whitelist.
        whitelist (list[str]): An explicit list of allowed keys, used
            instead of the pre-existing config keys when
            ``whitelist_keys_from_mappings`` is ``True``.

    Raises:
        TypeError: If a source's type cannot be handled.
    """
    filter_mapping_keys = kwargs.get('whitelist_keys_from_mappings', False)
    allowed_keys = kwargs.get('whitelist')

    for source in args:
        if isinstance(source, string_types):
            extension = splitext(source)[1]
            if extension == '.json':
                self._configure_from_json(source)
            elif extension in ('.cfg', '.py'):
                self._configure_from_pyfile(source)
            else:
                self._configure_from_module(source)
        elif isinstance(source, (types.ModuleType, type)):
            self._configure_from_object(source)
        elif hasattr(source, 'items'):
            # Anything exposing ``.items()`` is treated as a mapping;
            # this is the same duck-typing Flask uses under the hood.
            # @TODO: This doesn't handle the edge case of using a tuple
            # of two element tuples to config; but Flask does that. IMO,
            # if you do that, you're a monster.
            self._configure_from_mapping(
                source,
                whitelist_keys=filter_mapping_keys,
                whitelist=allowed_keys
            )
        else:
            raise TypeError("Could not determine a valid type for this"
                            " configuration object: `{}`!".format(source))

    # Configuration is fully loaded; notify registered callbacks.
    self._run_post_configure_callbacks(args)
Configure the Application through a varied number of sources of different types. This function chains multiple possible configuration methods together in order to just "make it work". You can pass multiple configuration sources in to the method and each one will be tried in a sane fashion. Later sources will override earlier sources if keys collide. For example: .. code:: python from application import default_config app.configure(default_config, os.environ, '.secrets') In the above example, values stored in ``default_config`` will be loaded first, then overwritten by those in ``os.environ``, and so on. An endless number of configuration sources may be passed. Configuration sources are type checked and processed according to the following rules: * ``string`` - if the source is a ``str``, we will assume it is a file or module that should be loaded. If the file ends in ``.json``, then :meth:`flask.Config.from_json` is used; if the file ends in ``.py`` or ``.cfg``, then :meth:`flask.Config.from_pyfile` is used; if the module has any other extension we assume it is an import path, import the module and pass that to :meth:`flask.Config.from_object`. See below for a few more semantics on module loading. * ``dict-like`` - if the source is ``dict-like``, then :meth:`flask.Config.from_mapping` will be used. ``dict-like`` is defined as anything implementing an ``items`` method that returns a tuple of ``key``, ``val``. * ``class`` or ``module`` - if the source is an uninstantiated ``class`` or ``module``, then :meth:`flask.Config.from_object` will be used. Just like Flask's standard configuration, only uppercased keys will be loaded into the config. If the item we are passed is a ``string`` and it is determined to be a possible Python module, then a leading ``.`` is relevant. If a leading ``.`` is provided, we assume that the module to import is located in the current package and operate as such; if it begins with anything else we assume the import path provided is absolute. 
This allows you to source configuration stored in a module in your package, or in another package. Args: *args (object): Any object you want us to try to configure from. Keyword Args: whitelist_keys_from_mappings (bool): Should we whitelist the keys we pull from mappings? Very useful if you're passing in an entire OS ``environ`` and you want to omit things like ``LESSPIPE``. If no whitelist is provided, we use the pre-existing config keys as a whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is ``True``, we will use that as our whitelist instead of pre-existing app config keys.
def _replace_year(d, year):
    """Return date *d* with its year replaced.

    Clamps Feb 29 to Feb 28 when the target year is not a leap year,
    instead of letting ``date.replace`` raise ValueError.
    """
    try:
        return d.replace(year=year)
    except ValueError:
        # only possible for Feb 29 -> non-leap year
        return d.replace(year=year, day=28)


def datedif(ctx, start_date, end_date, unit):
    """
    Calculates the number of days, months, or years between two dates.

    Args:
        ctx: evaluation context used by the conversion helpers.
        start_date: value convertible to a date; must not be after end_date.
        end_date: value convertible to a date.
        unit: one of 'y', 'm', 'd', 'md', 'ym', 'yd' (case-insensitive).

    Returns:
        int: the difference expressed in the requested unit.

    Raises:
        ValueError: if start_date is after end_date, or unit is invalid.
    """
    start_date = conversions.to_date(start_date, ctx)
    end_date = conversions.to_date(end_date, ctx)
    unit = conversions.to_string(unit, ctx).lower()

    if start_date > end_date:
        raise ValueError("Start date cannot be after end date")

    if unit == 'y':
        # whole years
        return relativedelta(end_date, start_date).years
    elif unit == 'm':
        # whole months
        delta = relativedelta(end_date, start_date)
        return 12 * delta.years + delta.months
    elif unit == 'd':
        # calendar days
        return (end_date - start_date).days
    elif unit == 'md':
        # days, ignoring months and years
        return relativedelta(end_date, start_date).days
    elif unit == 'ym':
        # months, ignoring years
        return relativedelta(end_date, start_date).months
    elif unit == 'yd':
        # days since the most recent anniversary of start_date that is on
        # or before end_date.  The previous implementation always used the
        # anniversary in end_date's year, which produced a negative count
        # when that anniversary hadn't happened yet, and crashed outright
        # for a Feb-29 start date with a non-leap end year.
        anniversary = _replace_year(start_date, end_date.year)
        if anniversary > end_date:
            anniversary = _replace_year(start_date, end_date.year - 1)
        return (end_date - anniversary).days

    raise ValueError("Invalid unit value: %s" % unit)
Calculates the number of days, months, or years between two dates.
def add_letter_to_axis(ax, let, col, x, y, height):
    """Draw the polygon letter 'let' on axis 'ax' at (x, y), scaled to 'height'.

    'let' holds one or two polygons; when two are given the second is
    drawn in white on top of the first (which is drawn in 'col').
    Raises ValueError for three or more polygons.
    """
    if len(let) == 1:
        fill_colors = [col]
    elif len(let) == 2:
        fill_colors = [col, "white"]
    else:
        raise ValueError("3 or more Polygons are not supported")

    for poly, fill in zip(let, fill_colors):
        # scale to the requested height, then move into position
        placed = affinity.translate(
            affinity.scale(poly, yfact=height, origin=(0, 0, 0)),
            xoff=x, yoff=y)
        ax.add_patch(PolygonPatch(placed, edgecolor=fill, facecolor=fill))
Add 'let' with position x,y and height height to matplotlib axis 'ax'.
def res_phi_pie(pst, logger=None, **kwargs):
    """plot current phi components as a pie chart.

    Parameters
    ----------
    pst : pyemu.Pst
    logger : pyemu.Logger
    kwargs : dict
        accepts 'include_zero' as a flag to include phi groups with
        only zero-weight obs (not sure why anyone would do this, but
        whatevs).  Also accepts 'ensemble' (residuals from an ensemble
        file), 'ax' (axis to draw on) and 'filename' (save the figure).

    Returns
    -------
    ax : matplotlib.Axis
    """
    if logger is None:
        logger = Logger('Default_Loggger.log', echo=False)
    logger.log("plot res_phi_pie")
    # load residuals (for their side effects / validation; the pie itself
    # is driven by the phi components below)
    if "ensemble" in kwargs:
        try:
            res = pst_utils.res_from_en(pst, kwargs['ensemble'])
        except Exception:
            logger.statement("res_1to1: could not find ensemble file {0}".format(kwargs['ensemble']))
    else:
        try:
            res = pst.res
        except Exception:
            logger.lraise("res_phi_pie: pst.res is None, couldn't find residuals file")
    phi = pst.phi
    phi_comps = pst.phi_components
    norm_phi_comps = pst.phi_components_normalized
    keys = list(phi_comps.keys())
    # Drop zero-phi groups unless the caller explicitly asks to keep them.
    # NOTE: the previous test ("include_zero" not in kwargs or
    # kwargs["include_zero"] is True) was inverted relative to the
    # documented flag: include_zero=True used to *drop* the zero groups.
    if not kwargs.get("include_zero", False):
        phi_comps = {k: phi_comps[k] for k in keys if phi_comps[k] > 0.0}
        keys = list(phi_comps.keys())
        norm_phi_comps = {k: norm_phi_comps[k] for k in keys}
    if "ax" in kwargs:
        ax = kwargs["ax"]
    else:
        # figsize is a module-level default
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(1, 1, 1, aspect="equal")
    labels = ["{0}\n{1:4G}\n({2:3.1f}%)".format(k, phi_comps[k],
                                                100. * (phi_comps[k] / phi))
              for k in keys]
    ax.pie([float(norm_phi_comps[k]) for k in keys], labels=labels)
    logger.log("plot res_phi_pie")
    if "filename" in kwargs:
        plt.savefig(kwargs["filename"])
    return ax
plot current phi components as a pie chart. Parameters ---------- pst : pyemu.Pst logger : pyemu.Logger kwargs : dict accepts 'include_zero' as a flag to include phi groups with only zero-weight obs (not sure why anyone would do this, but whatevs). Returns ------- ax : matplotlib.Axis
def atlas_renderer(layout, coverage_layer, output_path, file_format):
    """Extract composition using atlas generation.

    :param layout: QGIS Print Layout object used for producing the report.
    :type layout: qgis.core.QgsPrintLayout

    :param coverage_layer: Coverage Layer used for atlas map.
    :type coverage_layer: QgsMapLayer

    :param output_path: The output path of the product.
    :type output_path: str

    :param file_format: File format of map output, 'pdf' or 'png'.
    :type file_format: str

    :return: Generated output path(s).
    :rtype: str, list

    .. note:: NOTE(review): only the PDF branch is implemented below; a
       'png' file_format falls through and returns None implicitly —
       confirm whether PNG support was ever intended here.
    """
    # set the composer map to be atlas driven
    composer_map = layout_item(
        layout, 'impact-map', QgsLayoutItemMap)
    composer_map.setAtlasDriven(True)
    composer_map.setAtlasScalingMode(QgsLayoutItemMap.Auto)

    # setup the atlas composition and composition atlas mode
    atlas_composition = layout.atlas()
    atlas_composition.setCoverageLayer(coverage_layer)
    # layout custom property decides single-PDF vs one-PDF-per-feature
    atlas_on_single_file = layout.customProperty('singleFile', True)

    if file_format == QgisComposerComponentsMetadata.OutputFormat.PDF:
        # per-feature output names default to 'output_<feature number>'
        if not atlas_composition.filenameExpression():
            atlas_composition.setFilenameExpression(
                "'output_'||@atlas_featurenumber")
        output_directory = os.path.dirname(output_path)

        # we need to set the predefined scales for atlas
        project_scales = []
        scales = QgsProject.instance().readListEntry(
            "Scales", "/ScalesList")[0]
        has_project_scales = QgsProject.instance().readBoolEntry(
            "Scales", "/useProjectScales")[0]
        if not has_project_scales or not scales:
            # fall back to the application-level scale list ("a:b,c:d,...")
            scales_string = str(general_setting("Map/scales", PROJECT_SCALES))
            scales = scales_string.split(',')
        for scale in scales:
            # each entry looks like "label:denominator"; keep the denominator
            parts = scale.split(':')
            if len(parts) == 2:
                project_scales.append(float(parts[1]))
        layout.reportContext().setPredefinedScales(project_scales)

        settings = QgsLayoutExporter.PdfExportSettings()

        LOGGER.info('Exporting Atlas')
        atlas_output = []
        if atlas_on_single_file:
            res, error = QgsLayoutExporter.exportToPdf(
                atlas_composition, output_path, settings)
            atlas_output.append(output_path)
        else:
            # NOTE(review): the per-feature file paths produced by
            # exportToPdfs are never collected, so atlas_output stays
            # empty in this branch — verify callers don't rely on it.
            res, error = QgsLayoutExporter.exportToPdfs(
                atlas_composition, output_directory, settings)
        if res != QgsLayoutExporter.Success:
            # export failed; the error string comes from the exporter
            LOGGER.error(error)
        return atlas_output
Extract composition using atlas generation. :param layout: QGIS Print Layout object used for producing the report. :type layout: qgis.core.QgsPrintLayout :param coverage_layer: Coverage Layer used for atlas map. :type coverage_layer: QgsMapLayer :param output_path: The output path of the product. :type output_path: str :param file_format: File format of map output, 'pdf' or 'png'. :type file_format: str :return: Generated output path(s). :rtype: str, list