code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def provide_label(self):
    """The name and shape of each label provided by this iterator.

    Each entry pairs the label name with its shape, where the leading
    dimension is replaced by ``self.batch_size``.
    """
    shapes = []
    for name, arr in self.label:
        batched_shape = tuple([self.batch_size] + list(arr.shape[1:]))
        shapes.append((name, batched_shape))
    return shapes
The name and shape of label provided by this iterator
def cli_reload(self, event):
    """Experimental call to reload the component tree.

    Forces a reload of all components, re-runs initialization and fires
    a component-graph event so the debugger view refreshes.
    """
    self.log('Reloading all components.')
    self.update_components(forcereload=True)
    initialize()
    # Imported lazily, mirroring the original, to avoid import cycles
    # at module load time.
    from hfos.debugger import cli_compgraph
    self.fireEvent(cli_compgraph())
Experimental call to reload the component tree
def get_records_with_attachments(attachment_table, rel_object_field="REL_OBJECTID"):
    """Return a list of ObjectIDs (as strings) for rows in the attachment table.

    :param attachment_table: path to the attachment table to scan.
    :param rel_object_field: field holding the related ObjectID.
    :raises Exception: if ArcPy is not available.
    """
    if not arcpyFound:
        raise Exception("ArcPy is required to use this function")
    OIDs = []
    with arcpy.da.SearchCursor(attachment_table, [rel_object_field]) as rows:
        for row in rows:
            # Deduplicate while preserving first-seen order.
            if not str(row[0]) in OIDs:
                OIDs.append("%s" % str(row[0]))
            del row
    del rows
    return OIDs
returns a list of ObjectIDs for rows in the attachment table
def create_objective_bank(self, *args, **kwargs):
    """Pass through to provider ObjectiveBankAdminSession.create_objective_bank.

    Wraps the provider's result in an ``ObjectiveBank`` bound to this
    manager, runtime and proxy.
    """
    return ObjectiveBank(
        self._provider_manager,
        self._get_provider_session(
            'objective_bank_admin_session').create_objective_bank(*args, **kwargs),
        self._runtime,
        self._proxy)
Pass through to provider ObjectiveBankAdminSession.create_objective_bank
def iterintervals(self, n=2):
    """Iterate over groups of `n` consecutive measurement points in the time series.

    Yields tuples of ``n`` consecutive elements (a sliding window of
    width ``n`` with step 1).
    """
    streams = tee(iter(self), n)
    # Advance the i-th copy by i elements so that zip() pairs each
    # element with its n-1 successors.
    for stream_index, stream in enumerate(streams):
        for i in range(stream_index):
            next(stream)
    for intervals in zip(*streams):
        yield intervals
Iterate over groups of `n` consecutive measurement points in the time series.
def filter_create(self, phrase, context, irreversible=False, whole_word=True, expires_in=None):
    """Create a new keyword filter.

    `phrase` is the phrase to filter out; `context` lists where to apply it
    (valid: 'home', 'notifications', 'public', 'thread'). `irreversible`
    makes the server drop matching statuses; `whole_word` restricts matches
    to word boundaries; `expires_in` is a lifetime in seconds.

    Returns the filter dict of the newly created filter.
    :raises MastodonIllegalArgumentError: on an invalid context value.
    """
    # locals() captures all parameters for the request payload.
    params = self.__generate_params(locals())
    for context_val in context:
        if not context_val in ['home', 'notifications', 'public', 'thread']:
            raise MastodonIllegalArgumentError('Invalid filter context.')
    return self.__api_request('POST', '/api/v1/filters', params)
Creates a new keyword filter. `phrase` is the phrase that should be filtered out, `context` specifies from where to filter the keywords. Valid contexts are 'home', 'notifications', 'public' and 'thread'. Set `irreversible` to True if you want the filter to just delete statuses server side. This works only for the 'home' and 'notifications' contexts. Set `whole_word` to False if you want to allow filter matches to start or end within a word, not only at word boundaries. Set `expires_in` to specify for how many seconds the filter should be kept around. Returns the `filter dict`_ of the newly created filter.
def parse(self, type, data):
    """Parse text as a registered format.

    :param type: The unique name of the format.
    :param data: The text to parse as the format.
    :raises NotImplementedError: if no parser is registered for `type`.
    """
    try:
        return self.registered_formats[type]['parser'](data)
    except KeyError:
        raise NotImplementedError("No parser found for "
                                  "type '{type}'".format(type=type))
Parse text as a format. :param type: The unique name of the format :param data: The text to parse as the format
def add_view(self, *args, **kwargs):
    """Redirect to the change view if the singleton instance exists.

    If no (single) instance exists yet, fall back to the normal add view
    with a ``singleton`` flag in the template context.
    """
    try:
        singleton = self.model.objects.get()
    except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
        kwargs.setdefault("extra_context", {})
        kwargs["extra_context"]["singleton"] = True
        # args[0] is the request object passed to the admin view.
        response = super(SingletonAdmin, self).add_view(*args, **kwargs)
        return self.handle_save(args[0], response)
    return redirect(admin_url(self.model, "change", singleton.id))
Redirect to the change view if the singleton instance exists.
def imageFields(self):
    """Return field names of image columns.

    The result of the JVM call is cached on first access.

    :return: a list of field names.

    .. versionadded:: 2.3.0
    """
    if self._imageFields is None:
        ctx = SparkContext._active_spark_context
        self._imageFields = list(
            ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
    return self._imageFields
Returns field names of image columns. :return: a list of field names. .. versionadded:: 2.3.0
def get_tensor_num_entries(self, tensor_name, partial_layout=None,
                           mesh_dimension_to_size=None):
    """The number of entries in a tensor.

    If partial_layout is specified, then mesh_dimension_to_size must also
    be; in that case the per-device entry count is returned.

    Args:
      tensor_name: a string, name of a tensor in the graph.
      partial_layout: an optional {string: string}, from MTF dimension name
        to mesh dimension name.
      mesh_dimension_to_size: an optional {string: int}, from mesh dimension
        name to size.
    Returns:
      an integer
    """
    shape = self.get_tensor_shape(tensor_name)
    num_entries = 1
    for dim in shape.dims:
        num_entries = num_entries * dim.value
    if not partial_layout:
        return num_entries
    for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):
        if mtf_dimension_name not in partial_layout:
            continue
        mesh_dimension_name = partial_layout[mtf_dimension_name]
        mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
        # Each split dimension divides the per-device entry count,
        # rounding up for uneven splits.
        num_entries = int(math.ceil(num_entries / mesh_dimension_size))
    return num_entries
The number of entries in a tensor. If partial_layout is specified, then mesh_dimension_to_size must also be. In this case, the number of entries on a single device is returned. Args: tensor_name: a string, name of a tensor in the graph. partial_layout: an optional {string: string}, from MTF dimension name to mesh dimension name. mesh_dimension_to_size: an optional {string: int}, from mesh dimension name to size. Returns: an integer
def _get_folds(n_rows, n_folds, use_stored):
    """Get the used CV folds.

    If ``use_stored`` is a path to a JSON file, the folds are loaded from it
    (validated against ``n_rows``/``n_folds``); otherwise a fresh KFold
    split is generated.

    Returns a list of ``("fold_<i>", train_indices, test_indices)`` tuples.
    """
    if use_stored is not None:
        with open(os.path.expanduser(use_stored)) as json_file:
            json_data = json.load(json_file)
            if json_data['N_rows'] != n_rows:
                raise Exception('N_rows from folds doesnt match the number '
                                'of rows of X_seq, X_feat, y')
            if json_data['N_folds'] != n_folds:
                raise Exception('n_folds dont match',
                                json_data['N_folds'], n_folds)
            kf = [(np.array(train), np.array(test))
                  for (train, test) in json_data['folds']]
    else:
        # KFold only needs the row count, hence the dummy zero array.
        kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1)))
    i = 1
    folds = []
    for train, test in kf:
        fold = "fold_" + str(i)
        folds.append((fold, train, test))
        i = i + 1
    return folds
Get the used CV folds
def cpu_count(logical=True):
    """Return system CPU count.

    :param logical: if True, count logical cores via the stdlib; if False,
        count physical cores, which requires psutil (imported lazily).
    """
    if logical:
        from multiprocessing import cpu_count
        ncpu = cpu_count()
    else:
        import psutil
        ncpu = psutil.cpu_count(logical=False)
    return ncpu
Return system CPU count
def set_codes(self, codes, reject=False):
    """Set the accepted or rejected codes list.

    :param codes: A list of the response codes.
    :param reject: If True, the listed codes will be rejected and the
        conversion will format as "-"; if False, only the listed codes
        will be accepted and all others format as "-".
    """
    self.codes = set(codes)
    self.reject = reject
Set the accepted or rejected codes codes list. :param codes: A list of the response codes. :param reject: If True, the listed codes will be rejected, and the conversion will format as "-"; if False, only the listed codes will be accepted, and the conversion will format as "-" for all the others.
def initialize_unbounded(obj, dimensions, key):
    """Initializes any DynamicMaps in unbounded mode.

    Selects the given key on every DynamicMap in ``obj``; a KeyError from
    an unmatched key is deliberately ignored.
    """
    select = dict(zip([d.name for d in dimensions], key))
    try:
        obj.select([DynamicMap], **select)
    except KeyError:
        pass
Initializes any DynamicMaps in unbounded mode.
def runcmds_plus_hooks(self, cmds: List[str]) -> bool:
    """Convenience method to run multiple commands by onecmd_plus_hooks.

    Adds ``cmds`` to the command queue and processes the queue until
    completion or until a command signals that the application should stop.
    The queue and script-dir stack are always cleared afterwards.

    :param cmds: command strings suitable for onecmd_plus_hooks.
    :return: True implies the entire application should exit.
    """
    stop = False
    self.cmdqueue = list(cmds) + self.cmdqueue
    try:
        while self.cmdqueue and not stop:
            line = self.cmdqueue.pop(0)
            if self.echo and line != 'eos':
                self.poutput('{}{}'.format(self.prompt, line))
            stop = self.onecmd_plus_hooks(line)
    finally:
        # Clear out the command queue and script directory stack, just in
        # case we stopped early.
        self.cmdqueue = []
        self._script_dir = []
    return stop
Convenience method to run multiple commands by onecmd_plus_hooks. This method adds the given cmds to the command queue and processes the queue until completion or an error causes it to abort. Scripts that are loaded will have their commands added to the queue. Scripts may even load other scripts recursively. This means, however, that you should not use this method if there is a running cmdloop or some other event-loop. This method is only intended to be used in "one-off" scenarios. NOTE: You may need this method even if you only have one command. If that command is a load, then you will need this command to fully process all the subsequent commands that are loaded from the script file. This is an improvement over onecmd_plus_hooks, which expects to be used inside of a command loop which does the processing of loaded commands. Example: cmd_obj.runcmds_plus_hooks(['load myscript.txt']) :param cmds: command strings suitable for onecmd_plus_hooks. :return: True implies the entire application should exit.
def args(self):
    """Return all attributes as an arguments tuple."""
    return (self.base, self.item, self.leng, self.refs,
            self.both, self.kind, self.type)
Return all attributes as arguments tuple.
def energy_upperbound(self, spins):
    """A formula for an upper bound on the energy of Theta with spins fixed.

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.

    Returns:
        Formula that upper bounds the energy with spins fixed.
    """
    subtheta = self.theta.copy()
    subtheta.fix_variables(spins)
    trees = self._trees
    if not trees:
        # With no trees left, fixing spins must have removed all biases;
        # only the constant offset remains.
        assert not subtheta.linear and not subtheta.quadratic
        return subtheta.offset
    energy = Plus(self.message_upperbound(trees, {}, subtheta),
                  subtheta.offset)
    return energy
A formula for an upper bound on the energy of Theta with spins fixed. Args: spins (dict): Spin values for a subset of the variables in Theta. Returns: Formula that upper bounds the energy with spins fixed.
def load_emacs_open_in_editor_bindings():
    """Pressing C-X C-E will open the buffer in an external editor.

    Returns a Registry with the single binding installed (Emacs mode only,
    and only when there is no active selection).
    """
    registry = Registry()
    registry.add_binding(
        Keys.ControlX, Keys.ControlE,
        filter=EmacsMode() & ~HasSelection())(
            get_by_name('edit-and-execute-command'))
    return registry
Pressing C-X C-E will open the buffer in an external editor.
def sync_focus(self, *_):
    """Focus the focused window from the pymux arrangement.

    Guard clauses skip focusing while a popup, confirmation prompt,
    command prompt or command mode is active, or when no windows exist.
    """
    if self.display_popup:
        self.app.layout.focus(self.layout_manager.popup_dialog)
        return
    if self.confirm_text:
        return
    if self.prompt_command:
        return
    if self.command_mode:
        return
    if not self.pymux.arrangement.windows:
        return
    pane = self.pymux.arrangement.get_active_pane()
    self.app.layout.focus(pane.terminal)
Focus the focused window from the pymux arrangement.
def describe_db_subnet_groups(name=None, filters=None, jmespath='DBSubnetGroups',
                              region=None, key=None, keyid=None, profile=None):
    """Return a detailed listing of some, or all, DB Subnet Groups visible in
    the current scope. Arbitrary subelements or subsections of the returned
    dataset can be selected by passing in a valid JMESPath filter as well.

    CLI example::

        salt myminion boto_rds.describe_db_subnet_groups
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    pag = conn.get_paginator('describe_db_subnet_groups')
    args = {}
    # Only pass optional arguments that were actually supplied.
    if name:
        args['DBSubnetGroupName'] = name
    if filters:
        args['Filters'] = filters
    pit = pag.paginate(**args)
    if jmespath:
        pit = pit.search(jmespath)
    return list(pit)
Return a detailed listing of some, or all, DB Subnet Groups visible in the current scope. Arbitrary subelements or subsections of the returned dataset can be selected by passing in a valid JMESPath filter as well. CLI example:: salt myminion boto_rds.describe_db_subnet_groups
def get_link(self, task_id):
    """Get a ``LinkOfTrust`` by task id.

    Args:
        task_id (str): the task id to find.

    Returns:
        LinkOfTrust: the link matching the task id.

    Raises:
        CoTError: if no single ``LinkOfTrust`` matches.
    """
    links = [x for x in self.links if x.task_id == task_id]
    if len(links) != 1:
        raise CoTError("No single Link matches task_id {}!\n{}".format(
            task_id, self.dependent_task_ids()))
    return links[0]
Get a ``LinkOfTrust`` by task id. Args: task_id (str): the task id to find. Returns: LinkOfTrust: the link matching the task id. Raises: CoTError: if no ``LinkOfTrust`` matches.
def is_ready(self):
    """Indicates whether this job is ready for querying.

    :return: ``True`` if the job is ready, ``False`` if not.
    :rtype: ``boolean``
    """
    response = self.get()
    # 204 means the job exists but has produced no content yet.
    if response.status == 204:
        return False
    self._state = self.read(response)
    ready = self._state.content['dispatchState'] not in ['QUEUED', 'PARSING']
    return ready
Indicates whether this job is ready for querying. :return: ``True`` if the job is ready, ``False`` if not. :rtype: ``boolean``
def to_trip(
        self,
        smooth,
        smooth_strategy,
        smooth_noise,
        seg,
        seg_eps,
        seg_min_time,
        simplify,
        simplify_max_dist_error,
        simplify_max_speed_error
):
    """In-place transformation of a track into a trip.

    Applies, in order: noise removal, optional smoothing, optional
    spatio-temporal segmentation, optional simplification. Metrics are
    recomputed before each step and once more at the end.

    Args:
        smooth: whether to smooth, using smooth_strategy and smooth_noise.
        seg: whether to segment, using seg_eps and seg_min_time.
        simplify: whether to simplify, using the max dist/speed errors.
    Returns:
        This Track instance
    """
    self.compute_metrics()
    self.remove_noise()
    # Removed leftover debug print of (smooth, seg, simplify).
    if smooth:
        self.compute_metrics()
        self.smooth(smooth_strategy, smooth_noise)
    if seg:
        self.compute_metrics()
        self.segment(seg_eps, seg_min_time)
    if simplify:
        self.compute_metrics()
        self.simplify(0, simplify_max_dist_error, simplify_max_speed_error)
    self.compute_metrics()
    return self
In-place transformation of a track into a trip. A trip is a more accurate depiction of reality than a track. For a track to become a trip it needs to go through the following steps: + noise removal + smoothing + spatio-temporal segmentation + simplification At the end of these steps we have a less noisy track that has fewer points, but that holds the same information. It's required that each segment has its metrics calculated or has been preprocessed. Args: smooth: Whether to smooth the track, using smooth_strategy and smooth_noise. seg: Whether to segment the track, using seg_eps and seg_min_time. simplify: Whether to simplify the track, using simplify_max_dist_error and simplify_max_speed_error. Returns: This Track instance
def coerce(value):
    """Turn a value into a ListCell.

    A ListCell passes through unchanged, a plain list is wrapped, and any
    other value becomes a single-element ListCell.
    """
    if isinstance(value, ListCell):
        return value
    elif isinstance(value, list):
        return ListCell(value)
    else:
        return ListCell([value])
Turns a value into a list
def relpath_to_site(lang, target_lang):
    """Get the relative path from the siteurl of `lang` to that of `target_lang`.

    The result is cached in _SITES_RELPATH_DB.
    """
    path = _SITES_RELPATH_DB.get((lang, target_lang), None)
    if path is None:
        siteurl = _SITE_DB.get(lang, _MAIN_SITEURL)
        target_siteurl = _SITE_DB.get(target_lang, _MAIN_SITEURL)
        path = posixpath.relpath(get_site_path(target_siteurl),
                                 get_site_path(siteurl))
        _SITES_RELPATH_DB[(lang, target_lang)] = path
    return path
Get the relative path from the siteurl of `lang` to the siteurl of `target_lang`; the output is cached in _SITES_RELPATH_DB
def command(cls, command, stdin=None, shell=False):
    """Runs specified command.

    The command can be fed data on stdin via ``stdin``, and treated as a
    shell command via ``shell`` (see subprocess.Popen). LC_ALL is forced
    to "C" for the duration of the call and restored afterwards.

    :returns: Run() instance with resulting data
    """
    if not shell and isinstance(command, str):
        command = cls.shlex.split(command)
    collate_original = None
    try:
        collate_original = cls.os.environ['LC_ALL']
    except KeyError:
        pass
    cls.os.environ['LC_ALL'] = "C"
    try:
        process = cls.subprocess.Popen(command,
                                       stdout=cls.subprocess.PIPE,
                                       stderr=cls.subprocess.PIPE,
                                       stdin=cls.subprocess.PIPE,
                                       shell=shell)
        (stdout, stderr) = process.communicate(stdin)
    finally:
        # Compare against None so an originally-empty LC_ALL ("") is
        # restored rather than deleted (the old truthiness check lost it).
        if collate_original is not None:
            cls.os.environ['LC_ALL'] = collate_original
        else:
            del cls.os.environ['LC_ALL']
    return cls(stdout, stderr, stdin, process.returncode, command)
Runs specified command. The command can be fed with data on stdin with parameter ``stdin``. The command can also be treated as a shell command with parameter ``shell``. Please refer to subprocess.Popen on how does this stuff work :returns: Run() instance with resulting data
def activate_boost_by_name(self, zone_name, target_temperature, num_hours=1):
    """Activate boost by the name of the zone.

    :raises RuntimeError: if no zone with that name exists.
    """
    zone = self.get_zone(zone_name)
    if zone is None:
        raise RuntimeError("Unknown zone")
    return self.activate_boost_by_id(zone["zoneId"], target_temperature,
                                     num_hours)
Activate boost by the name of the zone
def _check_arg_length(fname, args, max_fname_arg_count, compat_args): if max_fname_arg_count < 0: raise ValueError("'max_fname_arg_count' must be non-negative") if len(args) > len(compat_args): max_arg_count = len(compat_args) + max_fname_arg_count actual_arg_count = len(args) + max_fname_arg_count argument = 'argument' if max_arg_count == 1 else 'arguments' raise TypeError( "{fname}() takes at most {max_arg} {argument} " "({given_arg} given)".format( fname=fname, max_arg=max_arg_count, argument=argument, given_arg=actual_arg_count))
Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments.
def getPos(position, pagesize):
    """Parse a "x y" position string into a pair of page coordinates.

    :raises Exception: if the string does not contain exactly two tokens.
    """
    position = str(position).split()
    if len(position) != 2:
        raise Exception("position not defined right way")
    x, y = [getSize(pos) for pos in position]
    return getCoords(x, y, None, None, pagesize)
Pair of coordinates
def run_job(self, job=None):
    """Run a job, either passed in or from the queue.

    A job is a tuple of function and optional args; keyword arguments can
    be pre-bound with functools.partial. Returns whatever the function
    returns (a string to be sent by the gateway protocol). Slow handlers
    (>0.1s) are logged for diagnostics.
    """
    if job is None:
        if not self.queue:
            return None
        job = self.queue.popleft()
    start = timer()
    func, args = job
    reply = func(*args)
    end = timer()
    if end - start > 0.1:
        _LOGGER.debug(
            'Handle queue with call %s(%s) took %.3f seconds',
            func, args, end - start)
    return reply
Run a job, either passed in or from the queue. A job is a tuple of function and optional args. Keyword arguments can be passed via use of functools.partial. The job should return a string that should be sent by the gateway protocol. The function will be called with the arguments and the result will be returned.
def hvenvup(package, directory):
    """Install a new version of a package in the hitch venv.

    Uninstalls the existing package then installs the local build from
    ``directory`` relative to the project root.
    """
    pip = Command(DIR.gen.joinpath("hvenv", "bin", "pip"))
    pip("uninstall", package, "-y").run()
    pip("install", DIR.project.joinpath(directory).abspath()).run()
Install a new version of a package in the hitch venv.
def save(self):
    """Save the current property list representation to the original file."""
    with open(self.filename, 'w') as plist_file:
        plist_file.write(str(self.soup))
Save current property list representation to the original file.
def split(self):
    """Split the graph into sub-graphs of connected objects.

    Yields shallow copies of this Graph, one per connected group, each
    carrying its own metadata/edges filtered to the group and an index.
    """
    self._annotate_groups()
    index = 0
    for group in range(self._max_group):
        subgraph = copy(self)
        # Shallow-copy mutable members so filtering one subgraph does not
        # disturb the others.
        subgraph.metadata = self.metadata[:]
        subgraph.edges = self.edges.copy()
        if subgraph._filter_group(group):
            subgraph.total_size = sum([x.size for x in subgraph.metadata])
            subgraph.index = index
            index += 1
            yield subgraph
Split the graph into sub-graphs. Only connected objects belong to the same graph. `split` yields copies of the Graph object. Shallow copies are used that only replicate the meta-information, but share the same object list ``self.objects``. >>> from pympler.refgraph import ReferenceGraph >>> a = 42 >>> b = 'spam' >>> c = {a: b} >>> t = (1,2,3) >>> rg = ReferenceGraph([a,b,c,t]) >>> for subgraph in rg.split(): ... print(subgraph.index) 0 1
def delete_row(self, index):
    """Deletes the row from the worksheet at the specified index.

    :param index: Index of a row for deletion (1-based).
    :type index: int
    """
    body = {
        "requests": [{
            "deleteDimension": {
                "range": {
                    "sheetId": self.id,
                    "dimension": "ROWS",
                    # The API range is 0-based and end-exclusive.
                    "startIndex": index - 1,
                    "endIndex": index
                }
            }
        }]
    }
    return self.spreadsheet.batch_update(body)
Deletes the row from the worksheet at the specified index. :param index: Index of a row for deletion. :type index: int
def get_other_keys(self, key, including_current=False):
    """Return the list of other keys mapped to the same value as `key`.

    @param key: key for which other keys should be returned.
    @param including_current: if True, `key` itself also appears in the list.
    """
    other_keys = []
    if key in self:
        # Keys are grouped per key-type under str(type(key)).
        other_keys.extend(self.__dict__[str(type(key))][key])
        if not including_current:
            other_keys.remove(key)
    return other_keys
Returns list of other keys that are mapped to the same value as specified key. @param key - key for which other keys should be returned. @param including_current if set to True - key will also appear on this list.
def get_secret_registration_block_by_secrethash(
        self,
        secrethash: SecretHash,
        block_identifier: BlockSpecification,
) -> Optional[BlockNumber]:
    """Return the block number at which the secret for `secrethash` was
    registered, or None if the secret was never registered.
    """
    result = self.proxy.contract.functions.getSecretRevealBlockHeight(
        secrethash,
    ).call(block_identifier=block_identifier)
    # The contract returns 0 for "never registered".
    if result == 0:
        return None
    return result
Return the block number at which the secret for `secrethash` was registered, None if the secret was never registered.
def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
    """Wrapper for detecting multiple images.

    Parameters:
    ----------
    im_list : list of str
        image path or list of image paths
    root_dir : str
        directory of input images, optional if image path already has full
        directory information
    extension : str
        image extension, e.g. ".jpg", optional

    Returns:
    ----------
    list of detection results in format [det0, det1...], det is in format
    np.array([id, score, xmin, ymin, xmax, ymax]...)
    """
    test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
    test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
                        is_train=False)
    return self.detect_iter(test_iter, show_timer)
wrapper for detecting multiple images Parameters: ---------- im_list : list of str image path or list of image paths root_dir : str directory of input images, optional if image path already has full directory information extension : str image extension, eg. ".jpg", optional Returns: ---------- list of detection results in format [det0, det1...], det is in format np.array([id, score, xmin, ymin, xmax, ymax]...)
def arbitrary_object_to_string(a_thing):
    """Take a Python object of some sort and convert it into a human-readable
    string, trying a series of strategies in order: None/str/bytes passthrough,
    a ``to_str`` method, recursion on ``a_type``, a known-type mapping, a
    dotted "module.name" form, a bare ``__name__``, and finally ``str()``.
    """
    if a_thing is None:
        return ''
    if isinstance(a_thing, six.string_types):
        return a_thing
    if six.PY3 and isinstance(a_thing, six.binary_type):
        try:
            return a_thing.decode('utf-8')
        except UnicodeDecodeError:
            pass
    try:
        return a_thing.to_str()
    except (AttributeError, KeyError, TypeError):
        pass
    try:
        return arbitrary_object_to_string(a_thing.a_type)
    except (AttributeError, KeyError, TypeError):
        pass
    try:
        return known_mapping_type_to_str[a_thing]
    except (KeyError, TypeError):
        pass
    try:
        if a_thing.__module__ not in ('__builtin__', 'builtins', 'exceptions'):
            if a_thing.__module__ == "__main__":
                # Derive a dotted module name from the __main__ file path.
                module_name = (
                    sys.modules['__main__']
                    .__file__[:-3]
                    .replace('/', '.')
                    .strip('.')
                )
            else:
                module_name = a_thing.__module__
            return "%s.%s" % (module_name, a_thing.__name__)
    except AttributeError:
        pass
    try:
        return a_thing.__name__
    except AttributeError:
        pass
    return str(a_thing)
Take a Python object of some sort and convert it into a human-readable string. This function is used extensively to convert things like "subject" into "subject_key", function -> function_key, etc.
def chdir(new_dir):
    """Context manager to temporarily change to a new directory (created if
    needed), restoring the original working directory on exit.

    Stolen from bcbio.
    """
    cur_dir = os.getcwd()
    _mkdir(new_dir)
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(cur_dir)
stolen from bcbio. Context manager to temporarily change to a new directory. http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
def check_if_not(x, *checks, **params):
    """Run checks only if parameters are not equal to a specified value.

    Parameters
    ----------
    x : excepted value
        Checks not run if parameters equal x
    checks : function
        Unnamed arguments, check functions to be run
    params : object
        Named arguments, parameters to be checked
    """
    for p in params:
        if params[p] is not x and params[p] != x:
            # Plain loop instead of a side-effect list comprehension.
            for check in checks:
                check(**{p: params[p]})
Run checks only if parameters are not equal to a specified value Parameters ---------- x : excepted value Checks not run if parameters equal x checks : function Unnamed arguments, check functions to be run params : object Named arguments, parameters to be checked Raises ------ ValueError : unacceptable choice of parameters
def spin_gen_op(oper, gauge):
    """Generate the generic spin matrices for the system.

    Populates `oper` in place with the generators 'O', their daggers 'O_d',
    the products 'O_dO', and the spin-flip-and-hop operator 'Sfliphop'.
    """
    slaves = len(gauge)
    oper['O'] = np.array([spin_gen(slaves, i, c) for i, c in enumerate(gauge)])
    # Dagger: transpose the matrix axes of each generator.
    oper['O_d'] = np.transpose(oper['O'], (0, 2, 1))
    oper['O_dO'] = np.einsum('...ij,...jk->...ik', oper['O_d'], oper['O'])
    oper['Sfliphop'] = spinflipandhop(slaves)
Generates the generic spin matrices for the system
def circulation(self):
    """Determine which axes the Orbit circulates around.

    Checks whether the sign of the angular momentum about each axis ever
    flips (or the magnitude drops to ~0); a 1 in the result means steady
    circulation about that axis, 0 means none (e.g. a box orbit is [0,0,0]).

    Returns
    -------
    circulation : :class:`numpy.ndarray`
        1D array of ints for a single orbit, shape (3, norbits) otherwise.
    """
    L = self.angular_momentum()
    if L.ndim == 2:
        # Promote a single orbit to a trailing orbit axis for uniformity.
        single_orbit = True
        L = L[..., None]
    else:
        single_orbit = False
    ndim, ntimes, norbits = L.shape
    L0 = L[:, 0]
    circ = np.ones((ndim, norbits))
    for ii in range(ndim):
        cnd = (np.sign(L0[ii]) != np.sign(L[ii, 1:])) | \
              (np.abs(L[ii, 1:]).value < 1E-13)
        ix = np.atleast_1d(np.any(cnd, axis=0))
        circ[ii, ix] = 0
    circ = circ.astype(int)
    if single_orbit:
        return circ.reshape((ndim,))
    else:
        return circ
Determine which axes the Orbit circulates around by checking whether there is a change of sign of the angular momentum about an axis. Returns a 2D array with ``ndim`` integers per orbit point. If a box orbit, all integers will be 0. A 1 indicates circulation about the corresponding axis. TODO: clockwise / counterclockwise? For example, for a single 3D orbit: - Box and boxlet = [0,0,0] - z-axis (short-axis) tube = [0,0,1] - x-axis (long-axis) tube = [1,0,0] Returns ------- circulation : :class:`numpy.ndarray` An array that specifies whether there is circulation about any of the axes of the input orbit. For a single orbit, will return a 1D array, but for multiple orbits, the shape will be ``(3, norbits)``.
def getSampleTypeTitles(self):
    """Return a list of sample type titles, or [""] if there are none.

    Bug fix: the original used ``map`` whose Python 3 iterator is always
    truthy, so the [""] fallback never fired and a non-list was returned.
    """
    sample_types = self.getSampleTypes()
    sample_type_titles = [obj.Title() for obj in sample_types]
    if not sample_type_titles:
        return [""]
    return sample_type_titles
Returns a list of sample type titles
def get_by_id(self, reply_id):
    """Get the reply by id and render its detail page."""
    reply = MReply.get_by_uid(reply_id)
    logger.info('get_reply: {0}'.format(reply_id))
    self.render('misc/reply/show_reply.html',
                reply=reply,
                username=reply.user_name,
                date=reply.date,
                vote=reply.vote,
                uid=reply.uid,
                userinfo=self.userinfo,
                kwd={})
Get the reply by id.
def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
    """MLE for Linear Regression Model.

    Parameters
    ----------
    y : ndarray
        target variable with N observations
    X : ndarray
        The <N x C> design matrix.
    algorithm : str
        Optional, default 'Nelder-Mead'. One of 'Nelder-Mead', 'CG', 'BFGS'.
    debug : bool
        Optional. If True, return scipy's full OptimizeResult.

    Returns
    -------
    beta : ndarray
        Estimated regression coefficients (debug=False), or the
        OptimizeResult (debug=True).
    """
    import numpy as np
    import scipy.stats as sstat
    import scipy.optimize as sopt

    def objective_nll_linreg(theta, y, X):
        # theta[:-1] are the coefficients, theta[-1] is the noise scale.
        yhat = np.dot(X, theta[:-1])
        return -1.0 * sstat.norm.logpdf(y, loc=yhat, scale=theta[-1]).sum()

    if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
        raise Exception('Optimization Algorithm not supported.')
    theta0 = np.ones((X.shape[1] + 1,))
    results = sopt.minimize(
        objective_nll_linreg, theta0,
        args=(y, X),
        method=algorithm,
        options={'disp': False})
    if debug:
        return results
    return results.x[:-1]
MLE for Linear Regression Model Parameters: ----------- y : ndarray target variable with N observations X : ndarray The <N x C> design matrix with C independent variables, features, factors, etc. algorithm : str Optional. Default 'Nelder-Mead' (Simplex). The algorithm used in scipy.optimize.minimize debug : bool Optional. Returns: -------- beta : ndarray Estimated regression coefficients. results : scipy.optimize.optimize.OptimizeResult Optional. If debug=True then only scipy's optimization result variable is returned.
def _load_lib():
    """Load LightGBM library, or return None if no library file is found."""
    lib_path = find_lib_path()
    if len(lib_path) == 0:
        return None
    lib = ctypes.cdll.LoadLibrary(lib_path[0])
    # Error messages come back as C strings.
    lib.LGBM_GetLastError.restype = ctypes.c_char_p
    return lib
Load LightGBM library.
def to_numpy(self):
    """Converts this SFrame to a numpy array.

    This operation constructs the whole array in memory; take care when the
    returned object is big.

    Returns
    -------
    out : numpy.ndarray
        A Numpy Array containing all the values of the SFrame.
    """
    assert HAS_NUMPY, 'numpy is not installed.'
    import numpy
    # Stack columns then transpose so rows of the SFrame become rows
    # of the array.
    return numpy.transpose(numpy.asarray(
        [self[x] for x in self.column_names()]))
Converts this SFrame to a numpy array This operation will construct a numpy array in memory. Care must be taken when size of the returned object is big. Returns ------- out : numpy.ndarray A Numpy Array containing all the values of the SFrame
def project_hidden(x, projection_tensors, hidden_size, num_blocks):
    """Project encoder hidden state under num_blocks using projection tensors.

    Args:
      x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
      projection_tensors: Projection tensors used to project the hidden state.
      hidden_size: Dimension of the latent space.
      num_blocks: Number of blocks in DVQ.

    Returns:
      x_projected: Projected states of shape
        [batch_size, latent_dim, num_blocks, hidden_size / num_blocks].
    """
    batch_size, latent_dim, _ = common_layers.shape_list(x)
    x = tf.reshape(x, shape=[1, -1, hidden_size])
    # Tile once per block so each block gets its own projection matmul.
    x_tiled = tf.reshape(
        tf.tile(x, multiples=[num_blocks, 1, 1]),
        shape=[num_blocks, -1, hidden_size])
    x_projected = tf.matmul(x_tiled, projection_tensors)
    x_projected = tf.transpose(x_projected, perm=[1, 0, 2])
    x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1])
    return x_4d
Project encoder hidden state under num_blocks using projection tensors. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. projection_tensors: Projection tensors used to project the hidden state. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: x_projected: Projected states of shape [batch_size, latent_dim, num_blocks, hidden_size / num_blocks].
def insert_table(self, label=None, name=None, **kwargs):
    """Insert a table in the Survey object.

    Accepts the data under either the ``data_frame`` or ``dataframe``
    keyword; remaining keyword arguments are stored as table metadata.
    """
    data_frame = kwargs.pop('data_frame', None)
    if data_frame is None:
        data_frame = kwargs.pop('dataframe', None)
    to_hdf_kwargs = kwargs.pop('to_hdf_kwargs', dict())
    if data_frame is not None:
        assert isinstance(data_frame, pandas.DataFrame)
        if label is None:
            label = name
        table = Table(label=label, name=name, survey=self)
        assert table.survey.hdf5_file_path is not None
        log.debug("Saving table {} in {}".format(
            name, table.survey.hdf5_file_path))
        table.save_data_frame(data_frame, **to_hdf_kwargs)
    if name not in self.tables:
        self.tables[name] = dict()
    for key, val in kwargs.items():
        self.tables[name][key] = val
Insert a table in the Survey object
def iters(cls, batch_size=32, device=0, root='.data',
          vectors=None, trees=False, **kwargs):
    """Create iterator objects for splits of the SNLI dataset.

    This is the simplest way to use the dataset, assuming common defaults
    for field, vocabulary, and iterator parameters.

    Arguments:
        batch_size: Batch size.
        device: Device to create batches on. Use -1 for CPU and None for
            the currently active GPU device.
        root: The root directory that the dataset's zip archive will be
            expanded into.
        vectors: one of the available pretrained vectors, or a list of them
            (see Vocab.load_vectors).
        trees: Whether to include shift-reduce parser transitions.
            Default: False.
        Remaining keyword arguments: Passed to the splits method.
    """
    if trees:
        TEXT = ParsedTextField()
        TRANSITIONS = ShiftReduceField()
    else:
        TEXT = data.Field(tokenize='spacy')
        TRANSITIONS = None
    LABEL = data.Field(sequential=False)
    train, val, test = cls.splits(
        TEXT, LABEL, TRANSITIONS, root=root, **kwargs)
    TEXT.build_vocab(train, vectors=vectors)
    LABEL.build_vocab(train)
    return data.BucketIterator.splits(
        (train, val, test), batch_size=batch_size, device=device)
Create iterator objects for splits of the SNLI dataset. This is the simplest way to use the dataset, and assumes common defaults for field, vocabulary, and iterator parameters. Arguments: batch_size: Batch size. device: Device to create batches on. Use -1 for CPU and None for the currently active GPU device. root: The root directory that the dataset's zip archive will be expanded into; therefore the directory in whose wikitext-2 subdirectory the data files will be stored. vectors: one of the available pretrained vectors or a list with each element one of the available pretrained vectors (see Vocab.load_vectors) trees: Whether to include shift-reduce parser transitions. Default: False. Remaining keyword arguments: Passed to the splits method.
def train_all(ctx, output):
    """Train POS tagger on WSJ, GENIA, and both, with and without cluster
    features, saving one pickle per configuration under the output prefix."""
    click.echo('chemdataextractor.pos.train_all')
    click.echo('Output: %s' % output)
    ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output,
               corpus='wsj', clusters=False)
    ctx.invoke(train, output='%s_wsj.pickle' % output,
               corpus='wsj', clusters=True)
    ctx.invoke(train, output='%s_genia_nocluster.pickle' % output,
               corpus='genia', clusters=False)
    ctx.invoke(train, output='%s_genia.pickle' % output,
               corpus='genia', clusters=True)
    ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output,
               corpus='wsj+genia', clusters=False)
    ctx.invoke(train, output='%s_wsj_genia.pickle' % output,
               corpus='wsj+genia', clusters=True)
Train POS tagger on WSJ, GENIA, and both. With and without cluster features.
def open(self):
    """Open a connection to the AMQP compliant broker and create a channel."""
    self._connection = \
        amqp.Connection(host='%s:%s' % (self.hostname, self.port),
                        userid=self.username,
                        password=self.password,
                        virtual_host=self.virtual_host,
                        insist=False)
    self.channel = self._connection.channel()
Open a connection to the AMQP compliant broker.
def _interleave(self):
    """Return ndarray from blocks with specified item order.

    Items must be contained in the blocks.

    :raises AssertionError: if some items were not contained in blocks.
    """
    from pandas.core.dtypes.common import is_sparse
    dtype = _interleaved_dtype(self.blocks)
    # Downcast dtypes numpy cannot hold directly.
    if is_sparse(dtype):
        dtype = dtype.subtype
    elif is_extension_array_dtype(dtype):
        dtype = 'object'
    result = np.empty(self.shape, dtype=dtype)
    itemmask = np.zeros(self.shape[0])
    for blk in self.blocks:
        rl = blk.mgr_locs
        result[rl.indexer] = blk.get_values(dtype)
        itemmask[rl.indexer] = 1
    if not itemmask.all():
        raise AssertionError('Some items were not contained in blocks')
    return result
Return ndarray from blocks with specified item order Items must be contained in the blocks
def remove_this_tlink(self, tlink_id):
    """Removes the tlink for the given tlink identifier.

    @type tlink_id: string
    @param tlink_id: the tlink identifier to be removed
    """
    for tlink in self.get_tlinks():
        if tlink.get_id() == tlink_id:
            self.node.remove(tlink.get_node())
            break
Removes the tlink for the given tlink identifier @type tlink_id: string @param tlink_id: the tlink identifier to be removed
def to_multidimensional(tpm):
    """Reshape a state-by-node TPM to the multidimensional form.

    The input has shape (2**N, N); the output has N binary state dimensions
    plus a trailing node dimension, using little-endian (Fortran) state
    ordering.
    """
    tpm = np.array(tpm)
    N = tpm.shape[-1]
    return tpm.reshape([2] * N + [N], order="F").astype(float)
Reshape a state-by-node TPM to the multidimensional form. See documentation for the |Network| object for more information on TPM formats.
def synchronise(func):
    """If task already queued, running, or finished, don't restart.

    Decorator: a per-day/per-args cache lock records the running task id;
    a second call returns the existing task's AsyncResult (or None once it
    has finished and been forgotten).
    """
    def inner(request, *args):
        lock_id = '%s-%s-built-%s' % (
            datetime.date.today(), func.__name__,
            ",".join([str(a) for a in args]))
        if cache.add(lock_id, 'true', LOCK_EXPIRE):
            # We won the lock: start the task and record its id.
            result = func(request, *args)
            cache.set(lock_id, result.task_id)
        else:
            task_id = cache.get(lock_id)
            if not task_id:
                return None
            cache.set(lock_id, "")
            result = Task.AsyncResult(task_id)
            if result.ready():
                result.forget()
                return None
        return result
    return inner
If task already queued, running, or finished, don't restart.
def apply(self, builder, model):
    """Apply the soft-delete scope to a given query builder.

    :param builder: The query builder
    :type builder: orator.orm.builder.Builder

    :param model: The model
    :type model: orator.orm.Model
    """
    builder.where_null(model.get_qualified_deleted_at_column())
    self.extend(builder)
Apply the scope to a given query builder. :param builder: The query builder :type builder: orator.orm.builder.Builder :param model: The model :type model: orator.orm.Model
def gfpa(target, illmin, abcorr, obsrvr, relate, refval, adjust, step,
         nintvals, cnfine, result=None):
    """Determine time intervals for which a specified constraint on the phase
    angle between an illumination source, a target, and observer body
    centers is met.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfpa_c.html

    :param target: Name of the target body.
    :param illmin: Name of the illuminating body.
    :param abcorr: Aberration correction flag.
    :param obsrvr: Name of the observing body.
    :param relate: Relational operator.
    :param refval: Reference value.
    :param adjust: Adjustment value for absolute extrema searches.
    :param step: Step size used for locating extrema and roots.
    :param nintvals: Workspace window interval count.
    :param cnfine: SPICE window to which the search is restricted.
    :param result: Optional SPICE window containing results.
    """
    assert isinstance(cnfine, stypes.SpiceCell)
    assert cnfine.is_double()
    if result is None:
        result = stypes.SPICEDOUBLE_CELL(2000)
    else:
        assert isinstance(result, stypes.SpiceCell)
        assert result.is_double()
    target = stypes.stringToCharP(target)
    illmin = stypes.stringToCharP(illmin)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    relate = stypes.stringToCharP(relate)
    refval = ctypes.c_double(refval)
    adjust = ctypes.c_double(adjust)
    step = ctypes.c_double(step)
    nintvals = ctypes.c_int(nintvals)
    libspice.gfpa_c(target, illmin, abcorr, obsrvr, relate, refval,
                    adjust, step, nintvals,
                    ctypes.byref(cnfine), ctypes.byref(result))
    return result
Determine time intervals for which a specified constraint on the phase angle between an illumination source, a target, and observer body centers is met. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfpa_c.html :param target: Name of the target body. :type target: str :param illmin: Name of the illuminating body. :type illmin: str :param abcorr: Aberration correction flag. :type abcorr: str :param obsrvr: Name of the observing body. :type obsrvr: str :param relate: Relational operator. :type relate: str :param refval: Reference value. :type refval: float :param adjust: Adjustment value for absolute extrema searches. :type adjust: float :param step: Step size used for locating extrema and roots. :type step: float :param nintvals: Workspace window interval count. :type nintvals: int :param cnfine: SPICE window to which the search is restricted. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: Optional SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell
def get_details(self, jobname, jobkey):
    """Get the `JobDetails` associated to a particular job instance.

    :raises KeyError: if the job is not present in the cache.
    """
    fullkey = JobDetails.make_fullkey(jobname, jobkey)
    return self._cache[fullkey]
Get the `JobDetails` associated to a particular job instance
def get_missing_deps(self, obj):
    """Return the dependencies of `obj` that cannot be provided right now.

    A dependency is missing when there is no registered provider for its
    key, or the provider reports itself as not providable.

    :return: Missing dependency keys
    :rtype: list
    """
    deps = self.get_deps(obj)
    ret = []
    for key in deps:
        provider = self._providers.get(key)
        if provider and provider.providable:
            continue
        ret.append(key)
    return ret
Returns missing dependencies for provider key. Missing meaning no instance can be provided at this time. :param key: Provider key :type key: object :return: Missing dependencies :rtype: list
def db_dp010(self, value=None):
    """Corresponds to IDD Field `db_dp010`.

    Mean coincident dry-bulb temperature for the dew-point temperature at
    1.0% annual cumulative frequency of occurrence.

    Args:
        value (float): value for IDD Field `db_dp010` (unit: C). If None,
            it is stored unchecked and treated as a missing value.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_dp010`'.format(value))
    self._db_dp010 = value
Corresponds to IDD Field `db_dp010` mean coincident dry-bulb temperature to Dew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_dp010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def rolling(self, window, min_periods=None, center=False, win_type=None,
            on=None, axis=0, closed=None):
    """Create a swifter rolling object.

    Mirrors the pandas ``rolling`` signature: every window option is
    forwarded, along with this accessor's execution settings, to
    :class:`Rolling`.
    """
    window_kwargs = dict(
        window=window,
        min_periods=min_periods,
        center=center,
        win_type=win_type,
        on=on,
        axis=axis,
        closed=closed,
    )
    return Rolling(self._obj, self._npartitions, self._dask_threshold,
                   self._scheduler, self._progress_bar, **window_kwargs)
Create a swifter rolling object
def find_coord_vars(ncds):
    """Find all coordinate variables in a dataset.

    A coordinate variable is a variable with the same name as a dimension
    whose sole dimension is itself.

    :param ncds: Open netCDF dataset-like object.
    :return: List of coordinate variable objects.
    """
    return [
        ncds.variables[dim]
        for dim in ncds.dimensions
        if dim in ncds.variables and ncds.variables[dim].dimensions == (dim,)
    ]
Finds all coordinate variables in a dataset. A variable with the same name as a dimension is called a coordinate variable.
def owner_search_fields(self):
    """List the searchable fields on the user model.

    Every ``CharField`` on the configured user model is included, except
    ``password``. For the built-in ``User`` model that means username,
    first_name, last_name, and email.
    """
    try:
        from django.contrib.auth import get_user_model
    except ImportError:
        from django.contrib.auth.models import User
    else:
        User = get_user_model()
    names = []
    for field in User._meta.fields:
        if isinstance(field, models.CharField) and field.name != 'password':
            names.append(field.name)
    return names
Returns all the fields that are CharFields except for password from the User model. For the built-in User model, that means username, first_name, last_name, and email.
def get_all_chains(self):
    """Assemble and return the chain of every leaf node to the merkle root.

    :return: One chain per leaf, in leaf order.
    :rtype: list
    """
    chains = []
    for index in range(len(self.leaves)):
        chains.append(self.get_chain(index))
    return chains
Assemble and return a list of all chains for all leaf nodes to the merkle root.
def notice(self, target, msg):
    """Send a NOTICE to a user or channel.

    :param target: Nick or channel name to deliver the notice to.
    :type target: str
    :param msg: Text of the notice.
    :type msg: basestring
    """
    line = u'{0} :{1}'.format(target, msg)
    self.cmd(u'NOTICE', line)
Sends a NOTICE to an user or channel. :param target: user or channel to send to. :type target: str :param msg: message to send. :type msg: basestring
def dcc_send(self, mask, filepath):
    """DCC SEND a file to *mask*.

    :param mask: Target user mask.
    :param filepath: Absolute path to an existing file.
    :return: The ``ready`` future of the created DCC connection.
    """
    connection = self.dcc.create('send', mask, filepath=filepath)
    return connection.ready
DCC SEND a file to mask. filepath must be an absolute path to existing file
def _check_valid_translation(self, translation): if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number): raise ValueError('Translation must be specified as numeric numpy array') t = translation.squeeze() if len(t.shape) != 1 or t.shape[0] != 3: raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray')
Checks that the translation vector is valid.
def sync_to(self):
    """Synchronize configuration TO this device group.

    Convenience wrapper: climbs from the containing device-group
    collection to the ``cm`` endpoint and runs
    ``config-sync to-group <name>`` via ``exec_cmd``.
    """
    collection = self._meta_data['container']
    cm_endpoint = collection._meta_data['container']
    cm_endpoint.exec_cmd('run', utilCmdArgs='config-sync to-group %s' % self.name)
Wrapper method that synchronizes configuration to DG. Executes the containing object's cm :meth:`~f5.bigip.cm.Cm.exec_cmd` method to sync the configuration TO the device-group. :note:: Both sync_to, and sync_from methods are convenience methods which usually are not what this SDK offers. It is best to execute config-sync with the use of exec_cmd() method on the cm endpoint.
def plot(self, save_path=None, close=False, bbox_inches="tight", pad_inches=1):
    """Plot the figure.

    Parameters
    ----------
    save_path : string (optional)
        Where to save the figure; nothing is saved when None. Default None.
    close : boolean (optional)
        Close the figure after plotting. Default False.
    bbox_inches : number (optional)
        Bounding box size, in inches. Default 'tight'.
    pad_inches : number (optional)
        Padding, in inches. Default 1.
    """
    for axes in self.subplots.flatten():
        axes.set_xlim(-0.1, 1.1)
        axes.set_ylim(-0.1, 1.1)
        axes.axis("off")
    if save_path:
        plt.savefig(save_path, transparent=True, dpi=300,
                    bbox_inches=bbox_inches, pad_inches=pad_inches)
    if close:
        plt.close()
Plot figure. Parameters ---------- save_path : string (optional) Save path. Default is None. close : boolean (optional) Toggle automatic figure closure after plotting. Default is False. bbox_inches : number (optional) Bounding box size, in inches. Default is 'tight'. pad_inches : number (optional) Pad inches. Default is 1.
def _valid_baremetal_port(port):
    """Check if *port* is a baremetal port with exactly one security group.

    :param port: Neutron port dict.
    :return: True only for a baremetal VNIC carrying a single security
        group; logs a warning when more than one is attached.
    """
    if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
        return False
    sgs = port.get('security_groups', [])
    if not sgs:
        return False
    # Fixed: reuse the list bound above instead of fetching it from the
    # port dict a second time.
    if len(sgs) > 1:
        LOG.warning('SG provisioning failed for %(port)s. Only one '
                    'SG may be applied per port.',
                    {'port': port['id']})
        return False
    return True
Check if port is a baremetal port with exactly one security group
def load_module(name):
    """Load the named module without registering it in ``sys.modules``.

    Parameters
    ----------
    name : string
        Module name.

    Returns
    -------
    mod : module
        Freshly executed module object.
    """
    spec = importlib.util.find_spec(name)
    module = importlib.util.module_from_spec(spec)
    # module_from_spec already sets these; keep them explicit anyway.
    module.__spec__ = spec
    module.__loader__ = spec.loader
    spec.loader.exec_module(module)
    return module
Load the named module without registering it in ``sys.modules``. Parameters ---------- name : string Module name Returns ------- mod : module Loaded module
def random(magnitude=1):
    """Create a randomly oriented vector of the given magnitude.

    :param magnitude: Length of the result (default 1, i.e. a unit
        vector).
    :return: ``magnitude`` times a unit ``Vector`` at a uniformly random
        angle.
    """
    # NOTE(review): assumes the stdlib ``random`` module is resolvable
    # here despite this function sharing its name (e.g. this def lives in
    # a class namespace) -- confirm against the enclosing file.
    angle = random.uniform(0, 2 * math.pi)
    return magnitude * Vector(math.cos(angle), math.sin(angle))
Create a vector of the given magnitude (default 1, i.e. a unit vector) pointing in a random direction.
def signal_handler(sig, frame):
    """Shut down the RedBot cleanly when Ctrl-C is pressed, then exit.

    :param sig: Signal number (unused).
    :param frame: Current stack frame (unused).
    """
    # `board` is a module-level global set up elsewhere in the script.
    print('\nYou pressed Ctrl+C')
    if board is not None:
        board.send_reset()
        board.shutdown()
    sys.exit(0)
Helper method to shutdown the RedBot if Ctrl-c is pressed
def get_available_hashes():
    """Return the hash algorithms available to :mod:`hashlib`.

    On Python >= 3.2 this is ``hashlib.algorithms_available`` (a set); on
    Python 2.7 it is ``hashlib.algorithms``; otherwise a fixed tuple of
    the classic algorithm names.
    """
    if sys.version_info >= (3, 2):
        return hashlib.algorithms_available
    if (2, 7) <= sys.version_info < (3, 0):
        return hashlib.algorithms
    return 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'
Return the available hash algorithms (a set on Python 3.2+, otherwise a tuple)
def _have_conf(self, magic_hash=None): self.app.have_conf = getattr(self.app, 'cur_conf', None) not in [None, {}] if magic_hash is not None: magic_hash = int(magic_hash) return self.app.have_conf and self.app.cur_conf.magic_hash == magic_hash return self.app.have_conf
Get the daemon current configuration state If the daemon has received a configuration from its arbiter, this will return True If a `magic_hash` is provided it is compared with the one included in the daemon configuration and this function returns True only if they match! :return: boolean indicating if the daemon has a configuration :rtype: bool
def is_pdf(document):
    """Check if a document is a PDF file and return True if it is.

    Sniffs the type with the ``file`` shell utility; when the probed
    executable is missing, falls back to a weak ``.pdf`` extension test.

    :param document: Path of the file to inspect.
    :raises IncompatiblePDF2Text: If the output of ``file`` cannot be
        parsed.
    """
    # NOTE(review): the probe checks for 'pdftotext' while the warning
    # mentions GNU 'file', and it is 'file' that is actually invoked
    # below -- confirm which executable is really intended here.
    if not executable_exists('pdftotext'):
        current_app.logger.warning(
            "GNU file was not found on the system. "
            "Switching to a weak file extension test."
        )
        if document.lower().endswith(".pdf"):
            return True
        return False
    # re.escape guards the shell command against special characters in
    # the path.
    file_output = os.popen('file ' + re.escape(document)).read()
    try:
        filetype = file_output.split(":")[-1]
    except IndexError:
        current_app.logger.error(
            "Your version of the 'file' utility seems to be unsupported."
        )
        raise IncompatiblePDF2Text('Incompatible pdftotext')
    pdf = filetype.find("PDF") > -1
    return pdf
Check if a document is a PDF file and return True if is is.
def exit_if_path_exists(self):
    """Exit early (status 1) when the output path already exists.

    Prints the configured ``path_exists`` error message before exiting.
    """
    if os.path.exists(self.output_path):
        ui.error(c.MESSAGES["path_exists"], self.output_path)
        sys.exit(1)
Exit early if the output path already exists.
def purge_items(app, env, docname):
    """Remove all traceability items that came from *docname*.

    Cleans matching ``item`` entries from the ``traceability_all_items``
    environment variable; intended for the ``env-purge-doc`` event.
    """
    all_items = env.traceability_all_items
    stale = [key for key, item in list(all_items.items())
             if item['docname'] == docname]
    for key in stale:
        del all_items[key]
Clean, if existing, ``item`` entries in ``traceability_all_items`` environment variable, for all the source docs being purged. This function should be triggered upon ``env-purge-doc`` event.
def on_nicknameinuse(self, connection, event):
    """Resolve a nickname collision by bumping a trailing counter.

    Strips any trailing digits from the current nickname, increments them
    (starting at 1 when there were none), appends the result, and
    re-connects with the new nickname.
    """
    suffix = ""
    while self.nickname[-1].isdigit():
        suffix = self.nickname[-1] + suffix
        self.nickname = self.nickname[:-1]
    counter = int(suffix) + 1 if suffix else 1
    self.nickname += str(counter)
    self.connect(self.host, self.port, self.nickname)
Increment a digit on the nickname if it's in use, and re-connect.
def logit(self, msg, pid, user, cname, priority=None):
    """Format *msg* with process context and log it.

    Writes the raw message to ``self.stream`` when one is configured;
    otherwise routes a formatted line to the syslog logger at the given
    priority (INFO by default).
    """
    if self.stream:
        print(msg, file=self.stream)
        return
    line = "{0}[pid:{1}] user:{2}: {3} - {4}"
    if priority == logging.WARNING:
        self.logger.warning(line.format(cname, pid, user, "WARNING", msg))
    elif priority == logging.ERROR:
        self.logger.error(line.format(cname, pid, user, "ERROR", msg))
    else:
        self.logger.info(line.format(cname, pid, user, "INFO", msg))
Function for formatting content and logging to syslog
def answered_by(self, rec):
    """Return True if this question is answered by record *rec*.

    Matches when class and name are equal and the types match (or this
    question's type is the ``_TYPE_ANY`` wildcard).
    """
    if self.clazz != rec.clazz:
        return False
    if self.type != rec.type and self.type != _TYPE_ANY:
        return False
    return self.name == rec.name
Returns true if the question is answered by the record
def transform_incoming(self, son, collection):
    """Replace embedded documents with DBRefs.

    Walks *son* recursively: any mapping containing both ``_id`` and
    ``_ns`` becomes a :class:`DBRef`, other mappings are copied into
    :class:`SON` instances, and lists are transformed element-wise.

    :param son: Document being saved.
    :param collection: Target collection name (unused here).
    :return: Transformed :class:`SON` document.
    """
    # Fix: collections.MutableMapping was removed from the `collections`
    # namespace in Python 3.10; fall back for older interpreters.
    try:
        _MutableMapping = collections.abc.MutableMapping
    except AttributeError:
        _MutableMapping = collections.MutableMapping

    def transform_value(value):
        if isinstance(value, _MutableMapping):
            if "_id" in value and "_ns" in value:
                return DBRef(value["_ns"], transform_value(value["_id"]))
            return transform_dict(SON(value))
        if isinstance(value, list):
            return [transform_value(v) for v in value]
        return value

    def transform_dict(document):
        for key, value in list(document.items()):
            document[key] = transform_value(value)
        return document

    return transform_dict(SON(son))
Replace embedded documents with DBRefs.
def transformToNative(obj):
    """Turn obj.value into a date or datetime.

    Parses ``obj.value`` via ``parseDtstart``, tolerating a mismatch
    between the declared value type and the actual text. For DATE-TIME
    values a ``TZID`` parameter is preserved as ``X-VOBJ-ORIGINAL-TZID``
    before being dropped.

    :param obj: A content line; returned unchanged if already native or
        empty.
    :return: The same *obj*, with its value converted in place.
    """
    if obj.isNative:
        return obj
    obj.isNative = True
    if obj.value == '':
        return obj
    # No-op self-assignment kept from the original implementation.
    obj.value=obj.value
    obj.value=parseDtstart(obj, allowSignatureMismatch=True)
    if getattr(obj, 'value_param', 'DATE-TIME').upper() == 'DATE-TIME':
        if hasattr(obj, 'tzid_param'):
            # Remember the original TZID before removing the parameter.
            obj.params['X-VOBJ-ORIGINAL-TZID'] = [obj.tzid_param]
            del obj.tzid_param
    return obj
Turn obj.value into a date or datetime.
def sorensen(seq1, seq2):
    """Compute the Sorensen (Dice) distance between two sequences.

    Items must be hashable. The result is a float between 0 and 1, where
    0 means the item sets are equal and 1 means totally different.

    :param seq1: First sequence.
    :param seq2: Second sequence.
    :return: Sorensen distance as a float.
    """
    set1, set2 = set(seq1), set(seq2)
    total = len(set1) + len(set2)
    if total == 0:
        # Both sequences empty: they are identical, so distance is 0
        # (the original raised ZeroDivisionError here).
        return 0.0
    return 1 - (2 * len(set1 & set2) / float(total))
Compute the Sorensen distance between the two sequences `seq1` and `seq2`. They should contain hashable items. The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
def set_credentials(self, client_id=None, client_secret=None):
    """Store the given credentials and reset the cached session.

    :param client_id: Client id (or None to clear it).
    :param client_secret: Client secret (or None to clear it).
    """
    self._client_id, self._client_secret = client_id, client_secret
    self._session = None
set given credentials and reset the session
def merge_extends(self, target, extends, inherit_key="inherit", inherit=False):
    """Recursively merge *extends* into *target* in place.

    Dict keys missing from *target* are copied from *extends*; keys
    present in both are merged recursively. Lists are extended. When
    *inherit* is True and *target* carries a falsy *inherit_key*, the
    merge is skipped for that subtree.

    :param target: Dict or list to merge into (modified in place).
    :param extends: Dict or list providing the extended values.
    :param inherit_key: Key letting a dict opt out of inheritance.
    :param inherit: Whether the inherit flag is honoured at this level
        (set on recursive calls).
    :raises ValueError: If *target* and *extends* types do not line up.
    """
    if isinstance(target, dict):
        if inherit and inherit_key in target and not to_boolean(target[inherit_key]):
            return
        if not isinstance(extends, dict):
            # Typo fixed in the message: was "Dictionnary".
            raise ValueError("Unable to merge: Dictionary expected")
        for key in extends:
            if key not in target:
                target[str(key)] = extends[key]
            else:
                self.merge_extends(target[key], extends[key], inherit_key, True)
    elif isinstance(target, list):
        if not isinstance(extends, list):
            raise ValueError("Unable to merge: List expected")
        target += extends
Merge extended dicts
def set_permitted_ip(address=None, deploy=False):
    """Add an IPv4 address or network to the permitted IP list.

    Args:
        address (str): The IPv4 address or network to allow access to
            the Palo Alto device.
        deploy (bool): If True, commit the full candidate configuration;
            otherwise only stage the pending change.

    Raises:
        CommandExecutionError: If no address is supplied.

    CLI Example:

    .. code-block:: bash

        salt '*' panos.set_permitted_ip 10.0.0.1
        salt '*' panos.set_permitted_ip 10.0.0.0/24
        salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True
    """
    if not address:
        raise CommandExecutionError("Address option must not be empty.")
    query = {
        'type': 'config',
        'action': 'set',
        'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip',
        'element': '<entry name=\'{0}\'></entry>'.format(address),
    }
    ret = {}
    ret.update(__proxy__['panos.call'](query))
    if deploy is True:
        ret.update(commit())
    return ret
Add an IPv4 address or network to the permitted IP list. CLI Example: Args: address (str): The IPv4 address or network to allow access to add to the Palo Alto device. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_permitted_ip 10.0.0.1 salt '*' panos.set_permitted_ip 10.0.0.0/24 salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True
def set_user_presence(self, userid, presence):
    """Set the presence state of a user.

    :param userid: Id of the user to update.
    :param presence: New presence value.
    :return: Tuple of (status_code, response) from the pod API.
    """
    api_call = self.__pod__.Presence.post_v2_user_uid_presence(
        sessionToken=self.__session__,
        uid=userid,
        presence=presence,
    )
    response, status_code = api_call.result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
set presence of user
def build_url_request(self):
    """Build the access-token HTTP request.

    Consults the authenticator and the grant for request parameters and
    headers, then constructs a request against the stored endpoint.

    :return: The prepared ``Request`` object.
    """
    params, headers = {}, {}
    self._authenticator(params, headers)
    self._grant(params)
    body = urlencode(params)
    return Request(self._endpoint, body, headers)
Consults the authenticator and grant for HTTP request parameters and headers to send with the access token request, builds the request using the stored endpoint and returns it.
def choices(self):
    """Retrieve the parts referenceable by this `ReferenceProperty`.

    Makes two API calls: one to fetch the referenced model, and one to
    fetch the instances of that model.

    :return: The referenceable :class:`Part`'s as a
        :class:`~pykechain.model.PartSet`.
    :raises APIError: When the choices cannot be loaded.

    Example
    -------
    >>> property = project.part('Bike').property('RefTest')
    >>> reference_part_choices = property.choices()
    """
    parent_model = self.part.model()
    referenced_id = parent_model.property(self.name)._value['id']
    referenced_model = self._client.model(pk=referenced_id)
    return self._client.parts(model=referenced_model)
Retrieve the parts that you can reference for this `ReferenceProperty`. This method makes 2 API calls: 1) to retrieve the referenced model, and 2) to retrieve the instances of that model. :return: the :class:`Part`'s that can be referenced as a :class:`~pykechain.model.PartSet`. :raises APIError: When unable to load and provide the choices Example ------- >>> property = project.part('Bike').property('RefTest') >>> reference_part_choices = property.choices()
def encrypt(message, modN, e, blockSize):
    """RSA-encrypt *message* with public key (modN, e).

    The string is converted to numeric blocks of *blockSize*.

    :param message: Plain-text string to encrypt.
    :param modN: RSA modulus.
    :param e: Public exponent.
    :param blockSize: Number of characters per block.
    :return: Cipher value of the first block.
    """
    blocks = numList2blocks(string2numList(message), blockSize)
    # NOTE(review): only blocks[0] is encrypted, so messages longer than
    # one block lose everything past the first -- confirm intentional.
    return modExp(blocks[0], e, modN)
given a string message, public keys and blockSize, encrypt using RSA algorithms.
def assert_variable_type(variable, expected_type, raise_exception=True):
    """Check that *variable* is an instance of one of the expected types.

    Positional arguments:
    variable -- the variable to be checked
    expected_type -- the expected type, or a list of acceptable types
    raise_exception -- whether to raise ValueError on failure instead of
                       returning (False, error_message)

    Returns (True, None) on success. On failure, raises ValueError or
    returns (False, error_message) depending on *raise_exception*.
    """
    if not isinstance(expected_type, list):
        expected_type = [expected_type]
    for t in expected_type:
        if not isinstance(t, type):
            raise ValueError('expected_type argument "%s" is not a type' % str(t))
    if not isinstance(raise_exception, bool):
        raise ValueError('raise_exception argument "%s" is not a bool' % str(raise_exception))
    # any() with a generator replaces the original len([...]) scan.
    if not any(isinstance(variable, t) for t in expected_type):
        error_message = '"%s" is not an instance of type %s. It is of type %s' % (
            str(variable),
            ' or '.join([str(t) for t in expected_type]),
            str(type(variable)))
        if raise_exception:
            raise ValueError(error_message)
        return False, error_message
    return True, None
Return True if a variable is of a certain type or types. Otherwise raise a ValueError exception. Positional arguments: variable -- the variable to be checked expected_type -- the expected type or types of the variable raise_exception -- whether to raise an exception or just return False on failure, with error message
def cmd(name,
        tgt,
        func,
        arg=(),
        tgt_type='glob',
        ret='',
        kwarg=None,
        **kwargs):
    """Execute a remote execution command.

    USAGE:

    .. code-block:: yaml

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping

    :param name: State id.
    :param tgt: Minion target expression.
    :param func: Execution-module function to run.
    :param arg: Positional arguments for *func*.
    :param tgt_type: Target matching type.
    :param ret: Returner to route the minions' results through.
    :param kwarg: Keyword arguments for *func*.
    :return: Standard state return dict with the published jid in changes.
    """
    state_ret = {'name': name,
                 'changes': {},
                 'comment': '',
                 'result': True}
    local = salt.client.get_local_client(mopts=__opts__)
    # Bug fix: the state return dict used to be bound to `ret`, clobbering
    # the returner argument before it was forwarded to cmd_async.
    jid = local.cmd_async(tgt,
                          func,
                          arg,
                          tgt_type=tgt_type,
                          ret=ret,
                          kwarg=kwarg,
                          **kwargs)
    state_ret['changes']['jid'] = jid
    return state_ret
Execute a remote execution command USAGE: .. code-block:: yaml run_remote_ex: local.cmd: - tgt: '*' - func: test.ping run_remote_ex: local.cmd: - tgt: '*' - func: test.sleep - arg: - 30 run_remote_ex: local.cmd: - tgt: '*' - func: test.sleep - kwarg: length: 30
def move_by_offset(self, xoffset, yoffset):
    """Move the mouse by an offset from its current position.

    :Args:
     - xoffset: X offset to move by, as a positive or negative integer.
     - yoffset: Y offset to move by, as a positive or negative integer.

    :return: Self, to allow chaining.
    """
    if self._driver.w3c:
        self.w3c_actions.pointer_action.move_by(xoffset, yoffset)
        self.w3c_actions.key_action.pause()
    else:
        # Legacy protocol: queue the command for later execution.
        self._actions.append(lambda: self._driver.execute(
            Command.MOVE_TO,
            {'xoffset': int(xoffset), 'yoffset': int(yoffset)}))
    return self
Moving the mouse to an offset from current mouse position. :Args: - xoffset: X offset to move to, as a positive or negative integer. - yoffset: Y offset to move to, as a positive or negative integer.
def set_mode_label_to_ifcw(self):
    """Set the window title and subtitle for IFCW mode."""
    subtitle = tr(
        'Use this wizard to run a guided impact assessment')
    self.setWindowTitle(self.ifcw_name)
    self.lblSubtitle.setText(subtitle)
Set the mode label to the IFCW.
def process_shells(self, shells):
    """Process a list of shells.

    Runs them in parallel when parallel mode is enabled and more than
    one shell is given; otherwise processes them in order. An empty list
    yields a trivially successful result.

    :param shells: Shells to process.
    :return: Dict with a 'success' flag and collected 'output'.
    """
    if self.parallel and len(shells) > 1:
        return self.process_shells_parallel(shells)
    if shells:
        return self.process_shells_ordered(shells)
    return {'success': True, 'output': []}
Process a list of shells.
async def set_password(self, password):
    """Update this user's password.

    Changes the password through the controller, then records the new
    value on the cached user info.
    """
    change = self.controller.change_user_password(self.username, password)
    await change
    self._user_info.password = password
Update this user's password.
def cltext(fname):
    """Close a text file opened by RDTEXT.

    Internal undocumented SPICE command; see the CLTEXT entry in the
    FORTRAN (rdtext.f) and CSPICE (rdtext.c) sources.

    :param fname: Text file to be closed.
    :type fname: str
    """
    # Marshal the filename and its length for the Fortran-style C call.
    fnameP = stypes.stringToCharP(fname)
    fname_len = ctypes.c_int(len(fname))
    libspice.cltext_(fnameP, fname_len)
Internal undocumented command for closing a text file opened by RDTEXT. No URL available; relevant lines from SPICE source: FORTRAN SPICE, rdtext.f:: C$Procedure CLTEXT ( Close a text file opened by RDTEXT) ENTRY CLTEXT ( FILE ) CHARACTER*(*) FILE C VARIABLE I/O DESCRIPTION C -------- --- -------------------------------------------------- C FILE I Text file to be closed. CSPICE, rdtext.c:: /* $Procedure CLTEXT ( Close a text file opened by RDTEXT) */ /* Subroutine */ int cltext_(char *file, ftnlen file_len) :param fname: Text file to be closed. :type fname: str
def ndims(self):
    """Return the rank of this shape, or None if it is unspecified.

    The rank is computed from ``self._dims`` on first access and cached
    in ``self._ndims``.
    """
    if self._dims is None:
        return None
    if self._ndims is None:
        self._ndims = len(self._dims)
    return self._ndims
Returns the rank of this shape, or None if it is unspecified.