docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Append text to the current buffer. Args: text (str or Sequence[str]): One or many lines of text to append. afterline (Optional[int]): Line number to append after. If 0, text is prepended before the first line; if ``None``, at end of the buffer.
def append(self, text, afterline=None):
    """Append text to the current buffer.

    Args:
        text (str or Sequence[str]): One or many lines of text to append.
        afterline (Optional[int]): Line number to append after. If 0, text
            is prepended before the first line; if ``None``, at end of the
            buffer.
    """
    # Bug fix: ``if afterline:`` treated 0 the same as None, so prepending
    # before the first line was impossible. Test for None explicitly.
    if afterline is not None:
        self._vim.current.buffer.append(text, afterline)
    else:
        self._vim.current.buffer.append(text)
381,577
Get a line from the current buffer. Args: lnum (Optional[str]): Number of the line to get, current if ``None``. Todo: - Give this more behavior of Vim ``getline()``? - ``buffer[index]`` is zero-based, this is probably too confusing
def getline(self, lnum=None):
    """Get a line from the current buffer.

    Args:
        lnum (Optional[int]): Number of the line to get, current if ``None``.

    Note: ``buffer[lnum]`` indexing is zero-based, unlike Vim's ``getline()``.
    """
    if lnum:
        return self._vim.current.buffer[lnum]
    return self._vim.current.line
381,578
Get all lines of a buffer as a list. Args: bufnr (Optional[int]): A Vim buffer number, current if ``None``. Returns: List[str]
def getlines(self, bufnr=None):
    """Get all lines of a buffer as a list.

    Args:
        bufnr (Optional[int]): A Vim buffer number, current if ``None``.

    Returns:
        List[str]
    """
    if bufnr:
        target = self._vim.buffers[bufnr]
    else:
        target = self._vim.current.buffer
    return target[:]
381,579
Presents a selection menu and returns the user's choice. Args: prompt (str): Text to ask the user what to select. choices (Sequence[str]): Values for the user to select from. Returns: The value selected by the user, or ``None``. Todo: Nice opportunity to provide a hook for Unite.vim, etc. here.
def menu(self, prompt, choices):
    """Present a numbered selection menu and return the user's choice.

    Args:
        prompt (str): Text to ask the user what to select.
        choices (Sequence[str]): Values for the user to select from.

    Returns:
        The value selected by the user, or ``None``.
    """
    entries = [prompt]
    for number, text in enumerate(choices, start=1):
        entries.append("{0}. {1}".format(number, text))
    picked = int(self._vim.eval('inputlist({})'.format(repr(entries))))
    # Vim returns weird stuff if user clicks outside choices with mouse
    if 0 < picked < len(entries):
        return choices[picked - 1]
    return None
381,581
Set filetype for a buffer. Note: it's a quirk of Vim's Python API that using the buffer.options dictionary to set filetype does not trigger ``FileType`` autocommands, hence this implementation executes as a command instead. Args: filetype (str): The filetype to set. bufnr (Optional[int]): A Vim buffer number, current if ``None``.
def set_filetype(self, filetype, bufnr=None):
    """Set filetype for a buffer.

    Executed as an ex command (not via ``buffer.options``) because only the
    command form triggers ``FileType`` autocommands in Vim's Python API.

    Args:
        filetype (str): The filetype to set.
        bufnr (Optional[int]): A Vim buffer number, current if ``None``.
    """
    if bufnr:
        command = '{0}bufdo set filetype={1}'.format(bufnr, filetype)
    else:
        command = 'set filetype=' + filetype
    self._vim.command(command)
381,583
Open file in a new split window. Args: fpath (str): Path of the file to open. If ``None``, a new empty split is created. vertical (bool): Whether to open a vertical split. size (Optional[int]): The height (or width) to set for the new window. bufopts (Optional[dict]): Buffer-local options to set in the split window. See :func:`.set_buffer_options`.
def split_window(self, fpath, vertical=False, size=None, bufopts=None):
    """Open file in a new split window.

    Args:
        fpath (str): Path of the file to open. If ``None``, a new empty
            split is created.
        vertical (bool): Whether to open a vertical split.
        size (Optional[int]): The height (or width) of the new window.
        bufopts (Optional[dict]): Buffer-local options to set in the split
            window.
    """
    if fpath:
        command = 'split {}'.format(fpath)
    else:
        command = 'new'
    if vertical:
        command = 'v' + command
    if size:
        command = '{0}{1}'.format(size, command)
    self._vim.command(command)
    if bufopts:
        self.set_buffer_options(bufopts)
381,584
Writes the file of the current buffer. Args: noautocmd (bool): If true, write will skip autocommands. Todo: We should consider whether ``SourceFileInfo`` can replace most usage of noautocmd. See #298
def write(self, noautocmd=False):
    """Write the file of the current buffer.

    Args:
        noautocmd (bool): If true, the write skips autocommands.
    """
    if noautocmd:
        self._vim.command('noautocmd write')
    else:
        self._vim.command('write')
381,585
Find path of an .ensime config, searching recursively upward from path. Args: path (str): Path of a file or directory from where to start searching. Returns: str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
def find_from(path):
    """Find the path of an ``.ensime`` config, searching upward from ``path``.

    Args:
        path (str): Path of a file or directory from where to start searching.

    Returns:
        str: Canonical path of the nearest ``.ensime``, or ``None`` if not
        found before reaching the filesystem root.
    """
    current = os.path.realpath(path)
    root = os.path.abspath('/')
    while True:
        candidate = os.path.join(current, '.ensime')
        if os.path.isfile(candidate):
            return candidate
        if current == root:
            return None
        current = os.path.dirname(current)
381,695
Parse an ``.ensime`` config file from S-expressions. Args: path (str): Path of an ``.ensime`` file to parse. Returns: dict: Configuration values with string keys.
def parse(path):
    """Parse an ``.ensime`` config file from S-expressions.

    Args:
        path (str): Path of an ``.ensime`` file to parse.

    Returns:
        dict: Configuration values with string keys.
    """
    def pairs(flat):
        # (k1 v1 k2 v2 ...) -> ((k1, v1), (k2, v2), ...)
        cursor = iter(flat)
        return zip(cursor, cursor)

    def symbol_value(datum):
        if isinstance(datum, sexpdata.Symbol):
            return datum.value()
        return datum

    def to_dict(sexps):
        result = {}
        for raw_key, value in pairs(sexps):
            key = str(symbol_value(raw_key)).lstrip(':')
            # Recursively transform nested lists
            if isinstance(value, list) and value:
                head = value[0]
                if isinstance(head, list):
                    result[key] = [to_dict(item) for item in value]
                elif isinstance(head, sexpdata.Symbol):
                    result[key] = to_dict(value)
                else:
                    result[key] = value
            else:
                result[key] = value
        return result

    return to_dict(sexpdata.loads(Util.read_file(path)))
381,696
Get the balance for this account, including child accounts Args: as_of (Date): Only include transactions on or before this date raw (bool): If true the returned balance should not have its sign adjusted for display purposes. kwargs (dict): Will be used to filter the transaction legs Returns: Balance See Also: :meth:`simple_balance()`
def balance(self, as_of=None, raw=False, leg_query=None, **kwargs):
    """Get the balance for this account, including child accounts.

    Args:
        as_of (Date): Only include transactions on or before this date.
        raw (bool): If true the returned balance should not have its sign
            adjusted for display purposes.
        leg_query: Extra filter applied to the transaction legs.
        kwargs (dict): Will be used to filter the transaction legs.

    Returns:
        Balance

    See Also:
        :meth:`simple_balance()`
    """
    total = Balance()
    for account in self.get_descendants(include_self=True):
        total = total + account.simple_balance(
            as_of=as_of, raw=raw, leg_query=leg_query, **kwargs)
    return total
383,229
Create a transaction for this statement amount and account, into to_account This will also set this StatementLine's ``transaction`` attribute to the newly created transaction. Args: to_account (Account): The account the transaction is into / out of. Returns: Transaction: The newly created (and committed) transaction.
def create_transaction(self, to_account):
    """Create a transaction for this statement amount and account, into
    ``to_account``.

    Also sets this StatementLine's ``transaction`` attribute to the newly
    created transaction.

    Args:
        to_account (Account): The account the transaction is into / out of.

    Returns:
        Transaction: The newly created (and committed) transaction.
    """
    from_account = self.statement_import.bank_account
    transaction = Transaction.objects.create()
    # Legs mirror each other: the bank account leg carries the negated
    # statement amount, the destination leg the original amount.
    Leg.objects.create(
        transaction=transaction, account=from_account, amount=-self.amount)
    Leg.objects.create(
        transaction=transaction, account=to_account, amount=self.amount)
    transaction.date = self.date
    transaction.save()
    self.transaction = transaction
    self.save()
    return transaction
383,237
Normalise this balance into a single currency Args: to_currency (str): Destination currency Returns: (Balance): A new balance object containing a single Money value in the specified currency
def normalise(self, to_currency):
    """Normalise this balance into a single currency.

    Args:
        to_currency (str): Destination currency.

    Returns:
        (Balance): A new balance object containing a single Money value in
        the specified currency.
    """
    total = Money(currency=to_currency)
    for money in self._money_obs:
        total = total + converter.convert(money, to_currency)
    return Balance([total])
383,256
Returns a specified invitation backend Args: backend: dotted path to the invitation backend class namespace: URL namespace to use Returns: an instance of an InvitationBackend
def invitation_backend(backend=None, namespace=None):
    # type: (Optional[Text], Optional[Text]) -> BaseBackend
    """Return an instance of the specified invitation backend.

    Args:
        backend: Dotted path to the invitation backend class; falls back to
            ``ORGS_INVITATION_BACKEND``.
        namespace: URL namespace to use.

    Returns:
        An instance of an InvitationBackend.
    """
    path = backend or ORGS_INVITATION_BACKEND
    module_path, class_name = path.rsplit(".", 1)
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
384,181
Returns a specified registration backend Args: backend: dotted path to the registration backend class namespace: URL namespace to use Returns: an instance of a RegistrationBackend
def registration_backend(backend=None, namespace=None):
    # type: (Optional[Text], Optional[Text]) -> BaseBackend
    """Return an instance of the specified registration backend.

    Args:
        backend: Dotted path to the registration backend class; falls back
            to ``ORGS_REGISTRATION_BACKEND``.
        namespace: URL namespace to use.

    Returns:
        An instance of a RegistrationBackend.
    """
    path = backend or ORGS_REGISTRATION_BACKEND
    module_path, class_name = path.rsplit(".", 1)
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
384,182
Primary interface method by which one user invites another to join Args: email: address of the user to be invited user: the user issuing the invitation organization: the organization the invitee is asked to join **kwargs: additional options passed through Returns: an invitation instance Raises: MultipleObjectsReturned if multiple matching users are found
def invite_by_email(self, email, user, organization, **kwargs):
    # type: (Text, AbstractUser, AbstractBaseOrganization) -> OrganizationInvitationBase
    """Primary interface method by which one user invites another to join.

    Args:
        email: Address of the user to invite.
        user: The inviting user.
        organization: The organization the invitee is asked to join.
        **kwargs: Unused extra options.

    Returns:
        An invitation instance.

    Raises:
        MultipleObjectsReturned: If multiple matching users are found.
    """
    try:
        invitee = self.user_model.objects.get(email__iexact=email)
    except self.user_model.DoesNotExist:
        invitee = None
    # TODO allow sending just the OrganizationUser instance
    invitation = self.invitation_model.objects.create(
        invitee=invitee,
        invitee_identifier=email.lower(),
        invited_by=user,
        organization=organization,
    )
    self.send_invitation(invitation)
    return invitation
384,198
Sends an invitation message for a specific invitation. This could be overridden to do other things, such as sending a confirmation email to the sender. Args: invitation: Returns:
def send_invitation(self, invitation, **kwargs):
    # type: (OrganizationInvitationBase) -> bool
    """Send an invitation message for a specific invitation.

    Could be overridden to do other things, such as sending a confirmation
    email to the sender.

    Args:
        invitation: The invitation to send.

    Returns:
        The result of the message's ``send()`` call.
    """
    message = self.email_message(
        invitation.invitee_identifier,
        self.invitation_subject,
        self.invitation_body,
        invitation.invited_by,
        **kwargs
    )
    return message.send()
384,199
Updates the `invitee` value and saves the instance Provided as a way of extending the behavior. Args: user: the newly created user Returns: the linking organization user
def activate(self, user):
    """Update the ``invitee`` value and save the instance.

    Provided as a way of extending the behavior.

    Args:
        user: The newly created user.

    Returns:
        The linking organization user.
    """
    extra = self.activation_kwargs()
    org_user = self.organization.add_user(user, **extra)
    self.invitee = user
    self.save()
    return org_user
384,210
Read a GDAL file. Opens any file GDAL can read, selects the first raster band, and loads it and its metadata into a RichDEM array of the appropriate data type. If you need to do something more complicated, look at the source of this function. Args: filename (str): Name of the raster file to open no_data (float): Optionally, set the no_data value to this. Returns: A RichDEM array
def LoadGDAL(filename, no_data=None):
    """Read a GDAL file.

    Opens any file GDAL can read, selects the first raster band, and loads
    it and its metadata into a RichDEM array of the appropriate data type.

    Args:
        filename (str): Name of the raster file to open.
        no_data (float): Optionally, set the no_data value to this.

    Returns:
        A RichDEM array.

    Raises:
        Exception: If GDAL is unavailable, the band type is unsupported, or
            no NoData value is available.
    """
    if not GDAL_AVAILABLE:
        raise Exception("richdem.LoadGDAL() requires GDAL.")

    allowed_types = {
        gdal.GDT_Byte, gdal.GDT_Int16, gdal.GDT_Int32, gdal.GDT_UInt16,
        gdal.GDT_UInt32, gdal.GDT_Float32, gdal.GDT_Float64,
    }

    # Read in data
    src_ds = gdal.Open(filename)
    srcband = src_ds.GetRasterBand(1)

    # Fix: validate the band type *before* reading the raster into memory;
    # previously the (possibly huge) array was loaded and then discarded.
    if srcband.DataType not in allowed_types:
        raise Exception("This datatype is not supported. Please file a bug report on RichDEM.")

    if no_data is None:
        no_data = srcband.GetNoDataValue()
        if no_data is None:
            # Fix: corrected typo "If should" -> "It should" in the message.
            raise Exception(
                "The source data did not have a NoData value. Please use the "
                "no_data argument to specify one. It should not be equal to "
                "any of the actual data values. If you are using all possible "
                "data values, then the situation is pretty hopeless - sorry.")

    srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)

    # raster_srs = osr.SpatialReference()
    # raster_srs.ImportFromWkt(raster.GetProjectionRef())

    srcdata.projection = src_ds.GetProjectionRef()
    srcdata.geotransform = src_ds.GetGeoTransform()
    srcdata.metadata = dict(src_ds.GetMetadata())

    _AddAnalysis(srcdata, "LoadGDAL(filename={0}, no_data={1})".format(filename, no_data))

    return srcdata
384,581
Save a GDAL file. Saves a RichDEM array to a data file in GeoTIFF format. If you need to do something more complicated, look at the source of this function. Args: filename (str): Name of the raster file to be created rda (rdarray): Data to save. Returns: No Return
def SaveGDAL(filename, rda):
    """Save a RichDEM array to a GeoTIFF data file.

    Args:
        filename (str): Name of the raster file to be created.
        rda (rdarray): Data to save.

    Returns:
        No Return.

    Raises:
        Exception: If ``rda`` is not an rdarray or GDAL is unavailable.
    """
    if type(rda) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if not GDAL_AVAILABLE:
        raise Exception("richdem.SaveGDAL() requires GDAL.")

    driver = gdal.GetDriverByName('GTiff')
    data_type = gdal.GDT_Float32  # TODO: derive from rda.dtype instead of forcing float32
    data_set = driver.Create(
        filename, xsize=rda.shape[1], ysize=rda.shape[0], bands=1, eType=data_type)
    data_set.SetGeoTransform(rda.geotransform)
    data_set.SetProjection(rda.projection)

    band = data_set.GetRasterBand(1)
    band.SetNoDataValue(rda.no_data)
    band.WriteArray(np.array(rda))
    for k, v in rda.metadata.items():
        data_set.SetMetadataItem(str(k), str(v))

    # Fix: explicitly flush so the file is fully written to disk; previously
    # the write-out relied on the dataset object being garbage collected.
    data_set.FlushCache()
384,582
Breaches all depressions in a DEM. Args: dem (rdarray): An elevation model in_place (bool): If True, the DEM is modified in place and there is no return; otherwise, a new, altered DEM is returned. topology (string): A topology indicator Returns: DEM without depressions.
def BreachDepressions(dem, in_place=False, topology='D8'):
    """Breach all depressions in a DEM.

    Args:
        dem (rdarray): An elevation model.
        in_place (bool): If True, the DEM is modified in place and there is
            no return; otherwise, a new, altered DEM is returned.
        topology (string): A topology indicator ('D8' or 'D4').

    Returns:
        DEM without depressions (only when ``in_place`` is False).
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if topology not in ('D8', 'D4'):
        raise Exception("Unknown topology!")

    if not in_place:
        dem = dem.copy()

    _AddAnalysis(dem, "BreachDepressions(dem)")

    wrapped = dem.wrap()
    if topology == 'D8':
        _richdem.rdBreachDepressionsD8(wrapped)
    elif topology == 'D4':
        _richdem.rdBreachDepressionsD4(wrapped)
    dem.copyFromWrapped(wrapped)

    if not in_place:
        return dem
384,584
Attempts to resolve flats by imposing a local gradient Args: dem (rdarray): An elevation model in_place (bool): If True, the DEM is modified in place and there is no return; otherwise, a new, altered DEM is returned. Returns: DEM modified such that all flats drain.
def ResolveFlats(dem, in_place=False):
    """Attempt to resolve flats by imposing a local gradient.

    Args:
        dem (rdarray): An elevation model.
        in_place (bool): If True, the DEM is modified in place and there is
            no return; otherwise, a new, altered DEM is returned.

    Returns:
        DEM modified such that all flats drain (only when ``in_place`` is
        False).
    """
    if type(dem) is not rdarray:
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")

    if not in_place:
        dem = dem.copy()

    _AddAnalysis(dem, "ResolveFlats(dem, in_place={in_place})".format(in_place=in_place))

    wrapped = dem.wrap()
    _richdem.rdResolveFlatsEpsilon(wrapped)
    dem.copyFromWrapped(wrapped)

    if not in_place:
        return dem
384,585
Evaluate a polynomial along specified axes. Args: poly (Poly): Input polynomial. args (numpy.ndarray): Argument to be evaluated. Masked values keeps the variable intact. Returns: (Poly, numpy.ndarray): If masked values are used the Poly is returned. Else an numpy array matching the polynomial's shape is returned.
def call(poly, args): args = list(args) # expand args to match dim if len(args) < poly.dim: args = args + [np.nan]*(poly.dim-len(args)) elif len(args) > poly.dim: raise ValueError("too many arguments") # Find and perform substitutions, if any x0, x1 = [], [] for idx, arg in enumerate(args): if isinstance(arg, Poly): poly_ = Poly({ tuple(np.eye(poly.dim)[idx]): np.array(1) }) x0.append(poly_) x1.append(arg) args[idx] = np.nan if x0: poly = call(poly, args) return substitute(poly, x0, x1) # Create masks masks = np.zeros(len(args), dtype=bool) for idx, arg in enumerate(args): if np.ma.is_masked(arg) or np.any(np.isnan(arg)): masks[idx] = True args[idx] = 0 shape = np.array( args[ np.argmax( [np.prod(np.array(arg).shape) for arg in args] ) ] ).shape args = np.array([np.ones(shape, dtype=int)*arg for arg in args]) A = {} for key in poly.keys: key_ = np.array(key)*(1-masks) val = np.outer(poly.A[key], np.prod((args.T**key_).T, \ axis=0)) val = np.reshape(val, poly.shape + tuple(shape)) val = np.where(val != val, 0, val) mkey = tuple(np.array(key)*(masks)) if not mkey in A: A[mkey] = val else: A[mkey] = A[mkey] + val out = Poly(A, poly.dim, None, None) if out.keys and not np.sum(out.keys): out = out.A[out.keys[0]] elif not out.keys: out = np.zeros(out.shape, dtype=out.dtype) return out
385,168
Check if a polynomial (array) is on component form. Args: P (Poly): Input data. Returns: (bool): True if all polynomials in ``P`` are on component form. Examples: >>> x,y = cp.variable(2) >>> print(cp.is_decomposed(cp.Poly([1,x,x*y]))) True >>> print(cp.is_decomposed(cp.Poly([x+1,x*y]))) False
def is_decomposed(P):
    """Check if a polynomial (array) is on component form.

    Args:
        P (Poly): Input data.

    Returns:
        (bool): True if all polynomials in ``P`` are on component form,
        i.e. each contains at most one exponent key.
    """
    if P.shape:
        return min(is_decomposed(poly) for poly in P)
    return len(P.keys) <= 1
385,170
Distribution multiplication. Args: left (Dist, numpy.ndarray) : left hand side. right (Dist, numpy.ndarray) : right hand side.
def mul(left, right):
    """Distribution multiplication.

    Args:
        left (Dist, numpy.ndarray): Left hand side.
        right (Dist, numpy.ndarray): Right hand side.

    Returns:
        (Dist): ``Mul`` for one-dimensional operands, ``MvMul`` otherwise.
    """
    from .mv_mul import MvMul
    # Bug fix: compare the operands' *lengths*; ``max(left, right)``
    # compared a Dist against an array, which is not a dimension count.
    length = max(len(left), len(right))
    if length == 1:
        return Mul(left, right)
    return MvMul(left, right)
385,177
Hermite Genz-Keister 22 rule. Args: order (int): The quadrature order. Must be in the interval (0, 8). Returns: (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Abscissas and weights Examples: >>> abscissas, weights = quad_genz_keister_22(1) >>> print(numpy.around(abscissas, 4)) [-1.7321 0. 1.7321] >>> print(numpy.around(weights, 4)) [0.1667 0.6667 0.1667]
def quad_genz_keister_22(order):
    """Hermite Genz-Keister 22 rule.

    Args:
        order (int): The quadrature order. Must be in the interval (0, 8).

    Returns:
        (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
            Abscissas and weights.
    """
    # Map the requested order onto the nearest tabulated rule size.
    rule = sorted(GENZ_KEISTER_22.keys())[order]
    abscissas, weights = GENZ_KEISTER_22[rule]
    weights = numpy.array(weights)
    weights = weights/numpy.sum(weights)
    abscissas = numpy.array(abscissas)*numpy.sqrt(2)
    return abscissas, weights
385,197
Chebyshev sampling function. Args: order (int): The number of samples to create along each axis. dim (int): The number of dimensions to create samples for. Returns: samples following Chebyshev sampling scheme mapped to the ``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.
def create_chebyshev_samples(order, dim=1):
    """Chebyshev sampling function.

    Args:
        order (int): The number of samples to create along each axis.
        dim (int): The number of dimensions to create samples for.

    Returns:
        Samples following the Chebyshev sampling scheme mapped to the
        ``[0, 1]^dim`` hyper-cube with ``shape == (dim, order)``.
    """
    angles = numpy.arange(order, 0, -1)*numpy.pi/(order+1)
    nodes = .5*numpy.cos(angles) + .5
    combined = chaospy.quad.combine([nodes]*dim)
    return combined.T
385,241
Generate the upper and lower bounds of a distribution. Args: x_data (numpy.ndarray) : The bounds might vary over the sample space. By providing x_data you can specify where in the space the bound should be taken. If omitted, a (pseudo-)random sample is used. Returns: (numpy.ndarray): The lower (out[0]) and upper (out[1]) bound where out.shape=(2,)+x_data.shape
def range(self, x_data=None):
    """Generate the upper and lower bounds of a distribution.

    Args:
        x_data (numpy.ndarray): The bounds might vary over the sample
            space. By providing x_data you can specify where in the space
            the bound should be taken. If omitted, a (pseudo-)random
            sample is used.

    Returns:
        (numpy.ndarray): The lower (out[0]) and upper (out[1]) bound where
        out.shape=(2,)+x_data.shape
    """
    if x_data is None:
        try:
            # median point as a representative interior location
            x_data = evaluation.evaluate_inverse(
                self, numpy.array([[0.5]]*len(self)))
        except StochasticallyDependentError:
            x_data = approximation.find_interior_point(self)
        shape = (len(self),)
        if hasattr(self, "_range"):
            # distribution provides its own bound implementation
            return self._range(x_data, {})
    else:
        x_data = numpy.asfarray(x_data)
        shape = x_data.shape
        x_data = x_data.reshape(len(self), -1)
    q_data = evaluation.evaluate_bound(self, x_data)
    q_data = q_data.reshape((2,)+shape)
    return q_data
385,277
Forward Rosenblatt transformation. Args: x_data (numpy.ndarray): Location for the distribution function. ``x_data.shape`` must be compatible with distribution shape. Returns: (numpy.ndarray): Evaluated distribution function values, where ``out.shape==x_data.shape``.
def fwd(self, x_data):
    """Forward Rosenblatt transformation.

    Args:
        x_data (numpy.ndarray): Location for the distribution function.
            ``x_data.shape`` must be compatible with distribution shape.

    Returns:
        (numpy.ndarray): Evaluated distribution function values, where
        ``out.shape==x_data.shape``.
    """
    x_data = numpy.asfarray(x_data)
    shape = x_data.shape
    x_data = x_data.reshape(len(self), -1)

    lower, upper = evaluation.evaluate_bound(self, x_data)
    q_data = numpy.zeros(x_data.shape)
    # Above the support the CDF saturates at 1; below it stays 0.
    above = x_data > upper
    q_data[above] = 1
    inside = ~above & (x_data >= lower)
    q_data[inside] = numpy.clip(
        evaluation.evaluate_forward(self, x_data), a_min=0, a_max=1)[inside]
    return q_data.reshape(shape)
385,278
Three terms relation's coefficient generator Args: k (numpy.ndarray, int): The order of the coefficients. acc (int): Accuracy of discretized Stieltjes if analytical methods are unavailable. Returns: (Recurrence coefficients): Where out[0] is the first (A) and out[1] is the second coefficient With ``out.shape==(2,)+k.shape``.
def ttr(self, kloc, acc=10**3, verbose=1):
    """Three-term recurrence relation's coefficient generator.

    Args:
        kloc (numpy.ndarray, int): The order of the coefficients.
        acc (int): Accuracy of discretized Stieltjes if analytical methods
            are unavailable.
        verbose (int): Verbosity level.

    Returns:
        (numpy.ndarray): Recurrence coefficients where out[0] is the first
        (A) and out[1] is the second coefficient, with
        ``out.shape==(2,)+k.shape``.
    """
    kloc = numpy.asarray(kloc, dtype=int)
    shape = kloc.shape
    kloc = kloc.reshape(len(self), -1)
    # Fix: removed the unused ``cache = {}`` local.
    # NOTE(review): ``acc`` and ``verbose`` are accepted for backward
    # compatibility but are not forwarded to the evaluation call.
    coeffs = numpy.array([
        evaluation.evaluate_recurrence_coefficients(self, k)
        for k in kloc.T
    ]).T
    return coeffs.reshape((2,)+shape)
385,284
Flatten a shapeable quantity. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Shapeable input quantity. Returns: (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari`` with `len(Q.shape)==1`. Examples: >>> P = chaospy.reshape(chaospy.prange(4), (2,2)) >>> print(P) [[1, q0], [q0^2, q0^3]] >>> print(chaospy.flatten(P)) [1, q0, q0^2, q0^3]
def flatten(vari):
    """Flatten a shapeable quantity.

    Args:
        vari (chaospy.poly.base.Poly, numpy.ndarray): Shapeable input
            quantity.

    Returns:
        (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari`` with
        ``len(Q.shape)==1``.
    """
    if not isinstance(vari, Poly):
        return numpy.array(vari).flatten()
    size = int(numpy.prod(vari.shape))
    return reshape(vari, (size,))
385,302
Roll the specified axis backwards, until it lies in a given position. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Input array or polynomial. axis (int): The axis to roll backwards. The positions of the other axes do not change relative to one another. start (int): The axis is rolled until it lies before thes position.
def rollaxis(vari, axis, start=0):
    """Roll the specified axis backwards, until it lies in a given position.

    Args:
        vari (chaospy.poly.base.Poly, numpy.ndarray): Input array or
            polynomial.
        axis (int): The axis to roll backwards. The positions of the other
            axes do not change relative to one another.
        start (int): The axis is rolled until it lies before this position.
    """
    if not isinstance(vari, Poly):
        return numpy.rollaxis(vari, axis, start)
    # Roll each coefficient array independently.
    rolled = {key: rollaxis(vari.A[key], axis, start) for key in vari.keys}
    return Poly(rolled, vari.dim, None, vari.dtype)
385,304
Transpose a shapeable quantity. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Quantity of interest. Returns: (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari``. Examples: >>> P = chaospy.reshape(chaospy.prange(4), (2,2)) >>> print(P) [[1, q0], [q0^2, q0^3]] >>> print(chaospy.transpose(P)) [[1, q0^2], [q0, q0^3]]
def transpose(vari):
    """Transpose a shapeable quantity.

    Args:
        vari (chaospy.poly.base.Poly, numpy.ndarray): Quantity of interest.

    Returns:
        (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari``.
    """
    if not isinstance(vari, Poly):
        return numpy.transpose(vari)
    # Transpose each coefficient array; the shape is simply reversed.
    core = {key: transpose(vari.A[key]) for key in vari.keys}
    return Poly(core, vari.dim, vari.shape[::-1], vari.dtype)
385,307
Create samples from a regular grid. Args: order (int): The order of the grid. Defines the number of samples. dim (int): The number of dimensions in the grid Returns (numpy.ndarray): Regular grid with ``shape == (dim, order)``.
def create_grid_samples(order, dim=1):
    """Create samples from a regular grid.

    Args:
        order (int): The order of the grid. Defines the number of samples.
        dim (int): The number of dimensions in the grid.

    Returns:
        (numpy.ndarray): Regular grid with ``shape == (dim, order)``.
    """
    nodes = numpy.arange(1, order+1)/(order+1.)
    return chaospy.quad.combine([nodes]*dim).T
385,326
Smolyak sparse grid constructor. Args: func (:py:data:typing.Callable): Function that takes a single argument ``order`` of type ``numpy.ndarray`` and with ``order.shape = (dim,)`` order (int, numpy.ndarray): The order of the grid. If ``numpy.ndarray``, it overrides both ``dim`` and ``skew``. dim (int): Number of dimension. skew (list): Order skewness.
def sparse_grid(func, order, dim=None, skew=None):
    """Smolyak sparse grid constructor.

    Args:
        func (:py:data:typing.Callable): Function that takes a single
            argument ``order`` of type ``numpy.ndarray`` and with
            ``order.shape = (dim,)``.
        order (int, numpy.ndarray): The order of the grid. If
            ``numpy.ndarray``, it overrides both ``dim`` and ``skew``.
        dim (int): Number of dimensions.
        skew (list): Order skewness.
    """
    # Anisotropic case: normalize to the smallest order plus a skew vector.
    if not isinstance(order, int):
        orders = numpy.array(order).flatten()
        dim = orders.size
        m_order = int(numpy.min(orders))
        skew = [order-m_order for order in orders]
        return sparse_grid(func, m_order, dim, skew)
    abscissas, weights = [], []
    bindex = chaospy.bertran.bindex(order-dim+1, order, dim)
    if skew is None:
        skew = numpy.zeros(dim, dtype=int)
    else:
        skew = numpy.array(skew, dtype=int)
        assert len(skew) == dim
    # Smolyak combination: weight each tensor rule by its inclusion-
    # exclusion coefficient.
    for idx in range(
            chaospy.bertran.terms(order, dim)
            - chaospy.bertran.terms(order-dim, dim)):
        idb = bindex[idx]
        abscissa, weight = func(skew+idb)
        weight *= (-1)**(order-sum(idb))*comb(dim-1, order-sum(idb))
        abscissas.append(abscissa)
        weights.append(weight)
    abscissas = numpy.concatenate(abscissas, 1)
    weights = numpy.concatenate(weights, 0)
    # Round to suppress floating-point jitter, then sort nodes
    # lexicographically so duplicates become adjacent.
    abscissas = numpy.around(abscissas, 15)
    order = numpy.lexsort(tuple(abscissas))
    abscissas = abscissas.T[order].T
    weights = weights[order]
    # identify non-unique terms
    diff = numpy.diff(abscissas.T, axis=0)
    unique = numpy.ones(len(abscissas.T), bool)
    unique[1:] = (diff != 0).any(axis=1)
    # merge duplicate nodes: sum the weights of each run of duplicates
    # into the first occurrence.
    length = len(weights)
    idx = 1
    while idx < length:
        while idx < length and unique[idx]:
            idx += 1
        idy = idx+1
        while idy < length and not unique[idy]:
            idy += 1
        if idy-idx > 1:
            weights[idx-1] = numpy.sum(weights[idx-1:idy])
        idx = idy+1
    abscissas = abscissas[:, unique]
    weights = weights[unique]
    return abscissas, weights
385,431
Inner product of a polynomial set. Args: args (chaospy.poly.base.Poly): The polynomials to perform inner product on. Returns: (chaospy.poly.base.Poly): Resulting polynomial. Examples: >>> x,y = cp.variable(2) >>> P = cp.Poly([x-1, y]) >>> Q = cp.Poly([x+1, x*y]) >>> print(cp.inner(P, Q)) q0^2+q0q1^2-1 >>> x = numpy.arange(4) >>> print(cp.inner(x, x)) 14
def inner(*args):
    """Inner product of a polynomial set.

    Args:
        args (chaospy.poly.base.Poly): The polynomials to perform the inner
            product on.

    Returns:
        (chaospy.poly.base.Poly): Resulting polynomial.
    """
    # Pure numpy path when no polynomials are involved.
    if not any(isinstance(arg, Poly) for arg in args):
        return numpy.sum(numpy.prod(args, 0), 0)
    product = args[0]
    for arg in args[1:]:
        product = product * arg
    return sum(product)
385,439
Dot product of polynomial vectors. Args: poly1 (Poly) : left part of product. poly2 (Poly) : right part of product. Returns: (Poly) : product of poly1 and poly2. Examples: >>> poly = cp.prange(3, 1) >>> print(poly) [1, q0, q0^2] >>> print(cp.dot(poly, numpy.arange(3))) 2q0^2+q0 >>> print(cp.dot(poly, poly)) q0^4+q0^2+1
def dot(poly1, poly2):
    """Dot product of polynomial vectors.

    Args:
        poly1 (Poly): Left part of product.
        poly2 (Poly): Right part of product.

    Returns:
        (Poly): Product of poly1 and poly2.
    """
    if not isinstance(poly1, Poly) and not isinstance(poly2, Poly):
        return numpy.dot(poly1, poly2)
    poly1, poly2 = Poly(poly1), Poly(poly2)
    product = poly1*poly2
    # Scalars have nothing to sum over.
    if numpy.prod(poly1.shape) <= 1 or numpy.prod(poly2.shape) <= 1:
        return product
    return chaospy.poly.sum(product, 0)
385,441
Hermite Genz-Keister 16 rule. Args: order (int): The quadrature order. Must be in the interval (0, 8). Returns: (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Abscissas and weights Examples: >>> abscissas, weights = quad_genz_keister_16(1) >>> print(numpy.around(abscissas, 4)) [-1.7321 0. 1.7321] >>> print(numpy.around(weights, 4)) [0.1667 0.6667 0.1667]
def quad_genz_keister_16(order):
    """Hermite Genz-Keister 16 rule.

    Args:
        order (int): The quadrature order. Must be in the interval (0, 8).

    Returns:
        (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
            Abscissas and weights.
    """
    # Map the requested order onto the nearest tabulated rule size.
    rule = sorted(GENZ_KEISTER_16.keys())[order]
    abscissas, weights = GENZ_KEISTER_16[rule]
    weights = numpy.array(weights)
    weights = weights/numpy.sum(weights)
    abscissas = numpy.array(abscissas)*numpy.sqrt(2)
    return abscissas, weights
385,442
Hermite Genz-Keister 18 rule. Args: order (int): The quadrature order. Must be in the interval (0, 8). Returns: (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Abscissas and weights Examples: >>> abscissas, weights = quad_genz_keister_18(1) >>> print(numpy.around(abscissas, 4)) [-1.7321 0. 1.7321] >>> print(numpy.around(weights, 4)) [0.1667 0.6667 0.1667]
def quad_genz_keister_18(order):
    """Hermite Genz-Keister 18 rule.

    Args:
        order (int): The quadrature order. Must be in the interval (0, 8).

    Returns:
        (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
            Abscissas and weights.
    """
    # Map the requested order onto the nearest tabulated rule size.
    rule = sorted(GENZ_KEISTER_18.keys())[order]
    abscissas, weights = GENZ_KEISTER_18[rule]
    weights = numpy.array(weights)
    weights = weights/numpy.sum(weights)
    abscissas = numpy.array(abscissas)*numpy.sqrt(2)
    return abscissas, weights
385,457
Convert polynomial array into a numpy.asarray of polynomials. Args: vari (Poly, numpy.ndarray): Input data. Returns: (numpy.ndarray): A numpy array with ``Q.shape==A.shape``. Examples: >>> poly = cp.prange(3) >>> print(poly) [1, q0, q0^2] >>> array = cp.toarray(poly) >>> print(isinstance(array, numpy.ndarray)) True >>> print(array[1]) q0
def toarray(vari):
    """Convert a polynomial array into a numpy.ndarray of polynomials.

    Args:
        vari (Poly, numpy.ndarray): Input data.

    Returns:
        (numpy.ndarray): A numpy array with ``Q.shape==A.shape``; each
        element is a scalar Poly.
    """
    if isinstance(vari, Poly):
        shape = vari.shape
        # One coefficient dict per flattened element.
        out = numpy.asarray(
            [{} for _ in range(numpy.prod(shape))],
            dtype=object
        )
        core = vari.A.copy()
        for key in core.keys():
            core[key] = core[key].flatten()
            # Distribute non-zero coefficients to their element dicts.
            for i in range(numpy.prod(shape)):
                if not numpy.all(core[key][i] == 0):
                    out[i][key] = core[key][i]
        for i in range(numpy.prod(shape)):
            out[i] = Poly(out[i], vari.dim, (), vari.dtype)
        out = out.reshape(shape)
        return out
    return numpy.asarray(vari)
385,466
Van der Corput samples. Args: idx (int, numpy.ndarray): The index of the sequence. If an array is provided, all values in the array are returned. number_base (int): The numerical base from which to create the samples. Returns (float, numpy.ndarray): Van der Corput samples.
def create_van_der_corput_samples(idx, number_base=2):
    """Van der Corput samples.

    Args:
        idx (int, numpy.ndarray): The index of the sequence. If an array is
            provided, all values in the array are returned.
        number_base (int): The numerical base from which to create the
            samples.

    Returns:
        (numpy.ndarray): Van der Corput samples.
    """
    assert number_base > 1
    # Sequence is 1-indexed internally.
    indices = numpy.asarray(idx).flatten() + 1
    samples = numpy.zeros(len(indices), dtype=float)
    divisor = float(number_base)
    remaining = numpy.ones(len(indices), dtype=bool)
    while numpy.any(remaining):
        # Reverse the digits of the index across the radix point.
        samples[remaining] += (indices[remaining] % number_base)/divisor
        indices //= number_base
        divisor *= number_base
        remaining = indices > 0
    return samples
385,516
Gradient of a polynomial. Args: poly (Poly) : polynomial to take gradient of. Returns: (Poly) : The resulting gradient. Examples: >>> q0, q1, q2 = chaospy.variable(3) >>> poly = 2*q0 + q1*q2 >>> print(chaospy.gradient(poly)) [2, q2, q1]
def gradient(poly):
    """Gradient of a polynomial.

    Args:
        poly (Poly): Polynomial to take the gradient of.

    Returns:
        (Poly): The resulting gradient.
    """
    # Differentiate against every first-order basis variable.
    variables = chaospy.poly.collection.basis(1, 1, poly.dim)
    return differential(poly, variables)
385,555
Create Korobov lattice samples. Args: order (int): The order of the Korobov latice. Defines the number of samples. dim (int): The number of dimensions in the output. base (int): The number based used to calculate the distribution of values. Returns (numpy.ndarray): Korobov lattice with ``shape == (dim, order)``
def create_korobov_samples(order, dim, base=17797):
    """Create Korobov lattice samples.

    Args:
        order (int): The order of the Korobov lattice. Defines the number
            of samples.
        dim (int): The number of dimensions in the output.
        base (int): The number base used to calculate the distribution of
            values.

    Returns:
        (numpy.ndarray): Korobov lattice with ``shape == (dim, order)``.
    """
    # Per-dimension multipliers: successive powers of ``base`` mod order+1.
    multipliers = numpy.empty(dim)
    multipliers[0] = 1
    for axis in range(1, dim):
        multipliers[axis] = base*multipliers[axis-1] % (order+1)
    axes, steps = numpy.mgrid[:dim, :order+1]
    lattice = multipliers[axes] * (steps+1) / (order+1.) % 1.
    return lattice[:, :order]
385,563
Probabilistic collocation method. Args: order (int, numpy.ndarray) : Quadrature order along each axis. dist (Dist) : Distribution to generate samples from. subset (float) : Rate at which samples are removed.
def probabilistic_collocation(order, dist, subset=.1):
    """Probabilistic collocation method.

    Args:
        order (int, numpy.ndarray): Quadrature order along each axis.
        dist (Dist): Distribution to generate samples from.
        subset (float): Rate at which samples are removed.

    Returns:
        Tuple of retained abscissas and weights.
    """
    abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist)
    likelihood = dist.pdf(abscissas)
    # Randomly keep nodes with probability proportional to their likelihood.
    draws = numpy.random.random(len(weights))
    keep = likelihood > draws*subset*numpy.max(likelihood)
    return abscissas.T[keep].T, weights[keep]
385,569
Hermite Genz-Keister 24 rule. Args: order (int): The quadrature order. Must be in the interval (0, 8). Returns: (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]): Abscissas and weights Examples: >>> abscissas, weights = quad_genz_keister_24(1) >>> print(numpy.around(abscissas, 4)) [-1.7321 0. 1.7321] >>> print(numpy.around(weights, 4)) [0.1667 0.6667 0.1667]
def quad_genz_keister_24(order):
    """Hermite Genz-Keister 24 rule.

    Args:
        order (int): The quadrature order. Must be in the interval (0, 8).

    Returns:
        (:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
            Abscissas and weights.
    """
    # Map the requested order onto the nearest tabulated rule size.
    rule = sorted(GENZ_KEISTER_24.keys())[order]
    abscissas, weights = GENZ_KEISTER_24[rule]
    weights = numpy.array(weights)
    weights = weights/numpy.sum(weights)
    abscissas = numpy.array(abscissas)*numpy.sqrt(2)
    return abscissas, weights
385,579
Latin Hypercube sampling. Args: order (int): The order of the latin hyper-cube. Defines the number of samples. dim (int): The number of dimensions in the latin hyper-cube. Returns (numpy.ndarray): Latin hyper-cube with ``shape == (dim, order)``.
def create_latin_hypercube_samples(order, dim=1):
    """Latin Hypercube sampling.

    Args:
        order (int): The order of the latin hyper-cube. Defines the number
            of samples.
        dim (int): The number of dimensions in the latin hyper-cube.

    Returns:
        (numpy.ndarray): Latin hyper-cube with ``shape == (dim, order)``.
    """
    uniform = numpy.random.random(order*dim).reshape((dim, order))
    for axis in range(dim):
        # One sample per stratum, in shuffled order.
        shuffle = numpy.random.permutation(order)  # pylint: disable=no-member
        uniform[axis] = (shuffle + uniform[axis])/order
    return uniform
385,604
Generate prime values using sieve of Eratosthenes method. Args: threshold (int): The upper bound for the size of the prime values. Returns (List[int]): All primes from 2 and up to ``threshold``.
def create_primes(threshold):
    """Generate prime values using the sieve of Eratosthenes method.

    Args:
        threshold (int): The upper bound for the size of the prime values.

    Returns:
        (List[int]): All primes from 2 up to ``threshold``.
    """
    if threshold < 2:
        return []
    if threshold == 2:
        return [2]
    # Sieve only over the odd candidates 3, 5, 7, ...; zero marks composite.
    candidates = list(range(3, threshold+1, 2))
    limit = threshold ** 0.5
    half = int((threshold+1)/2-1)
    idx = 0
    factor = 3
    while factor <= limit:
        if candidates[idx]:
            # Start striking out at factor^2; smaller multiples were
            # already handled by smaller factors.
            pos = int((factor*factor-3)/2)
            candidates[pos] = 0
            while pos < half:
                candidates[pos] = 0
                pos += factor
        idx += 1
        factor = 2*idx+3
    return [2] + [value for value in candidates if value]
385,606
Constructor. Args: left (Dist, numpy.ndarray) : Left hand side. right (Dist, numpy.ndarray) : Right hand side.
def __init__(self, left, right):
    """Constructor.

    Args:
        left (Dist, numpy.ndarray): Left hand side.
        right (Dist, numpy.ndarray): Right hand side.

    Raises:
        StochasticallyDependentError: If either operand is a multivariate
            distribution with internal dependencies.
    """
    # Reject multivariate operands that carry internal dependencies;
    # left is validated before right, matching the original order.
    for side in (left, right):
        if isinstance(side, Dist) and len(side) > 1:
            if (not isinstance(side, J) or
                    evaluation.get_dependencies(*list(side.inverse_map))):
                raise StochasticallyDependentError(
                    "Joint distribution with dependencies not supported.")
    assert isinstance(left, Dist) or isinstance(right, Dist)
    Dist.__init__(self, left=left, right=right)
385,618
Polynomial differential operator. Args: P (Poly): Polynomial to be differentiated. Q (Poly): Polynomial to differentiate by. Must be decomposed. If polynomial array, the output is the Jacobian matrix.
def differential(P, Q):
    """Polynomial differential operator.

    Args:
        P (Poly): Polynomial to be differentiated.
        Q (Poly): Polynomial to differentiate by. Must be decomposed. If a
            polynomial array, the output is the Jacobian matrix.

    Returns:
        (Poly): The derivative of ``P`` with respect to ``Q``.
    """
    P, Q = Poly(P), Poly(Q)

    if not chaospy.poly.is_decomposed(Q):
        # Bug fix: the decomposed result was previously computed and then
        # discarded; differentiate term-by-term and sum the contributions.
        return differential(P, chaospy.poly.decompose(Q)).sum(0)

    if Q.shape:
        return Poly([differential(P, q) for q in Q])

    if Q.dim > P.dim:
        P = chaospy.poly.setdim(P, Q.dim)
    else:
        Q = chaospy.poly.setdim(Q, P.dim)

    qkey = Q.keys[0]
    A = {}
    for key in P.keys:
        newkey = numpy.array(key) - numpy.array(qkey)
        if numpy.any(newkey < 0):
            # term vanishes under differentiation
            continue
        A[tuple(newkey)] = P.A[key]*numpy.prod([
            fac(key[i], exact=True)/fac(newkey[i], exact=True)
            for i in range(P.dim)])
    # Bug fix: the accumulator is ``A``; the original returned the
    # undefined name ``B``, raising NameError.
    return Poly(A, P.dim, P.shape, P.dtype)
385,627
Constructor to create a range of polynomials where the exponent vary. Args: N (int): Number of polynomials in the array. dim (int): The dimension the polynomial should span. Returns: (Poly): A polynomial array of length N containing simple polynomials with increasing exponent. Examples: >>> print(prange(4)) [1, q0, q0^2, q0^3] >>> print(prange(4, dim=3)) [1, q2, q2^2, q2^3]
def prange(N=1, dim=1):
    """Constructor to create a range of polynomials where the exponent vary.

    Args:
        N (int): Number of polynomials in the array.
        dim (int): The dimension the polynomial should span.

    Returns:
        (Poly): A polynomial array of length N containing simple polynomials
        with increasing exponent on the last dimension.
    """
    coeffs = {}
    positions = numpy.arange(N, dtype=int)
    exponent = numpy.zeros(dim, dtype=int)
    for power in range(N):
        exponent[-1] = power
        # Coefficient vector selecting exactly the ``power``-th entry.
        coeffs[tuple(exponent)] = 1*(positions == power)
    return Poly(coeffs, dim, (N,), int)
385,628
Roll the axes. Args: P (Poly) : Input polynomial. n (int) : The axis that after rolling becomes the 0th axis. Returns: (Poly) : Polynomial with new axis configuration. Examples: >>> x,y,z = variable(3) >>> P = x*x*x + y*y + z >>> print(P) q0^3+q1^2+q2 >>> print(rolldim(P)) q0^2+q2^3+q1
def rolldim(P, n=1):
    """Roll the axes.

    Args:
        P (Poly): Input polynomial.
        n (int): The axis that after rolling becomes the 0th axis.

    Returns:
        (Poly): Polynomial with new axis configuration.
    """
    # Rotate each exponent tuple by ``n`` positions.
    rotated = {key[n:] + key[:n]: P.A[key] for key in P.keys}
    return Poly(rotated, P.dim, P.shape, P.dtype)
385,629
Swap the dim between two variables. Args: P (Poly): Input polynomial. dim1 (int): First dim dim2 (int): Second dim. Returns: (Poly): Polynomial with swapped dimensions. Examples: >>> x,y = variable(2) >>> P = x**4-y >>> print(P) q0^4-q1 >>> print(swapdim(P)) q1^4-q0
def swapdim(P, dim1=1, dim2=0):
    """Swap two dimensions in a polynomial.

    Args:
        P (Poly): Input polynomial.
        dim1 (int): First dim.
        dim2 (int): Second dim.

    Returns:
        (Poly): Polynomial with swapped dimensions.

    Examples:
        >>> x,y = variable(2)
        >>> P = x**4-y
        >>> print(P)
        q0^4-q1
        >>> print(swapdim(P))
        q1^4-q0
    """
    # Plain arrays are delegated to numpy's axis swap.
    if not isinstance(P, Poly):
        return numpy.swapaxes(P, dim1, dim2)

    dim, shape, dtype = P.dim, P.shape, P.dtype

    if dim1 == dim2:
        return P

    # Grow the polynomial if it does not yet span the larger index.
    largest = max(dim1, dim2)
    if P.dim <= largest:
        P = chaospy.poly.dimension.setdim(P, largest+1)
        dim = largest+1

    swapped = {}
    for key in P.keys:
        coefficient = P.A[key]
        exponents = list(key)
        exponents[dim1], exponents[dim2] = exponents[dim2], exponents[dim1]
        swapped[tuple(exponents)] = coefficient

    return Poly(swapped, dim, shape, dtype)
385,630
Simple constructor to create single variables to create polynomials. Args: dims (int): Number of dimensions in the array. Returns: (Poly): Polynomial array with unit components in each dimension. Examples: >>> print(variable()) q0 >>> print(variable(3)) [q0, q1, q2]
def variable(dims=1):
    """Construct simple variables for creating polynomials.

    Args:
        dims (int): Number of dimensions in the array.

    Returns:
        (Poly): Polynomial array with unit components in each dimension.

    Examples:
        >>> print(variable())
        q0
        >>> print(variable(3))
        [q0, q1, q2]
    """
    if dims == 1:
        # Scalar case: single variable q0.
        return Poly({(1,): 1}, dim=1, shape=())
    # Vector case: exponent key i is the i-th unit vector, selecting q_i.
    core = {}
    for unit in numpy.eye(dims, dtype=int):
        core[tuple(unit)] = unit
    return Poly(core, dim=dims, shape=(dims,))
385,633
Convert the contents of a file from Markdown to reStructuredText. Returns the converted text as a Unicode string. Arguments: md_path: a path to a UTF-8 encoded Markdown file to convert. rst_temp_path: a temporary path to which to write the converted contents.
def convert_md_to_rst(md_path, rst_temp_path):
    """Convert the contents of a file from Markdown to reStructuredText.

    Returns the converted text as a Unicode string.

    Arguments:
      md_path: a path to a UTF-8 encoded Markdown file to convert.
      rst_temp_path: a temporary path to which to write the converted
        contents.
    """
    # Pandoc uses the UTF-8 character encoding for both input and output.
    command = "pandoc --write=rst --output=%s %s" % (rst_temp_path, md_path)
    print("converting with pandoc: %s to %s\n-->%s" % (md_path, rst_temp_path,
                                                       command))

    # Remove any stale output so we can detect whether pandoc actually ran.
    if os.path.exists(rst_temp_path):
        os.remove(rst_temp_path)

    os.system(command)

    if not os.path.exists(rst_temp_path):
        message = ("Error running: %s\n"
                   "  Did you install pandoc per the %s docstring?" % (command,
                                                                      __file__))
        sys.exit(message)

    return read(rst_temp_path)
386,352
Construct a template locator. Arguments: extension: the template file extension, without the leading dot. Pass False for no extension (e.g. to use extensionless template files). Defaults to the package default.
def __init__(self, extension=None):
    """Construct a template locator.

    Arguments:
      extension: the template file extension, without the leading dot.
        Pass False for no extension (e.g. to use extensionless template
        files).  Defaults to the package default.
    """
    self.template_extension = (defaults.TEMPLATE_EXTENSION
                               if extension is None else extension)
386,356
Generate and return the file name for the given template name. Arguments: template_extension: defaults to the instance's extension.
def make_file_name(self, template_name, template_extension=None):
    """Generate and return the file name for the given template name.

    Arguments:
      template_extension: defaults to the instance's extension.  Pass
        False to suppress the extension entirely.
    """
    if template_extension is None:
        template_extension = self.template_extension
    if template_extension is False:
        # Extensionless templates: the name is the file name.
        return template_name
    return template_name + os.path.extsep + template_extension
386,359
Return the path to a template with the given name. Arguments: template_name: the name of the template. search_dirs: the list of directories in which to search.
def find_name(self, template_name, search_dirs):
    """Return the path to a template with the given name.

    Arguments:
      template_name: the name of the template.
      search_dirs: the list of directories in which to search.
    """
    return self._find_path_required(search_dirs,
                                    self.make_file_name(template_name))
386,362
Find and return the template with the given file name. Arguments: file_name: the file name of the template.
def load_file(self, file_name):
    """Find and return the template with the given file name.

    Arguments:
      file_name: the file name of the template.
    """
    path = self._make_locator().find_file(file_name, self.search_dirs)
    return self.read(path)
386,368
Find and return the template with the given template name. Arguments: name: the name of the template.
def load_name(self, name):
    """Find and return the template with the given template name.

    Arguments:
      name: the name of the template.
    """
    path = self._make_locator().find_name(name, self.search_dirs)
    return self.read(path)
386,369
Find and return the template associated to the given object. Arguments: obj: an instance of a user-defined class. search_dirs: the list of directories in which to search.
def load_object(self, obj):
    """Find and return the template associated to the given object.

    Arguments:
      obj: an instance of a user-defined class.

    NOTE(review): searching uses ``self.search_dirs``; there is no
    separate search_dirs parameter on this method.
    """
    path = self._make_locator().find_object(obj, self.search_dirs)
    return self.read(path)
386,370
Parse a unicode template string and return a ParsedTemplate instance. Arguments: template: a unicode template string. delimiters: a 2-tuple of delimiters. Defaults to the package default. Examples: >>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}") >>> print str(parsed).replace('u', '') # This is a hack to get the test to pass both in Python 2 and 3. ['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])]
def parse(template, delimiters=None):
    """Parse a unicode template string and return a ParsedTemplate instance.

    Arguments:
      template: a unicode template string.
      delimiters: a 2-tuple of delimiters.  Defaults to the package default.

    Examples:
    >>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
    >>> print str(parsed).replace('u', '')  # This is an old hack.
    ['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])]
    """
    # Deliberately an exact-type check (not isinstance): proper subclasses
    # of unicode are rejected, matching the original contract.
    if type(template) is not unicode:
        raise Exception("Template is not unicode: %s" % type(template))
    return _Parser(delimiters).parse(template)
386,372
Parse a template string starting at some index. This method uses the current tag delimiter. Arguments: template: a unicode string that is the template to parse. index: the index at which to start parsing. Returns: a ParsedTemplate instance.
def parse(self, template):
    """Parse a template string and return a ParsedTemplate instance.

    This method uses the current tag delimiter.

    Arguments:
      template: a unicode string that is the template to parse.

    Returns:
      a ParsedTemplate instance.
    """
    self._compile_delimiters()

    start_index = 0
    content_end_index, parsed_section, section_key = None, None, None
    parsed_template = ParsedTemplate()

    # Stack of in-progress section states; pushed on '#'/'^' tags and
    # popped when the matching '/' close tag is found.
    states = []

    while True:
        match = self._template_re.search(template, start_index)

        if match is None:
            break

        match_index = match.start()
        end_index = match.end()

        matches = match.groupdict()

        # Normalize the matches dictionary.
        if matches['change'] is not None:
            matches.update(tag='=', tag_key=matches['delims'])
        elif matches['raw'] is not None:
            matches.update(tag='&', tag_key=matches['raw_name'])

        tag_type = matches['tag']
        tag_key = matches['tag_key']
        leading_whitespace = matches['whitespace']

        # Standalone (non-interpolation) tags consume the entire line,
        # both leading whitespace and trailing newline.
        did_tag_begin_line = match_index == 0 or template[match_index - 1] in END_OF_LINE_CHARACTERS
        did_tag_end_line = end_index == len(template) or template[end_index] in END_OF_LINE_CHARACTERS
        is_tag_interpolating = tag_type in ['', '&']

        if did_tag_begin_line and did_tag_end_line and not is_tag_interpolating:
            # Consume the line terminator: optionally '\r' then '\n'.
            if end_index < len(template):
                end_index += template[end_index] == '\r' and 1 or 0
            if end_index < len(template):
                end_index += template[end_index] == '\n' and 1 or 0
        elif leading_whitespace:
            # Interpolation tags keep their leading whitespace as literal text.
            match_index += len(leading_whitespace)
            leading_whitespace = ''

        # Avoid adding spurious empty strings to the parse tree.
        if start_index != match_index:
            parsed_template.add(template[start_index:match_index])

        start_index = end_index

        if tag_type in ('#', '^'):
            # Cache current state.
            state = (tag_type, end_index, section_key, parsed_template)
            states.append(state)

            # Initialize new state
            section_key, parsed_template = tag_key, ParsedTemplate()
            continue

        if tag_type == '/':
            if tag_key != section_key:
                raise ParsingError("Section end tag mismatch: %s != %s" % (tag_key, section_key))

            # Restore previous state with newly found section data.
            parsed_section = parsed_template

            (tag_type, section_start_index, section_key, parsed_template) = states.pop()
            node = self._make_section_node(template, tag_type, tag_key,
                                           parsed_section,
                                           section_start_index, match_index)
        else:
            node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace)

        parsed_template.add(node)

    # Avoid adding spurious empty strings to the parse tree.
    if start_index != len(template):
        parsed_template.add(template[start_index:])

    return parsed_template
386,384
Find and return the template associated to a TemplateSpec instance. Returns the template as a unicode string. Arguments: spec: a TemplateSpec instance.
def load(self, spec):
    """Find and return the template associated to a TemplateSpec instance.

    Returns the template as a unicode string.

    Arguments:
      spec: a TemplateSpec instance.
    """
    if spec.template is None:
        # No inline template: locate the template file and read it.
        path = self._find(spec)
        return self.loader.read(path, spec.template_encoding)
    return self.loader.unicode(spec.template, spec.template_encoding)
386,389
Render a unicode template string, and return as unicode. Arguments: template: a template string of type unicode (but not a proper subclass of unicode). context_stack: a ContextStack instance.
def render(self, template, context_stack, delimiters=None):
    """Render a unicode template string, and return as unicode.

    Arguments:
      template: a template string of type unicode (but not a proper
        subclass of unicode).
      context_stack: a ContextStack instance.
    """
    return parse(template, delimiters).render(self, context_stack)
386,394
I2C read interface for ADS1x15-based ADCs. params: :param pin: individual or differential pin. :param bool is_differential: single-ended or differential read.
def read(self, pin, is_differential=False):
    """I2C interface for ADS1x15-based ADC reads.

    Args:
        pin: individual or differential pin.
        is_differential (bool): single-ended or differential read.
    """
    # Single-ended channels are offset by 0x04 in the mux config.
    channel = pin if is_differential else pin + 0x04
    return self._read(channel)
386,750
Writes a jsonl file. Args: data: list of json encoded data
def write_jsonl_file(fname, data):
    """Write a jsonl file, one JSON-encoded row per line.

    Args:
        fname: destination path.
        data: list of json encoded strings; blank rows are skipped.
    """
    if not isinstance(data, list):
        # Best-effort: warn and bail rather than raising on malformed input.
        print('warning: malformed json data for file', fname)
        return
    with open(fname, 'w') as handle:
        for row in data:
            stripped = row.strip()
            if stripped:
                handle.write('%s\n' % stripped)
387,058
Lists projects in W&B scoped by entity. Args: entity (str, optional): The entity to scope this project to. Returns: [{"id","name","description"}]
def list_projects(self, entity=None):
    """List projects in W&B scoped by entity.

    Args:
        entity (str, optional): The entity to scope this project to.
            Falls back to the configured 'entity' setting.

    Returns:
        [{"id","name","description"}]
    """
    # NOTE(review): the GraphQL document string appears to have been
    # stripped from this excerpt; gql() normally receives the query text.
    query = gql()
    return self._flatten_edges(self.gql(query, variable_values={
        'entity': entity or self.settings('entity')})['models'])
387,160
Retrieve a project. Args: project (str): The project to get details for entity (str, optional): The entity to scope this project to. Returns: [{"id","name","repo","dockerImage","description"}]
def project(self, project, entity=None):
    """Retrieve details for a single project.

    Args:
        project (str): The project to get details for.
        entity (str, optional): The entity to scope this project to.

    Returns:
        [{"id","name","repo","dockerImage","description"}]
    """
    # NOTE(review): GraphQL document string stripped in this excerpt.
    query = gql()
    return self.gql(query, variable_values={
        'entity': entity, 'project': project})['model']
387,161
Lists runs in W&B scoped by project. Args: project (str): The project to scope the runs to entity (str, optional): The entity to scope this project to. Defaults to public models Returns: [{"id","name","description"}]
def list_runs(self, project, entity=None):
    """List runs in W&B scoped by project.

    Args:
        project (str): The project to scope the runs to.
        entity (str, optional): The entity to scope this project to.
            Defaults to the configured 'entity' setting.

    Returns:
        [{"id","name","description"}]
    """
    # NOTE(review): GraphQL document string stripped in this excerpt.
    query = gql()
    return self._flatten_edges(self.gql(query, variable_values={
        'entity': entity or self.settings('entity'),
        'model': project or self.settings('project')})['model']['buckets'])
387,162
Launch a run in the cloud. Args: command (str): The command to run program (str): The file to run project (str): The project to scope the runs to entity (str, optional): The entity to scope this project to. Defaults to public models run_id (str, optional): The run_id to scope to Returns: [{"podName","status"}]
def launch_run(self, command, project=None, entity=None, run_id=None):
    """Launch a run in the cloud.

    Args:
        command (str): The command to run.
        project (str, optional): The project to scope the run to.
        entity (str, optional): The entity to scope this project to.
        run_id (str, optional): The run_id to scope to.

    Returns:
        [{"podName","status"}]
    """
    # NOTE(review): GraphQL mutation string stripped in this excerpt.
    query = gql()
    patch = BytesIO()
    if self.git.dirty:
        # Capture uncommitted changes so the remote run can reproduce them.
        self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
        patch.seek(0)
    cwd = "."
    if self.git.enabled:
        # Express cwd relative to the repository root.
        cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, "")
    return self.gql(query, variable_values={
        'entity': entity or self.settings('entity'),
        'model': project or self.settings('project'),
        'command': command,
        'runId': run_id,
        'patch': patch.read().decode("utf8"),
        'cwd': cwd
    })
387,163
Get the relevant configs for a run Args: project (str): The project to download, (can include bucket) run (str, optional): The run to download entity (str, optional): The entity to scope this project to.
def run_config(self, project, run=None, entity=None):
    """Get the relevant configs for a run.

    Args:
        project (str): The project to download, (can include bucket).
        run (str, optional): The run to download.
        entity (str, optional): The entity to scope this project to.

    Returns:
        (commit, config, patch, metadata) tuple for the run.

    Raises:
        ValueError: if the run does not exist.
    """
    # NOTE(review): GraphQL document string stripped in this excerpt.
    query = gql()
    response = self.gql(query, variable_values={
        'name': project, 'run': run, 'entity': entity
    })
    # NOTE(review): `is None` would be the idiomatic comparison here.
    if response['model'] == None:
        raise ValueError("Run {}/{}/{} not found".format(entity, project, run) )
    run = response['model']['bucket']
    commit = run['commit']
    patch = run['patch']
    config = json.loads(run['config'] or '{}')
    if len(run['files']['edges']) > 0:
        # The first file edge is expected to hold the metadata JSON.
        url = run['files']['edges'][0]['node']['url']
        res = requests.get(url)
        res.raise_for_status()
        metadata = res.json()
    else:
        metadata = {}
    return (commit, config, patch, metadata)
387,164
Check if a run exists and get resume information. Args: entity (str, optional): The entity to scope this project to. project_name (str): The project to download, (can include bucket) run (str, optional): The run to download
def run_resume_status(self, entity, project_name, name):
    """Check if a run exists and get resume information.

    Args:
        entity (str): The entity to scope this project to.
        project_name (str): The project to download, (can include bucket).
        name (str): The run to check.

    Returns:
        The run's bucket dict, or None if the run was not found.

    Side effects:
        Persists the resolved 'project' and 'entity' settings.
    """
    # NOTE(review): GraphQL document string stripped in this excerpt.
    query = gql()
    response = self.gql(query, variable_values={
        'entity': entity, 'project': project_name, 'name': name,
    })
    if 'model' not in response or 'bucket' not in response['model']:
        return None
    project = response['model']
    self.set_setting('project', project_name)
    if 'entity' in project:
        self.set_setting('entity', project['entity']['name'])
    return project['bucket']
387,165
Create a new project Args: project (str): The project to create description (str, optional): A description of this project entity (str, optional): The entity to scope this project to.
def upsert_project(self, project, id=None, description=None, entity=None):
    """Create (or update) a project.

    Args:
        project (str): The project to create.
        id (optional): Existing project id to update.
        description (str, optional): A description of this project.
        entity (str, optional): The entity to scope this project to.

    Returns:
        The upserted model dict.
    """
    # NOTE(review): GraphQL mutation string stripped in this excerpt.
    mutation = gql()
    response = self.gql(mutation, variable_values={
        'name': self.format_project(project),
        'entity': entity or self.settings('entity'),
        'description': description,
        'repo': self.git.remote_url,
        'id': id})
    return response['upsertModel']['model']
387,166
Generate download urls Args: project (str): The project to download file_name (str): The name of the file to download run (str, optional): The run to upload to entity (str, optional): The entity to scope this project to. Defaults to wandb models Returns: A dict of extensions and urls { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }
def download_url(self, project, file_name, run=None, entity=None):
    """Generate a download url for a single file.

    Args:
        project (str): The project to download.
        file_name (str): The name of the file to download.
        run (str, optional): The run to scope to.
        entity (str, optional): The entity to scope this project to.

    Returns:
        A dict like {"url": ..., "updatedAt": ..., "md5": ...}, or None
        when the file has never been uploaded (no updatedAt).
    """
    # NOTE(review): GraphQL document string stripped in this excerpt.
    query = gql()
    query_result = self.gql(query, variable_values={
        'name': project, 'run': run or self.settings('run'),
        'fileName': file_name,
        'entity': entity or self.settings('entity')})
    files = self._flatten_edges(query_result['model']['bucket']['files'])
    return files[0] if len(files) > 0 and files[0].get('updatedAt') else None
387,170
Initiate a streaming download Args: url (str): The url to download Returns: A tuple of the content length and the streaming response
def download_file(self, url):
    """Initiate a streaming download.

    Args:
        url (str): The url to download.

    Returns:
        A tuple of the content length and the streaming response.
    """
    streaming_response = requests.get(url, stream=True)
    streaming_response.raise_for_status()
    length = int(streaming_response.headers.get('content-length', 0))
    return (length, streaming_response)
387,171
Download a file from a run and write it to wandb/ Args: metadata (obj): The metadata object for the file to download. Comes from Api.download_urls(). Returns: A tuple of the file's local path and the streaming response. The streaming response is None if the file already existed and was up to date.
def download_write_file(self, metadata, out_dir=None):
    """Download a file from a run and write it to wandb/.

    Args:
        metadata (obj): The metadata object for the file to download.
            Comes from Api.download_urls().
        out_dir (str, optional): Target directory; defaults to wandb_dir().

    Returns:
        A tuple of the file's local path and the streaming response. The
        streaming response is None if the file already existed and was
        up to date.
    """
    file_name = metadata['name']
    target_path = os.path.join(out_dir or wandb_dir(), file_name)
    if self.file_current(file_name, metadata['md5']):
        # Local copy already matches the remote md5 — nothing to fetch.
        return target_path, None

    _, streaming_response = self.download_file(metadata['url'])
    with open(target_path, "wb") as handle:
        for chunk in streaming_response.iter_content(chunk_size=1024):
            handle.write(chunk)
    return target_path, streaming_response
387,172
Uploads a file to W&B with failure resumption Args: url (str): The url to download file (str): The path to the file you want to upload callback (:obj:`func`, optional): A callback which is passed the number of bytes uploaded since the last time it was called, used to report progress Returns: The requests library response object
def upload_file(self, url, file, callback=None, extra_headers={}):
    """Upload a file to W&B with failure resumption.

    Args:
        url (str): The url to upload to.
        file (file): The opened file to upload (must be non-empty).
        callback (func, optional): A callback which is passed the number
            of bytes uploaded since the last time it was called, used to
            report progress.
        extra_headers (dict, optional): Extra HTTP headers; copied before
            use so the caller's dict is never mutated.

    Returns:
        The requests library response object.

    Raises:
        CommError: if the file is empty.
    """
    extra_headers = extra_headers.copy()
    response = None
    if os.stat(file.name).st_size == 0:
        raise CommError("%s is an empty file" % file.name)
    try:
        progress = Progress(file, callback=callback)
        response = requests.put(
            url, data=progress, headers=extra_headers)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        # Ask the server how much it received, and retry transient codes.
        total = progress.len
        status = self._status_request(url, total)
        # TODO(adrian): there's probably even more stuff we should add here
        # like if we're offline, we should retry then too
        if status.status_code in (308, 408, 500, 502, 503, 504):
            util.sentry_reraise(retry.TransientException(exc=e))
        else:
            util.sentry_reraise(e)
    return response
387,173
Register a new agent Args: host (str): hostname persistent (bool): long running or oneoff sweep (str): sweep id project_name: (str): model that contains sweep
def register_agent(self, host, sweep_id=None, project_name=None):
    """Register a new sweep agent.

    Args:
        host (str): hostname.
        sweep_id (str, optional): sweep id.
        project_name (str, optional): model that contains sweep; defaults
            to the configured 'project' setting.

    Returns:
        The created agent dict.
    """
    # NOTE(review): GraphQL mutation string stripped in this excerpt.
    mutation = gql()
    if project_name is None:
        project_name = self.settings('project')

    # don't retry on validation errors
    def no_retry_400(e):
        if not isinstance(e, requests.HTTPError):
            return True
        if e.response.status_code != 400:
            return True
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])
    response = self.gql(mutation, variable_values={
        'host': host,
        'entityName': self.settings("entity"),
        'projectName': project_name,
        'sweep': sweep_id}, check_retry_fn=no_retry_400)
    return response['createAgent']['agent']
387,174
Notify server about agent state, receive commands. Args: agent_id (str): agent_id metrics (dict): system metrics run_states (dict): run_id: state mapping Returns: List of commands to execute.
def agent_heartbeat(self, agent_id, metrics, run_states):
    """Notify server about agent state, receive commands.

    Args:
        agent_id (str): agent_id.
        metrics (dict): system metrics.
        run_states (dict): run_id: state mapping.

    Returns:
        List of commands to execute, or [] on communication failure.
    """
    # NOTE(review): GraphQL mutation string stripped in this excerpt.
    mutation = gql()
    try:
        response = self.gql(mutation, variable_values={
            'id': agent_id,
            'metrics': json.dumps(metrics),
            'runState': json.dumps(run_states)})
    except Exception as e:
        # GQL raises exceptions with stringified python dictionaries :/
        message = ast.literal_eval(e.args[0])["message"]
        logger.error('Error communicating with W&B: %s', message)
        return []
    else:
        return json.loads(response['agentHeartbeat']['commands'])
387,175
Upsert a sweep object. Args: config (str): sweep config (will be converted to yaml)
def upsert_sweep(self, config): mutation = gql() # don't retry on validation errors # TODO(jhr): generalize error handling routines def no_retry_400_or_404(e): if not isinstance(e, requests.HTTPError): return True if e.response.status_code != 400 and e.response.status_code != 404: return True body = json.loads(e.response.content) raise UsageError(body['errors'][0]['message']) response = self.gql(mutation, variable_values={ 'config': yaml.dump(config), 'description': config.get("description"), 'entityName': self.settings("entity"), 'projectName': self.settings("project")}, check_retry_fn=no_retry_400_or_404) return response['upsertSweep']['sweep']['name']
387,176
Download files from W&B Args: project (str): The project to download run (str, optional): The run to upload to entity (str, optional): The entity to scope this project to. Defaults to wandb models Returns: The requests library response object
def pull(self, project, run=None, entity=None):
    """Download all files for a run from W&B.

    Args:
        project (str): The project to download.
        run (str, optional): The run to download.
        entity (str, optional): The entity to scope this project to.

    Returns:
        A list of streaming response objects for files that were
        actually downloaded (up-to-date files are skipped).
    """
    project, run = self.parse_slug(project, run=run)
    urls = self.download_urls(project, run, entity)
    downloaded = []
    for file_name in urls:
        _, streaming_response = self.download_write_file(urls[file_name])
        if streaming_response:
            downloaded.append(streaming_response)
    return downloaded
387,178
Call the wrapped function, with retries. Args: retry_timedelta (kwarg): amount of time to retry before giving up. sleep_base (kwarg): amount of time to sleep upon first failure, all other sleeps are derived from this one.
def __call__(self, *args, **kwargs):
    """Call the wrapped function, with retries.

    Args:
        retry_timedelta (kwarg): amount of time to retry before giving up.
        num_retries (kwarg): maximum number of attempts before giving up.
        retry_sleep_base (kwarg): amount of time to sleep upon first
            failure; all other sleeps are derived from this one
            (exponential backoff with jitter, capped at MAX_SLEEP_SECONDS).
        check_retry_fn (kwarg): predicate deciding whether a caught
            exception is retryable.
    """
    retry_timedelta = kwargs.pop('retry_timedelta', self._retry_timedelta)
    if retry_timedelta is None:
        # Effectively "retry forever" on the time axis.
        retry_timedelta = datetime.timedelta(days=1000000)

    num_retries = kwargs.pop('num_retries', self._num_retries)
    if num_retries is None:
        num_retries = 1000000

    if os.environ.get('WANDB_TEST'):
        # Fail fast under test so failures surface immediately.
        num_retries = 0

    sleep_base = kwargs.pop('retry_sleep_base', 1)

    # an extra function to allow performing more logic on the filtered exceptiosn
    check_retry_fn = kwargs.pop('check_retry_fn', self._check_retry_fn)

    first = True
    sleep = sleep_base
    start_time = datetime.datetime.now()
    now = start_time

    self._num_iter = 0

    while True:
        try:
            result = self._call_fn(*args, **kwargs)
            # Report recovery if we previously logged that we were retrying.
            if not first:
                wandb.termlog('{} resolved after {}, resuming normal operation.'.format(
                    self._error_prefix, datetime.datetime.now() - start_time))
            return result
        except self._retryable_exceptions as e:
            # if the secondary check fails, re-raise
            if not check_retry_fn(e):
                raise
            if (datetime.datetime.now() - start_time >= retry_timedelta
                    or self._num_iter >= num_retries):
                raise
            if self._num_iter == 2:
                # Only log the traceback once we are clearly in a retry loop.
                logger.exception('Retry attempt failed:')
                wandb.termlog(
                    '{} ({}), entering retry loop. See {} for full traceback.'.format(
                        self._error_prefix, e.__class__.__name__,
                        util.get_log_file_path()))
            if wandb.env.is_debug():
                traceback.print_exc()
        first = False

        # Exponential backoff with up to 25% jitter.
        time.sleep(sleep + random.random() * 0.25 * sleep)
        sleep *= 2
        if sleep > self.MAX_SLEEP_SECONDS:
            sleep = self.MAX_SLEEP_SECONDS
        now = datetime.datetime.now()

        self._num_iter += 1
387,233
Upsert the Run (ie. for the first time with all its attributes) Arguments: retry: (bool) Whether to retry if the connection fails (ie. if the backend is down). False is useful so we can start running the user process even when the W&B backend is down, and let syncing finish later. Returns: True if the upsert succeeded, False if it failed because the backend is down. Throws: LaunchError on other failures
def _upsert_run(self, retry, storage_id, env):
    """Upsert the Run (ie. for the first time with all its attributes).

    Arguments:
        retry: (bool) Whether to retry if the connection fails (ie. if the
            backend is down). False is useful so we can start running the
            user process even when the W&B backend is down, and let
            syncing finish later.
        storage_id: existing storage id to update, if any.
        env: environment dict recorded on the run.

    Returns:
        True if the upsert succeeded, False if it failed because the
        backend is down.

    Throws:
        LaunchError on other failures
    """
    if retry:
        num_retries = None
    else:
        num_retries = 0  # no retries because we want to let the user process run even if the backend is down

    try:
        upsert_result = self._run.save(
            id=storage_id, num_retries=num_retries, api=self._api)
    except wandb.apis.CommError as e:
        logger.exception("communication error with wandb %s" % e.exc)
        # TODO: Get rid of str contains check
        if self._run.resume == 'never' and 'exists' in str(e):
            raise LaunchError(
                "resume='never' but run (%s) exists" % self._run.id)
        else:
            # Detect bad request code -- this is usually trying to
            # create a run that has been already deleted
            if (isinstance(e.exc, requests.exceptions.HTTPError)
                    and e.exc.response.status_code == 400):
                raise LaunchError(
                    'Failed to connect to W&B. See {} for details.'.format(
                        util.get_log_file_path()))

            if isinstance(e.exc, (requests.exceptions.HTTPError,
                                  requests.exceptions.Timeout,
                                  requests.exceptions.ConnectionError)):
                # Treat transport-level failures as "backend down" — the
                # caller can continue and sync later.
                wandb.termerror(
                    'Failed to connect to W&B. Retrying in the background.')
                return False

            launch_error_s = 'Launch exception: {}, see {} for details.  To disable wandb set WANDB_MODE=dryrun'.format(e, util.get_log_file_path())
            if 'Permission denied' in str(e):
                launch_error_s += '\nRun "wandb login", or provide your API key with the WANDB_API_KEY environment variable.'

            raise LaunchError(launch_error_s)

    if self._output:
        url = self._run.get_url(self._api)
        wandb.termlog("Syncing to %s" % url)
        wandb.termlog("Run `wandb off` to turn off syncing.")

    self._run.set_environment(environment=env)

    logger.info("saving patches")
    self._api.save_patches(self._watch_dir)
    logger.info("saving pip packages")
    self._api.save_pip(self._watch_dir)
    logger.info("initializing streaming files api")
    self._api.get_file_stream_api().set_file_policy(
        OUTPUT_FNAME, CRDedupeFilePolicy())
    self._api.get_file_stream_api().start()
    self._project = self._api.settings("project")

    # unblock file syncing and console streaming, which need the Run to have a .storage_id
    logger.info("unblocking file change observer, beginning sync with W&B servers")
    self._unblock_file_observer()

    return True
387,366
Perform a requests http call, retrying with exponential backoff. Args: func: An http-requesting function to call, like requests.post max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk *args: passed through to func **kwargs: passed through to func
def request_with_retry(func, *args, **kwargs):
    """Perform a requests http call, retrying with exponential backoff.

    Args:
        func: An http-requesting function to call, like requests.post
        max_retries: Maximum retries before giving up. By default we retry
            30 times in ~2 hours before dropping the chunk
        *args: passed through to func
        **kwargs: passed through to func

    Returns:
        The response on success. NOTE(review): on exhausted retries or an
        unretryable error this returns the *exception object* instead of
        raising it — callers must check the return value.
    """
    max_retries = kwargs.pop('max_retries', 30)
    sleep = 2
    retry_count = 0
    while True:
        try:
            response = func(*args, **kwargs)
            response.raise_for_status()
            return response
        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError,  # XXX 500s aren't retryable
                requests.exceptions.Timeout) as e:
            if retry_count == max_retries:
                return e
            retry_count += 1
            delay = sleep + random.random() * 0.25 * sleep
            if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 429:
                logger.info(
                    "Rate limit exceeded, retrying in %s seconds" % delay)
            else:
                logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s',
                               e, args, kwargs)
            time.sleep(delay)
            # Exponential backoff, capped at MAX_SLEEP_SECONDS.
            sleep *= 2
            if sleep > MAX_SLEEP_SECONDS:
                sleep = MAX_SLEEP_SECONDS
        except requests.exceptions.RequestException as e:
            # NOTE(review): `response` may be unbound here if func() itself
            # raised before assignment — presumably only reached after a
            # response exists; verify.
            logger.error(response.json()['error'])  # XXX clean this up
            logger.exception(
                'requests_with_retry encountered unretryable exception: %s', e)
            return e
387,569
Return a command that will run program. Args: program: The string name of the program to try to run. Returns: commandline list of strings to run the program (eg. with subprocess.call()) or None
def find_runner(program):
    """Return a command that will run program.

    Args:
        program: The string name of the program to try to run.
    Returns:
        commandline list of strings to run the program (eg. with
        subprocess.call()) or None
    """
    if os.path.isfile(program) and not os.access(program, os.X_OK):
        # program is a path to a non-executable file
        try:
            # Bugfix: use a context manager so the file handle is always
            # closed (the original leaked the open file object).
            with open(program) as opened:
                first_line = opened.readline().strip()
        except PermissionError:
            return None
        if first_line.startswith('#!'):
            # Honor the shebang line, e.g. "#!/usr/bin/env python".
            return shlex.split(first_line[2:])
    if program.endswith('.py'):
        return [sys.executable]
    return None
387,570
Spawn a thread that reads from a data source and writes to a sink. The thread will terminate if it receives a Falsey value from the source. Args: get_data_fn: Data-reading function. Called repeatedly until it returns False-y to indicate that the thread should terminate. put_data_fn: Data-writing function. Returns: threading.Thread
def spawn_reader_writer(get_data_fn, put_data_fn):
    """Spawn a daemon thread that reads from a data source and writes to a sink.

    The thread will terminate if it receives a Falsey value from the
    source; the falsey value is still forwarded so consumers farther down
    the pipeline see the EOF marker.

    Args:
        get_data_fn: Data-reading function. Called repeatedly until it
            returns False-y to indicate that the thread should terminate.
        put_data_fn: Data-writing function.
    Returns:
        threading.Thread
    """
    def _pump():
        while True:
            payload = get_data_fn()
            put_data_fn(payload)
            if not payload:
                # EOF — stop pumping after forwarding the marker.
                break

    worker = threading.Thread(target=_pump)
    worker.daemon = True
    worker.start()
    return worker
387,611
Constructor. Args: src_file: file to read from. sync_dst_file: file to write to synchronously when `self.write()` is called. async_dst_files: files to write to asynchronously
def __init__(self, src_file, sync_dst_file, *async_dst_files):
    """Constructor.

    Args:
        src_file: file to read from.
        sync_dst_file: file to write to synchronously when `self.write()`
            is called.
        async_dst_files: files to write to asynchronously, each fed by its
            own queue + writer thread.
    """
    # save the stack at construction time for debugging later
    self._origin_stack = '\n'.join(traceback.format_stack())
    self.tee_file = None  # convenience for users that want a writable file to put things into the tee
    self._src_file = src_file
    self._sync_dst_file = sync_dst_file
    self._async_dst_files = list(async_dst_files)
    self._write_queues = []
    self._write_threads = []
    # One queue + daemon writer thread per async destination.
    for f in async_dst_files:
        q = queue.Queue()
        t = spawn_reader_writer(q.get, functools.partial(self._write, f))
        self._write_queues.append(q)
        self._write_threads.append(t)
    src_fd = self._src_file.fileno()

    def read():
        # We use `os.read()` instead of `file.read()` because `os.read()` will return
        # any non-empty amount of data, blocking only until there is data available to
        # be read. On the other hand, `file.read()` waits until its buffer is full.
        # Since we use this code for console output, `file.read()`'s stuttering output
        # is undesirable.
        try:
            return os.read(src_fd, 1024)
        except OSError:
            # errno 5 on linux; happens with PTYs if the slave is closed. mac os just
            # returns b'' from os.read().
            return six.b('')
    self._read_thread = spawn_reader_writer(read, self._write_to_all)
387,618
Constructor Args: redir_file: (file) The file object to redirect to_file: (file) The file object `redir_file` should be redirected to.
def __init__(self, redir_file, to_file):
    """Constructor.

    Args:
        redir_file: (file) The file object to redirect.
        to_file: (file) The file object `redir_file` should be redirected to.
    """
    self.redir_file = redir_file
    self._from_fd = redir_file.fileno()
    self._to_fd = to_file.fileno()
    # copy from_fd before it is overwritten
    # NOTE: `self._from_fd` is inheritable on Windows when duplicating a standard stream
    # we make this unbuffered because we want to rely on buffers earlier in the I/O chain
    self.orig_file = os.fdopen(os.dup(self._from_fd), 'wb', 0)
387,622
Return history metrics for a run Args: samples (int, optional): The number of samples to return pandas (bool, optional): Return a pandas dataframe stream (str, optional): "default" for metrics, "system" for machine metrics
def history(self, samples=500, pandas=True, stream="default"):
    """Return history metrics for a run.

    Args:
        samples (int, optional): The number of samples to return.
        pandas (bool, optional): Return a pandas dataframe.
        stream (str, optional): "default" for metrics, "system" for
            machine metrics.
    """
    node = "history" if stream == "default" else "events"
    # NOTE(review): the gql() argument (a query-string template formatted
    # with `% node`) was stripped from this excerpt; as written this line
    # is not valid syntax.
    query = gql( % node)
    response = self._exec(query, samples=samples)
    lines = [json.loads(line)
             for line in response['project']['run'][node]]
    if pandas:
        pandas = util.get_module("pandas")
        if pandas:
            lines = pandas.DataFrame.from_records(lines)
        else:
            print("Unable to load pandas, call history with pandas=False")
    return lines
387,659
Write a secrets.env file with the W&B ApiKey and any additional secrets passed. Args: overrides (dict, optional): Additional environment variables to write to secrets.env path (str, optional): The path to write the secrets file.
def sagemaker_auth(overrides=None, path="."):
    """Write a secrets.env file with the W&B ApiKey and any additional secrets passed.

    Args:
        overrides (dict, optional): Additional environment variables to
            write to secrets.env
        path (str, optional): The path to write the secrets file.

    Raises:
        ValueError: if no API key can be found.
    """
    # Bugfix: the original used a mutable default argument ({}) and wrote
    # the api key into it, leaking state across calls and mutating a
    # caller-supplied dict.  Copy instead.
    overrides = dict(overrides) if overrides else {}
    api_key = overrides.get(env.API_KEY, Api().api_key)
    if api_key is None:
        raise ValueError(
            "Can't find W&B ApiKey, set the WANDB_API_KEY env variable or run `wandb login`")
    overrides[env.API_KEY] = api_key
    with open(os.path.join(path, "secrets.env"), "w") as file:
        for k, v in six.iteritems(overrides):
            file.write("{}={}\n".format(k, v))
387,714
Set the column types args: types: iterable of (column_name, type) pairs.
def set_columns(self, types):
    """Set the column types.

    Args:
        types: iterable of (column_name, type) pairs.  Must be reusable
            (it is iterated more than once).
    """
    if self._types:
        raise wandb.Error('TypedTable.set_columns called more than once.')
    try:
        for key, type_ in types:
            if type_ not in TYPE_TO_TYPESTRING:
                raise wandb.Error('TypedTable.set_columns received invalid type ({}) for key "{}".\n  Valid types: {}'.format(
                    type_, key, '[%s]' % ', '.join(VALID_TYPE_NAMES)))
    except TypeError:
        raise wandb.Error(
            'TypedTable.set_columns requires iterable of (column_name, type) pairs.')
    self._types = dict(types)

    # Record the schema on the output stream.
    typemap = {}
    columns = []
    for key, type_ in types:
        typemap[key] = TYPE_TO_TYPESTRING[type_]
        columns.append(key)
    self._output.add({'typemap': typemap, 'columns': columns})
387,784
Add a row to the table. Args: row: A dict whose keys match the keys added in set_columns, and whose values can be cast to the types added in set_columns.
def add(self, row):
    """Add a row to the table.

    Args:
        row: A dict whose keys match the keys added in set_columns, and
            whose values can be cast to the types added in set_columns.

    Raises:
        wandb.Error: if set_columns was not called first, if a key was
            never declared, or if a value cannot be converted/encoded.
    """
    if not self._types:
        raise wandb.Error(
            'TypedTable.set_columns must be called before add.')
    mapped_row = {}
    for key, val in row.items():
        try:
            typed_val = self._types[key](val)
            if hasattr(typed_val, 'encode'):
                typed_val = typed_val.encode()
            mapped_row[key] = typed_val
        except KeyError:
            raise wandb.Error(
                'TypedTable.add received key ("%s") which wasn\'t provided to set_columns' % key)
        except Exception:
            # Bugfix: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            raise wandb.Error('TypedTable.add couldn\'t convert and encode ("{}") provided for key ("{}") to type ({})'.format(
                val, key, self._types[key]))
    self._output.add(mapped_row)
    self._count += 1
387,785
Push a chunk of a file to the streaming endpoint. Args: filename: Name of file that this is a chunk of. data: File data.
def push(self, filename, data):
    """Queue a chunk of a file for the streaming endpoint.

    NOTE(review): the original docstring referred to a chunk_id
    parameter that this signature does not take.

    Args:
        filename: Name of file that this is a chunk of.
        data: File data.
    """
    self._queue.put(Chunk(filename, data))
387,821
Cleans up. Anything pushed after finish will be dropped. Args: exitcode: The exitcode of the watched process.
def finish(self, exitcode):
    """Clean up.  Anything pushed after finish will be dropped.

    Args:
        exitcode: The exitcode of the watched process.
    """
    # Enqueue the sentinel, then wait for the worker thread to drain.
    self._queue.put(self.Finish(exitcode))
    self._thread.join()
387,822
Process some data splitting it into complete lines and buffering the rest Args: data: A `str` in Python 2 or `bytes` in Python 3 Returns: list of complete lines ending with a carriage return (eg. a progress bar) or a newline.
def add_string(self, data):
    """Process some data splitting it into complete lines and buffering the rest.

    Args:
        data: A `str` in Python 2 or `bytes` in Python 3.
    Returns:
        list of complete lines ending with a carriage return (eg. a
        progress bar) or a newline.
    """
    lines = []
    while data:
        # Split off everything up to and including the next terminator.
        match = self._line_end_re.search(data)
        if match is None:
            chunk = data
        else:
            chunk = data[:match.end()]
        data = data[len(chunk):]

        if self._buf and self._buf[-1].endswith(b('\r')) and not chunk.startswith(b('\n')):
            # if we get a carriage return followed by something other than
            # a newline then we assume that we're overwriting the current
            # line (ie. a progress bar)
            #
            # We don't terminate lines that end with a carriage return until
            # we see what's coming next so we can distinguish between a
            # progress bar situation and a Windows line terminator.
            #
            # TODO(adrian): some day these hacks should be replaced with
            # real terminal emulation
            lines.append(self._finish_line())

        self._buf.append(chunk)
        if chunk.endswith(b('\n')):
            lines.append(self._finish_line())

    return lines
387,945
Constructor. Args: fsapi: api.FileStreamApi instance filename: Name of the file this stream is pushed to. line_prepend: string to prepend to every line for this stream. prepend_timestamp: If true a timestamp will be prepended to each line (after line_prepend).
def __init__(self, fsapi, filename, line_prepend='', prepend_timestamp=False):
    """Constructor.

    Args:
        fsapi: api.FileStreamApi instance.
        filename: Name of the file this stream is pushed to.
        line_prepend: string to prepend to every line for this stream
            (a separating space is appended automatically).
        prepend_timestamp: If true a timestamp will be prepended to each
            line (after line_prepend).
    """
    self._fsapi = fsapi
    self._filename = filename
    # Non-empty prefixes get a trailing space separator.
    self._line_prepend = (line_prepend + ' ') if line_prepend else line_prepend
    self._prepend_timestamp = prepend_timestamp
    self._line_buffer = LineBuffer()
387,947
Write some text to the pusher. Args: message: a string to push for this file. cur_time: used for unit testing. override line timestamp.
def write(self, message, cur_time=None):
    """Write some text to the pusher.

    Args:
        message: a string to push for this file.
        cur_time: used for unit testing. override line timestamp.
    """
    now = time.time() if cur_time is None else cur_time
    for line in self._line_buffer.add_string(message):
        stamp = ''
        if self._prepend_timestamp:
            # ISO-8601 UTC timestamp, space-separated from the line body.
            stamp = datetime.datetime.utcfromtimestamp(now).isoformat() + ' '
        self._fsapi.push(self._filename,
                         u'{}{}{}'.format(self._line_prepend, stamp, line))
387,948
Monkeypatches tensorboard or tensorboardX so that all events are logged to tfevents files and wandb. We save the tfevents files and graphs to wandb by default. Arguments: save, default: True - Passing False will skip sending events. tensorboardX, default: True if module can be imported - You can override this when calling patch
def patch(save=True, tensorboardX=tensorboardX_loaded):
    """Monkeypatch tensorboard or tensorboardX so events are also sent to wandb.

    Replaces ``SummaryToEventTransformer._add_event`` so that every event
    written to a tfevents file is also pushed to wandb (and, if ``save`` is
    True, the tfevents files and ``*.pbtxt`` graphs are saved to wandb).

    Args:
        save: default True — pass False to skip saving event files to wandb.
        tensorboardX: default True if the tensorboardX module can be
            imported — pass False to patch regular tensorboard instead.
    """
    global Summary, Event
    if tensorboardX:
        tensorboard_module = "tensorboardX.writer"
        if tensorflow_loaded:
            wandb.termlog(
                "Found TensorboardX and tensorflow, pass tensorboardX=False to patch regular tensorboard.")
        # Rebinds the module-level globals declared above.
        from tensorboardX.proto.summary_pb2 import Summary
        from tensorboardX.proto.event_pb2 import Event
    else:
        tensorboard_module = "tensorflow.python.summary.writer.writer"
        from tensorflow.summary import Summary, Event
    # Every event-file path seen so far; shared by closure below.
    writers = set()

    def _add_event(self, event, step, walltime=None):
        # Replacement for SummaryToEventTransformer._add_event: mirror the
        # event to wandb, then delegate to the real event writer.
        event.wall_time = time.time() if walltime is None else walltime
        if step is not None:
            event.step = int(step)
        try:
            # TensorboardX uses _file_name
            if hasattr(self.event_writer._ev_writer, "_file_name"):
                name = self.event_writer._ev_writer._file_name
            else:
                name = self.event_writer._ev_writer.FileName().decode("utf-8")
            writers.add(name)
            # This is a little hacky, there is a case where the log_dir changes.
            # Because the events files will have the same names in sub directories
            # we simply overwrite the previous symlink in wandb.save if the log_dir
            # changes.
            log_dir = os.path.dirname(os.path.commonprefix(list(writers)))
            filename = os.path.basename(name)
            # Tensorboard loads all tfevents files in a directory and prepends
            # their values with the path. Passing namespace to log allows us
            # to nest the values in wandb
            namespace = name.replace(filename, "").replace(
                log_dir, "").strip(os.sep)
            if save:
                wandb.save(name, base_path=log_dir)
                wandb.save(os.path.join(log_dir, "*.pbtxt"), base_path=log_dir)
            log(event, namespace=namespace, step=event.step)
        except Exception as e:
            # Best-effort: never let wandb mirroring break the real writer.
            wandb.termerror("Unable to log event %s" % e)
            # six.reraise(type(e), e, sys.exc_info()[2])
        self.event_writer.add_event(event)
    writer = wandb.util.get_module(tensorboard_module)
    writer.SummaryToEventTransformer._add_event = _add_event
387,957
Fetch credentials, and set them for this client. Note that we can't simply return credentials, since creating them may involve side-effecting self. Args: **kwds: Additional keyword arguments are passed on to GetCredentials. Returns: None. Sets self._credentials.
def _SetCredentials(self, **kwds):
    """Fetch credentials and set them on this client.

    Note that we can't simply return credentials, since creating them
    may involve side-effecting self.

    Args:
        **kwds: Additional keyword arguments are passed on to GetCredentials.

    Returns:
        None. Sets self._credentials.
    """
    args = dict(
        api_key=self._API_KEY,
        client=self,
        client_id=self._CLIENT_ID,
        client_secret=self._CLIENT_SECRET,
        package_name=self._PACKAGE,
        scopes=self._SCOPES,
        user_agent=self._USER_AGENT,
    )
    args.update(kwds)
    # credentials_lib can be expensive to import so do it only if needed.
    from apitools.base.py import credentials_lib
    # TODO(craigcitro): It's a bit dangerous to pass this
    # still-half-initialized self into this method, but we might need
    # to set attributes on it associated with our credentials.
    # Consider another way around this (maybe a callback?) and whether
    # or not it's worth it.
    self._credentials = credentials_lib.GetCredentials(**args)
388,277
Build descriptor for Enum instance. Args: enum_value: Enum value to provide descriptor for. Returns: Initialized EnumValueDescriptor instance describing the Enum instance.
def describe_enum_value(enum_value):
    """Build descriptor for a single Enum value.

    Args:
        enum_value: Enum value to provide descriptor for.

    Returns:
        Initialized EnumValueDescriptor instance describing the Enum value.
    """
    descriptor = EnumValueDescriptor()
    descriptor.name = six.text_type(enum_value.name)
    descriptor.number = enum_value.number
    return descriptor
388,307
Build descriptor for Enum class. Args: enum_definition: Enum class to provide descriptor for. Returns: Initialized EnumDescriptor instance describing the Enum class.
def describe_enum(enum_definition):
    """Build descriptor for an Enum class.

    Args:
        enum_definition: Enum class to provide descriptor for.

    Returns:
        Initialized EnumDescriptor instance describing the Enum class.
    """
    descriptor = EnumDescriptor()
    # Use only the last component of the dotted definition name.
    descriptor.name = enum_definition.definition_name().split('.')[-1]
    values = [
        describe_enum_value(enum_definition.lookup_by_number(number))
        for number in enum_definition.numbers()
    ]
    if values:
        descriptor.values = values
    return descriptor
388,308