r"""Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
def cdot(L, out=None): r L = asarray(L, float) layout_error = "Wrong matrix layout." if L.ndim != 2: raise ValueError(layout_error) if L.shape[0] != L.shape[1]: raise ValueError(layout_error) if out is None: out = empty((L.shape[0], L.shape[1]), float) return einsum("ij,kj->ik", L, L, out=out)
703,848
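A minimal numpy sanity check (illustrative sketch) that ``cdot`` agrees with the plain matrix product:

import numpy as np

L = np.linalg.cholesky(np.array([[4.0, 2.0], [2.0, 3.0]]))
assert np.allclose(cdot(L), L @ L.T)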
r"""Log of the pseudo-determinant. It assumes that ``K`` is a positive semi-definite matrix. Args: K (array_like): matrix. Returns: float: log of the pseudo-determinant.
def plogdet(K): r egvals = eigvalsh(K) return npsum(log(egvals[egvals > epsilon]))
703,853
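As a sanity check, the pseudo-determinant of a rank-deficient PSD matrix is the product of its nonzero eigenvalues. A numpy-only sketch, where the explicit 1e-12 cutoff stands in for the module-level ``epsilon``:

import numpy as np

K = np.array([[1.0, 1.0], [1.0, 1.0]])  # rank-1 PSD; eigenvalues {0, 2}
eigvals = np.linalg.eigvalsh(K)
pseudo_logdet = np.log(eigvals[eigvals > 1e-12]).sum()
assert np.isclose(pseudo_logdet, np.log(2.0))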
r"""Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``. It is theoretically equivalent to ``economic_qs(dot(G, G.T))``. Refer to :func:`numpy_sugar.economic_qs` for further information. Args: G (array_like): Matrix. Returns: tuple: ``((Q0, Q1), S0)``.
def economic_qs_linear(G): r import dask.array as da if not isinstance(G, da.Array): G = asarray(G, float) if G.shape[0] > G.shape[1]: (Q, Ssq, _) = svd(G, full_matrices=True) S0 = Ssq ** 2 rank = len(S0) Q0, Q1 = Q[:, :rank], Q[:, rank:] return ((Q0, Q1), S0) return economic_qs(G.dot(G.T))
703,855
r"""Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements.
def unique(ar): r import dask.array as da if isinstance(ar, da.core.Array): return da.unique(ar) return _unique(ar)
703,859
r"""Natural logarithm of a LU decomposition. Args: LU (tuple): LU decomposition. Returns: tuple: sign and log-determinant.
def lu_slogdet(LU): r LU = (asarray(LU[0], float), asarray(LU[1], float)) adet = _sum(log(_abs(LU[0].diagonal()))) s = prod(sign(LU[0].diagonal())) nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype="int32")) odd = nrows_exchange % 2 == 1 if odd: s *= -1.0 return (s, adet)
703,860
r"""Return the least-squares solution to a linear matrix equation. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Least-squares solution.
def lstsq(A, b): r A = asarray(A, float) b = asarray(b, float) if A.ndim == 1: A = A[:, newaxis] if A.shape[1] == 1: return dot(A.T, b) / squeeze(dot(A.T, A)) rcond = finfo(double).eps * max(*A.shape) return npy_lstsq(A, b, rcond=rcond)[0]
703,862
r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`. If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in :math:`O(pn)`. Args: A (array_like): Left matrix. B (array_like): Right matrix. Returns: :class:`numpy.ndarray`: Resulting diagonal.
def dotd(A, B): r A = asarray(A, float) B = asarray(B, float) if A.ndim == 1 and B.ndim == 1: return dot(A, B) out = empty((A.shape[0],), float) out[:] = sum(A * B.T, axis=1) return out
703,864
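A quick equivalence check (numpy-only sketch) against the naive quadratic-time computation:

import numpy as np

A = np.random.rand(3, 5)
B = np.random.rand(5, 3)
assert np.allclose(dotd(A, B), np.diag(A @ B))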
r"""Solve for the linear equations :math:`\mathrm A \mathbf x = \mathbf b`. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``.
def solve(A, b): r A = asarray(A, float) b = asarray(b, float) if A.shape[0] == 1: with errstate(divide="ignore"): A_ = array([[1.0 / A[0, 0]]]) if not isfinite(A_[0, 0]): raise LinAlgError("Division error.") return dot(A_, b) elif A.shape[0] == 2: a = A[0, 0] b_ = A[0, 1] c = A[1, 0] d = A[1, 1] A_ = array([[d, -b_], [-c, a]]) with errstate(divide="ignore"): A_ /= a * d - b_ * c if not npy_all(isfinite(A_)): raise LinAlgError("Division error.") return dot(A_, b) return _solve(A, b)
703,868
r"""Robust solve for the linear equations. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``.
def rsolve(A, b, epsilon=_epsilon): r A = asarray(A, float) b = asarray(b, float) if A.shape[0] == 0: return zeros((A.shape[1],)) if A.shape[1] == 0: return zeros((0,)) try: x = lstsq(A, b, rcond=epsilon) r = sum(x[3] > epsilon) if r == 0: return zeros(A.shape[1]) return x[0] except (ValueError, LinAlgError) as e: warnings.warn(str(e), RuntimeWarning) return solve(A, b)
703,869
def check_semidefinite_positiveness(A):
    """Check if ``A`` is a positive semi-definite matrix.

    Args:
        A (array_like): Matrix.

    Returns:
        bool: ``True`` if ``A`` is positive semi-definite; ``False``
        otherwise.
    """
    B = empty_like(A)
    B[:] = A
    B[diag_indices_from(B)] += sqrt(finfo(float).eps)
    try:
        cholesky(B)
    except LinAlgError:
        return False
    return True
def check_symmetry(A):
    """Check if ``A`` is a symmetric matrix.

    Args:
        A (array_like): Matrix.

    Returns:
        bool: ``True`` if ``A`` is symmetric; ``False`` otherwise.
    """
    A = asanyarray(A)
    if A.ndim != 2:
        raise ValueError("Checks symmetry only for two-dimensional arrays.")
    if A.shape[0] != A.shape[1]:
        return False
    return abs(A - A.T).max() < sqrt(finfo(float).eps)
r"""Shortcut to ``solve_triangular(A, b, lower=True, check_finite=False)``. Solve linear systems :math:`\mathrm A \mathbf x = \mathbf b` when :math:`\mathrm A` is a lower-triangular matrix. Args: A (array_like): A lower-triangular matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Solution ``x``. See Also -------- scipy.linalg.solve_triangular: Solve triangular linear equations.
def stl(A, b): r from scipy.linalg import solve_triangular A = asarray(A, float) b = asarray(b, float) return solve_triangular(A, b, lower=True, check_finite=False)
703,883
r"""Trace of :math:`\mathrm A \mathrm B^\intercal`. Args: A (array_like): Left-hand side. B (array_like): Right-hand side. Returns: float: Trace of :math:`\mathrm A \mathrm B^\intercal`.
def trace2(A, B): r A = asarray(A, float) B = asarray(B, float) layout_error = "Wrong matrix layout." if not (len(A.shape) == 2 and len(B.shape) == 2): raise ValueError(layout_error) if not (A.shape[1] == B.shape[0] and A.shape[0] == B.shape[1]): raise ValueError(layout_error) return _sum(A.T * B)
703,896
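A short numpy check (illustrative) of the identity the implementation relies on, ``trace(AB) = sum(A.T * B)``:

import numpy as np

A = np.random.rand(4, 2)
B = np.random.rand(2, 4)
assert np.isclose(trace2(A, B), np.trace(A @ B))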
r"""Add values ``D`` to the diagonal of matrix ``A``. Args: A (array_like): Left-hand side. D (array_like or float): Values to add. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting matrix.
def sum2diag(A, D, out=None): r A = asarray(A, float) D = asarray(D, float) if out is None: out = copy(A) else: copyto(out, A) einsum("ii->i", out)[:] += D return out
703,897
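A minimal usage sketch:

import numpy as np

A = np.zeros((3, 3))
R = sum2diag(A, 2.0)  # adds 2 to every diagonal element, leaves A untouched
assert np.allclose(R, 2.0 * np.eye(3))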
def scan(interface):
    """Perform a scan for access points in the area.

    Arguments:
        interface - device to use for scanning (e.g. eth1, wlan0).
    """
    interface = _get_bytes(interface)
    head = ffi.new('wireless_scan_head *')
    with iwlib_socket() as sock:
        iw_range = _get_range_info(interface, sock=sock)
        if iwlib.iw_scan(sock, interface, iw_range.we_version_compiled,
                         head) != 0:
            errno = ffi.errno
            strerror = "Error while scanning: %s" % os.strerror(errno)
            raise OSError(errno, strerror)
    results = []
    scan = head.result
    buf = ffi.new('char []', 1024)
    while scan != ffi.NULL:
        parsed_scan = {}
        if scan.b.has_mode:
            parsed_scan['Mode'] = ffi.string(
                iwlib.iw_operation_mode[scan.b.mode])
        if scan.b.essid_on:
            parsed_scan['ESSID'] = ffi.string(scan.b.essid)
        else:
            parsed_scan['ESSID'] = b'Auto'
        if scan.has_ap_addr:
            iwlib.iw_ether_ntop(
                ffi.cast('struct ether_addr *', scan.ap_addr.sa_data), buf)
            if scan.b.has_mode and scan.b.mode == iwlib.IW_MODE_ADHOC:
                parsed_scan['Cell'] = ffi.string(buf)
            else:
                parsed_scan['Access Point'] = ffi.string(buf)
        if scan.has_maxbitrate:
            iwlib.iw_print_bitrate(buf, len(buf), scan.maxbitrate.value)
            parsed_scan['BitRate'] = ffi.string(buf)
        if scan.has_stats:
            parsed_scan['stats'] = _parse_stats(scan.stats)
        results.append(parsed_scan)
        scan = scan.next
    return results
def to_matrix(self, values, selected_regions, default_value=0):
    """Creates a 2D multi-indexed matrix representation of the data.

    This representation allows the data to be sent to the machine learning
    algorithms.

    Args:
        values: The value or values that are going to fill the matrix.
        selected_regions: The index for one axis of the matrix.
        default_value: The default fill value of the matrix.
    """
    if isinstance(values, list):
        for v in values:
            try:
                self.data[v] = self.data[v].map(float)
            except (TypeError, ValueError):
                print(self.data[v])
    else:
        self.data[values] = self.data[values].map(float)
    print("started pivoting")
    self.data = pd.pivot_table(self.data, values=values,
                               columns=selected_regions, index=['sample'],
                               fill_value=default_value)
    print("end of pivoting")
def set_essid(interface, essid):
    """Set the ESSID of a given interface.

    Arguments:
        interface - device to work on (e.g. eth1, wlan0).
        essid - ESSID to set. Must be no longer than IW_ESSID_MAX_SIZE
            (typically 32 characters).
    """
    interface = _get_bytes(interface)
    essid = _get_bytes(essid)
    wrq = ffi.new('struct iwreq*')
    with iwlib_socket() as sock:
        if essid.lower() in (b'off', b'any'):
            wrq.u.essid.flags = 0
            essid = b''
        elif essid.lower() == b'on':
            # Retrieve the previous ESSID before re-enabling it.
            buf = ffi.new('char []', iwlib.IW_ESSID_MAX_SIZE + 1)
            wrq.u.essid.pointer = buf
            wrq.u.essid.length = iwlib.IW_ESSID_MAX_SIZE + 1
            wrq.u.essid.flags = 0
            if iwlib.iw_get_ext(sock, interface, iwlib.SIOCGIWESSID, wrq) < 0:
                raise ValueError("Error retrieving previous ESSID: %s"
                                 % (os.strerror(ffi.errno)))
            wrq.u.essid.flags = 1
        elif len(essid) > iwlib.IW_ESSID_MAX_SIZE:
            raise ValueError("ESSID '%s' is longer than the maximum %d"
                             % (essid, iwlib.IW_ESSID_MAX_SIZE))
        else:
            wrq.u.essid.pointer = ffi.new_handle(essid)
            wrq.u.essid.length = len(essid)
            wrq.u.essid.flags = 1
        if iwlib.iw_get_kernel_we_version() < 21:
            wrq.u.essid.length += 1
        if iwlib.iw_set_ext(sock, interface, iwlib.SIOCSIWESSID, wrq) < 0:
            errno = ffi.errno
            strerror = ("Couldn't set essid on device '%s': %s"
                        % (interface.decode('utf8'), os.strerror(errno)))
            raise OSError(errno, strerror)
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # The generated stub registers the same five CRUD methods for every
    # entity type, so they are built here in a loop; the resulting
    # attributes and channel registrations are identical to the expanded
    # generated assignments.
    entities = (
        ('LogEntry', 'LogEntrys'),
        ('Permission', 'Permissions'),
        ('Group', 'Groups'),
        ('User', 'Users'),
        ('ContentType', 'ContentTypes'),
        ('Session', 'Sessions'),
        ('Article', 'Articles'),
    )
    for name, plural in entities:
        message = getattr(garuda__pb2, name)
        setattr(self, 'Delete' + name, channel.unary_unary(
            '/garuda.Garuda/Delete' + name,
            request_serializer=garuda__pb2.ID.SerializeToString,
            response_deserializer=garuda__pb2.Void.FromString,
        ))
        setattr(self, 'Update' + name, channel.unary_unary(
            '/garuda.Garuda/Update' + name,
            request_serializer=message.SerializeToString,
            response_deserializer=garuda__pb2.Void.FromString,
        ))
        setattr(self, 'Read' + name, channel.unary_unary(
            '/garuda.Garuda/Read' + name,
            request_serializer=garuda__pb2.ID.SerializeToString,
            response_deserializer=message.FromString,
        ))
        setattr(self, 'Create' + name, channel.unary_unary(
            '/garuda.Garuda/Create' + name,
            request_serializer=message.SerializeToString,
            response_deserializer=message.FromString,
        ))
        setattr(self, 'Read%sFilter' % plural, channel.unary_stream(
            '/garuda.Garuda/Read%sFilter' % plural,
            request_serializer=garuda__pb2.Void.SerializeToString,
            response_deserializer=message.FromString,
        ))
    self.CustomCallDemo = channel.unary_unary(
        '/garuda.Garuda/CustomCallDemo',
        request_serializer=garuda__pb2.Void.SerializeToString,
        response_deserializer=garuda__pb2.Void.FromString,
    )
def create_multispan_plots(tag_ids):
    """Create detail plots (first row) and a total block (second row) of
    experiments.

    Args:
        tag_ids: list of tag-dictionaries, where the dictionaries must have
            fields 'name' (used for naming) and 'id' (used for numbering
            axis_dict).

    Returns:
        Figure element fig, ax_dict containing the first row plots (accessed
        via id) and ax_total containing the second row block.
    """
    import matplotlib.gridspec as gridspec

    fig = plt.figure()
    nrows = 1
    if len(tag_ids) > 1:
        nrows = 2
    fig.set_size_inches(10, 5 * nrows)
    gs = gridspec.GridSpec(nrows, len(tag_ids))
    ax_list = [fig.add_subplot(g) for g in gs]
    ax_dict = {}
    for i, tag_dict in enumerate(tag_ids):
        ax_dict[tag_dict['id']] = ax_list[i]
        ax_dict[tag_dict['id']].set_title(
            'System {} (id {})'.format(tag_dict['name'], tag_dict['id']))
    if nrows > 1:
        ax_total = plt.subplot(gs[1, :])
        title = 'Combined {}'.format(tag_ids[0]['name'])
        for i in range(1, len(tag_ids)):
            title = title + ' and {}'.format(tag_ids[i]['name'])
        ax_total.set_title(title)
        gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
        return fig, ax_dict, ax_total
    gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
    return fig, ax_dict, None
def svds_descending(M, k):
    """In contrast to MATLAB, scipy's svds() arranges the singular values in
    ascending order. In order to have matching codes, we wrap it in a
    function that re-sorts the singular values and singular vectors.

    Args:
        M: 2D numpy array; the matrix whose SVD is to be computed.
        k: Number of singular values to be computed.

    Returns:
        u, np.diag(s), v with singular values in descending order, where
        ``u, s, vt = svds(M, k=k)`` and ``v = vt.T``.
    """
    u, s, vt = svds(M, k=k)
    # reverse columns of u
    u = u[:, ::-1]
    # reverse s
    s = s[::-1]
    # reverse rows of vt
    vt = vt[::-1, :]
    return u, np.diag(s), vt.T
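A short check (illustrative) that the reordered values match the largest singular values of the full decomposition:

import numpy as np
from scipy.sparse.linalg import svds

M = np.random.rand(6, 5)
u, S, v = svds_descending(M, k=3)
assert np.all(np.diff(np.diag(S)) <= 0)            # descending order
s_full = np.linalg.svd(M, compute_uv=False)        # full SVD, descending
assert np.allclose(np.diag(S), s_full[:3])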
def set_h264_frm_ref_mode(self, mode=1, callback=None):
    """Set frame shipping reference mode of H264 encode stream.

    params:
        `mode`: see docstring of meth::get_h264_frm_ref_mode
    """
    params = {'mode': mode}
    return self.execute_command('setH264FrmRefMode', params, callback)
def simple_lmdb_settings(path, map_size=1e9, user_supplied_id=False):
    """Creates a decorator that can be used to configure sane default LMDB
    persistence settings for a model.

    Args:
        path (str): The path where the LMDB database files will be created.
        map_size (int): The amount of space to allot for the database.
        user_supplied_id (bool): Whether the model's id is supplied by the
            user via a ``_id`` key rather than generated automatically.
    """
    def decorator(cls):
        provider = \
            ff.UserSpecifiedIdProvider(key='_id') \
            if user_supplied_id else ff.UuidProvider()

        class Settings(ff.PersistenceSettings):
            id_provider = provider
            key_builder = ff.StringDelimitedKeyBuilder('|')
            database = ff.LmdbDatabase(
                path, key_builder=key_builder, map_size=map_size)

        class Model(cls, Settings):
            pass

        Model.__name__ = cls.__name__
        Model.__module__ = cls.__module__
        return Model

    return decorator
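A hypothetical usage sketch; the ``Document`` class and its path are illustrative and the feature declarations are elided, since any featureflow model body works here:

@simple_lmdb_settings('/tmp/models', map_size=1e8)
class Document(ff.BaseModel):
    # feature declarations go here
    ...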
def parse_pv(header):
    """Parses the PV array from an astropy FITS header.

    Args:
        header: astropy.io.fits.header.Header
            The header containing the PV values.

    Returns:
        cd: 2d array (list(list(float)))
            [[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]]
            Note that N depends on the order of the fit. For example, an
            order 3 fit goes up to PV?_10.
    """
    order_fit = parse_order_fit(header)

    def parse_with_base(i):
        key_base = "PV%d_" % i
        pvi_x = [header[key_base + "0"]]

        def parse_range(lower, upper):
            for j in range(lower, upper + 1):
                pvi_x.append(header[key_base + str(j)])

        if order_fit >= 1:
            parse_range(1, 3)
        if order_fit >= 2:
            parse_range(4, 6)
        if order_fit >= 3:
            parse_range(7, 10)
        return pvi_x

    return [parse_with_base(1), parse_with_base(2)]
def __init__(self, observations, sources, provisional_name):
    """Constructs a new astronomy data set object.

    Args:
        observations: list(Observations)
            The observations that are part of the data set.
        sources: list of reading lists from which the sources are built.
        provisional_name: provisional name assigned to the sources.
    """
    self.mpc_observations = {}
    self.observations = observations
    self.sys_header = None
    self.sources = [astrom.Source(reading_list, provisional_name)
                    for reading_list in sources]
def parse(self, filename):
    """Parses a file into an AstromData structure.

    Args:
        filename: str
            The name of the file whose contents will be parsed.

    Returns:
        data: AstromData
            The file contents extracted into a data structure for
            programmatic access.
    """
    filehandle = storage.open_vos_or_local(filename, "rb")
    assert filehandle is not None, "Failed to open file {} ".format(filename)
    filestr = filehandle.read()
    filehandle.close()
    assert filestr is not None, "File contents are None"
    observations = self._parse_observation_list(filestr)
    self._parse_observation_headers(filestr, observations)
    sys_header = self._parse_system_header(filestr)
    sources = self._parse_source_data(filestr, observations)
    return AstromData(observations, sys_header, sources,
                      discovery_only=self.discovery_only)
def get_coordinate_offset(self, other_reading):
    """Calculates the offsets between readings' coordinate systems.

    Args:
        other_reading: ossos.astrom.SourceReading
            The reading to compare coordinate systems with.

    Returns:
        (offset_x, offset_y):
            The x and y offsets between this reading and the other
            reading's coordinate systems.
    """
    my_x, my_y = self.reference_source_point
    other_x, other_y = other_reading.reference_source_point
    return my_x - other_x, my_y - other_y
def download_apcor(self, uri):
    """Downloads apcor data.

    Args:
        uri: The URI of the apcor data file.

    Returns:
        apcor: ossos.downloads.core.ApcorData
    """
    local_file = os.path.basename(uri)
    if os.access(local_file, os.F_OK):
        fobj = open(local_file)
    else:
        fobj = storage.vofile(uri, view='data')
    fobj.seek(0)
    apcor_str = fobj.read()
    fobj.close()
    return ApcorData.from_string(apcor_str)
def __init__(self, downloader, error_handler):
    """Constructor.

    Args:
        downloader: Downloads images.
        error_handler: Handles errors that occur when trying to download
            resources.
    """
    self.downloader = downloader
    self.error_handler = error_handler
    self._work_queue = Queue.PriorityQueue()
    self._workers = []
    self._maximize_workers()
def synthesize(self, duration, freqs_in_hz=[440.]):
    """Synthesize one or more sine waves.

    Args:
        duration (numpy.timedelta64): The duration of the sound to be
            synthesized.
        freqs_in_hz (list of float): Numbers representing the frequencies
            in hz that should be synthesized.
    """
    freqs = np.array(freqs_in_hz)
    scaling = 1 / len(freqs)
    sr = int(self.samplerate)
    cps = freqs / sr
    ts = (duration / Seconds(1)) * sr
    ranges = np.array([np.arange(0, ts * c, c) for c in cps])
    raw = (np.sin(ranges * (2 * np.pi)) * scaling).sum(axis=0)
    return AudioSamples(raw, self.samplerate)
def synthesize(self, duration, tick_frequency):
    """Synthesize periodic "ticks", generated from white noise and an
    envelope.

    Args:
        duration (numpy.timedelta64): The total duration of the sound to
            be synthesized.
        tick_frequency (numpy.timedelta64): The frequency of the ticking
            sound.
    """
    sr = self.samplerate.samples_per_second

    # create a short, tick sound
    tick = np.random.uniform(low=-1., high=1., size=int(sr * .1))
    tick *= np.linspace(1, 0, len(tick))

    # create silence
    samples = np.zeros(int(sr * (duration / Seconds(1))))

    ticks_per_second = Seconds(1) / tick_frequency

    # introduce periodic ticking sound
    step = int(sr // ticks_per_second)
    for i in range(0, len(samples), step):
        size = len(samples[i:i + len(tick)])
        samples[i:i + len(tick)] += tick[:size]
    return AudioSamples(samples, self.samplerate)
def synthesize(self, duration):
    """Synthesize white noise.

    Args:
        duration (numpy.timedelta64): The duration of the synthesized
            sound.
    """
    sr = self.samplerate.samples_per_second
    seconds = duration / Seconds(1)
    samples = np.random.uniform(low=-1., high=1., size=int(sr * seconds))
    return AudioSamples(samples, self.samplerate)
def get_workunit(self, ignore_list=None):
    """Gets a new unit of work.

    Args:
        ignore_list: list(str)
            A list of filenames which should be ignored. Defaults to None.

    Returns:
        new_workunit: WorkUnit
            A new unit of work that has not yet been processed. A lock on
            it has been acquired.

    Raises:
        NoAvailableWorkException
            There is no more work available.
    """
    if ignore_list is None:
        ignore_list = []
    potential_files = self.get_potential_files(ignore_list)
    while len(potential_files) > 0:
        potential_file = self.select_potential_file(potential_files)
        potential_files.remove(potential_file)
        if self._filter(potential_file):
            continue
        if self.directory_context.get_file_size(potential_file) == 0:
            continue
        if self.progress_manager.is_done(potential_file):
            self._done.append(potential_file)
            continue
        else:
            try:
                self.progress_manager.lock(potential_file)
            except FileLockedException:
                continue
            self._already_fetched.append(potential_file)
            return self.builder.build_workunit(
                self.directory_context.get_full_path(potential_file))
    logger.info("No eligible workunits remain to be fetched.")
    raise NoAvailableWorkException()
def handle_error(self, error, download_request):
    """Checks what error occurred and looks for an appropriate solution.

    Args:
        error: Exception
            The error that has occurred.
        download_request: The request which resulted in the error.
    """
    if hasattr(error, "errno") and error.errno == errno.EACCES:
        self.handle_certificate_problem(str(error))
    else:
        self.handle_general_download_error(str(error), download_request)
def read(keypath, configfile=None):
    """Reads a value from the configuration file.

    Args:
        keypath: str
            Specifies the key for which the value is desired. It can be a
            hierarchical path. Example: "section1.subsection.key1"
        configfile: str
            Path to the config file to read. Defaults to None, in which
            case the application's default config file is used.

    Returns:
        value from configuration file
    """
    if configfile in _configs:
        appconfig = _configs[configfile]
    else:
        appconfig = AppConfig(configfile=configfile)
        _configs[configfile] = appconfig
    return appconfig.read(keypath)
def intersect(self, other):
    """Return the intersection between this frequency band and another.

    Args:
        other (FrequencyBand): the instance to intersect with

    Examples::
        >>> import zounds
        >>> b1 = zounds.FrequencyBand(500, 1000)
        >>> b2 = zounds.FrequencyBand(900, 2000)
        >>> intersection = b1.intersect(b2)
        >>> intersection.start_hz, intersection.stop_hz
        (900, 1000)
    """
    lowest_stop = min(self.stop_hz, other.stop_hz)
    highest_start = max(self.start_hz, other.start_hz)
    return FrequencyBand(highest_start, lowest_stop)
def from_sample_rate(sample_rate, n_bands, always_even=False):
    """Return a :class:`~zounds.spectral.LinearScale` instance whose upper
    frequency bound is informed by the nyquist frequency of the sample
    rate.

    Args:
        sample_rate (SamplingRate): the sample rate whose nyquist frequency
            will serve as the upper frequency bound of this scale
        n_bands (int): the number of evenly-spaced frequency bands
    """
    fb = FrequencyBand(0, sample_rate.nyquist)
    return LinearScale(fb, n_bands, always_even=always_even)
def convert(self, point):
    """Convert a point from one coordinate system to another.

    Args:
        point: tuple(int x, int y)
            The point in the original coordinate system.

    Returns:
        converted_point: tuple(int x, int y)
            The point in the new coordinate system.

    Example: convert a coordinate from the original image into a pixel
    location within a cutout image.
    """
    x, y = point
    (x1, y1) = x - self.x_offset, y - self.y_offset
    logger.debug("converted {} {} ==> {} {}".format(x, y, x1, y1))
    return x1, y1
def get_pixel_coordinates(self, point, ccdnum):
    """Retrieves the pixel location of a point within the current HDUList
    given the location in the original FITS image. This takes into account
    that the image may be a cutout of a larger original.

    Args:
        point: tuple(float, float)
            (x, y) in original.
        ccdnum: the CCD number from the original Mosaic that the x/y
            coordinates are from.

    Returns:
        (x, y) pixel in this image.
    """
    hdulist_index = self.get_hdulist_idx(ccdnum)
    if isinstance(point[0], Quantity) and isinstance(point[1], Quantity):
        pix_point = point[0].value, point[1].value
    else:
        pix_point = point
    if self.reading.inverted:
        pix_point = (self.reading.obs.naxis1 - pix_point[0] + 1,
                     self.reading.obs.naxis2 - pix_point[1] + 1)
    (x, y) = self.hdulist[hdulist_index].converter.convert(pix_point)
    return x, y, hdulist_index
def integer_based_slice(self, ts):
    """Transform a :class:`TimeSlice` into integer indices that numpy can
    work with.

    Args:
        ts (slice, TimeSlice): the time slice to translate into integer
            indices
    """
    if isinstance(ts, slice):
        try:
            start = Seconds(0) if ts.start is None else ts.start
            if start < Seconds(0):
                start = self.end + start
            stop = self.end if ts.stop is None else ts.stop
            if stop < Seconds(0):
                stop = self.end + stop
            duration = stop - start
            ts = TimeSlice(start=start, duration=duration)
        except (ValueError, TypeError):
            pass

    if not isinstance(ts, TimeSlice):
        return ts

    diff = self.duration - self.frequency
    start_index = \
        max(0, np.floor((ts.start - diff) / self.frequency))
    end = self.end if ts.duration is None else ts.end

    # KLUDGE: This is basically arbitrary, but the motivation is that we'd
    # like to differentiate between cases where the slice
    # actually/intentionally overlaps a particular sample, and cases where
    # the slice overlaps the sample by a tiny amount, due to rounding or
    # lack of precision (e.g. Seconds(1) / SR44100().frequency).
    ratio = np.round(end / self.frequency, 2)
    stop_index = np.ceil(ratio)
    return slice(int(start_index), int(stop_index))
def fft(x, axis=-1, padding_samples=0):
    """Apply an FFT along the given dimension, and with the specified
    amount of zero-padding.

    Args:
        x (ArrayWithUnits): an :class:`~zounds.core.ArrayWithUnits`
            instance which has one or more
            :class:`~zounds.timeseries.TimeDimension` axes
        axis (int): The axis along which the fft should be applied
        padding_samples (int): The number of padding zeros to apply along
            axis before performing the FFT
    """
    if padding_samples > 0:
        padded = np.concatenate(
            [x, np.zeros((len(x), padding_samples), dtype=x.dtype)],
            axis=axis)
    else:
        padded = x

    transformed = np.fft.rfft(padded, axis=axis, norm='ortho')
    sr = audio_sample_rate(int(Seconds(1) / x.dimensions[axis].frequency))
    scale = LinearScale.from_sample_rate(sr, transformed.shape[-1])
    new_dimensions = list(x.dimensions)
    new_dimensions[axis] = FrequencyDimension(scale)
    return ArrayWithUnits(transformed, new_dimensions)
def set_bias(self, bias):
    """Adjusts the image bias.

    Bias determines where the color changes start. At low bias, low
    intensities (i.e., low pixel values) will have non-zero color
    differences, while at high bias only high pixel values will have
    non-zero differences.

    Args:
        bias: float
            A number between 0 and 1. Note that upon initialization the
            colormap has a default bias of 0.5.

    Returns:
        void
    """
    self.x_offset += (bias - self._bias)
    self._bias = bias
    self._build_cdict()
def set_contrast(self, contrast):
    """Adjusts the image contrast.

    Contrast refers to the rate of change of color with color level. At
    low contrast, color changes gradually over many intensity levels,
    while at high contrast it can change rapidly within a few levels.

    Args:
        contrast: float
            A number between 0 and 1. Note that upon initialization the
            colormap has a default contrast value of 0.5.

    Returns:
        void
    """
    self._contrast = contrast
    self.x_spread = 2 * (1.0 - contrast)
    self.y_spread = 2.0 - 2 * (1.0 - contrast)
    self._build_cdict()
def _copy(src, dst, src_is_storage, dst_is_storage):
    """Copies file from source to destination.

    Args:
        src (str or file-like object): Source file.
        dst (str or file-like object): Destination file.
        src_is_storage (bool): Source is storage.
        dst_is_storage (bool): Destination is storage.
    """
    # If both storage: Tries to perform same storage direct copy
    if src_is_storage and dst_is_storage:
        system_src = get_instance(src)
        system_dst = get_instance(dst)

        # Same storage copy
        if system_src is system_dst:

            # Checks if same file
            if system_src.relpath(src) == system_dst.relpath(dst):
                raise same_file_error(
                    "'%s' and '%s' are the same file" % (src, dst))

            # Tries to copy
            try:
                return system_dst.copy(src, dst)
            except (UnsupportedOperation, ObjectException):
                pass

        # Copy from compatible storage using "copy_from_<src_storage>" or
        # "copy_to_<src_storage>" method if any
        for caller, called, method in (
                (system_dst, system_src, 'copy_from_%s'),
                (system_src, system_dst, 'copy_to_%s')):
            if hasattr(caller, method % called.storage):
                try:
                    return getattr(caller, method % called.storage)(
                        src, dst, called)
                except (UnsupportedOperation, ObjectException):
                    continue

    # At least one storage object: copies streams
    with cos_open(src, 'rb') as fsrc:
        with cos_open(dst, 'wb') as fdst:

            # Get stream buffer size
            for stream in (fsrc, fdst):
                try:
                    buffer_size = getattr(stream, '_buffer_size')
                    break
                except AttributeError:
                    continue
            else:
                buffer_size = COPY_BUFSIZE

            # Read and write
            copyfileobj(fsrc, fdst, buffer_size)
def copy(src, dst):
    """Copies a source file to a destination file or directory.

    Equivalent to "shutil.copy".

    Source and destination can also be binary opened file-like objects.

    Args:
        src (path-like object or file-like object): Source file.
        dst (path-like object or file-like object): Destination file or
            directory.

    Raises:
        IOError: Destination directory not found.
    """
    # Handles path-like objects and checks if storage
    src, src_is_storage = format_and_is_storage(src)
    dst, dst_is_storage = format_and_is_storage(dst)

    # Local files: Redirects to "shutil.copy"
    if not src_is_storage and not dst_is_storage:
        return shutil_copy(src, dst)

    with handle_os_exceptions():
        # Checks destination
        if not hasattr(dst, 'read'):
            try:
                # If destination is directory: defines an output file
                # inside it
                if isdir(dst):
                    dst = join(dst, basename(src))

                # Checks if destination dir exists
                elif not isdir(dirname(dst)):
                    raise IOError("No such file or directory: '%s'" % dst)

            except ObjectPermissionError:
                # Unable to check target directory due to missing read
                # access, but do not raise to allow to write if possible
                pass

        # Performs copy
        _copy(src, dst, src_is_storage, dst_is_storage)
def copyfile(src, dst, follow_symlinks=True):
    """Copies a source file to a destination file.

    Equivalent to "shutil.copyfile".

    Source and destination can also be binary opened file-like objects.

    Args:
        src (path-like object or file-like object): Source file.
        dst (path-like object or file-like object): Destination file.
        follow_symlinks (bool): Follow symlinks. Not supported on cloud
            storage objects.

    Raises:
        IOError: Destination directory not found.
    """
    # Handles path-like objects and checks if storage
    src, src_is_storage = format_and_is_storage(src)
    dst, dst_is_storage = format_and_is_storage(dst)

    # Local files: Redirects to "shutil.copyfile"
    if not src_is_storage and not dst_is_storage:
        return shutil_copyfile(src, dst, follow_symlinks=follow_symlinks)

    with handle_os_exceptions():
        # Checks destination
        try:
            if not hasattr(dst, 'read') and not isdir(dirname(dst)):
                raise IOError("No such file or directory: '%s'" % dst)
        except ObjectPermissionError:
            # Unable to check target directory due to missing read access,
            # but do not raise to allow to write if possible
            pass

        # Performs copy
        _copy(src, dst, src_is_storage, dst_is_storage)
def copy(self, src, dst, other_system=None):
    """Copy object of the same storage.

    Args:
        src (str): Path or URL.
        dst (str): Path or URL.
        other_system (pycosio._core.io_system.SystemBase subclass): Unused.
    """
    copy_source = self.get_client_kwargs(src)
    copy_destination = self.get_client_kwargs(dst)
    with _handle_client_error():
        self.client.copy_object(CopySource=copy_source, **copy_destination)
def get_client_kwargs(self, path):
    """Get base keyword arguments for client for a specific path.

    Args:
        path (str): Absolute path or URL.

    Returns:
        dict: client args
    """
    bucket_name, key = self.split_locator(path)
    kwargs = dict(Bucket=bucket_name)
    if key:
        kwargs['Key'] = key
    return kwargs
def _get_time(header, keys, name):
    """Get time from header.

    Args:
        header (dict): Object header.
        keys (tuple of str): Header keys.
        name (str): Method name.

    Returns:
        float: The number of seconds since the epoch.
    """
    for key in keys:
        try:
            return _to_timestamp(header.pop(key))
        except KeyError:
            continue
    raise _UnsupportedOperation(name)
def _head(self, client_kwargs):
    """Returns object or bucket HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    with _handle_client_error():
        # Object
        if 'Key' in client_kwargs:
            header = self.client.head_object(**client_kwargs)
        # Bucket
        else:
            header = self.client.head_bucket(**client_kwargs)

    # Clean up HTTP request information
    for key in ('AcceptRanges', 'ResponseMetadata'):
        header.pop(key, None)
    return header
def _make_dir(self, client_kwargs):
    """Make a directory.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_client_error():
        # Object
        if 'Key' in client_kwargs:
            return self.client.put_object(Body=b'', **client_kwargs)

        # Bucket
        return self.client.create_bucket(
            Bucket=client_kwargs['Bucket'],
            CreateBucketConfiguration=dict(
                LocationConstraint=self._get_session().region_name))
def _remove(self, client_kwargs):
    """Remove an object.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_client_error():
        # Object
        if 'Key' in client_kwargs:
            return self.client.delete_object(**client_kwargs)

        # Bucket
        return self.client.delete_bucket(Bucket=client_kwargs['Bucket'])
def _list_objects(self, client_kwargs, path, max_request_entries):
    """Lists objects.

    Args:
        client_kwargs (dict): Client arguments.
        path (str): Path relative to current locator.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        generator of tuple: object name str, object header dict.
    """
    client_kwargs = client_kwargs.copy()
    if max_request_entries:
        client_kwargs['MaxKeys'] = max_request_entries

    while True:
        with _handle_client_error():
            response = self.client.list_objects_v2(
                Prefix=path, **client_kwargs)

        try:
            for obj in response['Contents']:
                yield obj.pop('Key'), obj
        except KeyError:
            raise _ObjectNotFoundError('Not found: %s' % path)

        # Handles results on more than one page
        try:
            client_kwargs['ContinuationToken'] = response[
                'NextContinuationToken']
        except KeyError:
            # End of results
            break
def _read_range(self, start, end=0):
    """Read a range of bytes in stream.

    Args:
        start (int): Start stream position.
        end (int): End stream position. 0 to not specify end.

    Returns:
        bytes: The bytes read.
    """
    # Get object part from S3
    try:
        with _handle_client_error():
            response = self._client.get_object(
                Range=self._http_range(start, end), **self._client_kwargs)

    # Check for end of file
    except _ClientError as exception:
        if exception.response['Error']['Code'] == 'InvalidRange':
            # EOF
            return bytes()
        raise

    # Get object content
    return response['Body'].read()
def _flush(self, buffer):
    """Flush the write buffers of the stream if applicable.

    Args:
        buffer (memoryview): Buffer content.
    """
    with _handle_client_error():
        self._client.put_object(
            Body=buffer.tobytes(), **self._client_kwargs)
def _properties_model_to_dict(properties):
    """Convert properties model to dict.

    Args:
        properties: Properties model.

    Returns:
        dict: Converted model.
    """
    result = {}
    for attr in properties.__dict__:
        value = getattr(properties, attr)
        if hasattr(value, '__module__') and 'models' in value.__module__:
            value = _properties_model_to_dict(value)
        if not (value is None or (isinstance(value, dict) and not value)):
            result[attr] = value
    return result
def _get_endpoint(self, sub_domain):
    """Get endpoint information from storage parameters.

    Update system with endpoint information and return information
    required to define roots.

    Args:
        self (pycosio._core.io_system.SystemBase subclass): System.
        sub_domain (str): Azure storage sub-domain.

    Returns:
        tuple of str: account_name, endpoint_suffix
    """
    storage_parameters = self._storage_parameters or dict()
    account_name = storage_parameters.get('account_name')
    if not account_name:
        raise ValueError('"account_name" is required for Azure storage')
    suffix = storage_parameters.get('endpoint_suffix', 'core.windows.net')
    self._endpoint = 'http%s://%s.%s.%s' % (
        '' if self._unsecure else 's', account_name, sub_domain, suffix)
    return account_name, suffix.replace('.', r'\.')
def _format_src_url(self, path, caller_system):
    """Ensure path is absolute and use the correct URL format for use with
    cross Azure storage account copy function.

    Args:
        path (str): Path or URL.
        caller_system (pycosio.storage.azure._AzureBaseSystem subclass):
            System calling this method (can be another Azure system).

    Returns:
        str: URL.
    """
    path = '%s/%s' % (self._endpoint, self.relpath(path))

    # If SAS token available, use it to give cross account copy access.
    if caller_system is not self:
        try:
            path = '%s?%s' % (path, self._storage_parameters['sas_token'])
        except KeyError:
            pass

    return path
def _update_listing_client_kwargs(client_kwargs, max_request_entries):
    """Updates client kwargs for listing functions.

    Args:
        client_kwargs (dict): Client arguments.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        dict: Updated client_kwargs.
    """
    client_kwargs = client_kwargs.copy()
    if max_request_entries:
        client_kwargs['num_results'] = max_request_entries
    return client_kwargs
def _model_to_dict(obj):
    """Convert object model to dict.

    Args:
        obj: Object model.

    Returns:
        dict: Converted model.
    """
    result = _properties_model_to_dict(obj.properties)
    for attribute in ('metadata', 'snapshot'):
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            continue
        if value:
            result[attribute] = value
    return result
def _read_range(self, start, end=0):
    """Read a range of bytes in stream.

    Args:
        start (int): Start stream position.
        end (int): End stream position. 0 to not specify end.

    Returns:
        bytes: The bytes read.
    """
    stream = _BytesIO()
    try:
        with _handle_azure_exception():
            self._get_to_stream(
                stream=stream, start_range=start,
                end_range=(end - 1) if end else None,
                **self._client_kwargs)

    # Check for end of file
    except _AzureHttpError as exception:
        if exception.status_code == 416:
            # EOF
            return bytes()
        raise

    return stream.getvalue()
def _flush(self, buffer, start, end):
    """Flush the write buffer of the stream if applicable.

    Args:
        buffer (memoryview): Buffer content.
        start (int): Start of buffer position to flush. Supported only
            with page blobs.
        end (int): End of buffer position to flush. Supported only with
            page blobs.
    """
    buffer_size = len(buffer)
    if not buffer_size:
        return

    # Write range normally
    with self._size_lock:
        if end > self._size:
            # Requires resizing the blob if there is not enough space
            with _handle_azure_exception():
                self._resize(content_length=end, **self._client_kwargs)
            self._reset_head()

    if buffer_size > self.MAX_FLUSH_SIZE:
        # Too large buffer, needs to be split into multiple requests
        futures = []
        for part_start in range(0, buffer_size, self.MAX_FLUSH_SIZE):

            # Split buffer
            buffer_part = buffer[
                part_start:part_start + self.MAX_FLUSH_SIZE]
            if not len(buffer_part):
                # No more data
                break

            # Upload split buffer in parallel
            start_range = start + part_start
            futures.append(self._workers.submit(
                self._update_range, data=buffer_part.tobytes(),
                start_range=start_range,
                end_range=start_range + len(buffer_part) - 1,
                **self._client_kwargs))

        with _handle_azure_exception():
            # Wait for upload completion
            for future in _as_completed(futures):
                future.result()

    else:
        # Buffer lower than limit, do one request.
        with _handle_azure_exception():
            self._update_range(
                data=buffer.tobytes(), start_range=start,
                end_range=end - 1, **self._client_kwargs)
def memoizedmethod(method):
    """Decorator that caches method result.

    Args:
        method (function): Method.

    Returns:
        function: Memoized method.

    Notes:
        The target method's class needs a "_cache" attribute (dict).
        This is the case for "ObjectIOBase" and all its subclasses.
    """
    method_name = method.__name__

    @wraps(method)
    def patched(self, *args, **kwargs):
        # Gets value from cache
        try:
            return self._cache[method_name]

        # Evaluates and caches value
        except KeyError:
            result = self._cache[method_name] = method(
                self, *args, **kwargs)
            return result

    return patched
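A small self-contained usage sketch. Note that the result is cached by method name only, so this decorator suits zero-argument accessors:

class Example:
    def __init__(self):
        self._cache = {}  # required by memoizedmethod

    @memoizedmethod
    def expensive(self):
        print('computing...')
        return 42

obj = Example()
obj.expensive()  # prints 'computing...' and returns 42
obj.expensive()  # returns 42 from cache, no print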
def _generate_async(self, generator):
    """Return the given generator with evaluation of its first element
    already running as a background task.

    Args:
        generator (iterable): A generator function.

    Returns:
        iterable: The generator function with first element evaluated in
        background.
    """
    first_value_future = self._workers.submit(next, generator)

    def get_first_element(future=first_value_future):
        try:
            yield future.result()
        except StopIteration:
            return

    return chain(get_first_element(), generator)
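The same idea as a standalone sketch (the names here are illustrative, not part of the source):

from concurrent.futures import ThreadPoolExecutor
from itertools import chain

def generate_async(generator, workers):
    """Kick off evaluation of the first element in the background."""
    future = workers.submit(next, generator)

    def first():
        try:
            yield future.result()
        except StopIteration:
            return

    return chain(first(), generator)

with ThreadPoolExecutor(max_workers=1) as pool:
    gen = generate_async(iter(range(3)), pool)
    print(list(gen))  # [0, 1, 2]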
def copy(self, src, dst, other_system=None):
    """Copy object of the same storage.

    Args:
        src (str): Path or URL.
        dst (str): Path or URL.
        other_system (pycosio._core.io_system.SystemBase subclass): Unused.
    """
    container, obj = self.split_locator(src)
    with _handle_client_exception():
        self.client.copy_object(
            container=container, obj=obj, destination=self.relpath(dst))
def get_client_kwargs(self, path):
    """Get base keyword arguments for client for a specific path.

    Args:
        path (str): Absolute path or URL.

    Returns:
        dict: client args
    """
    container, obj = self.split_locator(path)
    kwargs = dict(container=container)
    if obj:
        kwargs['obj'] = obj
    return kwargs
def _head(self, client_kwargs):
    """Returns object HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    with _handle_client_exception():
        # Object
        if 'obj' in client_kwargs:
            return self.client.head_object(**client_kwargs)

        # Container
        return self.client.head_container(**client_kwargs)
def _make_dir(self, client_kwargs):
    """Make a directory.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_client_exception():
        # Object
        if 'obj' in client_kwargs:
            return self.client.put_object(
                client_kwargs['container'], client_kwargs['obj'], b'')

        # Container
        return self.client.put_container(client_kwargs['container'])
def _remove(self, client_kwargs):
    """Remove an object.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_client_exception():
        # Object
        if 'obj' in client_kwargs:
            return self.client.delete_object(
                client_kwargs['container'], client_kwargs['obj'])

        # Container
        return self.client.delete_container(client_kwargs['container'])
def _list_objects(self, client_kwargs, path, max_request_entries):
    """Lists objects.

    Args:
        client_kwargs (dict): Client arguments.
        path (str): Path relative to current locator.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        generator of tuple: object name str, object header dict.
    """
    kwargs = dict(prefix=path)
    if max_request_entries:
        kwargs['limit'] = max_request_entries
    else:
        kwargs['full_listing'] = True
    with _handle_client_exception():
        response = self.client.get_container(
            client_kwargs['container'], **kwargs)
    for obj in response[1]:
        yield obj.pop('name'), obj
def _read_range(self, start, end=0):
    """Read a range of bytes in stream.

    Args:
        start (int): Start stream position.
        end (int): End stream position. 0 to not specify end.

    Returns:
        bytes: The bytes read.
    """
    try:
        with _handle_client_exception():
            return self._client.get_object(
                *self._client_args,
                headers=dict(Range=self._http_range(start, end)))[1]
    except _ClientException as exception:
        if exception.http_status == 416:
            # EOF
            return b''
        raise
def _flush(self, buffer):
    """Flush the write buffers of the stream if applicable.

    Args:
        buffer (memoryview): Buffer content.
    """
    container, obj = self._client_args
    with _handle_client_exception():
        self._client.put_object(container, obj, buffer)
def _peek(self, size=-1):
    """Return bytes from the stream without advancing the position.

    Args:
        size (int): Number of bytes to read. -1 to read the full stream.

    Returns:
        bytes: bytes read.
    """
    with self._seek_lock:
        seek = self._seek
    with handle_os_exceptions():
        return self._read_range(seek, seek + size)
def readinto(self, b):
    """Read bytes into a pre-allocated, writable bytes-like object b, and
    return the number of bytes read.

    Args:
        b (bytes-like object): buffer.

    Returns:
        int: number of bytes read.
    """
    if not self._readable:
        raise UnsupportedOperation('read')

    # Get and update stream positions
    size = len(b)
    with self._seek_lock:
        start = self._seek
        end = start + size
        self._seek = end

    # Read data range
    with handle_os_exceptions():
        read_data = self._read_range(start, end)

    # Copy to bytes-like object
    read_size = len(read_data)
    if read_size:
        memoryview(b)[:read_size] = read_data

    # Update stream position if end of file
    if read_size != size:
        with self._seek_lock:
            self._seek = start + read_size

    # Return read size
    return read_size
def _update_seek(self, offset, whence):
    """Update seek value.

    Args:
        offset (int): Offset.
        whence (int): Whence.

    Returns:
        int: Seek position.
    """
    with self._seek_lock:
        if whence == SEEK_SET:
            self._seek = offset
        elif whence == SEEK_CUR:
            self._seek += offset
        elif whence == SEEK_END:
            self._seek = offset + self._size
        else:
            raise ValueError('whence value %s unsupported' % whence)
        return self._seek
def write(self, b):
    """Write the given bytes-like object, b, to the underlying raw stream,
    and return the number of bytes written.

    Args:
        b (bytes-like object): Bytes to write.

    Returns:
        int: The number of bytes written.
    """
    if not self._writable:
        raise UnsupportedOperation('write')

    # This function writes data into a buffer;
    # "flush()" needs to be called to really write content on
    # Cloud Storage
    size = len(b)
    with self._seek_lock:
        start = self._seek
        end = start + size
        self._seek = end
        buffer = self._write_buffer
        if end <= len(buffer):
            buffer = memoryview(buffer)
        buffer[start:end] = b
    return size
def _handle_http_errors(response):
    """Check for HTTP errors and raise OSError if relevant.

    Args:
        response (requests.Response): Request response.

    Returns:
        requests.Response: response
    """
    code = response.status_code
    if 200 <= code < 400:
        return response
    elif code in (403, 404):
        raise {403: _ObjectPermissionError,
               404: _ObjectNotFoundError}[code](response.reason)
    response.raise_for_status()
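A hedged usage sketch with ``requests`` (the URL is illustrative; ``_ObjectPermissionError`` and ``_ObjectNotFoundError`` are the OSError subclasses imported above):

import requests

response = requests.get('https://example.com/object')
checked = _handle_http_errors(response)  # 403/404 raise OSError subclasses
payload = checked.content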
def _head(self, client_kwargs):
    """Returns object HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    return _handle_http_errors(
        self.client.request(
            'HEAD', timeout=self._TIMEOUT, **client_kwargs)).headers
def _read_range(self, start, end=0):
    """Read a range of bytes in stream.

    Args:
        start (int): Start stream position.
        end (int): End stream position. 0 to not specify end.

    Returns:
        bytes: The bytes read.
    """
    # Get object part
    response = self._client.request(
        'GET', self.name,
        headers=dict(Range=self._http_range(start, end)),
        timeout=self._TIMEOUT)

    if response.status_code == 416:
        # EOF
        return b''

    # Get object content
    return _handle_http_errors(response).content
def _update_size(self, size, future):
    """Keep track of the file size during writing.

    If the specified size value is greater than the current size, update
    the current size using the specified value.

    Used as callback in the default "_flush" implementation for files
    supporting random write access.

    Args:
        size (int): Size value.
        future (concurrent.futures._base.Future): future.
    """
    with self._size_lock:
        # Update value
        if size > self._size and future.done():
            # Size can be lower if seek down on an 'a' mode open file.
            self._size = size
def _flush_range(self, buffer, start, end):
    """Flush a buffer to a range of the file.

    Meant to be used asynchronously; provides parallel flushing of file
    parts when applicable.

    Args:
        buffer (memoryview): Buffer content.
        start (int): Start of buffer position to flush.
        end (int): End of buffer position to flush.
    """
    # On first call, get the file size if it exists
    with self._size_lock:
        if not self._size_synched:
            self._size_synched = True
            try:
                self._size = self.raw._size
            except (ObjectNotFoundError, UnsupportedOperation):
                self._size = 0

    # It is not possible to flush a part if start > size; in that case,
    # wait until previous parts are flushed before flushing this one
    while start > self._size:
        sleep(self._FLUSH_WAIT)

    # Flush buffer using RAW IO
    self._raw_flush(buffer, start, end)
def copy(self, src, dst, other_system=None):
    """Copy object of the same storage.

    Args:
        src (str): Path or URL.
        dst (str): Path or URL.
        other_system (pycosio.storage.azure._AzureBaseSystem subclass):
            The source storage system.
    """
    with _handle_azure_exception():
        self.client.copy_file(
            copy_source=(other_system or self)._format_src_url(src, self),
            **self.get_client_kwargs(dst))
def get_client_kwargs(self, path):
    """Get base keyword arguments for client for a specific path.

    Args:
        path (str): Absolute path or URL.

    Returns:
        dict: client args
    """
    # Remove query string from URL
    path = path.split('?', 1)[0]

    share_name, relpath = self.split_locator(path)
    kwargs = dict(share_name=share_name)

    # Directory
    if relpath and relpath[-1] == '/':
        kwargs['directory_name'] = relpath.rstrip('/')

    # File
    elif relpath:
        try:
            kwargs['directory_name'], kwargs['file_name'] = relpath.rsplit(
                '/', 1)
        except ValueError:
            kwargs['directory_name'] = ''
            kwargs['file_name'] = relpath

    # Else, share only
    return kwargs
def _head(self, client_kwargs):
    """Returns file, directory, or share HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    with _handle_azure_exception():
        # File
        if 'file_name' in client_kwargs:
            result = self.client.get_file_properties(**client_kwargs)

        # Directory
        elif 'directory_name' in client_kwargs:
            result = self.client.get_directory_properties(**client_kwargs)

        # Share
        else:
            result = self.client.get_share_properties(**client_kwargs)

    return self._model_to_dict(result)
def _list_objects(self, client_kwargs, max_request_entries):
    """Lists objects.

    Args:
        client_kwargs (dict): Client arguments.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        generator of tuple: object name str, object header dict,
        directory bool.
    """
    client_kwargs = self._update_listing_client_kwargs(
        client_kwargs, max_request_entries)

    with _handle_azure_exception():
        for obj in self.client.list_directories_and_files(**client_kwargs):
            yield (obj.name, self._model_to_dict(obj),
                   isinstance(obj, _Directory))
def _make_dir(self, client_kwargs):
    """Make a directory.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_azure_exception():
        # Directory
        if 'directory_name' in client_kwargs:
            return self.client.create_directory(
                share_name=client_kwargs['share_name'],
                directory_name=client_kwargs['directory_name'])

        # Share
        return self.client.create_share(**client_kwargs)
def _remove(self, client_kwargs):
    """Remove an object.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_azure_exception():
        # File
        if 'file_name' in client_kwargs:
            return self.client.delete_file(
                share_name=client_kwargs['share_name'],
                directory_name=client_kwargs['directory_name'],
                file_name=client_kwargs['file_name'])

        # Directory
        elif 'directory_name' in client_kwargs:
            return self.client.delete_directory(
                share_name=client_kwargs['share_name'],
                directory_name=client_kwargs['directory_name'])

        # Share
        return self.client.delete_share(
            share_name=client_kwargs['share_name'])
def _update_range(self, data, **kwargs):
    """Update range with data.

    Args:
        data (bytes): data.
    """
    self._client.update_range(data=data, **kwargs)
def is_storage(url, storage=None):
    """Check if a file is a local file or a storage file.

    A file is considered local if the URL is a local path or starts with
    "file://", and no "storage" name is provided.

    Args:
        url (str): file path or URL.
        storage (str): Storage name.

    Returns:
        bool: True if file is storage.
    """
    if storage:
        return True
    split_url = url.split('://', 1)
    if len(split_url) == 2 and split_url[0].lower() != 'file':
        return True
    return False
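Behavior at a glance, following directly from the branches above:

assert is_storage('s3://bucket/key')            # non-file scheme
assert is_storage('/tmp/file', storage='s3')    # explicit storage name
assert not is_storage('file:///tmp/file')       # file scheme is local
assert not is_storage('/tmp/file')              # plain path is local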
def format_and_is_storage(path):
    """Checks if path is storage and formats it.

    If path is an opened file-like object, returns is storage as True.

    Args:
        path (path-like object or file-like object): Path to check.

    Returns:
        tuple: str or file-like object (updated path),
        bool (True if is storage).
    """
    if not hasattr(path, 'read'):
        path = fsdecode(path).replace('\\', '/')
        return path, is_storage(path)
    return path, True
def equivalent_to(std_function):
    """Decorates a cloud-object-compatible function to provide a fallback
    to a standard function if used on local files.

    Args:
        std_function (function): Standard function to use with local
            files.

    Returns:
        function: new function
    """
    def decorate(cos_function):

        @wraps(cos_function)
        def decorated(path, *args, **kwargs):
            # Handles path-like objects
            path = fsdecode(path).replace('\\', '/')

            # Storage object: Handle with cloud object storage function
            if is_storage(path):
                with handle_os_exceptions():
                    return cos_function(path, *args, **kwargs)

            # Local file: Redirect to standard function
            return std_function(path, *args, **kwargs)

        return decorated

    return decorate
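A hypothetical usage sketch; ``my_cos_getsize`` is illustrative, not part of the source:

import os.path

@equivalent_to(os.path.getsize)
def my_cos_getsize(path):
    # Cloud-storage implementation goes here; local paths never reach
    # this body because the decorator redirects them to os.path.getsize.
    ...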
def copy(self, src, dst, other_system=None):
    """Copy object of the same storage.

    Args:
        src (str): Path or URL.
        dst (str): Path or URL.
        other_system (pycosio._core.io_system.SystemBase subclass): Unused.
    """
    copy_source = self.get_client_kwargs(src)
    copy_destination = self.get_client_kwargs(dst)
    with _handle_oss_error():
        bucket = self._get_bucket(copy_destination)
        bucket.copy_object(
            source_bucket_name=copy_source['bucket_name'],
            source_key=copy_source['key'],
            target_key=copy_destination['key'])
def islink(self, path=None, header=None):
    """Returns True if object is a symbolic link.

    Args:
        path (str): File path or URL.
        header (dict): Object header.

    Returns:
        bool: True if object is symlink.
    """
    if header is None:
        header = self._head(self.get_client_kwargs(path))
    for key in ('x-oss-object-type', 'type'):
        try:
            return header.pop(key) == 'Symlink'
        except KeyError:
            continue
    return False
def _head(self, client_kwargs):
    """Returns object HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    with _handle_oss_error():
        bucket = self._get_bucket(client_kwargs)

        # Object
        if 'key' in client_kwargs:
            return bucket.head_object(key=client_kwargs['key']).headers

        # Bucket
        return bucket.get_bucket_info().headers
def _make_dir(self, client_kwargs):
    """Make a directory.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_oss_error():
        bucket = self._get_bucket(client_kwargs)

        # Object
        if 'key' in client_kwargs:
            return bucket.put_object(key=client_kwargs['key'], data=b'')

        # Bucket
        return bucket.create_bucket()
def _remove(self, client_kwargs):
    """Remove an object.

    Args:
        client_kwargs (dict): Client arguments.
    """
    with _handle_oss_error():
        bucket = self._get_bucket(client_kwargs)

        # Object
        if 'key' in client_kwargs:
            return bucket.delete_object(key=client_kwargs['key'])

        # Bucket
        return bucket.delete_bucket()
def _model_to_dict(model, ignore):
    """Convert OSS model to dict.

    Args:
        model (oss2.models.RequestResult): Model.
        ignore (tuple of str): Keys to not insert into the dict.

    Returns:
        dict: Model dict version.
    """
    return {attr: value for attr, value in model.__dict__.items()
            if not attr.startswith('_') and attr not in ignore}
def _list_objects(self, client_kwargs, path, max_request_entries):
    """Lists objects.

    Args:
        client_kwargs (dict): Client arguments.
        path (str): Path relative to current locator.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        generator of tuple: object name str, object header dict.
    """
    kwargs = dict()
    if max_request_entries:
        kwargs['max_keys'] = max_request_entries
    bucket = self._get_bucket(client_kwargs)

    while True:
        with _handle_oss_error():
            response = bucket.list_objects(prefix=path, **kwargs)

        if not response.object_list:
            # An empty result means the directory does not exist.
            raise _ObjectNotFoundError('Not found: %s' % path)

        for obj in response.object_list:
            yield obj.key, self._model_to_dict(obj, ('key',))

        # Handles results on more than one page
        if response.next_marker:
            client_kwargs['marker'] = response.next_marker
        else:
            # End of results
            break
def _read_range(self, start, end=0):
    """Read a range of bytes in stream.

    Args:
        start (int): Start stream position.
        end (int): End stream position. 0 to not specify end.

    Returns:
        bytes: The bytes read.
    """
    if start >= self._size:
        # EOF. Do not detect using 416 (Out of range) error, 200 returned.
        return bytes()

    # Get object bytes range
    with _handle_oss_error():
        response = self._bucket.get_object(
            key=self._key, headers=dict(Range=self._http_range(
                # Returns full file if end > size
                start, end if end <= self._size else self._size)))

    # Get object content
    return response.read()
def _flush(self, buffer):
    """Flush the write buffers of the stream if applicable.

    Args:
        buffer (memoryview): Buffer content.
    """
    with _handle_oss_error():
        self._bucket.put_object(key=self._key, data=buffer.tobytes())
def _text_io_wrapper(stream, mode, encoding, errors, newline):
    """Wrap a binary stream into a text stream.

    Args:
        stream (file-like object): binary stream.
        mode (str): Open mode.
        encoding (str): Stream encoding.
        errors (str): Decoding error handling.
        newline (str): Universal newlines.
    """
    # Text mode, if not already a text stream
    # (i.e. one that has the "encoding" attribute)
    if "t" in mode and not hasattr(stream, 'encoding'):
        text_stream = TextIOWrapper(
            stream, encoding=encoding, errors=errors, newline=newline)
        yield text_stream
        text_stream.flush()

    # Binary mode (or already a text stream)
    else:
        yield stream