def _calcOrbits(self):
    """Prepares data structure for breaking data into orbits.

    Not intended for end user.
    """
    # if the breaks between orbits have not been defined, define them;
    # also store the data so that grabbing different orbits does not
    # require reloads of the whole dataset
    if len(self._orbit_breaks) == 0:
        # determine orbit breaks
        self._detBreaks()
        # store a copy of the data
        self._fullDayData = self.sat.data.copy()
        # set current orbit counter to zero (default)
        self._current = 0
def _equaBreaks(self, orbit_index_period=24.):
    """Determine where breaks in an equatorial satellite orbit occur.

    Looks for negative gradients in local time (or longitude) as well
    as breaks in UT.

    Parameters
    ----------
    orbit_index_period : float
        The change in value of supplied index parameter for a single
        orbit

    """
    if self.orbit_index is None:
        raise ValueError('Orbit properties must be defined at '
                         'pysat.Instrument object instantiation. '
                         'See Instrument docs.')
    else:
        try:
            self.sat[self.orbit_index]
        except ValueError:
            raise ValueError('Provided orbit index does not exist in '
                             'loaded data')
    # get difference in orbit index around the orbit
    lt_diff = self.sat[self.orbit_index].diff()
    # universal time values, from datetime index
    ut_vals = Series(self.sat.data.index)
    # UT difference
    ut_diff = ut_vals.diff()
    # get locations where the orbit index derivative is less than 0,
    # then do some basic checks on these locations
    ind, = np.where((lt_diff < -0.1))
    if len(ind) > 0:
        ind = np.hstack((ind,
                         np.array([len(self.sat[self.orbit_index])])))
        # look at distance between breaks
        dist = ind[1:] - ind[0:-1]
        # only keep orbit breaks with a distance greater than 1,
        # done for robustness
        if len(ind) > 1:
            if min(dist) == 1:
                print('There are orbit breaks right next to each other')
            ind = ind[:-1][dist > 1]
        # check for large positive gradients around the break that would
        # suggest not a true orbit break, but rather bad orbit_index
        # values
        new_ind = []
        for idx in ind:
            tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
            if len(tidx) != 0:
                # there are large changes, suggesting a false alarm;
                # iterate over samples and check
                for tidx in tidx:
                    # look at time change vs local time change
                    if (ut_diff[idx - 5:idx + 6].iloc[tidx] <
                            lt_diff[idx - 5:idx + 6].iloc[tidx] /
                            orbit_index_period * self.orbit_period):
                        # change in UT is small compared to the change
                        # in the orbit index; flagged as a false alarm
                        # and dropped from consideration
                        pass
                    else:
                        # change in UT is significant, keep orbit break
                        new_ind.append(idx)
                        break
            else:
                # no large positive gradients, current orbit break
                # passes the first test
                new_ind.append(idx)
        # replace all breaks with those that are 'good'
        ind = np.array(new_ind)
    # now, assemble some orbit breaks that are not triggered by changes
    # in the orbit index.
    # check if there is a UT break that is larger than the orbital
    # period, i.e. a time gap
    ut_change_vs_period = ut_diff > self.orbit_period
    # characterize UT change using the orbital period
    norm_ut = ut_diff / self.orbit_period
    # now, look for breaks because the length of time between samples
    # is too large; there is no break in slt/mlt/etc., so lt_diff is
    # small but the UT change is big
    norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values /
                                           orbit_index_period))
    # indices where one or the other flag is true
    ut_ind, = np.where(ut_change_vs_period |
                       (norm_ut_vs_norm_lt & (norm_ut > 0.95)))
    # combine these UT-determined orbit breaks with the orbit index
    # breaks
    if len(ut_ind) > 0:
        ind = np.hstack((ind, ut_ind))
        ind = np.sort(ind)
        ind = np.unique(ind)
        print('Time Gap')
    # now that most problems in orbits should have been caught, look at
    # the time difference between orbits (not individual orbits)
    orbit_ut_diff = ut_vals[ind].diff()
    orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
    # look for time gaps between partial orbits. The full orbital time
    # period is not required between the end of one orbit and the
    # beginning of the next if the first orbit is partial. Also
    # provides another general test of the orbital breaks determined.
    idx, = np.where((orbit_ut_diff / self.orbit_period -
                     orbit_lt_diff.values / orbit_index_period) > 0.97)
    # pull out breaks that pass the test; need to make sure the first
    # one is always included, since it gets dropped via the nature of
    # diff
    if len(idx) > 0:
        if idx[0] != 0:
            idx = np.hstack((0, idx))
    else:
        idx = np.array([0])
    # only keep the good indices
    if len(ind) > 0:
        ind = ind[idx]
        # create orbit break index, ensure first element is always 0
        if ind[0] != 0:
            ind = np.hstack((np.array([0]), ind))
    else:
        ind = np.array([0])
    # number of orbits
    num_orbits = len(ind)
    # set index of orbit breaks
    self._orbit_breaks = ind
    # set number of orbits for the day
    self.num = num_orbits
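A minimal, self-contained sketch of the core test above: an equatorial orbit break appears as a negative jump in local time while UT keeps increasing. The synthetic sweep and the -0.1 threshold are illustrative, not taken from pysat.

import numpy as np

# synthetic local solar time for two consecutive orbits: it sweeps
# 0 -> 24 hours, then wraps back to 0 at the start of the next orbit
slt = np.concatenate([np.linspace(0., 24., 50), np.linspace(0., 24., 50)])

# a negative first difference marks the wrap, i.e. the orbit break
d_slt = np.diff(slt)
breaks, = np.where(d_slt < -0.1)

print(breaks + 1)  # index of the first sample of the new orbit -> [50]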
def _polarBreaks(self):
    """Determine where breaks in a polar orbiting satellite orbit occur.

    Looks for sign changes in latitude (magnetic or geographic) as well
    as breaks in UT.

    """
    if self.orbit_index is None:
        raise ValueError('Orbit properties must be defined at '
                         'pysat.Instrument object instantiation. '
                         'See Instrument docs.')
    else:
        try:
            self.sat[self.orbit_index]
        except ValueError:
            raise ValueError('Provided orbit index does not appear to '
                             'exist in loaded data')
    # determine where the orbit index changes from positive to negative
    pos = (self.sat[self.orbit_index] >= 0)
    # boolean negation; `-pos` is no longer valid on pandas booleans
    npos = ~pos
    change = ((pos.values[:-1] & npos.values[1:]) |
              (npos.values[:-1] & pos.values[1:]))
    ind, = np.where(change)
    ind += 1
    # also break wherever the UT gap approaches an orbital period
    ut_diff = Series(self.sat.data.index).diff()
    ut_ind, = np.where(ut_diff / self.orbit_period > 0.95)
    if len(ut_ind) > 0:
        ind = np.hstack((ind, ut_ind))
        ind = np.sort(ind)
        ind = np.unique(ind)
    # create orbit break index, ensure first element is always 0
    if ind[0] != 0:
        ind = np.hstack((np.array([0]), ind))
    # number of orbits
    num_orbits = len(ind)
    # set index of orbit breaks
    self._orbit_breaks = ind
    # set number of orbits for the day
    self.num = num_orbits
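The sign-change detection reduces to a pair of boolean masks. A standalone sketch on synthetic latitudes:

import numpy as np

lat = np.array([10., 5., -2., -8., -1., 4., 9.])
pos = lat >= 0
# a break occurs wherever the sign flips between consecutive samples
change = (pos[:-1] & ~pos[1:]) | (~pos[:-1] & pos[1:])
ind = np.where(change)[0] + 1  # +1: break starts at the sample after the flip
print(ind)  # [2 5]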
def _orbitNumberBreaks(self):
    """Determine where orbital breaks in a dataset with orbit numbers
    occur.

    Looks for changes in unique values.

    """
    if self.orbit_index is None:
        raise ValueError('Orbit properties must be defined at '
                         'pysat.Instrument object instantiation. '
                         'See Instrument docs.')
    else:
        try:
            self.sat[self.orbit_index]
        except ValueError:
            raise ValueError('Provided orbit index does not appear to '
                             'exist in loaded data')
    # determine where the orbit index changes from one value to the next
    uniq_vals = self.sat[self.orbit_index].unique()
    orbit_index = []
    for val in uniq_vals:
        idx, = np.where(val == self.sat[self.orbit_index].values)
        orbit_index.append(idx[0])
    # create orbit break index, ensure first element is always 0
    if orbit_index[0] != 0:
        ind = np.hstack((np.array([0]), orbit_index))
    else:
        ind = orbit_index
    # number of orbits
    num_orbits = len(ind)
    # set index of orbit breaks
    self._orbit_breaks = ind
    # set number of orbits for the day
    self.num = num_orbits
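The first-occurrence loop above is O(n_unique x n); numpy can produce the same break indices directly. A sketch, assuming orbit numbers that only step forward in time:

import numpy as np

orbit_number = np.array([7, 7, 7, 8, 8, 9, 9, 9])
# index of the first sample of each distinct orbit number
_, first_idx = np.unique(orbit_number, return_index=True)
breaks = np.sort(first_idx)  # sort restores time order for unsorted values
print(breaks)  # [0 3 5]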
def _getBasicOrbit(self, orbit=None):
    """Load a particular orbit into .data for the loaded day.

    Parameters
    ----------
    orbit : int
        orbit number, 1 indexed; negative indexes allowed, -1 is the
        last orbit

    Note
    ----
    A day of data must be loaded before this routine functions properly.
    If the last orbit of the day is requested, it will NOT automatically
    be padded with data from the next day.

    """
    # ensure data exists
    if not self.sat.empty:
        # ensure proper orbit metadata present
        self._calcOrbits()
        # ensure user is requesting a particular orbit
        if orbit is not None:
            # pull out requested orbit
            if orbit == -1:
                # load orbit data into data
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[self.num + orbit]:]
                self._current = self.num + orbit + 1
            elif (orbit < 0) & (orbit >= -self.num):
                # load orbit data into data
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[self.num + orbit]:
                    self._orbit_breaks[self.num + orbit + 1]]
                self._current = self.num + orbit + 1
            elif (orbit < self.num) & (orbit != 0):
                # load orbit data into data
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[orbit - 1]:
                    self._orbit_breaks[orbit]]
                self._current = orbit
            elif orbit == self.num:
                # last orbit of the day, no upper break available
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[orbit - 1]:]
                self._current = orbit
            elif orbit == 0:
                raise ValueError('Orbits internally indexed by 1, 0 not '
                                 'allowed')
            else:
                # gone too far
                self.sat.data = []
                raise ValueError('Requested an orbit past total orbits '
                                 'for day')
        else:
            raise ValueError('Must set an orbit')
def load(self, orbit=None):
    """Load a particular orbit into .data for the loaded day.

    Parameters
    ----------
    orbit : int
        orbit number, 1 indexed

    Note
    ----
    A day of data must be loaded before this routine functions properly.
    If the last orbit of the day is requested, it will automatically be
    padded with data from the next day. The orbit counter will be reset
    to 1.

    """
    # ensure data exists
    if not self.sat.empty:
        # set up orbit metadata
        self._calcOrbits()
        # ensure user supplied an orbit
        if orbit is not None:
            # pull out requested orbit
            if orbit < 0:
                # negative indexing consistent with numpy: -1 is the
                # last, -2 the second to last, etc.
                orbit = self.num + 1 + orbit
            if orbit == 1:
                # the first orbit of the day may begin on the previous
                # day, so check there first
                try:
                    true_date = self.sat.date
                    self.sat.prev()
                    # prev() may return empty data (e.g. instrument
                    # downtime); fall back to the current day if so
                    if not self.sat.empty:
                        self.load(orbit=-1)
                    else:
                        self.sat.next()
                        self._getBasicOrbit(orbit=1)
                    # check that this orbit should end on the current day
                    delta = true_date - self.sat.data.index[0]
                    if delta >= self.orbit_period:
                        # the orbit loaded isn't close enough to date
                        # to be the first orbit of the day, move forward
                        self.next()
                except StopIteration:
                    # at the start of the dataset, take the first orbit
                    # of the loaded day as-is
                    self._getBasicOrbit(orbit=1)
                    # includes hack to appear to be zero indexed
                    print('Loaded Orbit:%i' % (self._current - 1))
            elif orbit == self.num:
                # user asked for the last orbit; make sure that orbit
                # data goes across the daybreak as needed
                if self.num != 1:
                    self._getBasicOrbit(self.num - 1)
                    self.next()
                else:
                    # the first orbit is also the last orbit
                    self._getBasicOrbit(orbit=-1)
            elif orbit < self.num:
                # load orbit data into data
                self._getBasicOrbit(orbit)
                # includes hack to appear to be zero indexed
                print('Loaded Orbit:%i' % (self._current - 1))
            else:
                # gone too far
                self.sat.data = DataFrame()
                raise Exception('Requested an orbit past total orbits '
                                'for day')
        else:
            raise Exception('Must set an orbit')
    else:
        print('No data loaded in instrument object to determine orbits.')
def next(self, *arg, **kwarg):
    """Load the next orbit into .data.

    Note
    ----
    Forms complete orbits across day boundaries. If no data is loaded,
    the first orbit from the first date of data is returned.

    """
    # first, check if data exists
    if not self.sat.empty:
        # set up orbit metadata
        self._calcOrbits()
        # if the current orbit is near the last, must be careful
        if self._current == (self.num - 1):
            # first, load the last orbit of the loaded day
            self._getBasicOrbit(orbit=-1)
            # the end of the orbit may occur on the next day
            load_next = True
            if self.sat._iter_type == 'date':
                delta = (self.sat.date - self.sat.data.index[-1]
                         + pds.Timedelta('1 day'))
                if delta >= self.orbit_period:
                    # don't need to load the next day because this
                    # orbit ends more than an orbital period from the
                    # next date
                    load_next = False
            if load_next:
                # the end of the user's desired orbit occurs tomorrow;
                # to form a complete orbit, save this current orbit,
                # load the next day, combine data, then select the
                # correct orbit
                temp_orbit_data = self.sat.data.copy()
                try:
                    # loading the next day/file clears orbit breaks info
                    self.sat.next()
                    if not self.sat.empty:
                        # combine the next day's data with the previous
                        # last orbit, then grab the first orbit
                        self.sat.data = pds.concat(
                            [temp_orbit_data[
                                :self.sat.data.index[0]
                                - pds.DateOffset(microseconds=1)],
                             self.sat.data])
                        self._getBasicOrbit(orbit=1)
                    else:
                        # no data; go back a day and grab the last
                        # orbit, as complete as the orbit can be
                        self.sat.prev()
                        self._getBasicOrbit(orbit=-1)
                except StopIteration:
                    pass
                del temp_orbit_data
            # includes hack to appear to be zero indexed
            print('Loaded Orbit:%i' % (self._current - 1))
        elif self._current == self.num:
            # at the last orbit; need to be careful about getting the
            # next one. Save this current orbit and load the next day.
            temp_orbit_data = self.sat.data.copy()
            # load the next day, which clears orbit breaks info
            self.sat.next()
            # combine the next day's orbit with the previous last orbit
            # to ensure things are correct
            if not self.sat.empty:
                pad_next = True
                # check if data padding is really needed; only works
                # when loading by date
                if self.sat._iter_type == 'date':
                    delta = self.sat.date - temp_orbit_data.index[-1]
                    if delta >= self.orbit_period:
                        # the end of the previous orbit is more than an
                        # orbit away from today, no need to worry
                        pad_next = False
                if pad_next:
                    # orbit went across the day break; stick the old
                    # orbit onto the new data and grab the second orbit
                    # (the first is the old one)
                    self.sat.data = pds.concat(
                        [temp_orbit_data[
                            :self.sat.data.index[0]
                            - pds.DateOffset(microseconds=1)],
                         self.sat.data])
                    # select second orbit of combined data
                    self._getBasicOrbit(orbit=2)
                else:
                    # padding from the previous orbit wasn't needed;
                    # just grab the first orbit of the loaded data
                    self._getBasicOrbit(orbit=1)
                    if self.sat._iter_type == 'date':
                        delta = (self.sat.date + pds.DateOffset(days=1)
                                 - self.sat.data.index[0])
                        if delta < self.orbit_period:
                            # this orbit ends on the next day. Though we
                            # grabbed the first orbit, missing data
                            # means the first available orbit in the
                            # data is actually the last for the day.
                            # Reset to the second-to-last orbit and call
                            # next() to get the last orbit, accounting
                            # for tomorrow's data as well.
                            self._current = self.num - 1
                            self.next()
            else:
                # no data for the next day; continue loading data until
                # there is some. sat.next() raises StopIteration when
                # it reaches the end of the dataset, leaving this
                # function.
                while self.sat.empty:
                    self.sat.next()
                self._getBasicOrbit(orbit=1)
            del temp_orbit_data
            # includes hack to appear to be zero indexed
            print('Loaded Orbit:%i' % (self._current - 1))
        elif self._current == 0:
            # no current orbit set; grab the first one using the load
            # command, which automatically loads the previous day if
            # needed to form a complete orbit
            self.load(orbit=1)
        elif self._current < (self.num - 1):
            # not close to the last orbit, just pull the next one
            self._getBasicOrbit(orbit=self._current + 1)
            # includes hack to appear to be zero indexed
            print('Loaded Orbit:%i' % (self._current - 1))
        else:
            raise Exception('You ended up where nobody should ever be. '
                            'Talk to someone about this fundamental '
                            'failure.')
    else:
        # no data loaded yet; keep going until data is found.
        # sat.next() raises StopIteration at the end of the dataset.
        while self.sat.empty:
            self.sat.next()
        # data found, grab the next (first) orbit
        self.next()
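A hedged usage sketch of the iteration above. The platform/name and the orbit_info values are illustrative assumptions, not taken from this code; any pysat Instrument with a local-time variable should behave the same way.

import pysat

# 'cnofs'/'ivm' and the 'mlt' orbit index are illustrative assumptions
ivm = pysat.Instrument(platform='cnofs', name='ivm',
                       orbit_info={'index': 'mlt', 'kind': 'local time'})
ivm.load(2010, 100)        # a day of data must be loaded first
ivm.orbits.load(orbit=1)   # first orbit; pads from the previous day if needed
ivm.orbits.next()          # step forward, crossing day boundaries transparently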
def load(fnames, tag=None, sat_id=None):
    """Load Dst index files.

    Parameters
    ----------
    fnames : (pandas.Series)
        Series of filenames
    tag : (str or NoneType)
        tag or None (default=None)
    sat_id : (str or NoneType)
        satellite id or None (default=None)

    Returns
    -------
    data : (pandas.DataFrame)
        Object containing satellite data
    meta : (pysat.Meta)
        Object containing metadata such as column names and units

    Note
    ----
    Called by pysat. Not intended for direct use by user.

    """
    data = pds.DataFrame()
    for filename in fnames:
        # need to remove the date appended to the dst filename
        fname = filename[0:-11]
        with open(fname) as f:
            lines = f.readlines()
        idx = 0
        # count the lines that actually hold data
        max_lines = 0
        for line in lines:
            if len(line) > 1:
                max_lines += 1
        yr = np.zeros(max_lines * 24, dtype=int)
        mo = np.zeros(max_lines * 24, dtype=int)
        day = np.zeros(max_lines * 24, dtype=int)
        ut = np.zeros(max_lines * 24, dtype=int)
        dst = np.zeros(max_lines * 24, dtype=int)
        for line in lines:
            if len(line) > 1:
                # two-digit years: 58-99 map to the 1900s, the rest to
                # the 2000s
                temp_year = int(line[14:16] + line[3:5])
                if temp_year > 57:
                    temp_year += 1900
                else:
                    temp_year += 2000
                yr[idx:idx + 24] = temp_year
                mo[idx:idx + 24] = int(line[5:7])
                day[idx:idx + 24] = int(line[8:10])
                ut[idx:idx + 24] = np.arange(24)
                # 24 hourly Dst values, 4 characters each
                temp = line.strip()[20:-4]
                temp2 = [temp[4 * i:4 * (i + 1)] for i in np.arange(24)]
                dst[idx:idx + 24] = temp2
                idx += 24
        start = pds.datetime(yr[0], mo[0], day[0], ut[0])
        stop = pds.datetime(yr[-1], mo[-1], day[-1], ut[-1])
        dates = pds.date_range(start, stop, freq='H')
        new_data = pds.DataFrame(dst, index=dates, columns=['dst'])
        # pull out the specific day attached to the filename
        new_date = pysat.datetime.strptime(filename[-10:], '%Y-%m-%d')
        idx, = np.where((new_data.index >= new_date) &
                        (new_data.index <
                         new_date + pds.DateOffset(days=1)))
        new_data = new_data.iloc[idx, :]
        # add the specific day to all data loaded for filenames
        data = pds.concat([data, new_data], axis=0)
    return data, pysat.Meta()
def _parse(yr, mo, day):
    """Basic parser to deal with the date format of the Kp file."""
    yr = int('20' + yr)
    mo = int(mo)
    day = int(day)
    return pds.datetime(yr, mo, day)
def load(fnames, tag=None, sat_id=None):
    """Load Kp index files.

    Parameters
    ----------
    fnames : (pandas.Series)
        Series of filenames
    tag : (str or NoneType)
        tag or None (default=None)
    sat_id : (str or NoneType)
        satellite id or None (default=None)

    Returns
    -------
    data : (pandas.DataFrame)
        Object containing satellite data
    meta : (pysat.Meta)
        Object containing metadata such as column names and units

    Notes
    -----
    Called by pysat. Not intended for direct use by user.

    """
    # Kp data is stored monthly but must be returned daily. The daily
    # date is attached to the filename: parse it off, load the month of
    # data, then downselect to the desired day.
    data = pds.DataFrame()
    # set up the fixed-width format for these files
    colspec = [(0, 2), (2, 4), (4, 6), (7, 10), (10, 13), (13, 16),
               (16, 19), (19, 23), (23, 26), (26, 29), (29, 32),
               (32, 50)]
    for filename in fnames:
        fname = filename[0:-11]
        date = pysat.datetime.strptime(filename[-10:], '%Y-%m-%d')
        temp = pds.read_fwf(fname, colspecs=colspec, skipfooter=4,
                            header=None, parse_dates=[[0, 1, 2]],
                            date_parser=_parse, index_col='0_1_2')
        idx, = np.where((temp.index >= date) &
                        (temp.index < date + pds.DateOffset(days=1)))
        temp = temp.iloc[idx, :]
        data = pds.concat([data, temp], axis=0)
    # drop the last column; it does not hold Kp values
    data = data.iloc[:, 0:-1]
    # each column increments UT by three hours. Produce a single series
    # with Kp values monotonically increasing in time, with appropriate
    # datetime indices.
    s = pds.Series()
    for i in np.arange(8):
        temp = pds.Series(data.iloc[:, i].values,
                          index=data.index +
                          pds.DateOffset(hours=int(3 * i)))
        s = s.append(temp)
    s = s.sort_index()
    s.index.name = 'time'
    # Kp comes in non-user-friendly tokens: 2-, 2o, and 2+ map to
    # roughly 1.67, 2.0, and 2.33. Convert for user friendliness.
    first = np.array([float(x[0]) for x in s])
    flag = np.array([x[1] for x in s])
    ind, = np.where(flag == '+')
    first[ind] += 1. / 3.
    ind, = np.where(flag == '-')
    first[ind] -= 1. / 3.
    result = pds.DataFrame(first, columns=['kp'], index=s.index)
    return result, pysat.Meta()
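The +/- conversion at the end is easy to verify in isolation. A minimal sketch of the same arithmetic on a few hand-written Kp tokens:

import numpy as np

raw = np.array(['2-', '2o', '2+', '5-'])
base = np.array([float(x[0]) for x in raw])
flag = np.array([x[1] for x in raw])
base[flag == '+'] += 1. / 3.   # '+' means a third of a unit above
base[flag == '-'] -= 1. / 3.   # '-' means a third of a unit below
print(np.round(base, 2))       # [1.67 2.   2.33 4.67]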
def download(date_array, tag, sat_id, data_path, user=None,
             password=None):
    """Routine to download Kp index data.

    Parameters
    ----------
    tag : (string or NoneType)
        Denotes type of file to load. Not used here. (default=None)
    sat_id : (string or NoneType)
        Specifies the satellite ID for a constellation. Not used.
        (default=None)
    data_path : (string or NoneType)
        Path to data directory. If None is specified, the value
        previously set in Instrument.files.data_path is used.
        (default=None)

    Returns
    -------
    Void : (NoneType)
        data downloaded to disk, if available.

    Notes
    -----
    Called by pysat. Not intended for direct use by user.

    """
    import ftplib
    from ftplib import FTP
    import sys

    # connect to host on the default port, anonymous login
    ftp = FTP('ftp.gfz-potsdam.de')
    ftp.login()
    ftp.cwd('/pub/home/obs/kp-ap/tab')
    for date in date_array:
        # monthly files named by two-digit year and month
        fname = 'kp{year:02d}{month:02d}.tab'
        fname = fname.format(year=(date.year - date.year // 100 * 100),
                             month=date.month)
        local_fname = fname
        saved_fname = os.path.join(data_path, local_fname)
        try:
            print('Downloading file for ' + date.strftime('%D'))
            sys.stdout.flush()
            ftp.retrbinary('RETR ' + fname,
                           open(saved_fname, 'wb').write)
        except ftplib.error_perm as exception:
            # FTP code 550 indicates the file is not on the server
            if str(exception.args[0]).split(" ", 1)[0] != '550':
                raise
            else:
                os.remove(saved_fname)
                print('File not available for ' + date.strftime('%D'))
    ftp.close()
    return
def filter_geoquiet(sat, maxKp=None, filterTime=None, kpData=None,
                    kp_inst=None):
    """Filters pysat.Instrument data for a given time after Kp drops
    below a gate.

    Loads Kp data for the same timeframe covered by sat and sets
    sat.data to NaN for times when Kp > maxKp and for filterTime after
    Kp drops below maxKp.

    Parameters
    ----------
    sat : pysat.Instrument
        Instrument to be filtered
    maxKp : float
        Maximum Kp value allowed. Kp values above this trigger sat.data
        filtering.
    filterTime : int
        Number of hours to filter data after Kp drops below maxKp
    kpData : pysat.Instrument (optional)
        Kp pysat.Instrument object with data already loaded
    kp_inst : pysat.Instrument (optional)
        Kp pysat.Instrument object ready to load Kp data. Overrides
        kpData.

    Returns
    -------
    None : NoneType
        sat Instrument object modified in place

    """
    if kp_inst is not None:
        kp_inst.load(date=sat.date, verifyPad=True)
        kpData = kp_inst
    elif kpData is None:
        kp = pysat.Instrument('sw', 'kp', pad=pds.DateOffset(days=1))
        kp.load(date=sat.date, verifyPad=True)
        kpData = kp
    if maxKp is None:
        maxKp = 3 + 1. / 3.
    if filterTime is None:
        filterTime = 24
    # now that the defaults are ensured, do the filtering.
    # date of the satellite data
    date = sat.date
    selData = kpData[date - pds.DateOffset(days=1):
                     date + pds.DateOffset(days=1)]
    ind, = np.where(selData['kp'] >= maxKp)
    # NaN-out data from each high-Kp sample through filterTime hours
    for lind in ind:
        sat.data[selData.index[lind]:
                 (selData.index[lind] +
                  pds.DateOffset(hours=filterTime))] = np.NaN
    sat.data = sat.data.dropna(axis=0, how='all')
    return
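A hedged usage sketch; the instrument below is an illustrative stand-in for any pysat.Instrument with a loaded day of data.

import pysat

ivm = pysat.Instrument(platform='cnofs', name='ivm')  # illustrative
ivm.load(2010, 100)

# NaN-out data while Kp >= 3+ and for 12 hours after it drops below
filter_geoquiet(ivm, maxKp=3. + 1. / 3., filterTime=12)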
def _get_converter(self, converter_str):
    """Find a converter function reference by name.

    The converter name follows one of two conventions:

        Class.method

    or:

        method

    The first form must be available in the current module. The second
    form must be available in the `__builtin__` (or `builtins` in
    Python 3) module.

    :param converter_str: string representation of the converter func
    :return: function reference
    """
    ret = None
    if converter_str is not None:
        converter_desc_list = converter_str.split('.')
        if len(converter_desc_list) == 1:
            converter = converter_desc_list[0]
            # default to `converter`
            ret = getattr(cvt, converter, None)
            if ret is None:
                # try a module-level converter
                ret = self.get_converter(converter)
            if ret is None:
                # try a resource class
                ret = self.get_resource_clz_by_name(converter)
            if ret is None:
                # try an enum
                ret = self.get_enum_by_name(converter)
            if ret is None:
                # try parser config
                ret = self.get(converter)
    if ret is None and converter_str is not None:
        raise ValueError(
            'Specified converter not supported: {}'.format(
                converter_str))
    return ret
def copy_file_to_remote(self, local_path, remote_path):
    """scp the local file to a remote folder.

    :param local_path: local path
    :param remote_path: remote path
    """
    sftp_client = self.transport.open_sftp_client()
    LOG.debug('Copy the local file to remote. '
              'Source=%(src)s. Target=%(target)s.' %
              {'src': local_path, 'target': remote_path})
    try:
        sftp_client.put(local_path, remote_path)
    except Exception as ex:
        LOG.error('Failed to copy the local file to remote. '
                  'Reason: %s.' % six.text_type(ex))
        raise SFtpExecutionError(err=ex)
def get_remote_file(self, remote_path, local_path):
    """Fetch a remote file.

    :param remote_path: remote path
    :param local_path: local path
    """
    sftp_client = self.transport.open_sftp_client()
    LOG.debug('Get the remote file. '
              'Source=%(src)s. Target=%(target)s.' %
              {'src': remote_path, 'target': local_path})
    try:
        sftp_client.get(remote_path, local_path)
    except Exception as ex:
        LOG.error('Failed to secure copy. Reason: %s.' %
                  six.text_type(ex))
        raise SFtpExecutionError(err=ex)
def close(self):
    """Closes the ssh connection."""
    if 'isLive' in self.__dict__ and self.isLive:
        self.transport.close()
        self.isLive = False
def xml_request(check_object=False, check_invalid_data_mover=False):
    """Indicate that the return value is an XML API request.

    :param check_object:
    :param check_invalid_data_mover:
    :return: the response of this request
    """
    def decorator(f):
        @functools.wraps(f)
        def func_wrapper(self, *argv, **kwargs):
            request = f(self, *argv, **kwargs)
            return self.request(
                request,
                check_object=check_object,
                check_invalid_data_mover=check_invalid_data_mover)
        return func_wrapper
    return decorator
def nas_command(f):
    """Indicate that this is a NAS command to run over ssh.

    :param f: function that returns the command as a list
    :return: command execution result
    """
    @functools.wraps(f)
    def func_wrapper(self, *argv, **kwargs):
        commands = f(self, *argv, **kwargs)
        return self.ssh_execute(['env', 'NAS_DB=/nas'] + commands)
    return func_wrapper
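Both decorators follow the same shape: the wrapped function only builds the request or command, and the wrapper executes it. A self-contained toy version of the pattern, with all names hypothetical:

import functools

def run_with_prefix(prefix):
    """Decorator factory: the wrapped function returns a command list,
    and the wrapper prepends `prefix` before 'executing' it."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            command = f(*args, **kwargs)
            return prefix + command   # stand-in for ssh_execute(...)
        return wrapper
    return decorator

@run_with_prefix(['env', 'NAS_DB=/nas'])
def list_servers():
    return ['nas_server', '-list']

print(list_servers())  # ['env', 'NAS_DB=/nas', 'nas_server', '-list']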
def restore(self, backup=None, delete_backup=False):
    """Restore the snapshot to the associated storage resource.

    :param backup: name of the backup snapshot
    :param delete_backup: whether to delete the backup snap after a
        successful restore.
    """
    resp = self._cli.action(self.resource_class, self.get_id(),
                            'restore', copyName=backup)
    resp.raise_if_err()
    backup = resp.first_content['backup']
    backup_snap = UnitySnap(_id=backup['id'], cli=self._cli)
    if delete_backup:
        log.info("Deleting the backup snap {} as the restoration "
                 "succeeded.".format(backup['id']))
        backup_snap.delete()
    return backup_snap
def thin_clone(self, name, io_limit_policy=None, description=None):
    """Creates a new thin clone from this snapshot.

    .. note:: this snapshot should not enable Auto-Delete.
    """
    if self.is_member_snap():
        raise UnityCGMemberActionNotSupportError()
    if self.lun and not self.lun.is_thin_enabled:
        raise UnityThinCloneNotAllowedError()
    return TCHelper.thin_clone(self._cli, self, name, io_limit_policy,
                               description)
def delete(self, async_mode=False, even_attached=False):
    """Deletes the snapshot.

    :param async_mode: whether to delete the snapshot in async mode.
    :param even_attached: whether to delete the snapshot even if it is
        attached to hosts.
    """
    try:
        return super(UnitySnap, self).delete(async_mode=async_mode)
    except UnityDeleteAttachedSnapError:
        if even_attached:
            log.debug("Force delete the snapshot even if it is "
                      "attached. First detach the snapshot from hosts, "
                      "then delete again.")
            # Currently `detach_from` doesn't process the `host`
            # parameter; it always detaches the snapshot from all
            # hosts, so pass in `None` here.
            self.detach_from(None)
            return super(UnitySnap, self).delete(async_mode=async_mode)
        else:
            raise
def get_all(self, type_name, base_fields=None, the_filter=None,
            nested_fields=None):
    """Get all resources of a given type.

    :param type_name: Resource type. For example, pool, lun, nasServer.
    :param base_fields: fields of this resource
    :param the_filter: dictionary of filter like `{'name': 'abc'}`
    :param nested_fields: nested resource fields
    :return: List of resource class objects
    """
    fields = self.get_fields(type_name, base_fields, nested_fields)
    the_filter = self.dict_to_filter_string(the_filter)
    url = '/api/types/{}/instances'.format(type_name)
    resp = self.rest_get(url, fields=fields, filter=the_filter)
    ret = resp
    # follow the pagination links, accumulating all entries onto the
    # first response
    while resp.has_next_page:
        resp = self.rest_get(url, fields=fields, filter=the_filter,
                             page=resp.next_page)
        ret.entries.extend(resp.entries)
    return ret
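The pagination loop generalizes beyond this client. A sketch of the same accumulate-while-has-next-page pattern, with a hypothetical get_page callable standing in for rest_get:

def fetch_all(get_page):
    """Collect entries from a paged API. `get_page(page)` is assumed to
    return an object with .entries, .has_next_page, and .next_page."""
    resp = get_page(None)          # first request, no page marker
    entries = list(resp.entries)
    while resp.has_next_page:
        resp = get_page(resp.next_page)
        entries.extend(resp.entries)
    return entries

Note that the original extends the first response's entries in place and returns that response, which keeps page one's metadata on the returned object.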
def get(self, type_name, obj_id, base_fields=None, nested_fields=None):
    """Get the resource by resource id.

    :param type_name: Resource type. For example, pool, lun, nasServer.
    :param obj_id: Resource id
    :param base_fields: Resource fields to return
    :param nested_fields: nested resource fields.
    :return: response of the REST GET for the resource instance
    """
    base_fields = self.get_fields(type_name, base_fields, nested_fields)
    url = '/api/instances/{}/{}'.format(type_name, obj_id)
    return self.rest_get(url, fields=base_fields)
def _flat_vports(self, connection_port):
    """Flatten the virtual ports."""
    vports = []
    for vport in connection_port.virtual_ports:
        self._set_child_props(connection_port, vport)
        vports.append(vport)
    return vports
def has_snap(self):
    """Check for existing snapshots.

    This method does not count snaps in the "destroying" state!

    :return: False if there are no snaps or all snaps are destroying.
    """
    return len(list(filter(lambda s: s.state != SnapStateEnum.DESTROYING,
                           self.snapshots))) > 0
def median2D(const, bin1, label1, bin2, label2, data_label,
             returnData=False):
    """Return a 2D average of data_label over a season and label1,
    label2.

    Parameters
    ----------
    const : Constellation or Instrument
    bin# : [min, max, number of bins]
    label# : string
        identifies data product for bin#
    data_label : list-like
        contains strings identifying data product(s) to be averaged

    Returns
    -------
    median : dictionary
        2D median accessed by data_label as a function of label1 and
        label2 over the season delineated by bounds of passed
        instrument objects. Also includes 'count' and 'avg_abs_dev' as
        well as the values of the bin edges in 'bin_x' and 'bin_y'.

    """
    # const is either an Instrument or a Constellation, and we want to
    # iterate over it. If it's a Constellation, we can do that as-is;
    # if it's an Instrument, wrap it in something that will yield that
    # Instrument, like a list.
    if isinstance(const, pysat.Instrument):
        const = [const]
    elif not isinstance(const, pysat.Constellation):
        raise ValueError("Parameter must be an Instrument or a "
                         "Constellation.")
    # create the bin boundaries used for sorting
    binx = np.linspace(bin1[0], bin1[1], bin1[2] + 1)
    biny = np.linspace(bin2[0], bin2[1], bin2[2] + 1)
    # how many bins are used
    numx = len(binx) - 1
    numy = len(biny) - 1
    # how many different data products
    numz = len(data_label)
    # create an array to store all values before taking the median;
    # the bin/data-product indices used for looping
    yarr = np.arange(numy)
    xarr = np.arange(numx)
    zarr = np.arange(numz)
    # 3D structure: stores the data sorted into each bin, in a deque
    ans = [[[collections.deque() for i in xarr] for j in yarr]
           for k in zarr]
    for inst in const:
        # iterate over the instrument season; typically by date, though
        # that depends on the configuration of the particular
        # instrument. Either way, iterating loads successive data
        # between the start and end bounds.
        for inst in inst:
            # collect data in bins for averaging
            if len(inst.data) != 0:
                # sort the data into bins (x) based on label1
                # (stores bin indices in xind)
                xind = np.digitize(inst.data[label1], binx) - 1
                # for each possible x index
                for xi in xarr:
                    # get the indices of the data in that bin
                    xindex, = np.where(xind == xi)
                    if len(xindex) > 0:
                        # look up the data along y (label2) at that set
                        # of indices (a given x)
                        yData = inst.data.iloc[xindex]
                        # digitize that, to sort the data into bins
                        # along y (label2)
                        yind = np.digitize(yData[label2], biny) - 1
                        # for each possible y index
                        for yj in yarr:
                            # select data with this y index (already
                            # filtered for the x index)
                            yindex, = np.where(yind == yj)
                            if len(yindex) > 0:
                                # for each data product label zk
                                for zk in zarr:
                                    # filter the (x-filtered) data by y,
                                    # select the data product, and
                                    # extend the deque (.ix is
                                    # deprecated; positional row select
                                    # plus column label is equivalent)
                                    ans[zk][yj][xi].extend(
                                        yData[data_label[zk]]
                                        .iloc[yindex].tolist())
    return _calc_2d_median(ans, data_label, binx, biny, xarr, yarr,
                           zarr, numx, numy, numz, returnData)
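A hedged usage sketch of median2D. The instrument, bounds, and data labels are illustrative, and the layout of the returned dictionary is assumed from the docstring above:

import pysat

ivm = pysat.Instrument(platform='cnofs', name='ivm')  # illustrative
ivm.bounds = (pysat.datetime(2010, 4, 10), pysat.datetime(2010, 4, 12))

# 2D median of a hypothetical 'ionDensity' product, binned by
# longitude (24 bins) and magnetic local time (24 bins)
med = median2D(ivm, [0., 360., 24], 'glon',
               [0., 24., 24], 'mlt', ['ionDensity'])

# assumed layout: per-label dict holding 'median', 'count',
# 'avg_abs_dev', 'bin_x', 'bin_y'
values = med['ionDensity']['median']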
def get_rsc_list_2(self, rsc_clz_list=None):
    """Get the list of resource lists to collect, based on a class list.

    :param rsc_clz_list: the list of classes to collect
    :return: filtered list of resource lists, like
        [VNXLunList(), VNXDiskList()]
    """
    rsc_list_2 = self._default_rsc_list_with_perf_stats()
    if rsc_clz_list is None:
        rsc_clz_list = ResourceList.get_rsc_clz_list(rsc_list_2)
    return [rsc_list for rsc_list in rsc_list_2
            if rsc_list.get_resource_class() in rsc_clz_list]
def load(cosmicFiles, tag=None, sat_id=None):
    """COSMIC data load routine, called by pysat."""
    import netCDF4
    num = len(cosmicFiles)
    # make sure there are files to read
    if num != 0:
        # call separate load_files routine, segmented for a possible
        # multiprocessor load; not included and only benefits about 20%
        output = pysat.DataFrame(load_files(cosmicFiles, tag=tag,
                                            sat_id=sat_id))
        output.index = pysat.utils.create_datetime_index(
            year=output.year, month=output.month, day=output.day,
            uts=output.hour * 3600. + output.minute * 60. +
            output.second)
        # make sure UTS is strictly increasing
        output.sort_index(inplace=True)
        # use the first available file to pick out meta information
        meta = pysat.Meta()
        ind = 0
        repeat = True
        while repeat:
            try:
                data = netCDF4.Dataset(cosmicFiles[ind])
                ncattrsList = data.ncattrs()
                for d in ncattrsList:
                    meta[d] = {'units': '', 'long_name': d}
                keys = data.variables.keys()
                for key in keys:
                    meta[key] = {'units': data.variables[key].units,
                                 'long_name':
                                 data.variables[key].long_name}
                repeat = False
            except RuntimeError:
                # file was empty, try the next one by incrementing ind
                ind += 1
        return output, meta
    else:
        # no data
        return pysat.DataFrame(None), pysat.Meta()
def load_files(files, tag=None, sat_id=None):
    """Loads a list of COSMIC data files, supplied by user.

    Returns a list of dicts, one dict for each file.
    """
    output = [None] * len(files)
    drop_idx = []
    for (i, fname) in enumerate(files):
        try:
            data = netCDF4.Dataset(fname)
            # build up a dictionary with all ncattrs
            new = {}
            # get the list of file attributes
            ncattrsList = data.ncattrs()
            for d in ncattrsList:
                new[d] = data.getncattr(d)
            # load all of the variables in the netCDF
            loadedVars = {}
            keys = data.variables.keys()
            for key in keys:
                loadedVars[key] = data.variables[key][:]
            new['profiles'] = pysat.DataFrame(loadedVars)
            if tag == 'ionprf':
                new['profiles'].index = new['profiles']['MSL_alt']
            output[i] = new
            data.close()
        except RuntimeError:
            # some of the S4 files have zero bytes, which causes a read
            # error; store the index of these files so the Nones they
            # leave behind can be dropped
            drop_idx.append(i)
    # drop anything that came from the zero-byte files
    drop_idx.reverse()
    for i in drop_idx:
        del output[i]
    return output
def download(date_array, tag, sat_id, data_path=None, user=None,
             password=None):
    """Downloads data from Madrigal.

    The user's name should be provided in the user field; John
    Malkovich should be entered as John+Malkovich. The password field
    should be the user's email address. These parameters are passed to
    Madrigal when downloading. The affiliation field is set to pysat to
    enable tracking of pysat downloads.

    Parameters
    ----------
    date_array : array-like
        Dates to download data for.
    tag : string
        Denotes type of file to load.
    sat_id : string
        Satellite ID, used to look up the Madrigal kindat.
    data_path : string
        Path to the output data directory.
    user : string
        User name, e.g. 'John+Malkovich'.
    password : string
        User email address.

    """
    import subprocess

    # currently passes things along if no user and password supplied;
    # needed for testing
    # TODO: implement user and password values in test code specific
    # to DMSP
    if user is None:
        print('No user information supplied for download.')
        user = 'pysat_testing'
    if password is None:
        print('Please provide email address in password field.')
        password = 'pysat_testing@not_real_email.org'

    a = subprocess.check_output(
        ["globalDownload.py", "--verbose",
         "--url=http://cedar.openmadrigal.org",
         '--outputDir=' + data_path,
         '--user_fullname=' + user,
         '--user_email=' + password,
         '--user_affiliation=pysat',
         '--format=hdf5',
         '--startDate=' + date_array[0].strftime('%m/%d/%Y'),
         '--endDate=' + date_array[-1].strftime('%m/%d/%Y'),
         '--inst=8100',
         '--kindat=' + str(madrigal_tag[sat_id])])
    print('Feedback from openMadrigal ', a)
def clean(self):
    """Routine to return DMSP IVM data cleaned to the specified level.

    'clean' enforces that both RPA and DM flags are <= 1
    'dusty' <= 2
    'dirty' <= 3
    'none'  no cleaning

    Routine is called by pysat, and not by the end user directly.

    Parameters
    ----------
    inst : (pysat.Instrument)
        Instrument class object, whose attribute clean_level is used to
        return the desired level of data selectivity.

    Returns
    -------
    Void : (NoneType)
        data in inst is modified in-place.

    Notes
    -----
    Supports 'clean', 'dusty', 'dirty'

    """
    if self.clean_level == 'clean':
        idx, = np.where((self['rpa_flag_ut'] <= 1) &
                        (self['idm_flag_ut'] <= 1))
    elif self.clean_level == 'dusty':
        idx, = np.where((self['rpa_flag_ut'] <= 2) &
                        (self['idm_flag_ut'] <= 2))
    elif self.clean_level == 'dirty':
        idx, = np.where((self['rpa_flag_ut'] <= 3) &
                        (self['idm_flag_ut'] <= 3))
    else:
        idx = []
    # downselect data based upon the cleaning conditions above
    self.data = self[idx]
    return
def create(cls, cli, management_address, local_username=None,
           local_password=None, remote_username=None,
           remote_password=None, connection_type=None):
    """Configures a remote system for remote replication.

    :param cls: this class.
    :param cli: the rest client.
    :param management_address: the management IP address of the remote
        system.
    :param local_username: administrative username of the local system.
    :param local_password: administrative password of the local system.
    :param remote_username: administrative username of the remote
        system.
    :param remote_password: administrative password of the remote
        system.
    :param connection_type: `ReplicationCapabilityEnum`. Replication
        connection type to the remote system.
    :return: the newly created remote system.
    """
    req_body = cli.make_body(
        managementAddress=management_address,
        localUsername=local_username,
        localPassword=local_password,
        remoteUsername=remote_username,
        remotePassword=remote_password,
        connectionType=connection_type)
    resp = cli.post(cls().resource_class, **req_body)
    resp.raise_if_err()
    return cls.get(cli, resp.resource_id)
def modify(self, management_address=None, username=None, password=None,
           connection_type=None):
    """Modifies a remote system for remote replication.

    :param management_address: same as the one in the `create` method.
    :param username: username for accessing the remote system.
    :param password: password for accessing the remote system.
    :param connection_type: same as the one in the `create` method.
    """
    req_body = self._cli.make_body(
        managementAddress=management_address,
        username=username,
        password=password,
        connectionType=connection_type)
    resp = self.action('modify', **req_body)
    resp.raise_if_err()
    return resp
def verify(self, connection_type=None):
    """Verifies and updates the remote system settings.

    :param connection_type: same as the one in the `create` method.
    """
    req_body = self._cli.make_body(connectionType=connection_type)
    resp = self.action('verify', **req_body)
    resp.raise_if_err()
    return resp
def create(cls, cli, sp, ip_port, ip_address, netmask=None,
           v6_prefix_length=None, gateway=None, vlan_id=None):
    """Creates a replication interface.

    :param cls: this class.
    :param cli: the rest cli.
    :param sp: `UnityStorageProcessor` object. Storage processor on
        which the replication interface is running.
    :param ip_port: `UnityIpPort` object. Physical port or link
        aggregation on the storage processor on which the interface is
        running.
    :param ip_address: IP address of the replication interface.
    :param netmask: IPv4 netmask for the replication interface, if it
        uses an IPv4 address.
    :param v6_prefix_length: IPv6 prefix length for the interface, if
        it uses an IPv6 address.
    :param gateway: IPv4 or IPv6 gateway address for the replication
        interface.
    :param vlan_id: VLAN identifier for the interface.
    :return: the newly created replication interface.
    """
    req_body = cli.make_body(sp=sp, ipPort=ip_port,
                             ipAddress=ip_address, netmask=netmask,
                             v6PrefixLength=v6_prefix_length,
                             gateway=gateway, vlanId=vlan_id)
    resp = cli.post(cls().resource_class, **req_body)
    resp.raise_if_err()
    return cls.get(cli, resp.resource_id)
def modify(self, sp=None, ip_port=None, ip_address=None, netmask=None,
           v6_prefix_length=None, gateway=None, vlan_id=None):
    """Modifies a replication interface.

    :param sp: same as the one in the `create` method.
    :param ip_port: same as the one in the `create` method.
    :param ip_address: same as the one in the `create` method.
    :param netmask: same as the one in the `create` method.
    :param v6_prefix_length: same as the one in the `create` method.
    :param gateway: same as the one in the `create` method.
    :param vlan_id: same as the one in the `create` method.
    """
    req_body = self._cli.make_body(sp=sp, ipPort=ip_port,
                                   ipAddress=ip_address,
                                   netmask=netmask,
                                   v6PrefixLength=v6_prefix_length,
                                   gateway=gateway, vlanId=vlan_id)
    resp = self.action('modify', **req_body)
    resp.raise_if_err()
    return resp
def sp_sum_values(self):
    """Return SP-level values.

    input:
        "values": {
            "spa": {"19": "385", "18": "0", "20": "0",
                    "17": "0", "16": "0"},
            "spb": {"19": "101", "18": "101", "20": "101",
                    "17": "101", "16": "101"}
        }

    return:
        "values": {"spa": 385, "spb": 505}
    """
    if self.values is None:
        ret = IdValues()
    else:
        # sum each SP's per-id counters into a single per-SP total
        ret = IdValues({k: sum(int(x) for x in v.values())
                        for k, v in self.values.items()})
    return ret
def sum_sp_values(self):
    """Return system-level values (spa + spb).

    input:
        "values": {"spa": 385, "spb": 505}

    return:
        "values": {"0": 890}
    """
    if self.values is None:
        ret = IdValues()
    else:
        ret = IdValues({'0': sum(int(x) for x in self.values.values())})
    return ret
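The two reductions above are plain nested sums. Restated with ordinary dicts so the arithmetic is explicit (values shortened from the docstring example):

values = {'spa': {'19': '385', '18': '0', '20': '0'},
          'spb': {'19': '101', '18': '101', '20': '101'}}

# per-SP totals (sp_sum_values): sum each SP's per-id counters
per_sp = {sp: sum(int(v) for v in counters.values())
          for sp, counters in values.items()}
print(per_sp)                       # {'spa': 385, 'spb': 303}

# system total (sum_sp_values): collapse the per-SP totals
print({'0': sum(per_sp.values())})  # {'0': 688}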
def combine_numeric_values(self, other):
    """numeric_values * sp_values"""
    if self.values is None:
        ret = IdValues()
    else:
        # scale each SP's per-id counters by the other object's per-SP
        # value, then sum the resulting IdValues
        ret = sum([IdValues({k: int(v) * int(other.values[key])
                             for k, v in value.items()})
                   for key, value in self.values.items()])
    return ret
def combine_sp_values(self, other):
    """sp_values * sp_values"""
    if self.values is None:
        ret = IdValues()
    else:
        ret = IdValues({k: int(v) * int(other.values[k])
                        for k, v in self.values.items()})
    return ret
def sum_combined_sp_values(self, other):
    """sum(sp_values * sp_values)"""
    if self.values is None:
        ret = IdValues()
    else:
        ret = IdValues({'0': sum(int(x) for x in
                                 {k: int(v) * int(other.values[k])
                                  for k, v in
                                  self.values.items()}.values())})
    return ret
def add(self, function, kind='add', at_pos='end', *args, **kwargs):
    """Add a function to the custom processing queue.

    Custom functions are applied automatically to the associated pysat
    instrument whenever the instrument.load command is called.

    Parameters
    ----------
    function : string or function object
        name of function or function object to be added to queue
    kind : {'add', 'modify', 'pass'}
        add
            Adds data returned from the function to the instrument
            object. A copy of the pysat instrument object is supplied
            to the routine.
        modify
            pysat instrument object supplied to routine. Any and all
            changes to the object are retained.
        pass
            A copy of the pysat object is passed to the function. No
            data is accepted from the return.
    at_pos : string or int
        insert at position (default: insert at end).
    args : extra arguments
        extra arguments are passed to the custom function (once)
    kwargs : extra keyword arguments
        extra keyword args are passed to the custom function (once)

    Note
    ----
    Allowed `add` function returns:

    - {'data' : pandas Series/DataFrame/array_like,
       'units' : string/array_like of strings,
       'long_name' : string/array_like of strings,
       'name' : string/array_like of strings (iff data array_like)}

    - pandas DataFrame, names of columns are used

    - pandas Series, .name required

    - (string/list of strings, numpy array/list of arrays)

    """
    if isinstance(function, str):
        # convert the string to a function object
        function = eval(function)

    if (at_pos == 'end') | (at_pos == len(self._functions)):
        # store the function object at the end of the queue
        self._functions.append(function)
        self._args.append(args)
        self._kwargs.append(kwargs)
        self._kind.append(kind.lower())
    elif at_pos < len(self._functions):
        # user picked a specific location to insert
        self._functions.insert(at_pos, function)
        self._args.insert(at_pos, args)
        self._kwargs.insert(at_pos, kwargs)
        self._kind.insert(at_pos, kind)
    else:
        raise TypeError('Must enter an index between 0 and %i' %
                        len(self._functions))
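A hedged usage sketch of registering an 'add' function; the instrument and the 'mlt' variable are illustrative assumptions.

import pysat

def add_double_mlt(inst):
    # returns a dict in one of the allowed 'add' forms: a named Series
    data = 2.0 * inst['mlt']   # 'mlt' assumed present in the instrument
    data.name = 'double_mlt'   # the Series name becomes the new variable
    return {'data': data, 'units': 'hours',
            'long_name': 'Twice the magnetic local time'}

ivm = pysat.Instrument(platform='cnofs', name='ivm')  # illustrative
ivm.custom.add(add_double_mlt, 'add')
ivm.load(2010, 100)   # add_double_mlt runs automatically after each load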
def _apply_all(self, sat):
    """Apply all of the custom functions to the satellite data object."""
    if len(self._functions) > 0:
        for func, arg, kwarg, kind in zip(self._functions, self._args,
                                          self._kwargs, self._kind):
            if len(sat.data) > 0:
                if kind == 'add':
                    # apply custom functions that add data to the
                    # instrument object
                    tempd = sat.copy()
                    newData = func(tempd, *arg, **kwarg)
                    del tempd
                    # process the different types of data the function
                    # may return. If a dict is returned, the data is
                    # under 'data'.
                    if isinstance(newData, dict):
                        # if a DataFrame is returned, add it to the
                        # existing frame
                        if isinstance(newData['data'], pds.DataFrame):
                            sat[newData['data'].columns] = newData
                        # if a Series is returned, add it as a column
                        elif isinstance(newData['data'], pds.Series):
                            # look for a name attached to the Series
                            # first
                            if newData['data'].name is not None:
                                sat[newData['data'].name] = newData
                            # then look for a name provided as part of
                            # the dict returned from the function
                            elif 'name' in newData.keys():
                                name = newData.pop('name')
                                sat[name] = newData
                            # couldn't find name information
                            else:
                                raise ValueError('Must assign a name to '
                                                 'Series or return a '
                                                 '"name" in dictionary.')
                        # some other kind of iterable was returned
                        elif hasattr(newData['data'], '__iter__'):
                            # look for a name in the returned dict
                            if 'name' in newData.keys():
                                name = newData.pop('name')
                                sat[name] = newData
                            else:
                                raise ValueError('Must include "name" '
                                                 'in returned '
                                                 'dictionary.')
                    # bare DataFrame is returned
                    elif isinstance(newData, pds.DataFrame):
                        sat[newData.columns] = newData
                    # bare Series is returned; a name must be attached
                    # to the Series
                    elif isinstance(newData, pds.Series):
                        sat[newData.name] = newData
                    # some kind of iterable returned, presuming
                    # (name, data) or ([name1, ...], [data1, ...])
                    elif hasattr(newData, '__iter__'):
                        # falling back to older behavior: unpack the
                        # tuple/list that was returned
                        newName = newData[0]
                        newData = newData[1]
                        if len(newData) > 0:
                            # doesn't really ensure there is data; there
                            # could be multiple empty arrays returned,
                            # e.g. [[], []]
                            if isinstance(newName, str):
                                # one item to add
                                sat[newName] = newData
                            else:
                                # multiple items
                                for name, data in zip(newName, newData):
                                    if len(data) > 0:
                                        # fixes up the incomplete check
                                        # from before
                                        sat[name] = data
                    else:
                        raise ValueError("kernel doesn't know what to "
                                         "do with returned data.")
                # modifying loaded data
                if kind == 'modify':
                    t = func(sat, *arg, **kwarg)
                    if t is not None:
                        raise ValueError('Modify functions should not '
                                         'return any information via '
                                         'return. Information may only '
                                         'be propagated back by '
                                         'modifying the supplied pysat '
                                         'object.')
                # pass function (function runs, no data allowed back)
                if kind == 'pass':
                    tempd = sat.copy()
                    t = func(tempd, *arg, **kwarg)
                    del tempd
                    if t is not None:
                        raise ValueError('Pass functions should not '
                                         'return any information via '
                                         'return.')
Clear custom function list. def clear(self):
        """Clear the custom function list."""
        self._functions = []
        self._args = []
        self._kwargs = []
        self._kind = []
Return a Pandas Series of every file for chosen satellite data Parameters ----------- tag : (string) Denotes type of file to load. Accepted types are 'north' and 'south'. (default='north') sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files def list_files(tag='north', sat_id=None, data_path=None, format_str=None): """Return a Pandas Series of every file for chosen satellite data Parameters ----------- tag : (string) Denotes type of file to load. Accepted types are 'north' and 'south'. (default='north') sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files """ if format_str is None and tag is not None: if tag == 'north' or tag == 'south': hemi_fmt = ''.join(('{year:4d}{month:02d}{day:02d}.', tag, '.grdex')) return pysat.Files.from_os(data_path=data_path, format_str=hemi_fmt) else: estr = 'Unrecognized tag name for SuperDARN, north or south.' raise ValueError(estr) elif format_str is None: estr = 'A tag name must be passed to SuperDARN.' raise ValueError (estr) else: return pysat.Files.from_os(data_path=data_path, format_str=format_str)
Download SuperDARN data from Virginia Tech organized for loading by pysat. def download(date_array, tag, sat_id, data_path, user=None, password=None):
    """Download SuperDARN data from Virginia Tech organized for loading
    by pysat."""
    import sys
    import os
    import pysftp
    import davitpy

    if user is None:
        user = os.environ['DBREADUSER']
    if password is None:
        password = os.environ['DBREADPASS']

    with pysftp.Connection(os.environ['VTDB'], username=user,
                           password=password) as sftp:
        for date in date_array:
            myDir = '/data/' + date.strftime("%Y") + '/grdex/' + tag + '/'
            fname = date.strftime("%Y%m%d") + '.' + tag + '.grdex'
            local_fname = fname + '.bz2'
            saved_fname = os.path.join(data_path, local_fname)
            full_fname = os.path.join(data_path, fname)
            try:
                print('Downloading file for ' + date.strftime('%D'))
                sys.stdout.flush()
                sftp.get(myDir + local_fname, saved_fname)
                # decompress the file, then remove the compressed copy
                os.system('bunzip2 -c ' + saved_fname + ' > ' + full_fname)
                os.system('rm ' + saved_fname)
            except IOError:
                print('File not available for ' + date.strftime('%D'))
    return
filter self to the required number of disks with same size and type Select the disks with the same type and same size. If not enough disks available, set self to empty. :param count: number of disks to retrieve :return: disk list def same_disks(self, count=2): """ filter self to the required number of disks with same size and type Select the disks with the same type and same size. If not enough disks available, set self to empty. :param count: number of disks to retrieve :return: disk list """ ret = self if len(self) > 0: type_counter = Counter(self.drive_type) drive_type, counts = type_counter.most_common()[0] self.set_drive_type(drive_type) if len(self) > 0: size_counter = Counter(self.capacity) size, counts = size_counter.most_common()[0] self.set_capacity(size) if len(self) >= count: indices = self.index[:count] self.set_indices(indices) else: self.set_indices('N/A') return ret
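To make the selection rule concrete, a small standalone sketch of the Counter logic this method relies on (the disk list class itself is not reproduced here) ::

    from collections import Counter

    drive_types = ['SAS', 'NL_SAS', 'SAS', 'SAS']
    # most_common()[0] yields the (value, count) pair seen most often
    drive_type, count = Counter(drive_types).most_common()[0]
    print(drive_type, count)  # SAS 3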
Sets boundaries for all instruments in constellation def set_bounds(self, start, stop): """ Sets boundaries for all instruments in constellation """ for instrument in self.instruments: instrument.bounds = (start, stop)
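For example, assuming `const` is an already-built Constellation whose members support daily iteration ::

    import datetime

    start = datetime.datetime(2009, 1, 1)
    stop = datetime.datetime(2009, 1, 31)
    # every member instrument now iterates over January 2009
    const.set_bounds(start, stop)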
Register a function to modify data of member Instruments. The function is not partially applied to modify member data. When the Constellation receives a function call to register a function for data modification, it passes the call to each instrument and registers it in the instrument's pysat.Custom queue. (Wraps pysat.Custom.add; documentation of that function is reproduced here.) Parameters ---------- function : string or function object name of function or function object to be added to queue kind : {'add', 'modify', 'pass'} add Adds data returned from the function to the instrument object. modify The pysat instrument object is supplied to the routine. Any and all changes to the object are retained. pass A copy of the pysat object is passed to the function. No data is accepted from the return. at_pos : string or int insert at position. (default, insert at end). args : extra arguments Note ---- Allowed `add` function returns: - {'data' : pandas Series/DataFrame/array_like, 'units' : string/array_like of strings, 'long_name' : string/array_like of strings, 'name' : string/array_like of strings (iff data array_like)} - pandas DataFrame, names of columns are used - pandas Series, .name required - (string/list of strings, numpy array/list of arrays) def data_mod(self, *args, **kwargs):
        """
        Register a function to modify data of member Instruments.

        The function is not partially applied to modify member data.

        When the Constellation receives a function call to register a
        function for data modification, it passes the call to each
        instrument and registers it in the instrument's pysat.Custom queue.

        (Wraps pysat.Custom.add; documentation of that function is
        reproduced here.)

        Parameters
        ----------
        function : string or function object
            name of function or function object to be added to queue
        kind : {'add', 'modify', 'pass'}
            add
                Adds data returned from the function to the instrument
                object.
            modify
                The pysat instrument object is supplied to the routine.
                Any and all changes to the object are retained.
            pass
                A copy of the pysat object is passed to the function. No
                data is accepted from the return.
        at_pos : string or int
            insert at position. (default, insert at end).
        args : extra arguments

        Note
        ----
        Allowed `add` function returns:

        - {'data' : pandas Series/DataFrame/array_like,
           'units' : string/array_like of strings,
           'long_name' : string/array_like of strings,
           'name' : string/array_like of strings (iff data array_like)}

        - pandas DataFrame, names of columns are used

        - pandas Series, .name required

        - (string/list of strings, numpy array/list of arrays)

        """
        for instrument in self.instruments:
            instrument.custom.add(*args, **kwargs)
Load instrument data into instrument object.data (Wraps pysat.Instrument.load; documentation of that function is reproduced here.) Parameters ---------- yr : integer Year for desired data doy : integer day of year date : datetime object date to load fname : string filename to be loaded verifyPad : boolean if true, padding data not removed (debug purposes) def load(self, *args, **kwargs):
        """ Load instrument data into instrument object.data

        (Wraps pysat.Instrument.load; documentation of that function is
        reproduced here.)

        Parameters
        ----------
        yr : integer
            Year for desired data
        doy : integer
            day of year
        date : datetime object
            date to load
        fname : string
            filename to be loaded
        verifyPad : boolean
            if true, padding data not removed (debug purposes)
        """
        for instrument in self.instruments:
            instrument.load(*args, **kwargs)
Combines signals from multiple instruments within given bounds. Parameters ---------- bounds1 : (min, max) Bounds for selecting data on the axis of label1 Data points with label1 in [min, max) will be considered. label1 : string Data label for bounds1 to act on. bounds2 : (min, max) Bounds for selecting data on the axis of label2 Data points with label2 in [min, max) will be considered. label2 : string Data label for bounds2 to act on. bin3 : (min, max, #bins) Min and max bounds and number of bins for third axis. label3 : string Data label for third axis. data_label : array of strings Data label(s) for data product(s) to be averaged. Returns ------- median : dictionary Dictionary indexed by data label, each value of which is a dictionary with keys 'median', 'count', 'avg_abs_dev', and 'bin' (the values of the bin edges.) def add(self, bounds1, label1, bounds2, label2, bin3, label3,
            data_label):
        """ Combines signals from multiple instruments within given bounds.

        Parameters
        ----------
        bounds1 : (min, max)
            Bounds for selecting data on the axis of label1
            Data points with label1 in [min, max) will be considered.
        label1 : string
            Data label for bounds1 to act on.
        bounds2 : (min, max)
            Bounds for selecting data on the axis of label2
            Data points with label2 in [min, max) will be considered.
        label2 : string
            Data label for bounds2 to act on.
        bin3 : (min, max, #bins)
            Min and max bounds and number of bins for third axis.
        label3 : string
            Data label for third axis.
        data_label : array of strings
            Data label(s) for data product(s) to be averaged.

        Returns
        -------
        median : dictionary
            Dictionary indexed by data label, each value of which is a
            dictionary with keys 'median', 'count', 'avg_abs_dev', and
            'bin' (the values of the bin edges.)

        """
        # TODO Update for 2.7 compatibility.
        if isinstance(data_label, str):
            data_label = [data_label, ]
        elif not isinstance(data_label, collections.Sequence):
            raise ValueError("Please pass data_label as a string or "
                             "collection of strings.")

        # Modeled after pysat.ssnl.median2D

        # Make bin boundaries.
        # y: values at label3
        # z: *data_labels
        biny = np.linspace(bin3[0], bin3[1], bin3[2]+1)

        numy = len(biny)-1
        numz = len(data_label)

        # Ranges
        yarr, zarr = map(np.arange, (numy, numz))

        # Store data here.
        ans = [[[collections.deque()] for j in yarr] for k in zarr]

        # Filter data by bounds and bin it.
        # Idiom for loading all of the data within an instrument's bounds:
        # iterate over the member instruments, then over each day/file
        # within each member's bounds.
        for member in self:
            for inst in member:
                if len(inst.data) != 0:
                    # Select indices for each piece of data we're
                    # interested in. Not all of this data is in bounds on
                    # label3, but we'll sort this later.
                    min1, max1 = bounds1
                    min2, max2 = bounds2
                    data1 = inst.data[label1]
                    data2 = inst.data[label2]
                    in_bounds, = np.where((min1 <= data1) & (data1 < max1) &
                                          (min2 <= data2) & (data2 < max2))
                    # Grab the data in bounds on data1, data2.
                    data_considered = inst.data.iloc[in_bounds]

                    y_indexes = np.digitize(data_considered[label3],
                                            biny) - 1

                    # Iterate over the bins along y
                    for yj in yarr:
                        # Indices of data in this bin
                        yindex, = np.where(y_indexes == yj)

                        # If there's data in this bin
                        if len(yindex) > 0:
                            # For each data label, add the points.
                            for zk in zarr:
                                ans[zk][yj][0].extend(
                                    data_considered.ix[
                                        yindex, data_label[zk]].tolist())

        # Now for the averaging.
        # Let's try packing the answers for the 2D function.
numx = 1 xarr = np.arange(numx) binx = None # TODO modify output out_2d = _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz) # Transform output output = {} for i, label in enumerate(data_label): median = [r[0] for r in out_2d[label]['median']] count = [r[0] for r in out_2d[label]['count']] dev = [r[0] for r in out_2d[label]['avg_abs_dev']] output[label] = {'median': median, 'count': count, 'avg_abs_dev': dev, 'bin': out_2d[label]['bin_y']} return output
Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label and inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project. def difference(self, instrument1, instrument2, bounds, data_labels,
                   cost_function):
        """
        Calculates the difference in signals from multiple
        instruments within the given bounds.

        Parameters
        ----------
        instrument1 : Instrument
            Information must already be loaded into the instrument.
        instrument2 : Instrument
            Information must already be loaded into the instrument.
        bounds : list of tuples in the form
            (inst1_label, inst2_label, min, max, max_difference)
            inst1_label and inst2_label are labels for the data in
            instrument1 and instrument2
            min and max are bounds on the data considered
            max_difference is the maximum difference between two points
            for the difference to be calculated
        data_labels : list of tuples of data labels
            The first key is used to access data in s1 and the second
            data in s2.
        cost_function : function
            function that operates on two rows of the instrument data.
            used to determine the distance between two points for finding
            closest points

        Returns
        -------
        data_df: pandas DataFrame
            Each row has a point from instrument1, with the keys preceded
            by '1_', and a point within bounds on that point from
            instrument2 with the keys preceded by '2_', and the difference
            between the instruments' data for all the labels in
            data_labels

        Created as part of a Spring 2018 UTDesign project.
        """
        """
        Draft Pseudocode
        ----------------
        Check integrity of inputs.

        Let STD_LABELS be the constant tuple:
        ("time", "lat", "long", "alt")

        Note: modify so that user can override labels for time, lat,
        long, data for each satellite.
        // We only care about the data currently loaded
           into each object.

        Let start be the later of the datetime of the first piece of data
        loaded into s1, the first piece of data loaded into s2, and the
        user supplied start bound.

        Let end be the earlier of the datetime of the last piece of data
        loaded into s1, the last piece of data loaded into s2, and the
        user supplied end bound.

        If start is after end, raise an error.

        // Let data be the 2D array of deques holding each piece
        // of data, sorted into bins by lat/long/alt.

        Let s1_data (resp s2_data) be data from s1.data, s2.data filtered
        by user-provided lat/long/alt bounds, time bounds calculated.
Let data be a dictionary of lists with the keys [ dl1 for dl1, dl2 in data_labels ] + STD_LABELS + [ lb+"2" for lb in STD_LABELS ] For each piece of data s1_point in s1_data: # Hopefully np.where is very good, because this # runs O(n) times. # We could try reusing selections, maybe, if needed. # This would probably involve binning. Let s2_near be the data from s2.data within certain bounds on lat/long/alt/time using 8 statements to numpy.where. We can probably get those defaults from the user or handy constants / config? # We could try a different algorithm for closest pairs # of points. Let distance be the numpy array representing the distance between s1_point and each point in s2_near. # S: Difference for others: change this line. For each of those, calculate the spatial difference from the s1 using lat/long/alt. If s2_near is empty; break loop. Let s2_nearest be the point in s2_near corresponding to the lowest distance. Append to data: a point, indexed by the time from s1_point, containing the following data: # note Let n be the length of data["time"]. For each key in data: Assert len(data[key]) == n End for. # Create data row to pass to pandas. Let row be an empty dict. For dl1, dl2 in data_labels: Append s1_point[dl1] - s2_nearest[dl2] to data[dl1]. For key in STD_LABELS: Append s1_point[translate[key]] to data[key] key = key+"2" Append s2_nearest[translate[key]] to data[key] Let data_df be a pandas dataframe created from the data in data. return { 'data': data_df, 'start':start, 'end':end } """ labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist'] data = {label: [] for label in labels} # Apply bounds inst1 = instrument1.data inst2 = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] low = b[2] high = b[3] data1 = inst1[label1] ind1 = np.where((data1 >= low) & (data1 < high)) inst1 = inst1.iloc[ind1] data2 = inst2[label2] ind2 = np.where((data2 >= low) & (data2 < high)) inst2 = inst2.iloc[ind2] for i, s1_point in inst1.iterrows(): # Gets points in instrument2 within the given bounds s2_near = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] s1_val = s1_point[label1] max_dist = b[4] minbound = s1_val - max_dist maxbound = s1_val + max_dist data2 = s2_near[label2] indices = np.where((data2 >= minbound) & (data2 < maxbound)) s2_near = s2_near.iloc[indices] # Finds nearest point to s1_point in s2_near s2_nearest = None min_dist = float('NaN') for j, s2_point in s2_near.iterrows(): dist = cost_function(s1_point, s2_point) if dist < min_dist or min_dist != min_dist: min_dist = dist s2_nearest = s2_point data['dist'].append(min_dist) # Append difference to data dict for dl1, dl2 in data_labels: if s2_nearest is not None: data[dl1].append(s1_point[dl1] - s2_nearest[dl2]) else: data[dl1].append(float('NaN')) # Append the rest of the row for b in bounds: label1 = b[0] label2 = b[1] data['1_'+label1].append(s1_point[label1]) if s2_nearest is not None: data['2_'+label2].append(s2_nearest[label2]) else: data['2_'+label2].append(float('NaN')) data_df = pds.DataFrame(data=data) return data_df
2D Daily Occurrence Probability of data_label > gate over a season. If data_label is greater than gate at least once per day, then a 100% occurrence probability results. Season delineated by the bounds attached to Instrument object. Prob = (# of times with at least one hit)/(# of times in bin) Parameters ---------- inst: pysat.Instrument() Instrument to use for calculating occurrence probability binx: list [min, max, number of bins] labelx: string name for data product for binx data_label: list of strings identifies data product(s) to calculate occurrence probability e.g. inst[data_label] gate: list of values values that data_label must achieve to be counted as an occurrence returnBins: Boolean if True, return arrays with values of bin edges, useful for pcolor Returns ------- occur_prob : dictionary A dict of dicts indexed by data_label. Each entry is dict with entries 'prob' for the probability and 'count' for the number of days with any data; 'bin_x' and 'bin_y' are also returned if requested. Note that arrays are organized for direct plotting, y values along rows, x along columns. Note ---- Season delineated by the bounds attached to Instrument object. def daily2D(inst, bin1, label1, bin2, label2, data_label, gate,
            returnBins=False):
    """2D Daily Occurrence Probability of data_label > gate over a season.

    If data_label is greater than gate at least once per day, then a
    100% occurrence probability results. Season delineated by the bounds
    attached to Instrument object.

    Prob = (# of times with at least one hit)/(# of times in bin)

    Parameters
    ----------
    inst: pysat.Instrument()
        Instrument to use for calculating occurrence probability
    binx: list
        [min, max, number of bins]
    labelx: string
        name for data product for binx
    data_label: list of strings
        identifies data product(s) to calculate occurrence probability
        e.g. inst[data_label]
    gate: list of values
        values that data_label must achieve to be counted as an occurrence
    returnBins: Boolean
        if True, return arrays with values of bin edges, useful for pcolor

    Returns
    -------
    occur_prob : dictionary
        A dict of dicts indexed by data_label. Each entry is dict with
        entries 'prob' for the probability and 'count' for the number of
        days with any data; 'bin_x' and 'bin_y' are also returned if
        requested. Note that arrays are organized for direct plotting,
        y values along rows, x along columns.

    Note
    ----
    Season delineated by the bounds attached to Instrument object.

    """
    return _occurrence2D(inst, bin1, label1, bin2, label2, data_label,
                         gate, by_orbit=False, returnBins=returnBins)
2D Occurrence Probability of data_label orbit-by-orbit over a season. If data_label is greater than gate at least once per orbit, then a 100% occurrence probability results. Season delineated by the bounds attached to Instrument object. Prob = (# of times with at least one hit)/(# of times in bin) Parameters ---------- inst: pysat.Instrument() Instrument to use for calculating occurrence probability binx: list [min value, max value, number of bins] labelx: string identifies data product for binx data_label: list of strings identifies data product(s) to calculate occurrence probability gate: list of values values that data_label must achieve to be counted as an occurrence returnBins: Boolean if True, return arrays with values of bin edges, useful for pcolor Returns ------- occur_prob : dictionary A dict of dicts indexed by data_label. Each entry is dict with entries 'prob' for the probability and 'count' for the number of orbits with any data; 'bin_x' and 'bin_y' are also returned if requested. Note that arrays are organized for direct plotting, y values along rows, x along columns. Note ---- Season delineated by the bounds attached to Instrument object. def by_orbit2D(inst, bin1, label1, bin2, label2, data_label, gate,
               returnBins=False):
    """2D Occurrence Probability of data_label orbit-by-orbit over a season.

    If data_label is greater than gate at least once per orbit, then a
    100% occurrence probability results. Season delineated by the bounds
    attached to Instrument object.

    Prob = (# of times with at least one hit)/(# of times in bin)

    Parameters
    ----------
    inst: pysat.Instrument()
        Instrument to use for calculating occurrence probability
    binx: list
        [min value, max value, number of bins]
    labelx: string
        identifies data product for binx
    data_label: list of strings
        identifies data product(s) to calculate occurrence probability
    gate: list of values
        values that data_label must achieve to be counted as an occurrence
    returnBins: Boolean
        if True, return arrays with values of bin edges, useful for pcolor

    Returns
    -------
    occur_prob : dictionary
        A dict of dicts indexed by data_label. Each entry is dict with
        entries 'prob' for the probability and 'count' for the number of
        orbits with any data; 'bin_x' and 'bin_y' are also returned if
        requested. Note that arrays are organized for direct plotting,
        y values along rows, x along columns.

    Note
    ----
    Season delineated by the bounds attached to Instrument object.

    """
    return _occurrence2D(inst, bin1, label1, bin2, label2, data_label,
                         gate, by_orbit=True, returnBins=returnBins)
3D Daily Occurrence Probability of data_label > gate over a season. If data_label is greater than gate at least once per day, then a 100% occurrence probability results. Season delineated by the bounds attached to Instrument object. Prob = (# of times with at least one hit)/(# of times in bin) Parameters ---------- inst: pysat.Instrument() Instrument to use for calculating occurrence probability binx: list [min, max, number of bins] labelx: string name for data product for binx data_label: list of strings identifies data product(s) to calculate occurrence probability gate: list of values values that data_label must achieve to be counted as an occurrence returnBins: Boolean if True, return arrays with values of bin edges, useful for pcolor Returns ------- occur_prob : dictionary A dict of dicts indexed by data_label. Each entry is dict with entries 'prob' for the probability and 'count' for the number of days with any data; 'bin_x', 'bin_y', and 'bin_z' are also returned if requested. Note that arrays are organized for direct plotting, z,y,x. Note ---- Season delineated by the bounds attached to Instrument object. def daily3D(inst, bin1, label1, bin2, label2, bin3, label3,
            data_label, gate, returnBins=False):
    """3D Daily Occurrence Probability of data_label > gate over a season.

    If data_label is greater than gate at least once per day, then a
    100% occurrence probability results. Season delineated by the bounds
    attached to Instrument object.

    Prob = (# of times with at least one hit)/(# of times in bin)

    Parameters
    ----------
    inst: pysat.Instrument()
        Instrument to use for calculating occurrence probability
    binx: list
        [min, max, number of bins]
    labelx: string
        name for data product for binx
    data_label: list of strings
        identifies data product(s) to calculate occurrence probability
    gate: list of values
        values that data_label must achieve to be counted as an occurrence
    returnBins: Boolean
        if True, return arrays with values of bin edges, useful for pcolor

    Returns
    -------
    occur_prob : dictionary
        A dict of dicts indexed by data_label. Each entry is dict with
        entries 'prob' for the probability and 'count' for the number of
        days with any data; 'bin_x', 'bin_y', and 'bin_z' are also
        returned if requested. Note that arrays are organized for direct
        plotting, z,y,x.

    Note
    ----
    Season delineated by the bounds attached to Instrument object.

    """
    return _occurrence3D(inst, bin1, label1, bin2, label2, bin3, label3,
                         data_label, gate, returnBins=returnBins,
                         by_orbit=False)
Input Series of numbers, Series, or DataFrames repackaged for calculation. Parameters ---------- data : pandas.Series Series of numbers, Series, DataFrames Returns ------- pandas.Series, DataFrame, or Panel repacked data, aligned by indices, ready for calculation def computational_form(data): """ Input Series of numbers, Series, or DataFrames repackaged for calculation. Parameters ---------- data : pandas.Series Series of numbers, Series, DataFrames Returns ------- pandas.Series, DataFrame, or Panel repacked data, aligned by indices, ready for calculation """ if isinstance(data.iloc[0], DataFrame): dslice = Panel.from_dict(dict([(i,data.iloc[i]) for i in xrange(len(data))])) elif isinstance(data.iloc[0], Series): dslice = DataFrame(data.tolist()) dslice.index = data.index else: dslice = data return dslice
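A quick illustration of the repacking for the Series-of-Series case ::

    import pandas as pds

    # per-time-stamp Series stored inside an outer Series
    data = pds.Series([pds.Series([1., 2.]), pds.Series([3., 4.])],
                      index=['t0', 't1'])
    frame = computational_form(data)
    # frame is a DataFrame aligned on the outer index:
    #       0    1
    # t0  1.0  2.0
    # t1  3.0  4.0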
Set the top level directory pysat uses to look for data and reload. Parameters ---------- path : string valid path to directory pysat uses to look for data store : bool if True, store data directory for future runs def set_data_dir(path=None, store=None): """ Set the top level directory pysat uses to look for data and reload. Parameters ---------- path : string valid path to directory pysat uses to look for data store : bool if True, store data directory for future runs """ import sys import os import pysat if sys.version_info[0] >= 3: if sys.version_info[1] < 4: import imp re_load = imp.reload else: import importlib re_load = importlib.reload else: re_load = reload if store is None: store = True if os.path.isdir(path): if store: with open(os.path.join(os.path.expanduser('~'), '.pysat', 'data_path.txt'), 'w') as f: f.write(path) pysat.data_dir = path pysat._files = re_load(pysat._files) pysat._instrument = re_load(pysat._instrument) else: raise ValueError('Path %s does not lead to a valid directory.' % path)
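Typical usage, assuming the function is exposed as pysat.utils.set_data_dir and with a placeholder path ::

    import pysat

    # point pysat at a local data directory and persist the choice
    pysat.utils.set_data_dir('/home/user/pysat_data', store=True)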
Load netCDF-3/4 file produced by pysat. Parameters ---------- fnames : string or array_like of strings filenames to load strict_meta : boolean check if metadata across fnames is the same file_format : string file_format keyword passed to netCDF4 routine NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4 Returns -------- out : pandas.core.frame.DataFrame DataFrame output mdata : pysat._meta.Meta Meta data def load_netcdf4(fnames=None, strict_meta=False, file_format=None, epoch_name='Epoch', units_label='units', name_label='long_name', notes_label='notes', desc_label='desc', plot_label='label', axis_label='axis', scale_label='scale', min_label='value_min', max_label='value_max', fill_label='fill'): # unix_time=False, **kwargs): """Load netCDF-3/4 file produced by pysat. Parameters ---------- fnames : string or array_like of strings filenames to load strict_meta : boolean check if metadata across fnames is the same file_format : string file_format keyword passed to netCDF4 routine NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4 Returns -------- out : pandas.core.frame.DataFrame DataFrame output mdata : pysat._meta.Meta Meta data """ import netCDF4 import string import pysat if fnames is None: raise ValueError("Must supply a filename/list of filenames") if isinstance(fnames, basestring): fnames = [fnames] if file_format is None: file_format = 'NETCDF4' else: file_format = file_format.upper() saved_mdata = None running_idx = 0 running_store=[] two_d_keys = []; two_d_dims = []; three_d_keys = []; three_d_dims = []; for fname in fnames: with netCDF4.Dataset(fname, mode='r', format=file_format) as data: # build up dictionary with all global ncattrs # and add those attributes to a pysat meta object ncattrsList = data.ncattrs() mdata = pysat.Meta(units_label=units_label, name_label=name_label, notes_label=notes_label, desc_label=desc_label, plot_label=plot_label, axis_label=axis_label, scale_label=scale_label, min_label=min_label, max_label=max_label, fill_label=fill_label) for d in ncattrsList: if hasattr(mdata, d): mdata.__setattr__(d+'_', data.getncattr(d)) else: mdata.__setattr__(d, data.getncattr(d)) # loadup all of the variables in the netCDF loadedVars = {} for key in data.variables.keys(): # load up metadata. 
From here group unique
                # dimensions and act accordingly, 1D, 2D, 3D
                if len(data.variables[key].dimensions) == 1:
                    # load 1D data variable
                    # assuming basic time dimension
                    loadedVars[key] = data.variables[key][:]
                    # if key != epoch_name:
                    # load up metadata
                    meta_dict = {}
                    for nc_key in data.variables[key].ncattrs():
                        meta_dict[nc_key] = \
                            data.variables[key].getncattr(nc_key)
                    mdata[key] = meta_dict
                if len(data.variables[key].dimensions) == 2:
                    # part of dataframe within dataframe
                    two_d_keys.append(key)
                    two_d_dims.append(data.variables[key].dimensions)
                if len(data.variables[key].dimensions) == 3:
                    # part of full/dedicated dataframe within dataframe
                    three_d_keys.append(key)
                    three_d_dims.append(data.variables[key].dimensions)

            # we now have a list of keys that need to go into a dataframe,
            # could be more than one, collect unique dimensions for 2D keys
            for dim in set(two_d_dims):
                # first dimension should be epoch
                # second dimension name used as variable name
                obj_key_name = dim[1]
                # collect variable names associated with dimension
                idx_bool = [dim == i for i in two_d_dims]
                idx, = np.where(np.array(idx_bool))
                obj_var_keys = []
                clean_var_keys = []
                for i in idx:
                    obj_var_keys.append(two_d_keys[i])
                    clean_var_keys.append(
                        two_d_keys[i].split(obj_key_name + '_')[-1])

                # figure out how to index this data, it could provide its
                # own index - or we may have to create simple integer
                # based DataFrame access
                # if the dimension is stored as its own variable then use
                # that info for the index
                if obj_key_name in obj_var_keys:
                    # string used to identify dimension also in
                    # data.variables; will be used as an index
                    index_key_name = obj_key_name
                    # if the object index uses UNIX time, process into
                    # datetime index
                    if data.variables[obj_key_name].getncattr(name_label) == epoch_name:
                        # name to be used in DataFrame index
                        index_name = epoch_name
                        time_index_flag = True
                    else:
                        time_index_flag = False
                        # label to be used in DataFrame index
                        index_name = data.variables[obj_key_name].getncattr(name_label)
                else:
                    # dimension is not itself a variable
                    index_key_name = None

                # iterate over the variables and grab metadata
                dim_meta_data = pysat.Meta(units_label=units_label,
                                           name_label=name_label,
                                           notes_label=notes_label,
                                           desc_label=desc_label,
                                           plot_label=plot_label,
                                           axis_label=axis_label,
                                           scale_label=scale_label,
                                           min_label=min_label,
                                           max_label=max_label,
                                           fill_label=fill_label)
                for key, clean_key in zip(obj_var_keys, clean_var_keys):
                    # store attributes in metadata, except for dim name
                    meta_dict = {}
                    for nc_key in data.variables[key].ncattrs():
                        meta_dict[nc_key] = \
                            data.variables[key].getncattr(nc_key)
                    dim_meta_data[clean_key] = meta_dict

                dim_meta_dict = {'meta': dim_meta_data}
                if index_key_name is not None:
                    # add top level meta
                    for nc_key in data.variables[obj_key_name].ncattrs():
                        dim_meta_dict[nc_key] = \
                            data.variables[obj_key_name].getncattr(nc_key)
                    mdata[obj_key_name] = dim_meta_dict

                # iterate over all variables with this dimension
                # and store data; data storage, whole shebang
                loop_dict = {}
                # list holds a series of slices, parsed from dict above
                loop_list = []
                for key, clean_key in zip(obj_var_keys, clean_var_keys):
                    # data
                    loop_dict[clean_key] = \
                        data.variables[key][:, :].flatten(order='C')
                # number of values in time
                loop_lim = data.variables[obj_var_keys[0]].shape[0]
                # number of values per time
                step_size = len(data.variables[obj_var_keys[0]][0, :])
                # check if there is an index we should use
                if not (index_key_name is None):
                    # an index was found
                    time_var = loop_dict.pop(index_key_name)
                    if time_index_flag:
                        # create datetime index from data
                        if
file_format == 'NETCDF4': time_var = pds.to_datetime(1E6*time_var) else: time_var = pds.to_datetime(1E6*time_var) new_index = time_var new_index_name = index_name else: # using integer indexing new_index = np.arange(loop_lim*step_size, dtype=int) % step_size new_index_name = 'index' # load all data into frame if len(loop_dict.keys()) > 1: loop_frame = pds.DataFrame(loop_dict, columns=clean_var_keys) if obj_key_name in loop_frame: del loop_frame[obj_key_name] # break massive frame into bunch of smaller frames for i in np.arange(loop_lim, dtype=int): loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1),:]) loop_list[-1].index = new_index[step_size*i:step_size*(i+1)] loop_list[-1].index.name = new_index_name else: loop_frame = pds.Series(loop_dict[clean_var_keys[0]], name=obj_var_keys[0]) # break massive series into bunch of smaller series for i in np.arange(loop_lim, dtype=int): loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1)]) loop_list[-1].index = new_index[step_size*i:step_size*(i+1)] loop_list[-1].index.name = new_index_name # print (loop_frame.columns) # add 2D object data, all based on a unique dimension within # netCDF, to loaded data dictionary loadedVars[obj_key_name] = loop_list del loop_list # we now have a list of keys that need to go into a dataframe, # could be more than one, collect unique dimensions for 2D keys for dim in set(three_d_dims): # collect variable names associated with dimension idx_bool = [dim == i for i in three_d_dims] idx, = np.where(np.array(idx_bool)) obj_var_keys = [] for i in idx: obj_var_keys.append(three_d_keys[i]) for obj_key_name in obj_var_keys: # store attributes in metadata meta_dict = {} for nc_key in data.variables[obj_key_name].ncattrs(): meta_dict[nc_key] = data.variables[obj_key_name].getncattr(nc_key) mdata[obj_key_name] = meta_dict # iterate over all variables with this dimension and store data # data storage, whole shebang loop_dict = {} # list holds a series of slices, parsed from dict above loop_list = [] loop_dict[obj_key_name] = data.variables[obj_key_name][:,:,:] # number of values in time loop_lim = data.variables[obj_key_name].shape[0] # number of values per time step_size_x = len(data.variables[obj_key_name][0, :, 0]) step_size_y = len(data.variables[obj_key_name][0, 0, :]) step_size = step_size_x loop_dict[obj_key_name] = loop_dict[obj_key_name].reshape((loop_lim*step_size_x, step_size_y)) # check if there is an index we should use if not (index_key_name is None): # an index was found time_var = loop_dict.pop(index_key_name) if time_index_flag: # create datetime index from data if file_format == 'NETCDF4': time_var = pds.to_datetime(1E6*time_var) else: time_var = pds.to_datetime(1E6*time_var) new_index = time_var new_index_name = index_name else: # using integer indexing new_index = np.arange(loop_lim*step_size, dtype=int) % step_size new_index_name = 'index' # load all data into frame loop_frame = pds.DataFrame(loop_dict[obj_key_name]) # del loop_frame['dimension_1'] # break massive frame into bunch of smaller frames for i in np.arange(loop_lim, dtype=int): loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1),:]) loop_list[-1].index = new_index[step_size*i:step_size*(i+1)] loop_list[-1].index.name = new_index_name # add 2D object data, all based on a unique dimension within netCDF, # to loaded data dictionary loadedVars[obj_key_name] = loop_list del loop_list # prepare dataframe index for this netcdf file time_var = loadedVars.pop(epoch_name) # convert from GPS seconds to seconds used in pandas (unix 
time, # no leap) #time_var = convert_gps_to_unix_seconds(time_var) if file_format == 'NETCDF4': loadedVars[epoch_name] = pds.to_datetime((1E6 * time_var).astype(int)) else: loadedVars[epoch_name] = pds.to_datetime((time_var * 1E6).astype(int)) #loadedVars[epoch_name] = pds.to_datetime((time_var*1E6).astype(int)) running_store.append(loadedVars) running_idx += len(loadedVars[epoch_name]) if strict_meta: if saved_mdata is None: saved_mdata = copy.deepcopy(mdata) elif (mdata != saved_mdata): raise ValueError('Metadata across filenames is not the ' + 'same.') # combine all of the data loaded across files together out = [] for item in running_store: out.append(pds.DataFrame.from_records(item, index=epoch_name)) out = pds.concat(out, axis=0) return out, mdata
Return a tuple of year, day of year for a supplied datetime object. def getyrdoy(date): """Return a tuple of year, day of year for a supplied datetime object.""" try: doy = date.toordinal()-datetime(date.year,1,1).toordinal()+1 except AttributeError: raise AttributeError("Must supply a pandas datetime object or " + "equivalent") else: return date.year, doy
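For example ::

    from datetime import datetime

    yr, doy = getyrdoy(datetime(2009, 2, 1))
    # yr -> 2009, doy -> 32 (February 1 is the 32nd day of the year)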
Return array of datetime objects using input frequency from start to stop Supports single datetime object or list, tuple, ndarray of start and stop dates. freq codes correspond to pandas date_range codes, D daily, M monthly, S secondly def season_date_range(start, stop, freq='D'): """ Return array of datetime objects using input frequency from start to stop Supports single datetime object or list, tuple, ndarray of start and stop dates. freq codes correspond to pandas date_range codes, D daily, M monthly, S secondly """ if hasattr(start, '__iter__'): # missing check for datetime season = pds.date_range(start[0], stop[0], freq=freq) for (sta,stp) in zip(start[1:], stop[1:]): season = season.append(pds.date_range(sta, stp, freq=freq)) else: season = pds.date_range(start, stop, freq=freq) return season
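For example, two disjoint periods merged into a single daily index ::

    from datetime import datetime

    starts = [datetime(2009, 1, 1), datetime(2009, 3, 1)]
    stops = [datetime(2009, 1, 3), datetime(2009, 3, 2)]
    season = season_date_range(starts, stops, freq='D')
    # Jan 1-3 followed by Mar 1-2: five daily timestamps in total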
Create a timeseries index using supplied year, month, day, and ut in seconds. Parameters ---------- year : array_like of ints month : array_like of ints or None day : array_like of ints for day (default) or day of year (use month=None) uts : array_like of floats Returns ------- Pandas timeseries index. Note ---- Leap seconds have no meaning here. def create_datetime_index(year=None, month=None, day=None, uts=None):
    """Create a timeseries index using supplied year, month, day, and ut in
    seconds.

    Parameters
    ----------
    year : array_like of ints
    month : array_like of ints or None
    day : array_like of ints
        for day (default) or day of year (use month=None)
    uts : array_like of floats

    Returns
    -------
    Pandas timeseries index.

    Note
    ----
    Leap seconds have no meaning here.

    """
    # need a timeseries index for storing satellite data in pandas, but
    # creating a datetime object for everything is too slow,
    # so I calculate the number of nanoseconds elapsed since first sample
    # and create a timeseries index from that.
    # Factor of 20 improvement compared to previous method,
    # which itself was an order of magnitude faster than datetime.

    # get list of unique year and month
    if not hasattr(year, '__iter__'):
        raise ValueError('Must provide an iterable for all inputs.')
    if len(year) == 0:
        raise ValueError('Length of array must be larger than 0.')
    year = year.astype(int)
    if month is None:
        month = np.ones(len(year), dtype=int)
    else:
        month = month.astype(int)

    if uts is None:
        uts = np.zeros(len(year))
    if day is None:
        day = np.ones(len(year))
    day = day.astype(int)
    # track changes in seconds
    uts_del = uts.copy().astype(float)
    # determine where there are changes in year and month that need to be
    # accounted for
    _, idx = np.unique(year*100.+month, return_index=True)
    # create another index array for faster algorithm below
    idx2 = np.hstack((idx, len(year)+1))
    # computes UTC seconds offset for each unique set of year and month
    for _idx, _idx2 in zip(idx[1:], idx2[2:]):
        temp = (datetime(year[_idx], month[_idx], 1) -
                datetime(year[0], month[0], 1))
        uts_del[_idx:_idx2] += temp.total_seconds()

    # add in UTC seconds for days, ignores existence of leap seconds
    uts_del += (day-1)*86400
    # add in seconds since unix epoch to first day
    uts_del += (datetime(year[0], month[0], 1) -
                datetime(1970, 1, 1)).total_seconds()
    # going to use routine that defaults to nanoseconds for epoch
    uts_del *= 1E9
    return pds.to_datetime(uts_del)
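For example ::

    import numpy as np

    year = np.array([2009, 2009, 2009])
    month = np.array([1, 1, 1])
    day = np.array([1, 1, 2])
    uts = np.array([0.0, 1.5, 0.0])
    index = create_datetime_index(year=year, month=month, day=day, uts=uts)
    # -> 2009-01-01 00:00:00, 2009-01-01 00:00:01.500000,
    #    2009-01-02 00:00:00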
NaN insensitive version of scipy's circular mean routine Parameters ----------- samples : array_like Input array low : float or int Lower boundary for circular mean range (default=0) high: float or int Upper boundary for circular mean range (default=2 pi) axis : int or NoneType Axis along which circular means are computed. The default is to compute the mean of the flattened array Returns -------- circmean : float Circular mean def nan_circmean(samples, high=2.0*np.pi, low=0.0, axis=None):
    """NaN insensitive version of scipy's circular mean routine

    Parameters
    -----------
    samples : array_like
        Input array
    low : float or int
        Lower boundary for circular mean range (default=0)
    high: float or int
        Upper boundary for circular mean range (default=2 pi)
    axis : int or NoneType
        Axis along which circular means are computed. The default is to
        compute the mean of the flattened array

    Returns
    --------
    circmean : float
        Circular mean

    """

    samples = np.asarray(samples)
    # remove NaN values; note this flattens the input, so axis is only
    # meaningful when the remaining data are one dimensional
    samples = samples[~np.isnan(samples)]
    if samples.size == 0:
        return np.nan

    # Ensure the samples are in radians
    ang = (samples - low) * 2.0 * np.pi / (high - low)

    # Sum the sine and cosine of the angles and recover the mean angle
    # from their ratio
    ssum = np.sin(ang).sum(axis=axis)
    csum = np.cos(ang).sum(axis=axis)
    res = np.arctan2(ssum, csum)

    # Bring the range of the result between 0 and 2 pi
    mask = res < 0.0

    if mask.ndim > 0:
        res[mask] += 2.0 * np.pi
    elif mask:
        res += 2.0 * np.pi

    # Map the mean angle back onto the user-supplied range
    circmean = res * (high - low) / (2.0 * np.pi) + low

    return circmean
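For example, averaging longitudes that straddle the 0/360 boundary ::

    import numpy as np

    lon = np.array([355.0, 5.0, np.nan])
    mean_lon = nan_circmean(lon, high=360.0, low=0.0)
    # -> 0.0, whereas np.nanmean would give 180.0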
NaN insensitive version of scipy's circular standard deviation routine Parameters ----------- samples : array_like Input array low : float or int Lower boundary for circular standard deviation range (default=0) high: float or int Upper boundary for circular standard deviation range (default=2 pi) axis : int or NoneType Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array Returns -------- circstd : float Circular standard deviation def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None): """NaN insensitive version of scipy's circular standard deviation routine Parameters ----------- samples : array_like Input array low : float or int Lower boundary for circular standard deviation range (default=0) high: float or int Upper boundary for circular standard deviation range (default=2 pi) axis : int or NoneType Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array Returns -------- circstd : float Circular standard deviation """ samples = np.asarray(samples) samples = samples[~np.isnan(samples)] if samples.size == 0: return np.nan # Ensure the samples are in radians ang = (samples - low) * 2.0 * np.pi / (high - low) # Calculate the means of the sine and cosine, as well as the length # of their unit vector smean = np.sin(ang).mean(axis=axis) cmean = np.cos(ang).mean(axis=axis) rmean = np.sqrt(smean**2 + cmean**2) # Calculate the circular standard deviation circstd = (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi) return circstd
Default routine to be applied when loading data. Removes redundant naming def default(inst): """Default routine to be applied when loading data. Removes redundant naming """ import pysat.instruments.icon_ivm as icivm inst.tag = 'level_2' icivm.remove_icon_names(inst, target='ICON_L2_EUV_Daytime_OP_')
Produce a list of ICON EUV files. Notes ----- Currently fixed to level-2 def list_files(tag=None, sat_id=None, data_path=None, format_str=None): """Produce a list of ICON EUV files. Notes ----- Currently fixed to level-2 """ desc = None level = tag if level == 'level_1': code = 'L1' desc = None elif level == 'level_2': code = 'L2' desc = None else: raise ValueError('Unsupported level supplied: ' + level) if format_str is None: format_str = 'ICON_'+code+'_EUV_Daytime' if desc is not None: format_str += '_' + desc +'_' format_str += '_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC' return pysat.Files.from_os(data_path=data_path, format_str=format_str)
Return a copy of the resource with same raw data :return: copy of the resource def shadow_copy(self): """ Return a copy of the resource with same raw data :return: copy of the resource """ ret = self.__class__() if not self._is_updated(): # before copy, make sure source is updated. self.update() ret._parsed_resource = self._parsed_resource return ret
Loads data using pysat.utils.load_netcdf4. This routine is called as needed by pysat. It is not intended for direct user interaction. Parameters ---------- fnames : array-like iterable of filename strings, full path, to data files to be loaded. This input is nominally provided by pysat itself. tag : string tag name used to identify particular data set to be loaded. This input is nominally provided by pysat itself. sat_id : string Satellite ID used to identify particular data set to be loaded. This input is nominally provided by pysat itself. **kwargs : extra keywords Passthrough for additional keyword arguments specified when instantiating an Instrument object. These additional keywords are passed through to this routine by pysat. Returns ------- data, metadata Data and Metadata are formatted for pysat. Data is a pandas DataFrame while metadata is a pysat.Meta instance. Note ---- Any additional keyword arguments passed to pysat.Instrument upon instantiation are passed along to this routine and through to the load_netcdf4 call. Examples -------- :: inst = pysat.Instrument('sport', 'ivm') inst.load(2019,1) # create quick Instrument object for a new, random netCDF4 file # define filename template string to identify files # this is normally done by instrument code, but in this case # there is no built in pysat instrument support # presumes files are named default_2019-01-01.NC format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC' inst = pysat.Instrument('netcdf', 'pandas', custom_kwarg='test', data_path='./', format_str=format_str) inst.load(2019,1) def load(fnames, tag=None, sat_id=None, **kwargs):
    """Loads data using pysat.utils.load_netcdf4.

    This routine is called as needed by pysat. It is not intended
    for direct user interaction.

    Parameters
    ----------
    fnames : array-like
        iterable of filename strings, full path, to data files to be
        loaded. This input is nominally provided by pysat itself.
    tag : string
        tag name used to identify particular data set to be loaded.
        This input is nominally provided by pysat itself.
    sat_id : string
        Satellite ID used to identify particular data set to be loaded.
        This input is nominally provided by pysat itself.
    **kwargs : extra keywords
        Passthrough for additional keyword arguments specified when
        instantiating an Instrument object. These additional keywords
        are passed through to this routine by pysat.

    Returns
    -------
    data, metadata
        Data and Metadata are formatted for pysat. Data is a pandas
        DataFrame while metadata is a pysat.Meta instance.

    Note
    ----
    Any additional keyword arguments passed to pysat.Instrument
    upon instantiation are passed along to this routine and through
    to the load_netcdf4 call.

    Examples
    --------
    ::

        inst = pysat.Instrument('sport', 'ivm')
        inst.load(2019,1)

        # create quick Instrument object for a new, random netCDF4 file
        # define filename template string to identify files
        # this is normally done by instrument code, but in this case
        # there is no built in pysat instrument support
        # presumes files are named default_2019-01-01.NC
        format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC'
        inst = pysat.Instrument('netcdf', 'pandas', custom_kwarg='test',
                                data_path='./', format_str=format_str)
        inst.load(2019,1)

    """
    return pysat.utils.load_netcdf4(fnames, **kwargs)
Produce a list of files corresponding to format_str located at data_path. This routine is invoked by pysat and is not intended for direct use by the end user. Multiple data levels may be supported via the 'tag' and 'sat_id' input strings. Parameters ---------- tag : string ('') tag name used to identify particular data set to be loaded. This input is nominally provided by pysat itself. sat_id : string ('') Satellite ID used to identify particular data set to be loaded. This input is nominally provided by pysat itself. data_path : string Full path to directory containing files to be loaded. This is provided by pysat. The user may specify their own data path at Instrument instantiation and it will appear here. format_str : string (None) String template used to parse the datasets filenames. If a user supplies a template string at Instrument instantiation then it will appear here, otherwise defaults to None. Returns ------- pandas.Series Series of filename strings, including the path, indexed by datetime. Examples -------- :: If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the template is 'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_v{version:02d}r{revision:04d}.NC' Note ---- The returned Series should not have any duplicate datetimes. If there are multiple versions of a file the most recent version should be kept and the rest discarded. This routine uses the pysat.Files.from_os constructor, thus the returned files are up to pysat specifications. Normally the format_str for each supported tag and sat_id is defined within this routine. However, as this is a generic routine, those definitions can't be made here. This method could be used in an instrument specific module where the list_files routine in the new package defines the format_str based upon inputs, then calls this routine passing both data_path and format_str. Alternately, the list_files routine in nasa_cdaweb_methods may also be used and has more built in functionality. Supported tags and format strings may be defined within the new instrument module and passed as arguments to nasa_cdaweb_methods.list_files . For an example on using this routine, see pysat/instrument/cnofs_ivm.py or cnofs_vefi, cnofs_plp, omni_hro, timed_see, etc. def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Produce a list of files corresponding to format_str
    located at data_path.

    This routine is invoked by pysat and is not intended for direct
    use by the end user. Multiple data levels may be supported via
    the 'tag' and 'sat_id' input strings.

    Parameters
    ----------
    tag : string ('')
        tag name used to identify particular data set to be loaded.
        This input is nominally provided by pysat itself.
    sat_id : string ('')
        Satellite ID used to identify particular data set to be loaded.
        This input is nominally provided by pysat itself.
    data_path : string
        Full path to directory containing files to be loaded. This
        is provided by pysat. The user may specify their own data path
        at Instrument instantiation and it will appear here.
    format_str : string (None)
        String template used to parse the datasets filenames. If a user
        supplies a template string at Instrument instantiation
        then it will appear here, otherwise defaults to None.

    Returns
    -------
    pandas.Series
        Series of filename strings, including the path, indexed by
        datetime.

    Examples
    --------
    ::

        If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the
        template is
        'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_' +
        'v{version:02d}r{revision:04d}.NC'

    Note
    ----
    The returned Series should not have any duplicate datetimes. If there
    are multiple versions of a file the most recent version should be
    kept and the rest discarded. This routine uses the pysat.Files.from_os
    constructor, thus the returned files are up to pysat specifications.

    Normally the format_str for each supported tag and sat_id is defined
    within this routine. However, as this is a generic routine, those
    definitions can't be made here. This method could be used in an
    instrument specific module where the list_files routine in the
    new package defines the format_str based upon inputs, then calls this
    routine passing both data_path and format_str.

    Alternately, the list_files routine in nasa_cdaweb_methods may also
    be used and has more built in functionality. Supported tags and
    format strings may be defined within the new instrument module and
    passed as arguments to nasa_cdaweb_methods.list_files . For an
    example on using this routine, see pysat/instrument/cnofs_ivm.py
    or cnofs_vefi, cnofs_plp, omni_hro, timed_see, etc.

    """
    return pysat.Files.from_os(data_path=data_path, format_str=format_str)
indicate it's a command of naviseccli :param f: function that returns the command in list :return: command execution result def command(f): """ indicate it's a command of naviseccli :param f: function that returns the command in list :return: command execution result """ @functools.wraps(f) def func_wrapper(self, *argv, **kwargs): if 'ip' in kwargs: ip = kwargs['ip'] del kwargs['ip'] else: ip = None commands = _get_commands(f, self, *argv, **kwargs) return self.execute(commands, ip=ip) return func_wrapper
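A usage sketch, assuming the module-level _get_commands helper assembles the token list returned by the wrapped method (the class and method names here are hypothetical) ::

    class NaviClient(object):
        def execute(self, commands, ip=None):
            # stand-in for the real naviseccli dispatch
            print(ip, commands)

        @command
        def get_disk(self, disk_id):
            # only builds the command tokens; @command handles execution
            return ['getdisk', disk_id]

    NaviClient().get_disk('0_0_1', ip='10.0.0.1')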
indicate it's a command that needs to be called on both SPs :param f: function that returns the command in list :return: command execution result on both SPs (tuple of 2) def duel_command(f):
    """ indicate it's a command that needs to be called on both SPs

    :param f: function that returns the command in list
    :return: command execution result on both SPs (tuple of 2)
    """
    @functools.wraps(f)
    def func_wrapper(self, *argv, **kwargs):
        commands = _get_commands(f, self, *argv, **kwargs)
        return self.execute_dual(commands)

    return func_wrapper
Return new size accounting for the metadata. def supplement_filesystem(old_size, user_cap=False): """Return new size accounting for the metadata.""" new_size = old_size if user_cap: if old_size <= _GiB_to_Byte(1.5): new_size = _GiB_to_Byte(3) else: new_size += _GiB_to_Byte(1.5) return int(new_size)
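Assuming _GiB_to_Byte converts gibibytes to bytes (x * 1024 ** 3), the sizing rule works out as ::

    GiB = 1024 ** 3
    supplement_filesystem(1 * GiB, user_cap=True)   # -> 3 GiB, padded to floor
    supplement_filesystem(10 * GiB, user_cap=True)  # -> 11.5 GiB, +1.5 GiB metadata
    supplement_filesystem(10 * GiB)                 # -> 10 GiB, unchanged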
synchronize on obj if obj is supplied. :param obj: the obj to lock on. if none, lock to the function :return: return of the func. def synchronized(cls, obj=None): """ synchronize on obj if obj is supplied. :param obj: the obj to lock on. if none, lock to the function :return: return of the func. """ def get_key(f, o): if o is None: key = hash(f) else: key = hash(o) return key def get_lock(f, o): key = get_key(f, o) if key not in cls.lock_map: with cls.lock_map_lock: if key not in cls.lock_map: cls.lock_map[key] = _init_lock() return cls.lock_map[key] def wrap(f): @functools.wraps(f) def new_func(*args, **kw): with get_lock(f, obj): return f(*args, **kw) return new_func return wrap
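A usage sketch; `LockHelper` stands in for the enclosing class, which is not shown in this excerpt ::

    counter = {'value': 0}

    @LockHelper.synchronized('shared-counter')
    def increment():
        # every call locking on the same key object shares one lock
        counter['value'] += 1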
Re-enqueue until the max retries are reached. def re_enqueue(self, item):
        """Re-enqueue until the max retries are reached."""
        if 'retries' in item:
            retries = item['retries']
            if retries >= self.MAX_RETRIES:
                log.warn("Failed to execute {} after {} retries, giving "
                         "it up.".format(item['method'], retries))
            else:
                retries += 1
                item['retries'] = retries
                self._q.put_nowait(item)
        else:
            item['retries'] = 1
            self._q.put_nowait(item)
Internal decorator to define criteria comparison operations. def _support_op(*args):
    """Internal decorator to define criteria comparison operations."""
    def inner(func):
        for one_arg in args:
            _op_mapping_[one_arg] = func
        return func

    return inner
Routine to return VEFI data cleaned to the specified level Parameters ----------- inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- 'dusty' or 'clean' removes data when interpolation flag is set to 1 def clean(inst): """Routine to return VEFI data cleaned to the specified level Parameters ----------- inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- 'dusty' or 'clean' removes data when interpolation flag is set to 1 """ if (inst.clean_level == 'dusty') | (inst.clean_level == 'clean'): idx, = np.where(inst['B_flag'] == 0) inst.data = inst[idx, :] return None
Removes leading text on ICON project variable names

Parameters
----------
inst : pysat.Instrument
    ICON associated pysat.Instrument object
target : str
    Leading string to remove. If none supplied, ICON project standards are
    used to identify and remove leading text

Returns
-------
None
    Modifies Instrument object in place

def remove_icon_names(inst, target=None):
    """Removes leading text on ICON project variable names

    Parameters
    ----------
    inst : pysat.Instrument
        ICON associated pysat.Instrument object
    target : str
        Leading string to remove. If none supplied, ICON project
        standards are used to identify and remove leading text

    Returns
    -------
    None
        Modifies Instrument object in place

    """
    if target is None:
        lev = inst.tag
        if lev == 'level_2':
            lev = 'L2'
        elif lev == 'level_0':
            lev = 'L0'
        elif lev == 'level_0p':
            lev = 'L0P'
        elif lev == 'level_1.5':
            lev = 'L1-5'
        elif lev == 'level_1':
            lev = 'L1'
        else:
            raise ValueError('Unknown ICON data level')

        # get instrument code
        sid = inst.sat_id.lower()
        if sid == 'a':
            sid = 'IVM_A'
        elif sid == 'b':
            sid = 'IVM_B'
        else:
            raise ValueError('Unknown ICON satellite ID')

        prepend_str = '_'.join(('ICON', lev, sid)) + '_'
    else:
        prepend_str = target

    inst.data.rename(columns=lambda x: x.split(prepend_str)[-1],
                     inplace=True)
    inst.meta.data.rename(index=lambda x: x.split(prepend_str)[-1],
                          inplace=True)
    orig_keys = inst.meta.keys_nD()
    for key in orig_keys:
        new_key = key.split(prepend_str)[-1]
        new_meta = inst.meta.pop(key)
        new_meta.data.rename(index=lambda x: x.split(prepend_str)[-1],
                             inplace=True)
        inst.meta[new_key] = new_meta

    return
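A standalone illustration of the same prefix strip on a toy DataFrame; the column names below are made up in the ICON style.

import pandas as pd

df = pd.DataFrame(columns=['ICON_L2_IVM_A_Ni', 'ICON_L2_IVM_A_Te'])
prepend_str = 'ICON_L2_IVM_A_'
# split on the prefix and keep the trailing piece, as done above
df.rename(columns=lambda x: x.split(prepend_str)[-1], inplace=True)
print(list(df.columns))   # ['Ni', 'Te']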
Return a Pandas Series of every file for chosen satellite data

Parameters
----------
tag : (string or NoneType)
    Denotes type of file to load. Accepted types are '1min' and '5min'.
    (default=None)
sat_id : (string or NoneType)
    Specifies the satellite ID for a constellation. Not used.
    (default=None)
data_path : (string or NoneType)
    Path to data directory. If None is specified, the value previously
    set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
    User specified file format. If None is specified, the default
    formats associated with the supplied tags are used. (default=None)

Returns
-------
pysat.Files.from_os : (pysat._files.Files)
    A class containing the verified available files

def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Return a Pandas Series of every file for chosen satellite data

    Parameters
    ----------
    tag : (string or NoneType)
        Denotes type of file to load. Accepted types are '1min' and
        '5min'. (default=None)
    sat_id : (string or NoneType)
        Specifies the satellite ID for a constellation. Not used.
        (default=None)
    data_path : (string or NoneType)
        Path to data directory. If None is specified, the value
        previously set in Instrument.files.data_path is used.
        (default=None)
    format_str : (string or NoneType)
        User specified file format. If None is specified, the default
        formats associated with the supplied tags are used.
        (default=None)

    Returns
    -------
    pysat.Files.from_os : (pysat._files.Files)
        A class containing the verified available files
    """
    if format_str is None and data_path is not None:
        if (tag == '1min') | (tag == '5min'):
            min_fmt = ''.join(['omni_hro_', tag,
                               '{year:4d}{month:02d}{day:02d}_v01.cdf'])
            files = pysat.Files.from_os(data_path=data_path,
                                        format_str=min_fmt)
            # files are by month, just add date to monthly filename for
            # each day of the month. load routine will use date to select
            # out appropriate data
            if not files.empty:
                files.loc[files.index[-1] + pds.DateOffset(months=1)
                          - pds.DateOffset(days=1)] = files.iloc[-1]
                files = files.asfreq('D', 'pad')
                # add the date to the filename
                files = files + '_' + files.index.strftime('%Y-%m-%d')
            return files
        else:
            raise ValueError('Unknown tag')
    elif format_str is None:
        estr = 'A directory must be passed to the loading routine for OMNI HRO'
        raise ValueError(estr)
    else:
        return pysat.Files.from_os(data_path=data_path,
                                   format_str=format_str)
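A standalone illustration of the monthly-to-daily padding trick used above, with a toy two-month series standing in for the OMNI file list: the last file is duplicated at its month's end, then forward-filled to a daily index so each day maps to its month's file.

import pandas as pd

idx = pd.to_datetime(['2019-01-01', '2019-02-01'])
files = pd.Series(['jan.cdf', 'feb.cdf'], index=idx)

# duplicate the last file at month's end, then forward-fill to daily
files.loc[files.index[-1] + pd.DateOffset(months=1)
          - pd.DateOffset(days=1)] = files.iloc[-1]
files = files.asfreq('D', 'pad')
print(files[pd.Timestamp('2019-02-15')])   # 'feb.cdf'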
OMNI data is time-shifted to the bow shock. This routine time-shifts it again to the intersection with the magnetic pole.

Parameters
----------
inst : Instrument class object
    Instrument with OMNI HRO data

Notes
-----
Time shift calculated using distance to bow shock nose (BSN) and velocity of solar wind along x-direction.

Warnings
--------
Use at own risk.

def time_shift_to_magnetic_poles(inst):
    """ OMNI data is time-shifted to the bow shock. This routine
    time-shifts it again to the intersection with the magnetic pole.

    Parameters
    ----------
    inst : Instrument class object
        Instrument with OMNI HRO data

    Notes
    -----
    Time shift calculated using distance to bow shock nose (BSN) and
    velocity of solar wind along x-direction.

    Warnings
    --------
    Use at own risk.
    """
    # need to fill in Vx to get an estimate of what is going on
    inst['Vx'] = inst['Vx'].interpolate('nearest')
    inst['Vx'] = inst['Vx'].fillna(method='backfill')
    inst['Vx'] = inst['Vx'].fillna(method='pad')

    inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
    inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
    inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')

    # make sure there are no gaps larger than a minute
    inst.data = inst.data.resample('1T').interpolate('time')

    # BSN_x is in Earth radii and Vx in km/s, so the quotient is seconds
    time_x = inst['BSN_x'] * 6371.2 / -inst['Vx']
    idx, = np.where(np.isnan(time_x))
    if len(idx) > 0:
        print(time_x[idx])
        print(time_x)
    time_x_offset = [pds.DateOffset(seconds=time)
                     for time in time_x.astype(int)]
    new_index = []
    for i, time in enumerate(time_x_offset):
        new_index.append(inst.data.index[i] + time)
    inst.data.index = new_index
    inst.data = inst.data.sort_index()
    return
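A back-of-the-envelope check of the per-sample shift, using representative values (13 Earth radii to the bow shock nose, a 400 km/s sunward-negative solar wind):

bsn_x_re = 13.0               # bow shock nose position, Earth radii
vx_km_s = -400.0              # solar wind x velocity, km/s
delay_s = bsn_x_re * 6371.2 / -vx_km_s
print(round(delay_s))         # ~207 s of additional propagation delay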
Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane Parameters ----------- inst : pysat.Instrument Instrument with OMNI HRO data def calculate_clock_angle(inst): """ Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane Parameters ----------- inst : pysat.Instrument Instrument with OMNI HRO data """ # Calculate clock angle in degrees clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM'])) clock_angle[clock_angle < 0.0] += 360.0 inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index) # Calculate magnitude of IMF in Y-Z plane inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 + inst['BZ_GSM']**2), index=inst.data.index) return
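A single-point check of the two quantities, assuming BY_GSM = -5 nT and BZ_GSM = 5 nT:

import numpy as np

angle = np.degrees(np.arctan2(-5.0, 5.0))
if angle < 0.0:
    angle += 360.0                        # map into [0, 360)
print(angle)                              # 315.0
print(np.sqrt((-5.0)**2 + 5.0**2))        # |B_YZ| ~= 7.07 nT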
Calculate IMF steadiness using clock angle standard deviation and the coefficient of variation of the IMF magnitude in the GSM Y-Z plane

Parameters
----------
inst : pysat.Instrument
    Instrument with OMNI HRO data
steady_window : int
    Window for calculating running statistical moments in min
    (default=15)
min_window_frac : float
    Minimum fraction of points in a window for steadiness to be
    calculated (default=0.75)
max_clock_angle_std : float
    Maximum standard deviation of the clock angle in degrees
    (default=90.0/np.pi, about 28.65 degrees)
max_bmag_cv : float
    Maximum coefficient of variation of the IMF magnitude in the GSM
    Y-Z plane (default=0.5)

def calculate_imf_steadiness(inst, steady_window=15, min_window_frac=0.75,
                             max_clock_angle_std=90.0/np.pi, max_bmag_cv=0.5):
    """ Calculate IMF steadiness using clock angle standard deviation and
    the coefficient of variation of the IMF magnitude in the GSM Y-Z plane

    Parameters
    ----------
    inst : pysat.Instrument
        Instrument with OMNI HRO data
    steady_window : int
        Window for calculating running statistical moments in min
        (default=15)
    min_window_frac : float
        Minimum fraction of points in a window for steadiness to be
        calculated (default=0.75)
    max_clock_angle_std : float
        Maximum standard deviation of the clock angle in degrees
        (default=90.0/np.pi, about 28.65 degrees)
    max_bmag_cv : float
        Maximum coefficient of variation of the IMF magnitude in the GSM
        Y-Z plane (default=0.5)
    """
    # We are not going to interpolate through missing values
    sample_rate = int(inst.tag[0])
    max_wnum = np.floor(steady_window / sample_rate)
    if max_wnum != steady_window / sample_rate:
        steady_window = max_wnum * sample_rate
        print("WARNING: sample rate is not a factor of the statistical window")
        print("new statistical window is {:.1f}".format(steady_window))

    min_wnum = int(np.ceil(max_wnum * min_window_frac))

    # Calculate the running coefficient of variation of the BYZ magnitude
    byz_mean = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
                                       window=steady_window).mean()
    byz_std = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
                                      window=steady_window).std()
    inst['BYZ_CV'] = pds.Series(byz_std / byz_mean, index=inst.data.index)

    # Calculate the running circular standard deviation of the clock angle
    circ_kwargs = {'high': 360.0, 'low': 0.0}
    ca_std = inst['clock_angle'].rolling(min_periods=min_wnum,
                                         window=steady_window,
                                         center=True).apply(
        pysat.utils.nan_circstd, kwargs=circ_kwargs)
    inst['clock_angle_std'] = pds.Series(ca_std, index=inst.data.index)

    # Determine how long the clock angle and IMF magnitude are steady
    imf_steady = np.zeros(shape=inst.data.index.shape)
    steady = False
    for i, cv in enumerate(inst.data['BYZ_CV']):
        if steady:
            del_min = int((inst.data.index[i]
                           - inst.data.index[i-1]).total_seconds() / 60.0)
            if np.isnan(cv) or np.isnan(ca_std.iloc[i]) or \
                    del_min > sample_rate:
                # Reset the steadiness flag if fill values are encountered,
                # or if an entry is missing
                steady = False

        if cv <= max_bmag_cv and ca_std.iloc[i] <= max_clock_angle_std:
            # Steadiness conditions have been met
            if steady:
                imf_steady[i] = imf_steady[i-1]
            imf_steady[i] += sample_rate
            steady = True

    inst['IMF_Steady'] = pds.Series(imf_steady, index=inst.data.index)
    return
Calculate the dayside reconnection rate (Milan et al. 2014) Parameters ----------- inst : pysat.Instrument Instrument with OMNI HRO data, requires BYZ_GSM and clock_angle Notes -------- recon_day = 3.8 Re (Vx / 4e5 m/s)^1/3 Vx B_yz (sin(theta/2))^9/2 def calculate_dayside_reconnection(inst): """ Calculate the dayside reconnection rate (Milan et al. 2014) Parameters ----------- inst : pysat.Instrument Instrument with OMNI HRO data, requires BYZ_GSM and clock_angle Notes -------- recon_day = 3.8 Re (Vx / 4e5 m/s)^1/3 Vx B_yz (sin(theta/2))^9/2 """ rearth = 6371008.8 sin_htheta = np.power(np.sin(np.radians(0.5 * inst['clock_angle'])), 4.5) byz = inst['BYZ_GSM'] * 1.0e-9 vx = inst['flow_speed'] * 1000.0 recon_day = 3.8 * rearth * vx * byz * sin_htheta * np.power((vx / 4.0e5), 1.0/3.0) inst['recon_day'] = pds.Series(recon_day, index=inst.data.index) return
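A single-point evaluation of the formula with representative values (Vx = 400 km/s, |B_YZ| = 5 nT, clock angle = 90 degrees); at these inputs the velocity ratio term is exactly 1.

import numpy as np

rearth = 6371008.8                              # Earth radius, m
vx = 400.0e3                                    # m/s
byz = 5.0e-9                                    # T
sin_htheta = np.sin(np.radians(0.5 * 90.0))**4.5
recon_day = (3.8 * rearth * vx * byz * sin_htheta
             * np.power(vx / 4.0e5, 1.0 / 3.0))
print(recon_day)                                # ~1.0e4 V (about 10 kV)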
Clear all ACE entries of the share.

:param white_list: list of usernames whose access entries won't be cleared
:return: SID list of ACE entries removed successfully

def clear_access(self, white_list=None):
    """ Clear all ACE entries of the share.

    :param white_list: list of usernames whose access entries won't be
        cleared
    :return: SID list of ACE entries removed successfully
    """
    access_entries = self.get_ace_list()
    sid_list = access_entries.sid_list
    if white_list:
        sid_white_list = [UnityAclUser.get_sid(self._cli,
                                               user,
                                               self.cifs_server.domain)
                          for user in white_list]
        sid_list = list(set(sid_list) - set(sid_white_list))
    resp = self.delete_ace(sid=sid_list)
    resp.raise_if_err()
    return sid_list
Delete ACE(s) for the share. The caller may supply either the domain and username, or the SID(s) of the user.

:param domain: domain of the user
:param user: username
:param sid: SID of the user, or a list of SIDs
:return: REST API response

def delete_ace(self, domain=None, user=None, sid=None):
    """ Delete ACE(s) for the share.

    The caller may supply either the domain and username, or the SID(s)
    of the user.

    :param domain: domain of the user
    :param user: username
    :param sid: SID of the user, or a list of SIDs
    :return: REST API response
    """
    if sid is None:
        if domain is None:
            domain = self.cifs_server.domain
        sid = UnityAclUser.get_sid(self._cli, user=user, domain=domain)
    if isinstance(sid, six.string_types):
        sid = [sid]
    ace_list = [self._make_remove_ace_entry(s) for s in sid]

    resp = self.action("setACEs", cifsShareACEs=ace_list)
    resp.raise_if_err()
    return resp
Aggregator for ioclass_luns and ioclass_snapshots. def luns(self): """Aggregator for ioclass_luns and ioclass_snapshots.""" lun_list, smp_list = [], [] if self.ioclass_luns: lun_list = map(lambda l: VNXLun(lun_id=l.lun_id, name=l.name, cli=self._cli), self.ioclass_luns) if self.ioclass_snapshots: smp_list = map(lambda smp: VNXLun(name=smp.name, cli=self._cli), self.ioclass_snapshots) return list(lun_list) + list(smp_list)
Returns policy which contains this ioclass. def policy(self): """Returns policy which contains this ioclass.""" policies = VNXIOPolicy.get(cli=self._cli) ret = None for policy in policies: contained = policy.ioclasses.name if self._get_name() in contained: ret = VNXIOPolicy.get(name=policy.name, cli=self._cli) break return ret
Overwrite the current properties for a VNX ioclass. :param new_name: new name for the ioclass :param iotype: can be 'rw', 'r' or 'w' :param lun_ids: list of LUN IDs :param smp_names: list of Snapshot Mount Point names :param ctrlmethod: the new CtrlMethod :param minsize: minimal size in kb :param maxsize: maximum size in kb def modify(self, new_name=None, iotype=None, lun_ids=None, smp_names=None, ctrlmethod=None, minsize=None, maxsize=None): """Overwrite the current properties for a VNX ioclass. :param new_name: new name for the ioclass :param iotype: can be 'rw', 'r' or 'w' :param lun_ids: list of LUN IDs :param smp_names: list of Snapshot Mount Point names :param ctrlmethod: the new CtrlMethod :param minsize: minimal size in kb :param maxsize: maximum size in kb """ if not any([new_name, iotype, lun_ids, smp_names, ctrlmethod]): raise ValueError('Cannot apply modification, please specify ' 'parameters to modify.') def _do_modify(): out = self._cli.modify_ioclass( self._get_name(), new_name, iotype, lun_ids, smp_names, ctrlmethod, minsize, maxsize) ex.raise_if_err(out, default=ex.VNXIOClassError) try: _do_modify() except ex.VNXIOCLassRunningError: with restart_policy(self.policy): _do_modify() return VNXIOClass(new_name if new_name else self._get_name(), self._cli)
A wrapper for the modify method. .. note:: This API only appends LUNs to the existing LUNs.

def add_lun(self, luns):
    """A wrapper for the modify method.

    .. note:: This API only appends LUNs to the existing LUNs.
    """
    curr_lun_ids, curr_smp_names = self._get_current_names()
    luns = normalize_lun(luns, self._cli)
    new_ids, new_smps = convert_lun(luns)
    if new_ids:
        curr_lun_ids.extend(new_ids)
    if new_smps:
        curr_smp_names.extend(new_smps)
    return self.modify(lun_ids=curr_lun_ids, smp_names=curr_smp_names)
Add one VNXIOClass instance to the policy. .. note:: Due to a limitation of VNX, the policy must be stopped first.

def add_class(self, ioclass):
    """Add one VNXIOClass instance to the policy.

    .. note:: Due to a limitation of VNX, the policy must be stopped
        first.
    """
    current_ioclasses = self.ioclasses
    if ioclass.name in current_ioclasses.name:
        return
    current_ioclasses.append(ioclass)
    self.modify(new_ioclasses=current_ioclasses)
Remove a VNXIOClass instance from the policy.

def remove_class(self, ioclass):
    """Remove a VNXIOClass instance from the policy."""
    current_ioclasses = self.ioclasses
    # materialize the filter so the result survives repeated iteration
    # under Python 3
    new_ioclasses = list(filter(lambda x: x.name != ioclass.name,
                                current_ioclasses))
    self.modify(new_ioclasses=new_ioclasses)
Replaces the existing LUNs with lun_list.

def replace_lun(self, *lun_list):
    """Replaces the existing LUNs with lun_list."""
    lun_add = self._prepare_luns_add(lun_list)
    lun_remove = self._prepare_luns_remove(lun_list, False)
    return self.modify(lun_add=lun_add, lun_remove=lun_remove)
Updates the LUNs in CG, adding the ones in `add_luns` and removing the ones in `remove_luns` def update_lun(self, add_luns=None, remove_luns=None): """Updates the LUNs in CG, adding the ones in `add_luns` and removing the ones in `remove_luns`""" if not add_luns and not remove_luns: log.debug("Empty add_luns and remove_luns passed in, " "skip update_lun.") return RESP_OK lun_add = self._prepare_luns_add(add_luns) lun_remove = self._prepare_luns_remove(remove_luns, True) return self.modify(lun_add=lun_add, lun_remove=lun_remove)
Routine to return FPMU data cleaned to the specified level

Parameters
----------
inst : (pysat.Instrument)
    Instrument class object, whose attribute clean_level is used to
    return the desired level of data selectivity.

Returns
-------
Void : (NoneType)
    data in inst is modified in-place.

Notes
-----
No cleaning currently available for FPMU

def clean(inst):
    """Routine to return FPMU data cleaned to the specified level

    Parameters
    ----------
    inst : (pysat.Instrument)
        Instrument class object, whose attribute clean_level is used to
        return the desired level of data selectivity.

    Returns
    -------
    Void : (NoneType)
        data in inst is modified in-place.

    Notes
    -----
    No cleaning currently available for FPMU
    """
    inst.data.replace(-999., np.nan, inplace=True)            # Te
    inst.data.replace(-9.9999998e+30, np.nan, inplace=True)   # Ni
    return None
Attaches info returned by instrument list_files routine to Instrument object.

def _attach_files(self, files_info):
    """Attaches info returned by instrument list_files routine to
    Instrument object.
    """
    if not files_info.empty:
        if len(files_info.index.unique()) != len(files_info):
            estr = 'WARNING! Duplicate datetimes in provided file '
            estr = '{:s}information.\nKeeping one of each '.format(estr)
            estr = '{:s}of the duplicates, dropping the rest.'.format(estr)
            print(estr)
            print(files_info.index[files_info.index.duplicated()])
            # keep the first occurrence of each datetime (positional)
            idx = np.unique(files_info.index, return_index=True)
            files_info = files_info.iloc[idx[1]]
        self.files = files_info.sort_index()
        date = files_info.index[0]
        self.start_date = pds.Timestamp(date.year, date.month, date.day)
        date = files_info.index[-1]
        self.stop_date = pds.Timestamp(date.year, date.month, date.day)
    else:
        self.start_date = None
        self.stop_date = None
        # convert to object type; necessary if Series is empty,
        # enables == checks with strings
        self.files = files_info.astype(np.dtype('O'))
Store currently loaded filelist for instrument onto filesystem def _store(self): """Store currently loaded filelist for instrument onto filesystem""" name = self.stored_file_name # check if current file data is different than stored file list # if so, move file list to previous file list, store current to file # if not, do nothing stored_files = self._load() if len(stored_files) != len(self.files): # # of items is different, things are new new_flag = True elif len(stored_files) == len(self.files): # # of items equal, check specifically for equality if stored_files.eq(self.files).all(): new_flag = False else: # not equal, there are new files new_flag = True if new_flag: if self.write_to_disk: stored_files.to_csv(os.path.join(self.home_path, 'previous_'+name), date_format='%Y-%m-%d %H:%M:%S.%f') self.files.to_csv(os.path.join(self.home_path, name), date_format='%Y-%m-%d %H:%M:%S.%f') else: self._previous_file_list = stored_files self._current_file_list = self.files.copy() return
Load stored filelist and return as Pandas Series

Parameters
----------
prev_version : boolean
    if True, will load previous version of file list

Returns
-------
pandas.Series
    Full path file names are indexed by datetime
    Series is empty if there is no file list to load

def _load(self, prev_version=False):
    """Load stored filelist and return as Pandas Series

    Parameters
    ----------
    prev_version : boolean
        if True, will load previous version of file list

    Returns
    -------
    pandas.Series
        Full path file names are indexed by datetime
        Series is empty if there is no file list to load
    """
    fname = self.stored_file_name
    if prev_version:
        fname = os.path.join(self.home_path, 'previous_' + fname)
    else:
        fname = os.path.join(self.home_path, fname)

    if os.path.isfile(fname) and (os.path.getsize(fname) > 0):
        if self.write_to_disk:
            # squeeze the single data column down to a Series
            return pds.read_csv(fname, index_col=0, parse_dates=True,
                                header=None).squeeze('columns')
        else:
            # grab files from memory
            if prev_version:
                return self._previous_file_list
            else:
                return self._current_file_list
    else:
        return pds.Series([], dtype='a')
Update list of files, if there are changes. Calls underlying list_rtn for the particular science instrument. Typically, these routines search in the pysat provided path, pysat_data_dir/platform/name/tag/, where pysat_data_dir is set by pysat.utils.set_data_dir(path=path). def refresh(self): """Update list of files, if there are changes. Calls underlying list_rtn for the particular science instrument. Typically, these routines search in the pysat provided path, pysat_data_dir/platform/name/tag/, where pysat_data_dir is set by pysat.utils.set_data_dir(path=path). """ output_str = '{platform} {name} {tag} {sat_id}' output_str = output_str.format(platform=self._sat.platform, name=self._sat.name, tag=self._sat.tag, sat_id=self._sat.sat_id) output_str = " ".join(("pysat is searching for", output_str, "files.")) output_str = " ".join(output_str.split()) print (output_str) info = self._sat._list_rtn(tag=self._sat.tag, sat_id=self._sat.sat_id, data_path=self.data_path, format_str=self.file_format) if not info.empty: print('Found {ll:d} of them.'.format(ll=len(info))) else: estr = "Unable to find any files that match the supplied template. " estr += "If you have the necessary files please check pysat " estr += "settings and file locations (e.g. pysat.pysat_dir)." print(estr) info = self._remove_data_dir_path(info) self._attach_files(info) self._store()
List new files since last recorded file state.

pysat stores filenames in the user_home/.pysat directory. Returns a list of all new filenames since the last known change to files. Filenames are stored if there is a change and either update_files is True at instrument object level or files.refresh() is called.

Returns
-------
pandas.Series
    files are indexed by datetime

def get_new(self):
    """List new files since last recorded file state.

    pysat stores filenames in the user_home/.pysat directory. Returns
    a list of all new filenames since the last known change to files.
    Filenames are stored if there is a change and either update_files
    is True at instrument object level or files.refresh() is called.

    Returns
    -------
    pandas.Series
        files are indexed by datetime
    """
    # refresh files
    self.refresh()
    # current files
    new_info = self._load()
    # previous set of files
    old_info = self._load(prev_version=True)
    new_files = new_info[~new_info.isin(old_info)]
    return new_files
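The diff against the stored list is a plain Series anti-join; a standalone illustration with made-up filenames (note the ~ negation, which is what current pandas expects in place of the old unary minus):

import pandas as pd

old_info = pd.Series(['a.cdf', 'b.cdf'])
new_info = pd.Series(['a.cdf', 'b.cdf', 'c.cdf'])
new_files = new_info[~new_info.isin(old_info)]
print(new_files.tolist())   # ['c.cdf']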
Return index for a given filename.

Parameters
----------
fname : string
    filename

Note
----
If fname not found in the file information already attached to the instrument.files instance, then a files.refresh() call is made.

def get_index(self, fname):
    """Return index for a given filename.

    Parameters
    ----------
    fname : string
        filename

    Note
    ----
    If fname not found in the file information already attached
    to the instrument.files instance, then a files.refresh() call is
    made.
    """
    idx, = np.where(fname == self.files)
    if len(idx) == 0:
        # filename not in index, try reloading files from disk
        self.refresh()
        idx, = np.where(fname == np.array(self.files))

        if len(idx) == 0:
            raise ValueError('Could not find "' + fname +
                             '" in available file list. Valid Example: ' +
                             self.files.iloc[0])
    # return a scalar rather than array - otherwise introduces array to
    # index warnings.
    return idx[0]
Return a list of filenames between and including start and end.

Parameters
----------
start : array_like or single string
    filenames for start of returned filelist
end : array_like or single string
    filenames for inclusive end of list

Returns
-------
list of filenames between and including start and end over all intervals.

def get_file_array(self, start, end):
    """Return a list of filenames between and including start and end.

    Parameters
    ----------
    start : array_like or single string
        filenames for start of returned filelist
    end : array_like or single string
        filenames for inclusive end of list

    Returns
    -------
    list of filenames between and including start and end over all
    intervals.
    """
    if hasattr(start, '__iter__') & hasattr(end, '__iter__'):
        files = []
        for (sta, stp) in zip(start, end):
            id1 = self.get_index(sta)
            id2 = self.get_index(stp)
            files.extend(self.files.iloc[id1:id2 + 1])
    elif hasattr(start, '__iter__') | hasattr(end, '__iter__'):
        estr = 'Either both or none of the inputs need to be iterable'
        raise ValueError(estr)
    else:
        id1 = self.get_index(start)
        id2 = self.get_index(end)
        files = self.files.iloc[id1:id2 + 1].tolist()
    return files
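A standalone illustration of the interval expansion with a toy file list (the names are made up); each (start, end) pair contributes its inclusive slice.

import pandas as pd

files = pd.Series(['f0', 'f1', 'f2', 'f3', 'f4'])
starts, ends = ['f0', 'f3'], ['f1', 'f4']

out = []
for sta, stp in zip(starts, ends):
    i1 = int((files == sta).idxmax())   # first positional match
    i2 = int((files == stp).idxmax())
    out.extend(files.iloc[i1:i2 + 1])
print(out)   # ['f0', 'f1', 'f3', 'f4']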
Remove the data directory path from filenames

def _remove_data_dir_path(self, inp=None):
    """Remove the data directory path from filenames"""
    # TODO: add a check that data_path is actually part of the filename
    if inp is not None:
        split_str = os.path.join(self.data_path, '')
        return inp.apply(lambda x: x.split(split_str)[-1])