text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gaussfill(dem, size=3, newmask=None):
    """Gaussian filter with filling.

    Smooths `dem`, then restores the original values at valid (unmasked)
    pixels, so only the gaps receive smoothed values.  If `newmask` is
    given, it is applied to the result.
    """
    filled = gauss_fltr_astropy(dem, size=size)
    valid = ~dem.mask
    filled[valid] = dem[valid]
    if newmask is None:
        return filled
    return np.ma.array(filled, mask=newmask)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def median_fltr(dem, fsize=7, origmask=False):
    """Scipy.ndimage median filter

    Does not properly handle NaN (the filter sees NaN-filled pixels).

    Parameters:
        dem: masked array to filter
        fsize: filter window size in pixels
        origmask: if True, re-apply the input mask to the output
    Returns a masked array with NaN results masked.
    """
    print("Applying median filter with size %s" % fsize)
    #Fix: scipy.ndimage.filters was removed in SciPy 1.10; import from scipy.ndimage
    from scipy.ndimage import median_filter
    dem_filt_med = median_filter(dem.filled(np.nan), fsize)
    #Now mask all nans
    out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value)
    if origmask:
        out = np.ma.array(out, mask=dem.mask, fill_value=dem.fill_value)
    out.set_fill_value(dem.fill_value)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def median_fltr_opencv(dem, size=3, iterations=1):
    """OpenCV median filter

    Parameters:
        dem: input array (converted to masked array)
        size: median kernel size (OpenCV float32 path supports 3 or 5)
        iterations: number of filter passes
    Returns a masked array with invalid (NaN) results masked.
    """
    import cv2
    dem = malib.checkma(dem)
    if size > 5:
        print("Need to implement iteration")
    out = dem
    #Fix: run exactly `iterations` passes; the original `while n <= iterations`
    #off-by-one ran one extra pass
    for _ in range(iterations):
        dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size)
        out = np.ma.fix_invalid(dem_cv)
        out.set_fill_value(dem.fill_value)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def circular_mask(size):
    """Create a circular mask for an array

    Useful when sampling rasters for a laser shot.
    Returns a boolean (size, size) array that is True OUTSIDE the inscribed
    circle, suitable as a mask for np.ma.
    """
    #Fix: use integer division so the center is a pixel for odd sizes.
    #Under Python 3, the original true division produced a float radius and
    #an asymmetric, off-center grid from np.ogrid (the Python 2 behavior
    #was integer division).
    r = size // 2
    c = (r, r)
    y, x = np.ogrid[-c[0]:size - c[0], -c[1]:size - c[1]]
    mask = ~(x * x + y * y <= r * r)
    return mask
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def median_fltr_skimage(dem, radius=3, erode=1, origmask=False):
    """ Older skimage.filter.median_filter

    This smooths, removes noise and fills in nodata areas with median of
    valid pixels!  Effectively an inpainting routine.

    NOTE(review): depends on the legacy skimage.filter.median_filter API
    (present in scikit-image 0.9.3, removed in later releases) - confirm
    against the installed scikit-image version before use.

    Parameters:
        dem: input array (converted to float64 masked array)
        radius: median filter radius in pixels
        erode: iterations of island removal before filtering (0 disables)
        origmask: if True, re-apply the (hole-filled) original mask
    """
    #Note, ndimage doesn't properly handle ma - convert to nan
    dem = malib.checkma(dem)
    dem = dem.astype(np.float64)
    #Mask islands
    if erode > 0:
        print("Eroding islands smaller than %s pixels" % (erode * 2))
        dem = malib.mask_islands(dem, iterations=erode)
    print("Applying median filter with radius %s" % radius)
    #Note: this functionality was present in scikit-image 0.9.3
    import skimage.filter
    dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask)
    #Starting in version 0.10.0, this is the new filter
    #This is the new filter, but only supports uint8 or unit16
    #import skimage.filters
    #import skimage.morphology
    #dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask)
    #dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask)
    #Now mask all nans
    #skimage assigns the minimum value as nodata
    #CHECK THIS, seems pretty hacky
    #Also, looks like some valid values are masked at this stage, even though they should be above min
    ndv = np.min(dem_filt_med)
    #ndv = dem_filt_med.min() + 0.001
    out = np.ma.masked_less_equal(dem_filt_med, ndv)
    #Should probably replace the ndv with original ndv
    out.set_fill_value(dem.fill_value)
    if origmask:
        print("Applying original mask")
        #Allow filling of interior holes, but use original outer edge
        #maskfill = malib.maskfill(dem, iterations=radius)
        maskfill = malib.maskfill(dem)
        #dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value)
        out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dz_fltr(dem_fn, refdem_fn, perc=None, rangelim=(0,30), smooth=False):
    """Absolute elevation difference range filter using values from a source raster file and a reference raster file

    Warps both rasters to a common grid, then delegates to dz_fltr_ma.

    Parameters:
        dem_fn: filename of the DEM to filter
        refdem_fn: filename of the reference DEM
        perc, rangelim, smooth: passed through to dz_fltr_ma
    Returns the filtered masked array.
    """
    #Check that the reference DEM is readable
    #Fix: close the file handle (original left it open, leaking the descriptor)
    try:
        with open(refdem_fn):
            pass
    except IOError:
        sys.exit('Unable to open reference DEM: %s' % refdem_fn)
    from pygeotools.lib import warplib
    dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
    dem = iolib.ds_getma(dem_ds)
    refdem = iolib.ds_getma(refdem_ds)
    out = dz_fltr_ma(dem, refdem, perc, rangelim, smooth)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dz_fltr_ma(dem, refdem, perc=None, rangelim=(0,30), smooth=False):
    """Absolute elevation difference range filter using values from a source array and a reference array

    Masks pixels whose difference from the reference falls outside the
    given percentile range and/or the absolute range `rangelim`.

    Parameters:
        dem: masked array to filter
        refdem: reference masked array (same grid as dem)
        perc: optional (low, high) percentile limits on dz
        rangelim: (min, max) allowed |dz| in same units as input
        smooth: if True, Gaussian-smooth both inputs before differencing
    Returns a masked array with out-of-range pixels masked.
    """
    if smooth:
        refdem = gauss_fltr_astropy(refdem)
        dem = gauss_fltr_astropy(dem)
    dz = refdem - dem
    #This is True for invalid values in DEM, and should be masked
    demmask = np.ma.getmaskarray(dem)
    if perc:
        dz_perc = malib.calcperc(dz, perc)
        print("Applying dz percentile filter (%s%%, %s%%): (%0.1f, %0.1f)" % (perc[0], perc[1], dz_perc[0], dz_perc[1]))
        #This is True for invalid values
        perc_mask = ((dz < dz_perc[0]) | (dz > dz_perc[1])).filled(False)
        demmask = (demmask | perc_mask)
    if rangelim:
        #This is True for invalid values
        #Note: removed a dead `if False:` debug block that re-filtered low
        #elevations with a hardcoded cutoff
        range_mask = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)
        demmask = (demmask | range_mask)
    out = np.ma.array(dem, mask=demmask, fill_value=dem.fill_value)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def erode_edge(dem, iterations=1):
    """Erode pixels near nodata

    Dilates the nodata mask by `iterations` pixels, masking valid pixels
    adjacent to nodata.  Returns a new masked array; data is unchanged.
    """
    import scipy.ndimage as ndimage
    print('Eroding pixels near nodata: %i iterations' % iterations)
    mask = np.ma.getmaskarray(dem)
    #Fix: the scipy.ndimage.morphology namespace was removed in SciPy 1.10;
    #call the top-level function instead
    mask_dilate = ndimage.binary_dilation(mask, iterations=iterations)
    out = np.ma.array(dem, mask=mask_dilate)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def butter(dt_list, val, lowpass=1.0):
    """This is framework for a butterworth bandpass for 1D data
    Needs to be cleaned up and generalized

    Parameters:
        dt_list: sequence of datetime objects (assumed near-uniform spacing)
        val: 1D signal sampled at dt_list
        lowpass: unused (kept for interface compatibility)
    Returns the input signal with the tidal band removed (highpass residual).
    """
    import scipy.signal
    #Median sample interval in seconds (dt is typically 300 s, 5 min)
    dt_diff = np.diff(dt_list)
    dt_diff = np.array([dt.total_seconds() for dt in dt_diff])
    #dt_diff is a plain ndarray here, so np.median is equivalent to
    #malib.fast_median (50th percentile of valid values)
    dt = np.median(dt_diff)
    #f is 0.00333 Hz, 288 samples/day
    fs = 1./dt
    nyq = fs/2.
    if False:
        #Diagnostic PSD plots, disabled; matplotlib only needed here
        import matplotlib.pyplot as plt
        #psd, f = psd(z_msl, fs)
        sp_f, sp_psd = scipy.signal.periodogram(val, fs, detrend='linear')
        #sp_f, sp_psd = scipy.signal.welch(z_msl, fs, nperseg=2048)
        sp_f_days = 1./sp_f/86400.
        plt.figure()
        plt.plot(sp_f, sp_psd)
        plt.plot(sp_f_days, sp_psd)
        plt.semilogy(sp_f_days, sp_psd)
        plt.xlabel('Frequency')
        plt.ylabel('Power')
    print("Filtering tidal signal")
    #Define bandpass filter (cutoffs normalized to Nyquist)
    #f_min = dt/(86400*0.25)
    f_max = (1./(86400*0.1)) / nyq
    f_min = (1./(86400*1.8)) / nyq
    order = 6
    b, a = scipy.signal.butter(order, f_min, btype='highpass')
    #b, a = sp.signal.butter(order, (f_min, f_max), btype='bandpass')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq/np.pi)*w
    w_f_days = 1/w_f/86400.
    #plt.figure()
    #plt.plot(w_f_days, np.abs(h))
    #Highpass extracts the tidal-and-faster component
    val_f_tide = scipy.signal.filtfilt(b, a, val)
    b, a = scipy.signal.butter(order, f_max, btype='lowpass')
    #b, a = sp.signal.butter(order, (f_min, f_max), btype='bandstop')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq/np.pi)*w
    w_f_days = 1/w_f/86400.
    #plt.plot(w_f_days, np.abs(h))
    val_f_tide_denoise = scipy.signal.filtfilt(b, a, val_f_tide)
    #val_f_notide = sp.signal.filtfilt(b, a, val)
    val_f_notide = val - val_f_tide
    #Fix: the original computed this result but never returned it
    return val_f_notide
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def freq_filt(bma):
    """ This is a framework for 2D FFT filtering.
    It has not be tested or finished - might be a dead end
    See separate utility freq_analysis.py

    NOTE(review): this function has no return statement - the filtered
    array bf_filt is computed and then discarded.  Left as-is because the
    docstring explicitly marks it unfinished.
    """
    """
    Want to fit linear function to artifact line in freq space,
    Then mask everything near that line at distances of ~5-200 pixels,
    Or whatever the maximum CCD artifact dimension happens to be,
    This will depend on scaling - consult CCD map for interval
    """
    #Fill ndv with random data
    bf = malib.randomfill(bma)
    import scipy.fftpack
    f = scipy.fftpack.fft2(bf)
    ff = scipy.fftpack.fftshift(f)
    #Ben suggested a Hahn filter here, remove the low frequency, high amplitude information
    #Then do a second fft?
    #np.log(np.abs(ff))
    #perc = malib.calcperc(np.real(ff), perc=(80, 95))
    #malib.iv(numpy.real(ff), clim=perc)
    #See http://scipy-lectures.github.io/advanced/image_processing/
    #Starting at a,b, compute argmax along vertical axis for restricted range
    #Fit line to the x and y argmax values
    #Mask [argmax[y]-1:argmax[y]+1]
    #Create radial mask
    ff_dim = np.array(ff.shape)
    #NOTE(review): true division - under Python 3, a and b are floats, and
    #the float slice indices below will raise TypeError; needs // if revived
    a,b = ff_dim/2
    n = ff_dim.max()
    y,x = np.ogrid[-a:n-a, -b:n-b]
    #Inner/outer radii of the annular pass band, in frequency pixels
    r1 = 40
    r2 = 60
    ff_mask = np.ma.make_mask(ff)
    radial_mask = (r1**2 <= x**2 + y**2) & (x**2 + y**2 < r2**2)
    #Note issues with rounding indices here
    #Hacked in +1 for testing
    ff_mask[:] = radial_mask[a-ff_dim[0]/2:a+ff_dim[0], b-ff_dim[1]/2:b+1+ff_dim[1]/2]
    #Combine radial and line mask
    #Convert mask to 0-1, then feather
    fm = ff * ff_mask
    #Inverse fft
    bf_filt = scipy.fftpack.ifft2(scipy.fftpack.ifftshift(fm))
    #Apply original mask
    bf_filt = np.ma.masked_array(bf_filt, bma.mask)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stack_smooth(s_orig, size=7, save=False):
    """Run Gaussian smoothing filter on existing stack object.

    Returns a deep copy of `s_orig` with every layer smoothed and the
    derived products (stats, datestack, trend) recomputed; optionally
    writes products to disk.
    """
    from copy import deepcopy
    from pygeotools.lib import filtlib
    print("Copying original DEMStack")
    stack = deepcopy(s_orig)
    stack.stack_fn = os.path.splitext(s_orig.stack_fn)[0]+'_smooth%ipx.npz' % size
    #Smooth every layer in place on the copy
    n_layers = stack.ma_stack.shape[0]
    print("Smoothing all arrays in stack with %i px gaussian filter" % size)
    for layer_idx in range(n_layers):
        print('%i of %i' % (layer_idx+1, n_layers))
        stack.ma_stack[layer_idx] = filtlib.gauss_fltr_astropy(stack.ma_stack[layer_idx], size=size)
    #Recompute derived products that depend on the layer data
    if stack.stats:
        stack.compute_stats()
        if save:
            stack.write_stats()
    if stack.datestack and stack.date_list_o.count() > 1:
        stack.compute_dt_stats()
        if save:
            stack.write_datestack()
    if stack.trend:
        stack.compute_trend()
        if save:
            stack.write_trend()
    if save:
        stack.savestack()
    return stack
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stack_clip(s_orig, extent, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object with limited extent from an existing stack object

    Clips s_orig.ma_stack to `extent` (xmin, ymin, xmax, ymax in map
    coordinates), updates the geotransform, drops layers left empty by the
    clip, and returns the resulting subset stack.
    """
    #Should check for valid extent
    #This is not memory efficient, but is much simpler
    #To be safe, if we are saving out, create a copy to avoid overwriting
    if copy or save:
        from copy import deepcopy
        print("Copying original DEMStack")
        s = deepcopy(s_orig)
    else:
        #Want to be very careful here, as we could overwrite the original file
        s = s_orig
    from pygeotools.lib import geolib
    gt = s.gt
    s_shape = s.ma_stack.shape[1:3]
    #Compute pixel bounds for input extent
    min_x_px, max_y_px = geolib.mapToPixel(extent[0], extent[1], gt)
    max_x_px, min_y_px = geolib.mapToPixel(extent[2], extent[3], gt)
    #Clip to stack extent and round to whole integers
    min_x_px = int(max(0, min_x_px)+0.5)
    max_x_px = int(min(s_shape[1], max_x_px)+0.5)
    min_y_px = int(max(0, min_y_px)+0.5)
    max_y_px = int(min(s_shape[0], max_y_px)+0.5)
    #Clip the stack
    x_slice = slice(min_x_px,max_x_px)
    y_slice = slice(min_y_px,max_y_px)
    s.ma_stack = s.ma_stack[:, y_slice, x_slice]
    #Now update geospatial info
    #This returns the pixel center in map coordinates
    #Want to remove 0.5 px offset for upper left corner in gt
    out_ul = geolib.pixelToMap(min_x_px - 0.5, min_y_px - 0.5, gt)
    #Update stack geotransform
    s.gt[0] = out_ul[0]
    s.gt[3] = out_ul[1]
    #Update new stack extent
    s.get_extent()
    #Check for and discard empty arrays
    #Might be faster to reshape then np.ma.count(s.ma_stack, axis=1)
    count_list = np.array([i.count() for i in s.ma_stack])
    idx = count_list > 0
    #Output subset with valid data in next extent
    #fn_list, source, error, error_dict_list, date_list, date_list_o
    #Note: no need to copy again
    s_sub = get_stack_subset(s, idx, out_stack_fn=out_stack_fn, copy=False, save=False)
    print("Orig filename:", s_orig.stack_fn)
    print("Orig extent:", s_orig.extent)
    print("Orig dimensions:", s_orig.ma_stack.shape)
    print("Input extent:", extent)
    print("New filename:", s_sub.stack_fn)
    print("New extent:", s_sub.extent)
    print("New dimensions:", s_sub.ma_stack.shape)
    if save:
        #Never overwrite the original stack file
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s_sub.stack_fn):
            print("Original stack would be overwritten!")
            print("Skipping save")
        else:
            s_sub.save = True
            s_sub.savestack()
    #The following should be unchanged by clip - it is more efficient to clip these, but easier to regenerate
    #if s.stats:
    #stack_count, stack_mean, stack_min, stack_max, stack_std
    #s.stack_min = s.stack_min[y_slice, x_slice]
    #if s.datestack:
    #dt_ptp, dt_min, dt_max, dt_center
    #if s.med:
    #stack_med
    #if s.trend:
    #trend, intercept, detrended_std
    #Recompute stats/etc
    return s_sub
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_stack_subset(s_orig, idx, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object as a subset of an existing stack object

    `idx` is a boolean index array selecting which layers to keep.
    Returns the subset stack, or None if `idx` selects nothing.
    """
    #This must be a numpy boolean array
    idx = np.array(idx)
    if np.any(idx):
        #This is not memory efficient, but is much simpler
        #To be safe, if we are saving out, create a copy to avoid overwriting
        if copy or save:
            from copy import deepcopy
            print("Copying original DEMStack")
            s = deepcopy(s_orig)
        else:
            #Want to be very careful here, as we could overwrite the original file
            s = s_orig
        #Update fn_list
        #Note: need to change fn_list to np.array - object array, allows longer strings
        #s.fn_list = s.fn_list[idx]
        print("Original stack: %i" % len(s_orig.fn_list))
        s.fn_list = (np.array(s.fn_list)[idx]).tolist()
        print("Filtered stack: %i" % len(s.fn_list))
        #Update date_lists
        s.date_list = s.date_list[idx]
        s.date_list_o = s.date_list_o[idx]
        #Update ma
        s.ma_stack = s.ma_stack[idx]
        #Update source/error
        #s.source = s.source[idx]
        s.source = (np.array(s.source)[idx]).tolist()
        s.error = s.error[idx]
        s.error_dict_list = np.array(s.error_dict_list)[idx]
        #Update stack_fn
        #out_stack_fn should be full path, with npz
        if out_stack_fn is None:
            s.stack_fn = None
            s.get_stack_fn()
        else:
            s.stack_fn = out_stack_fn
        #Check to make sure we are not going to overwrite
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s.stack_fn):
            print("Warning: new stack has identical filename: %s" % s.stack_fn)
            print("As a precaution, new stack will not be saved")
            save = False
        s.save = save
        #Update stats
        if s.stats:
            s.compute_stats()
            if save:
                s.write_stats()
        #Update datestack
        if s.datestack and s.date_list_o.count() > 1:
            s.compute_dt_stats()
            if save:
                s.write_datestack()
        #Update trend
        if s.trend:
            s.compute_trend()
            if save:
                s.write_trend()
        if save:
            s.savestack()
    else:
        print("No valid entries for input index array")
        s = None
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stack_merge(s1, s2, out_stack_fn=None, sort=True, save=False):
    """Merge two stack objects

    Both stacks must share array dimensions, extent, and resolution.
    Layers are concatenated (optionally sorted by filename) and derived
    products recomputed where both inputs have them.  Returns a new stack
    (a deep copy of s1 with merged contents).
    """
    from pygeotools.lib import geolib
    from copy import deepcopy
    #Assumes input stacks have identical extent, resolution, and projection
    if s1.ma_stack.shape[1:3] != s2.ma_stack.shape[1:3]:
        print(s1.ma_stack.shape)
        print(s2.ma_stack.shape)
        sys.exit('Input stacks must have identical array dimensions')
    if not geolib.extent_compare(s1.extent, s2.extent):
        print(s1.extent)
        print(s2.extent)
        sys.exit('Input stacks must have identical extent')
    if not geolib.res_compare(s1.res, s2.res):
        print(s1.res)
        print(s2.res)
        sys.exit('Input stacks must have identical res')
    print("\nCombining fn_list and ma_stack")
    fn_list = np.array(s1.fn_list + s2.fn_list)
    if sort:
        #Sort based on filenames (should be datesort)
        sort_idx = np.argsort([os.path.split(x)[-1] for x in fn_list])
    else:
        #Ellipsis indexes the full array unchanged
        sort_idx = Ellipsis
    #Now pull out final, sorted order
    fn_list = fn_list[sort_idx]
    ma_stack = np.ma.vstack((s1.ma_stack, s2.ma_stack))[sort_idx]
    #date_list = np.ma.dstack(s1.date_list, s2.date_list)
    #date_list_o = np.ma.dstack(s1.date_list_o, s2.date_list_o)
    source = np.array(s1.source + s2.source)[sort_idx]
    error = np.ma.concatenate([s1.error, s2.error])[sort_idx]
    #These are object arrays
    error_dict_list = np.concatenate([s1.error_dict_list, s2.error_dict_list])[sort_idx]
    print("Creating copy for new stack")
    s = deepcopy(s1)
    s.fn_list = list(fn_list)
    s.ma_stack = ma_stack
    s.source = list(source)
    s.error = error
    s.error_dict_list = error_dict_list
    #This will use original stack outdir
    if not out_stack_fn:
        s.get_stack_fn()
    else:
        s.stack_fn = out_stack_fn
    s.get_date_list()
    #These will preserve trend from one stack if present in only one stack
    #Useful when combining surface topo and bed topo
    if s1.datestack and s2.datestack:
        s.compute_dt_stats()
    if save and s1.datestack:
        s.write_datestack()
    if s1.stats and s2.stats:
        s.compute_stats()
    if save and s1.stats:
        s.write_stats()
    if s1.trend and s2.trend:
        s.compute_trend()
    if save and s1.trend:
        s.write_trend()
    if save:
        s.savestack()
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randomfill(a):
    """Fill masked areas with random noise

    This is needed for any fft-based operations
    """
    a = checkma(a)
    #Draw noise matching the valid-data distribution, zeroed outside the mask
    noise = np.random.normal(a.mean(), a.std(), a.shape) * a.mask
    #Valid pixels keep their values (noise is zero there); masked pixels get noise
    return a.filled(0) + noise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nanfill(a, f_a, *args, **kwargs):
    """Fill masked areas with np.nan

    Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
    This will force filters to ignore nan, but causes adjacent pixels to be
    set to nan as well: http://projects.scipy.org/scipy/ticket/1155
    """
    a = checkma(a)
    ndv = a.fill_value
    #np.nan is float, so this fails for non-float input arrays
    result = f_a(a.filled(np.nan), *args, **kwargs)
    #fix_invalid's fill_value parameter doesn't seem to work; set it after
    out = np.ma.fix_invalid(result, copy=False)
    out.set_fill_value(ndv)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fast_median(a):
    """Fast median operation for masked array using 50th-percentile """
    a = checkma(a)
    #Fully-masked input has no median
    if a.count() == 0:
        return np.ma.masked
    return np.percentile(a.compressed(), 50)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mad(a, axis=None, c=1.4826, return_med=False):
    """Compute normalized median absolute difference

    Can also return median array, as this can be expensive, and often we
    want both med and nmad
    Note: 1.4826 = 1/0.6745

    Parameters:
        a: input array (converted to masked array)
        axis: axis for the reduction (None collapses the whole array)
        c: normalization constant for Gaussian-equivalent sigma
        return_med: if True, return (nmad, median) tuple
    """
    a = checkma(a)
    if a.count() > 0:
        if axis is None:
            med = fast_median(a)
            out = fast_median(np.fabs(a - med)) * c
        else:
            med = np.ma.median(a, axis=axis)
            #This is necessary for broadcasting
            med = np.expand_dims(med, axis=axis)
            out = np.ma.median(np.ma.fabs(a - med), axis=axis) * c
    else:
        out = np.ma.masked
        #Fix: med was previously left undefined here, raising NameError
        #when return_med=True on a fully-masked input
        med = np.ma.masked
    if return_med:
        out = (out, med)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calcperc(b, perc=(0.1,99.9)):
    """Calculate values at specified percentiles """
    b = checkma(b)
    #Fully-masked input: fall back to (0, 0)
    if b.count() == 0:
        low, high = 0, 0
    else:
        valid = b.compressed()
        low = np.percentile(valid, perc[0])
        high = np.percentile(valid, perc[1])
    return low, high
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calcperc_sym(b, perc=(0.1,99.9)):
    """ Get symmetrical percentile values
    Useful for determining clim centered on 0 for difference maps
    """
    #Largest magnitude of the two percentile values defines the symmetric limit
    lim = np.abs(calcperc(b, perc)).max()
    return -lim, lim
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iqr(b, perc=(25, 75)):
    """Inter-quartile range """
    b = checkma(b)
    q_low, q_high = calcperc(b, perc)
    #Return both bounds plus their spread
    return q_low, q_high, q_high - q_low
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iv(b, **kwargs):
    """Quick access to imview for interactive sessions """
    import matplotlib.pyplot as plt
    import imview.imviewer as imview
    b = checkma(b)
    #Hand kwargs straight through to imview's figure builder
    fig = plt.figure()
    imview.bma_fig(fig, b, **kwargs)
    plt.show()
    return fig
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def norm_shape(shape):
    '''
    Normalize numpy array shapes so they're always expressed as a tuple,
    even for one-dimensional shapes.
    Parameters
    shape - an int, or a tuple of ints
    Returns
    a shape tuple
    '''
    try:
        return (int(shape),)
    except TypeError:
        pass  # not a plain number
    try:
        return tuple(shape)
    except TypeError:
        pass  # not iterable either
    raise TypeError('shape must be an int, or a tuple of ints')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def localortho(lon, lat):
    """Create srs for local orthographic projection centered at lat, lon """
    #Build the PROJ4 string first, then load it into a fresh SRS
    proj4_str = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj4_str)
    return srs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geom2localortho(geom):
    """Convert existing geom to local orthographic projection

    Useful for local cartesian distance/area calculations
    """
    #Center the projection on the geometry centroid (in WGS84 lon/lat)
    centroid_x, centroid_y = geom.Centroid().GetPoint_2D()
    lon, lat, z = cT_helper(centroid_x, centroid_y, 0, geom.GetSpatialReference(), wgs_srs)
    #Work on a duplicate so the input geometry is untouched
    out_geom = geom_dup(geom)
    geom_transform(out_geom, localortho(lon, lat))
    return out_geom
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dd2dms(dd):
    """Convert decimal degrees to degrees, minutes, seconds """
    negative = dd < 0
    #Work in total seconds of arc on the absolute value
    total_seconds = abs(dd) * 3600
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    #Restore the sign on the degrees component only
    if negative:
        degrees = -degrees
    return degrees, minutes, seconds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dms2dd(d, m, s):
    """Convert degrees, minutes, seconds to decimal degrees

    Handles negative coordinates via the sign of `d`, including a float
    negative zero (e.g. -0.5 deg decomposes to d=-0.0, m=30, s=0).
    """
    import math
    #Fix: use copysign so d=-0.0 keeps its sign; the original `d < 0`
    #test lost the sign for coordinates between -1 and 0 degrees
    sign = math.copysign(1, d)
    dd = sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
    return dd
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dd2dm(dd):
    """Convert decimal to degrees, decimal minutes

    Returns (degrees, decimal_minutes, seconds); the 3-tuple arity is kept
    for backward compatibility with existing callers.
    """
    d, m, s = dd2dms(dd)
    #Fix: a second is 1/60 of a minute; the original divided by 3600,
    #which produced wrong decimal minutes
    m = m + float(s) / 60
    return d, m, s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mapToPixel(mX, mY, geoTransform):
    """Convert map coordinates to pixel coordinates based on geotransform

    Accepts float or NumPy arrays
    GDAL model used here - upper left corner of upper left pixel for mX, mY
    (and in GeoTransform)
    """
    mX = np.asarray(mX)
    mY = np.asarray(mY)
    rotation_free = (geoTransform[2] + geoTransform[4] == 0)
    if rotation_free:
        #North-up grid: direct linear scaling, shifted to pixel-center convention
        pX = (mX - geoTransform[0]) / geoTransform[1] - 0.5
        pY = (mY - geoTransform[3]) / geoTransform[5] - 0.5
    else:
        #General (rotated) geotransform: invert and apply
        pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
    return pX, pY
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pixelToMap(pX, pY, geoTransform):
    """Convert pixel coordinates to map coordinates based on geotransform

    Accepts float or NumPy arrays
    GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
    """
    #Fix: np.asarray can return the input array itself (no copy), so the
    #original in-place `+= 0.5` could silently modify the caller's array.
    #Add the 0.5 pixel-center offset out-of-place instead.
    pX = np.asarray(pX, dtype=float) + 0.5
    pY = np.asarray(pY, dtype=float) + 0.5
    mX, mY = applyGeoTransform(pX, pY, geoTransform)
    return mX, mY
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32):
    """Create a new GDAL Dataset in memory

    Useful for various applications that require a Dataset
    """
    #Adding 0.99 pads by ~1 pixel unless extent and res divide evenly
    #(a plain int() would round down)
    ns = int((extent[2] - extent[0]) / res + 0.99)
    nl = int((extent[3] - extent[1]) / res + 0.99)
    drv = gdal.GetDriverByName('MEM')
    m_ds = drv.Create('', ns, nl, 1, dtype)
    #North-up geotransform anchored at the upper-left corner
    m_ds.SetGeoTransform([extent[0], res, 0, extent[3], 0, -res])
    if srs is not None:
        m_ds.SetProjection(srs.ExportToWkt())
    return m_ds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copyproj(src_fn, dst_fn, gt=True):
    """Copy projection and geotransform from one raster file to another """
    in_ds = gdal.Open(src_fn, gdal.GA_ReadOnly)
    out_ds = gdal.Open(dst_fn, gdal.GA_Update)
    out_ds.SetProjection(in_ds.GetProjection())
    if gt:
        in_gt = np.array(in_ds.GetGeoTransform())
        in_dim = np.array([in_ds.RasterXSize, in_ds.RasterYSize])
        out_dim = np.array([out_ds.RasterXSize, out_ds.RasterYSize])
        #When dimensions differ, scale pixel size so dst_fn resolution is preserved
        if np.any(in_dim != out_dim):
            res_factor = in_dim / out_dim.astype(float)
            in_gt[[1, 5]] *= max(res_factor)
        out_ds.SetGeoTransform(in_gt)
    #Dereference to close datasets and flush changes to disk
    in_ds = None
    out_ds = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geom_transform(geom, t_srs):
    """Transform a geometry in place """
    s_srs = geom.GetSpatialReference()
    #Nothing to do if already in the target srs
    if s_srs.IsSame(t_srs):
        return
    geom.Transform(osr.CoordinateTransformation(s_srs, t_srs))
    geom.AssignSpatialReference(t_srs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shp_dict(shp_fn, fields=None, geom=True):
    """Get a dictionary for all features in a shapefile

    Optionally, specify fields

    Parameters:
        shp_fn: shapefile filename
        fields: field names to extract (default: all)
        geom: if True, include each feature's geometry under 'geom'
    Returns a list of dicts, one per feature.
    """
    from pygeotools.lib import timelib
    ds = ogr.Open(shp_fn)
    lyr = ds.GetLayer()
    nfeat = lyr.GetFeatureCount()
    print('%i input features\n' % nfeat)
    if fields is None:
        fields = shp_fieldnames(lyr)
    d_list = []
    for n, feat in enumerate(lyr):
        d = {}
        if geom:
            #Fix: use a separate name for the geometry; the original rebound
            #the `geom` flag with the geometry object, so a feature with a
            #null geometry disabled extraction for all subsequent features
            feat_geom = feat.GetGeometryRef()
            d['geom'] = feat_geom
        for f_name in fields:
            i = str(feat.GetField(f_name))
            if 'date' in f_name:
                # date_f = f_name
                #If d is float, clear off decimal before fuzzy date parse
                i = i.rsplit('.')[0]
                i = timelib.strptime_fuzzy(str(i))
            d[f_name] = i
        d_list.append(d)
    #d_list_sort = sorted(d_list, key=lambda k: k[date_f])
    return d_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lyr_proj(lyr, t_srs, preserve_fields=True):
    """Reproject an OGR layer

    Copies every feature of `lyr` into an in-memory layer in `t_srs`,
    optionally preserving attribute fields.  Returns the new in-memory
    DataSource (not the layer - see note at the bottom).
    """
    #Need to check t_srs
    s_srs = lyr.GetSpatialRef()
    cT = osr.CoordinateTransformation(s_srs, t_srs)
    #Do everything in memory
    drv = ogr.GetDriverByName('Memory')
    #Might want to save clipped, warped shp to disk?
    # create the output layer
    #drv = ogr.GetDriverByName('ESRI Shapefile')
    #out_fn = '/tmp/temp.shp'
    #if os.path.exists(out_fn):
    #    driver.DeleteDataSource(out_fn)
    #out_ds = driver.CreateDataSource(out_fn)
    out_ds = drv.CreateDataSource('out')
    outlyr = out_ds.CreateLayer('out', srs=t_srs, geom_type=lyr.GetGeomType())
    if preserve_fields:
        # add fields matching the input layer's schema
        inLayerDefn = lyr.GetLayerDefn()
        for i in range(0, inLayerDefn.GetFieldCount()):
            fieldDefn = inLayerDefn.GetFieldDefn(i)
            outlyr.CreateField(fieldDefn)
    # get the output layer's feature definition
    outLayerDefn = outlyr.GetLayerDefn()
    # loop through the input features
    inFeature = lyr.GetNextFeature()
    while inFeature:
        # get the input geometry
        geom = inFeature.GetGeometryRef()
        # reproject the geometry (in place)
        geom.Transform(cT)
        # create a new feature
        outFeature = ogr.Feature(outLayerDefn)
        # set the geometry and attribute
        outFeature.SetGeometry(geom)
        if preserve_fields:
            for i in range(0, outLayerDefn.GetFieldCount()):
                outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
        # add the feature to the shapefile
        outlyr.CreateFeature(outFeature)
        # destroy the features and get the next input feature
        inFeature = lyr.GetNextFeature()
    #NOTE: have to operate on ds here rather than lyr, otherwise segfault
    #(returning the layer alone lets the ds go out of scope and be freed)
    return out_ds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raster_shpclip(r_fn, shp_fn, extent='raster', bbox=False, pad=None, invert=False, verbose=False):
    """Clip an input raster by input polygon shapefile for given extent

    Parameters:
        r_fn: raster filename
        shp_fn: polygon shapefile filename
        extent: 'raster', 'shp', 'intersection', or 'union'
        bbox: if True, clip to bounding box only (skip polygon masking)
        pad: optional padding (map units) added around the output extent
        invert: if True, invert the polygon mask
        verbose: print extents for debugging
    Returns (masked array, warped in-memory dataset).
    """
    from pygeotools.lib import iolib
    from pygeotools.lib import warplib
    r_ds = iolib.fn_getds(r_fn)
    r_srs = get_ds_srs(r_ds)
    r_extent = ds_extent(r_ds)
    r_extent_geom = bbox2geom(r_extent)
    #NOTE: want to add spatial filter here to avoid reprojeting global RGI polygons, for example
    shp_ds = ogr.Open(shp_fn)
    lyr = shp_ds.GetLayer()
    shp_srs = lyr.GetSpatialRef()
    #Reproject the shapefile layer to the raster srs if needed
    if not r_srs.IsSame(shp_srs):
        shp_ds = lyr_proj(lyr, r_srs)
        lyr = shp_ds.GetLayer()
    #This returns xmin, ymin, xmax, ymax
    shp_extent = lyr_extent(lyr)
    shp_extent_geom = bbox2geom(shp_extent)
    #Define the output - can set to either raster or shp
    #Could accept as cl arg
    out_srs = r_srs
    if extent == 'raster':
        out_extent = r_extent
    elif extent == 'shp':
        out_extent = shp_extent
    elif extent == 'intersection':
        out_extent = geom_intersection([r_extent_geom, shp_extent_geom])
    elif extent == 'union':
        out_extent = geom_union([r_extent_geom, shp_extent_geom])
    else:
        print("Unexpected extent specification, reverting to input raster extent")
        #NOTE(review): this assigns the string 'raster', not r_extent -
        #presumably warplib.memwarp interprets the string; confirm, otherwise
        #this branch likely should assign r_extent
        out_extent = 'raster'
    #Add padding around shp_extent
    #Should implement buffer here
    if pad is not None:
        out_extent = pad_extent(out_extent, width=pad)
    print("Raster to clip: %s\nShapefile used to clip: %s" % (r_fn, shp_fn))
    if verbose:
        print(shp_extent)
        print(r_extent)
        print(out_extent)
    r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
    r = iolib.ds_getma(r_ds)
    #If bbox, return without clipping, otherwise, clip to polygons
    if not bbox:
        #Create binary mask from shp
        mask = shp2array(shp_fn, r_ds)
        if invert:
            mask = ~(mask)
        #Now apply the mask
        r = np.ma.array(r, mask=mask)
    #Return both the array and the dataset, needed for writing out
    #Should probably just write r to r_ds and return r_ds
    return r, r_ds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geom2shp(geom, out_fn, fields=False):
    """Write out a new ESRI Shapefile containing a single input geometry.

    Parameters
    ----------
    geom : ogr.Geometry
        Geometry to write; its assigned spatial reference is used for the layer
    out_fn : str
        Output shapefile filename (deleted first if it exists)
    fields : bool
        If True, add name/path/date/decyear attributes derived from out_fn
        (date fields are only populated if a datetime can be parsed from out_fn)
    """
    from pygeotools.lib import timelib
    driverName = "ESRI Shapefile"
    drv = ogr.GetDriverByName(driverName)
    if os.path.exists(out_fn):
        drv.DeleteDataSource(out_fn)
    out_ds = drv.CreateDataSource(out_fn)
    # Layer name is the output filename without directory or extension
    out_lyrname = os.path.splitext(os.path.split(out_fn)[1])[0]
    geom_srs = geom.GetSpatialReference()
    geom_type = geom.GetGeometryType()
    out_lyr = out_ds.CreateLayer(out_lyrname, geom_srs, geom_type)
    if fields:
        field_defn = ogr.FieldDefn("name", ogr.OFTString)
        field_defn.SetWidth(128)
        out_lyr.CreateField(field_defn)
        field_defn = ogr.FieldDefn("path", ogr.OFTString)
        field_defn.SetWidth(254)
        out_lyr.CreateField(field_defn)
        #field_defn = ogr.FieldDefn("date", ogr.OFTString)
        #This allows sorting by date
        field_defn = ogr.FieldDefn("date", ogr.OFTInteger)
        field_defn.SetWidth(32)
        out_lyr.CreateField(field_defn)
        field_defn = ogr.FieldDefn("decyear", ogr.OFTReal)
        field_defn.SetPrecision(8)
        field_defn.SetWidth(64)
        out_lyr.CreateField(field_defn)
    out_feat = ogr.Feature(out_lyr.GetLayerDefn())
    out_feat.SetGeometry(geom)
    if fields:
        #Hack to force output extension to tif, since out_fn is shp
        out_path = os.path.splitext(out_fn)[0] + '.tif'
        out_feat.SetField("name", os.path.split(out_path)[-1])
        out_feat.SetField("path", out_path)
        #Try to extract a date from input raster fn
        out_feat_date = timelib.fn_getdatetime(out_fn)
        if out_feat_date is not None:
            # Integer YYYYMMDD stamp allows sorting by date
            datestamp = int(out_feat_date.strftime('%Y%m%d'))
            #out_feat_date = int(out_feat_date.strftime('%Y%m%d%H%M'))
            out_feat.SetField("date", datestamp)
            decyear = timelib.dt2decyear(out_feat_date)
            out_feat.SetField("decyear", decyear)
    out_lyr.CreateFeature(out_feat)
    # Dereference to flush and close the datasource
    out_ds = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_outline(ds, t_srs=None, scale=1.0, simplify=False, convex=False):
    """Generate a polygon outline of unmasked values in an input raster.

    Attempts to reproduce the PostGIS Raster ST_MinConvexHull function.

    Parameters
    ----------
    ds : gdal.Dataset
        Input raster dataset
    t_srs : osr.SpatialReference, optional
        Output srs (default: dataset srs)
    scale : float
        Downsampling factor for the mask read; >1 speeds up the slow
        notmasked_edges call at the cost of outline fidelity
    simplify : bool
        Simplify the outline with a 2-pixel tolerance
    convex : bool
        Return the convex hull of the outline

    Returns
    -------
    geom : ogr.Geometry
        Polygon outline (empty polygon if no unmasked values are found)
    """
    gt = np.array(ds.GetGeoTransform())
    from pygeotools.lib import iolib
    a = iolib.ds_getma_sub(ds, scale=scale)
    #Create empty geometry
    geom = ogr.Geometry(ogr.wkbPolygon)
    #Check to make sure we have unmasked data
    if a.count() != 0:
        #Scale the gt for reduced resolution
        #The UL coords should remain the same, as any rounding will trim LR
        if (scale != 1.0):
            gt[1] *= scale
            gt[5] *= scale
        #Get srs
        ds_srs = get_ds_srs(ds)
        if t_srs is None:
            t_srs = ds_srs
        #Find the unmasked edges
        #Note: using only axis=0 from notmasked_edges will miss undercuts - see malib.get_edgemask
        #Better ways to do this - binary mask, sum (see numpy2stl)
        #edges0, edges1, edges = malib.get_edges(a)
        px = np.ma.notmasked_edges(a, axis=0)
        # coord = []
        #Combine edge arrays, reversing order and adding first point to complete polygon
        x = np.concatenate((px[0][1][::1], px[1][1][::-1], [px[0][1][0]]))
        #x = np.concatenate((edges[0][1][::1], edges[1][1][::-1], [edges[0][1][0]]))
        y = np.concatenate((px[0][0][::1], px[1][0][::-1], [px[0][0][0]]))
        #y = np.concatenate((edges[0][0][::1], edges[1][0][::-1], [edges[0][0][0]]))
        #Use np arrays for computing mapped coords
        mx, my = pixelToMap(x, y, gt)
        #Create wkt string
        geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
        geom = ogr.CreateGeometryFromWkt(geom_wkt)
        if not ds_srs.IsSame(t_srs):
            ct = osr.CoordinateTransformation(ds_srs, t_srs)
            geom.Transform(ct)
        #Make sure geometry has correct srs assigned
        geom.AssignSpatialReference(t_srs)
        if not geom.IsValid():
            # Attempt to repair (e.g. self-intersections) with a small simplify tolerance
            tol = gt[1] * 0.1
            geom = geom.Simplify(tol)
        #Need to get output units and extent for tolerance specification
        if simplify:
            #2 pixel tolerance
            tol = gt[1] * 2
            geom = geom.Simplify(tol)
        if convex:
            geom = geom.ConvexHull()
    else:
        print("No unmasked values found")
    return geom
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ds_cT(ds, x, y, xy_srs=wgs_srs):
    """Convert input point coordinates to map coordinates matching an input dataset.

    If xy_srs is None, the coordinates are assumed to already be in the
    dataset srs and are returned unchanged.
    """
    ds_srs = get_ds_srs(ds)
    mX, mY = x, y
    # Reproject only when a source srs is given and differs from the dataset srs
    if xy_srs is not None and not ds_srs.IsSame(xy_srs):
        mX, mY, _ = cT_helper(x, y, 0, xy_srs, ds_srs)
    return mX, mY
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def line2pts(geom, dl=None):
    """Given an input line geometry, generate points at a fixed interval.

    Useful for extracting profile data from a raster.

    Parameters
    ----------
    geom : ogr.Geometry
        Input line geometry (coordinates assumed to be in projected units;
        spacing is only meaningful for equidistant projections)
    dl : float, optional
        Point spacing in map units (default: line length / 1000)

    Returns
    -------
    (l, mX, mY) : (list, list, list)
        Cumulative distance along the line and map x/y coordinates of
        each generated point
    """
    #Extract list of (x,y) tuples at nodes
    nodes = geom.GetPoints()
    #print "%i nodes" % len(nodes)
    #Point spacing in map units
    if dl is None:
        nsteps=1000
        dl = geom.Length()/nsteps
    #This only works for equidistant projection!
    #l = np.arange(0, geom.Length(), dl)
    #Initialize empty lists
    l = []
    mX = []
    mY = []
    #Add first point to output lists
    l += [0]
    x = nodes[0][0]
    y = nodes[0][1]
    mX += [x]
    mY += [y]
    #Remainder: leftover distance carried across segment boundaries
    rem_l = 0
    #Previous length (initially 0)
    last_l = l[-1]
    #Loop through each line segment in the feature
    for i in range(0,len(nodes)-1):
        x1, y1 = nodes[i]
        x2, y2 = nodes[i+1]
        #Total length of segment
        tl = np.sqrt((x2-x1)**2 + (y2-y1)**2)
        #Number of dl steps we can fit in this segment
        #This returns floor
        steps = int((tl+rem_l)/dl)
        if steps > 0:
            # Per-step x/y increments along this segment
            dx = ((x2-x1)/tl)*dl
            dy = ((y2-y1)/tl)*dl
            # Offset of the first step to honor the carried remainder
            rem_x = rem_l*(dx/dl)
            rem_y = rem_l*(dy/dl)
            #Loop through each step and append to lists
            for n in range(1, steps+1):
                l += [last_l + (dl*n)]
                #Remove the existing remainder
                x = x1 + (dx*n) - rem_x
                y = y1 + (dy*n) - rem_y
                mX += [x]
                mY += [y]
            #Note: could just build up arrays of pX, pY for entire line, then do single z extraction
            #Update the remainder
            rem_l += tl - (steps * dl)
            last_l = l[-1]
        else:
            # Segment shorter than remaining spacing: accumulate and continue
            rem_l += tl
    return l, mX, mY
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_res_stats(ds_list, t_srs=None):
    """Return resolution statistics for an input dataset list.

    Parameters
    ----------
    ds_list : list of gdal.Dataset
        Input datasets
    t_srs : osr.SpatialReference, optional
        srs in which resolutions are computed (default: srs of first dataset)

    Returns
    -------
    (min, max, mean, med) : tuple
        Statistics over the x and y resolutions of all input datasets
    """
    if t_srs is None:
        t_srs = get_ds_srs(ds_list[0])
    # N x 2 array of (xres, yres) for each dataset
    res = np.array([get_res(ds, t_srs=t_srs) for ds in ds_list])
    # Renamed locals: previous version shadowed the min/max builtins
    res_min = np.min(res)
    res_max = np.max(res)
    res_mean = np.mean(res)
    res_med = np.median(res)
    return (res_min, res_max, res_mean, res_med)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_res(ds, t_srs=None, square=False):
    """Get GDAL Dataset raster resolution as [xres, yres].

    If t_srs is given and differs from the dataset srs, resolution is
    estimated by comparing the extent diagonal length in both projections.
    If square=True, both values are set to the mean of xres and yres.
    """
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    #This is Xres, Yres
    res = [gt[1], np.abs(gt[5])]
    if square:
        res = [np.mean(res), np.mean(res)]
    if t_srs is not None and not ds_srs.IsSame(t_srs):
        #NOTE: 'if True' always selects the diagonal approach; the
        #center-pixel branch below is currently unreachable
        if True:
            #This diagonal approach is similar to the approach in gdaltransformer.cpp
            #Bad news for large extents near the poles
            #ullr = get_ullr(ds, t_srs)
            #diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
            extent = ds_extent(ds, t_srs)
            diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2)
            # Ratio of projected diagonal to pixel diagonal gives a square resolution
            res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
            res = [res, res]
        else:
            #Compute from center pixel
            ct = osr.CoordinateTransformation(ds_srs, t_srs)
            pt = get_center(ds)
            #Transform center coordinates
            pt_ct = ct.TransformPoint(*pt)
            #Transform center + single pixel offset coordinates
            pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
            #Compute resolution in new units
            res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_center(ds, t_srs=None):
    """Get center coordinates of a GDAL Dataset, optionally in another srs.

    Note: this is the true raster midpoint, not the UL corner of the
    center pixel.
    """
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    # Map coordinates of the raster midpoint
    cx = gt[0] + gt[1] * ds.RasterXSize / 2.0
    cy = gt[3] + gt[5] * ds.RasterYSize / 2.0
    center = [cx, cy]
    #include t_srs.Validate() and t_srs.Fixup()
    if t_srs is not None and not ds_srs.IsSame(t_srs):
        ct = osr.CoordinateTransformation(ds_srs, t_srs)
        center = list(ct.TransformPoint(*center)[0:2])
    return center
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ds_srs(ds):
    """Return an osr.SpatialReference built from a GDAL Dataset's projection WKT."""
    srs = osr.SpatialReference()
    srs.ImportFromWkt(ds.GetProjectionRef())
    return srs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def srs_check(ds):
    """Return True when the dataset srs is properly defined.

    Requires both a non-default geotransform and a non-empty projection
    string.
    """
    default_gt = np.array((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
    has_gt = not np.all(np.array(ds.GetGeoTransform()) == default_gt)
    has_proj = ds.GetProjection() != ''
    return bool(has_gt and has_proj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ds_IsEmpty(ds):
    """Return True if the dataset band contains no valid pixels (e.g. after warp).

    GDAL's ComputeRasterMinMax raises ("Failed to compute min/max, no
    valid pixels found in sampling") when everything is nodata, so a
    failure is treated as empty; a constant band equal to the nodata
    value (or with no nodata defined) is also considered empty.
    """
    band = ds.GetRasterBand(1)
    try:
        bmin, bmax = band.ComputeRasterMinMax()
    except Exception:
        # No valid pixels could be sampled
        return True
    if bmin != bmax:
        return False
    # Constant band: empty if nodata is undefined or matches the constant
    ndv = band.GetNoDataValue()
    return ndv is None or bmin == ndv
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gt_corners(gt, nx, ny):
    """Return (ul, ll, ur, lr) corner coordinates for a geotransform and raster size."""
    xmin = gt[0]
    ymax = gt[3]
    # gt[5] is typically negative, so this walks down from the top edge
    xmax = xmin + gt[1] * nx
    ymin = ymax + gt[5] * ny
    ul = [xmin, ymax]
    ll = [xmin, ymin]
    ur = [xmax, ymax]
    lr = [xmax, ymin]
    return ul, ll, ur, lr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ds_geom(ds, t_srs=None):
    """Return the dataset bbox envelope as a polygon geometry.

    The polygon traces the outer corners of the raster (pixel edges, not
    centers), optionally transformed to t_srs.
    """
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    if t_srs is None:
        t_srs = ds_srs
    ns = ds.RasterXSize
    nl = ds.RasterYSize
    # Closed ring of pixel coordinates tracing the raster boundary
    x = np.array([0, ns, ns, 0, 0], dtype=float)
    y = np.array([0, 0, nl, nl, 0], dtype=float)
    #Note: pixelToMap adds 0.5 to input coords, need to account for this here
    x -= 0.5
    y -= 0.5
    mx, my = pixelToMap(x, y, gt)
    geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
    geom = ogr.CreateGeometryFromWkt(geom_wkt)
    geom.AssignSpatialReference(ds_srs)
    if not ds_srs.IsSame(t_srs):
        # presumably transforms geom in place (return value ignored) - confirm
        geom_transform(geom, t_srs)
    return geom
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geom_wh(geom):
    """Compute width and height of a geometry in projected units.

    Parameters
    ----------
    geom : ogr.Geometry
        Input geometry

    Returns
    -------
    (w, h) : tuple of float
        Width (x extent) and height (y extent)
    """
    # GetEnvelope returns (xmin, xmax, ymin, ymax)
    e = geom.GetEnvelope()
    # Bugfix: the original assignments were swapped relative to the
    # envelope ordering (width was labeled h and vice versa)
    w = e[1] - e[0]
    h = e[3] - e[2]
    return w, h
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing='hillshade', returnma=False, computeEdges=False):
    """Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input.

    Untested, work in progress placeholder.
    Should only need to specify res, can calculate local gt, cartesian srs.
    Note: computeEdges is currently accepted but unused.
    """
    if ds is None:
        # Bugfix: forward the caller-supplied srs (was hardcoded to None)
        ds = mem_ds(res, extent, srs=srs, dtype=gdal.GDT_Float32)
    else:
        # Copy so we don't clobber the caller's dataset
        ds = mem_ds_copy(ds)
    b = ds.GetRasterBand(1)
    b.WriteArray(ma)
    out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_xy_ma(bma, gt, stride=1, origmask=True, newmask=None):
    """Return masked arrays of x and y map coordinates for an input array and geotransform.

    Parameters
    ----------
    bma : np.ma.MaskedArray
        2D input array
    gt : sequence
        GDAL-style geotransform
    stride : int
        Sample every `stride` pixels in both dimensions
    origmask : bool
        Apply the input array's mask to the output coordinate arrays
    newmask : np.ndarray, optional
        Full-resolution mask to use instead of the input array's mask

    Returns
    -------
    (mX, mY) : tuple of np.ma.MaskedArray
        Map coordinate grids, masked per origmask/newmask
    """
    pX = np.arange(0, bma.shape[1], stride)
    pY = np.arange(0, bma.shape[0], stride)
    psamp = np.meshgrid(pX, pY)
    mX, mY = pixelToMap(psamp[0], psamp[1], gt)
    mask = None
    if origmask:
        # Bugfix: stride both axes so the mask shape matches the strided
        # mX/mY grids (previously only axis 0 was strided)
        mask = np.ma.getmaskarray(bma)[::stride, ::stride]
    if newmask is not None:
        mask = newmask[::stride, ::stride]
    mX = np.ma.array(mX, mask=mask, fill_value=0)
    mY = np.ma.array(mY, mask=mask, fill_value=0)
    return mX, mY
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_xy_1D(ds, stride=1, getval=False):
    """Return 1D arrays of x and y map coordinates for an input GDAL Dataset."""
    gt = ds.GetGeoTransform()
    # Pixel index vectors, decimated by stride
    pX = np.arange(0, ds.RasterXSize, stride)
    pY = np.arange(0, ds.RasterYSize, stride)
    # Map x varies along columns; map y varies along rows
    mX, _ = pixelToMap(pX, pY[0], gt)
    _, mY = pixelToMap(pX[0], pY, gt)
    return mX, mY
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_xy_grids(ds, stride=1, getval=False):
    """Return 2D arrays of x and y map coordinates for an input GDAL Dataset."""
    gt = ds.GetGeoTransform()
    pX = np.arange(0, ds.RasterXSize, stride)
    pY = np.arange(0, ds.RasterYSize, stride)
    # 2D pixel index grids
    pXgrid, pYgrid = np.meshgrid(pX, pY)
    return pixelToMap(pXgrid, pYgrid, gt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fitPlaneSVD(XYZ):
    """Fit a plane z = c0*x + c1*y + c2 to input points using SVD.

    Solves the homogeneous system [X Y Z 1] . B = 0 (implicit plane
    b0*X + b1*Y + b2*Z + b3 = 0) and converts to explicit slope/intercept
    form.
    """
    npts = XYZ.shape[0]
    # Augment the points with a column of ones for the constant term
    A = np.hstack([XYZ, np.ones((npts, 1))])
    # The right-singular vector for the smallest singular value spans the
    # (approximate) null space of A
    _, _, vt = np.linalg.svd(A, 0)
    B = np.array(vt[3, :])
    # Rearrange to z = coeff[0]*x + coeff[1]*y + coeff[2]
    return -B[[0, 1, 3]] / B[2]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fitPlaneLSQ(XYZ):
    """Fit a plane z = c0*x + c1*y + c2 to input points using least squares."""
    npts = XYZ.shape[0]
    # Design matrix [x, y, 1]
    G = np.ones((npts, 3))
    G[:, 0:2] = XYZ[:, 0:2]
    # Solve G @ coeff = z in the least-squares sense
    coeff = np.linalg.lstsq(G, XYZ[:, 2], rcond=None)[0]
    return coeff
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ds_fitplane(ds):
    """Fit a plane to the unmasked values of a GDAL Dataset."""
    from pygeotools.lib import iolib
    # Extract masked array and geotransform, then delegate to ma_fitplane
    return ma_fitplane(iolib.ds_getma(ds), ds.GetGeoTransform())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getUTMzone(geom):
    """Determine the UTM zone string (e.g. '10N') for an input geometry.

    Uses the geometry centroid, and applies the Norway and Svalbard
    exceptions to the standard 6-degree zones.
    """
    #If geom has srs properly defined, can do this
    #geom.TransformTo(wgs_srs)
    lon, lat = geom.Centroid().GetPoint_2D()
    # Wrap longitude into [-180, 180)
    lon180 = (lon + 180) - np.floor((lon + 180) / 360) * 360 - 180
    zonenum = int(np.floor((lon180 + 180) / 6) + 1)
    # Norway exception (zone 32 is widened)
    if 56.0 <= lat < 64.0 and 3.0 <= lon180 < 12.0:
        zonenum = 32
    # Svalbard exceptions (zones 32, 34, 36 are skipped)
    elif 72.0 <= lat < 84.0:
        if 0.0 <= lon180 < 9.0:
            zonenum = 31
        elif 9.0 <= lon180 < 21.0:
            zonenum = 33
        elif 21.0 <= lon180 < 33.0:
            zonenum = 35
        elif 33.0 <= lon180 < 42.0:
            zonenum = 37
    hemisphere = 'N' if lat >= 0 else 'S'
    return str(zonenum) + hemisphere
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_proj(geom, proj_list=None):
    """Determine the best projection (srs) for an input geometry.

    Walks a cascading list of preferred regional projections and returns
    the first whose bbox intersects the geometry; falls back to the local
    UTM zone.
    """
    if proj_list is None:
        proj_list = gen_proj_list()
    # First user-defined projection box that intersects wins
    for pbox in proj_list:
        if pbox.geom.Intersects(geom):
            return pbox.srs
    # No regional match: use UTM
    return getUTMsrs(geom)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_proj_list():
    """Create an ordered list of preferred regional projections.

    Order matters: earlier entries take precedence in get_proj().
    Eventually, just read this in from a text file.
    """
    # (bbox [xmin, xmax, ymin, ymax], EPSG code); note Alaska spans -180/180
    regions = [
        ([-180, -130, 51.35, 71.35], 3338),   # Alaska
        ([150, 175, -80, -70], 3294),         # Transantarctic Mountains
        ([-180, 180, 58, 82], 3413),          # Greenland
        ([-180, 180, -90, -58], 3031),        # Antarctica
        ([-180, 180, 60, 90], 3413),          # Arctic
    ]
    return [ProjBox(bbox, epsg) for bbox, epsg in regions]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xy2geom(x, y, t_srs=None):
    """Convert x and y point coordinates to an ogr point geometry.

    If t_srs is given and differs from WGS84, the point is transformed
    from t_srs into WGS84 lat/lon.
    """
    geom_wkt = 'POINT({0} {1})'.format(x, y)
    geom = ogr.CreateGeometryFromWkt(geom_wkt)
    if t_srs is not None and not wgs_srs.IsSame(t_srs):
        ct = osr.CoordinateTransformation(t_srs, wgs_srs)
        geom.Transform(ct)
        #NOTE(review): coordinates are now in wgs_srs, but the original
        #t_srs is assigned here - looks like this should be wgs_srs; confirm
        geom.AssignSpatialReference(t_srs)
    return geom
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dem_mosaic_cmd(fn_list, o, fn_list_txt=None, tr=None, t_srs=None, t_projwin=None, georef_tile_size=None, threads=None, tile=None, stat=None):
    """Assemble an ASP dem_mosaic command as a list of argument strings.

    Useful for spawning many single-threaded mosaicking processes; run
    the returned list with subprocess.
    """
    cmd = ['dem_mosaic']
    # Output prefix (default 'mos')
    if o is None:
        o = 'mos'
    cmd += ['-o', o]
    # Thread count (default: all available cores)
    if threads is None:
        from pygeotools.lib import iolib
        threads = iolib.cpu_count()
    cmd += ['--threads', threads]
    # Optional output grid parameters
    if tr is not None:
        cmd += ['--tr', tr]
    if t_srs is not None:
        # Quote the proj4 string so it survives shell handling
        cmd += ['--t_srs', '"%s"' % t_srs.ExportToProj4()]
    if t_projwin is not None:
        cmd.append('--t_projwin')
        cmd.extend(t_projwin)
        cmd.append('--force-projwin')
    if tile is not None:
        # Tile-based processing (not yet fully implemented)
        cmd.append('--tile-index')
        cmd.append(tile)
    if georef_tile_size is not None:
        cmd += ['--georef-tile-size', georef_tile_size]
    # Statistic selection ('wmean' is the dem_mosaic default, so no flag)
    if stat is not None:
        if stat == 'wmean':
            stat = None
        else:
            cmd.append('--%s' % stat.replace('index', ''))
        if stat in ('lastindex', 'firstindex', 'medianindex'):
            # This writes out the index map (e.g. to -last.tif) by default
            cmd.append('--save-index-map')
            # Avoid an ndv that collides with 0-based DEM indices
            cmd += ['--output-nodata-value', '-9999']
    # Inputs: a text file listing DEMs (avoids long bash command issues),
    # or the filenames inline
    if fn_list_txt is not None:
        if os.path.exists(fn_list_txt):
            cmd += ['-l', fn_list_txt]
        else:
            print("Could not find input text file containing list of inputs")
    else:
        cmd.extend(fn_list)
    # dem_mosaic wants plain strings
    return [str(i) for i in cmd]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_rs_alg(r):
    """Map a resampling method name to the corresponding GDAL constant.

    Exits with an error message for unrecognized names.
    Note: GRA_CubicSpline created huge block artifacts for the
    St. Helen's compute_dh WV cases; stick with CubicSpline for both
    upsampling/downsampling for now.
    """
    # Nearest and Mode respect nodata when downsampling (Mode is very slow)
    alg_map = {
        'near': gdal.GRA_NearestNeighbour,
        'bilinear': gdal.GRA_Bilinear,
        'cubic': gdal.GRA_Cubic,
        'cubicspline': gdal.GRA_CubicSpline,
        'average': gdal.GRA_Average,
        'lanczos': gdal.GRA_Lanczos,
        'mode': gdal.GRA_Mode,
    }
    if r not in alg_map:
        sys.exit("Invalid resampling method")
    return alg_map[r]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_srs(t_srs, src_ds_list=None):
    """Parse arbitrary input t_srs

    Parameters
    ----------
    t_srs : str or gdal.Dataset or filename or osr.SpatialReference
        Arbitrary input t_srs: 'first'/'last' select from src_ds_list;
        strings may be an EPSG code ('EPSG:32610'), a proj4 string, WKT,
        or a path to a raster whose srs is used
    src_ds_list : list of gdal.Dataset objects, optional
        Needed if specifying 'first' or 'last'

    Returns
    -------
    t_srs : osr.SpatialReference() object
        Output spatial reference system (None if both inputs are None or
        the input cannot be interpreted)
    """
    if t_srs is None and src_ds_list is None:
        print("Input t_srs and src_ds_list are both None")
    else:
        # Default to the srs of the first input dataset
        if t_srs is None:
            t_srs = 'first'
        if t_srs == 'first' and src_ds_list is not None:
            t_srs = geolib.get_ds_srs(src_ds_list[0])
        elif t_srs == 'last' and src_ds_list is not None:
            t_srs = geolib.get_ds_srs(src_ds_list[-1])
        #elif t_srs == 'source':
        #    t_srs = None
        elif isinstance(t_srs, osr.SpatialReference):
            # Already parsed
            pass
        elif isinstance(t_srs, gdal.Dataset):
            t_srs = geolib.get_ds_srs(t_srs)
        elif isinstance(t_srs, str) and os.path.exists(t_srs):
            # Path to an existing raster: use its srs
            t_srs = geolib.get_ds_srs(gdal.Open(t_srs))
        elif isinstance(t_srs, str):
            temp = osr.SpatialReference()
            if 'EPSG' in t_srs.upper():
                epsgcode = int(t_srs.split(':')[-1])
                temp.ImportFromEPSG(epsgcode)
            elif 'proj' in t_srs:
                temp.ImportFromProj4(t_srs)
            else:
                #Assume the user knows what they are doing
                temp.ImportFromWkt(t_srs)
            t_srs = temp
        else:
            t_srs = None
    return t_srs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_res(res, src_ds_list=None, t_srs=None):
    """Parse arbitrary input res

    Parameters
    ----------
    res : str or gdal.Dataset or filename or float
        Arbitrary input res: one of 'first', 'last', 'min', 'max',
        'mean', 'med', 'common_scale_factor', 'source', a dataset or
        raster path (its resolution is used), or a numeric value
    src_ds_list : list of gdal.Dataset objects, optional
        Needed if specifying 'first' or 'last'
    t_srs : osr.SpatialReference() object
        Projection for res calculations, optional

    Returns
    -------
    res : float
        Output resolution
        None if source resolution should be preserved
    """
    #Default to using first t_srs for res calculations
    #Assumes src_ds_list is not None
    t_srs = parse_srs(t_srs, src_ds_list)
    #Valid options for res
    res_str_list = ['first', 'last', 'min', 'max', 'mean', 'med', 'common_scale_factor']
    #Compute output resolution in t_srs
    if res in res_str_list and src_ds_list is not None:
        #Returns min, max, mean, med
        res_stats = geolib.get_res_stats(src_ds_list, t_srs=t_srs)
        if res == 'first':
            res = geolib.get_res(src_ds_list[0], t_srs=t_srs, square=True)[0]
        elif res == 'last':
            res = geolib.get_res(src_ds_list[-1], t_srs=t_srs, square=True)[0]
        elif res == 'min':
            res = res_stats[0]
        elif res == 'max':
            res = res_stats[1]
        elif res == 'mean':
            res = res_stats[2]
        elif res == 'med':
            res = res_stats[3]
        elif res == 'common_scale_factor':
            #Determine res to upsample min and downsample max by constant factor
            res = np.sqrt(res_stats[1]/res_stats[0]) * res_stats[0]
    elif res == 'source':
        # Preserve source resolution
        res = None
    elif isinstance(res, gdal.Dataset):
        res = geolib.get_res(res, t_srs=t_srs, square=True)[0]
    elif isinstance(res, str) and os.path.exists(res):
        # Path to an existing raster: use its resolution
        res = geolib.get_res(gdal.Open(res), t_srs=t_srs, square=True)[0]
    else:
        # Anything else must be numeric (raises ValueError otherwise)
        res = float(res)
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_extent(extent, src_ds_list=None, t_srs=None):
    """Parse arbitrary input extent

    Parameters
    ----------
    extent : str or gdal.Dataset or filename or list of float
        Arbitrary input extent: one of 'first', 'last', 'intersection',
        'union', 'source', a dataset or raster path (its extent is used),
        a sequence of four floats, or a space-delimited string of floats
    src_ds_list : list of gdal.Dataset objects, optional
        Needed if specifying 'first', 'last', 'intersection', or 'union'
    t_srs : osr.SpatialReference() object, optional
        Projection for extent calculations (unlike parse_res, only
        parsed when explicitly provided)

    Returns
    -------
    extent : list of float
        Output extent [xmin, ymin, xmax, ymax]
        None if source extent should be preserved
    """
    #Default to using first t_srs for extent calculations
    if t_srs is not None:
        t_srs = parse_srs(t_srs, src_ds_list)
    #Valid strings
    extent_str_list = ['first', 'last', 'intersection', 'union']
    if extent in extent_str_list and src_ds_list is not None:
        # Single input: intersection/union degenerate to the source extent
        if len(src_ds_list) == 1 and (extent == 'intersection' or extent == 'union'):
            extent = None
        elif extent == 'first':
            extent = geolib.ds_geom_extent(src_ds_list[0], t_srs=t_srs)
            #extent = geolib.ds_extent(src_ds_list[0], t_srs=t_srs)
        elif extent == 'last':
            extent = geolib.ds_geom_extent(src_ds_list[-1], t_srs=t_srs)
            #extent = geolib.ds_extent(src_ds_list[-1], t_srs=t_srs)
        elif extent == 'intersection':
            #By default, compute_intersection takes ref_srs from ref_ds
            extent = geolib.ds_geom_intersection_extent(src_ds_list, t_srs=t_srs)
            if len(src_ds_list) > 1 and extent is None:
                sys.exit("Input images do not intersect")
        elif extent == 'union':
            #Need to clean up union t_srs handling
            extent = geolib.ds_geom_union_extent(src_ds_list, t_srs=t_srs)
    elif extent == 'source':
        # Preserve source extent
        extent = None
    elif isinstance(extent, gdal.Dataset):
        extent = geolib.ds_geom_extent(extent, t_srs=t_srs)
    elif isinstance(extent, str) and os.path.exists(extent):
        # Path to an existing raster: use its extent
        extent = geolib.ds_geom_extent(gdal.Open(extent), t_srs=t_srs)
    elif isinstance(extent, (list, tuple, np.ndarray)):
        extent = list(extent)
    else:
        # Fall back to parsing a space-delimited string of floats
        extent = [float(i) for i in extent.split(' ')]
    return extent
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def memwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, dst_ndv=0):
    """Helper for warping multiple input GDAL Datasets to in-memory datasets."""
    # Delegate to the generic warp driver with the in-memory warp backend
    return warp_multi(src_ds_list, res, extent, t_srs, r,
                      warptype=memwarp, verbose=verbose, dst_ndv=dst_ndv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def memwarp_multi_fn(src_fn_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, dst_ndv=0):
    """Helper for in-memory warp of multiple input filenames."""
    # Fail early if any input file is missing
    if not iolib.fn_list_check(src_fn_list):
        sys.exit('Missing input file(s)')
    # Open all inputs read-only, then warp in memory
    src_ds_list = [gdal.Open(fn, gdal.GA_ReadOnly) for fn in src_fn_list]
    return memwarp_multi(src_ds_list, res, extent, t_srs, r,
                         verbose=verbose, dst_ndv=dst_ndv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diskwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, outdir=None, dst_ndv=None):
    """Helper for warping multiple input GDAL Datasets to files on disk."""
    # Delegate to the generic warp driver with the on-disk warp backend
    return warp_multi(src_ds_list, res, extent, t_srs, r,
                      verbose=verbose, warptype=diskwarp, outdir=outdir, dst_ndv=dst_ndv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diskwarp_multi_fn(src_fn_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, outdir=None, dst_ndv=None):
    """Helper for disk warp of multiple input filenames."""
    # Fail early if any input file is missing
    if not iolib.fn_list_check(src_fn_list):
        sys.exit('Missing input file(s)')
    # Open all inputs read-only, then warp to disk
    src_ds_list = [gdal.Open(fn, gdal.GA_ReadOnly) for fn in src_fn_list]
    return diskwarp_multi(src_ds_list, res, extent, t_srs, r,
                          verbose=verbose, outdir=outdir, dst_ndv=dst_ndv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeout(ds, outfn):
    """Write a GDAL Dataset to disk as GeoTiff.

    Note: deprecated - prefer the diskwarp functions when writing to
    disk, which avoid an unnecessary CreateCopy.
    """
    print("Writing out %s" % outfn)
    #This may have issues if outfn already exists and the mem ds has different dimensions/res
    out_ds = iolib.gtif_drv.CreateCopy(outfn, ds, 0, options=iolib.gdal_opt)
    # Dereference to flush and close
    out_ds = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLocalTime(utc_dt, tz):
    """Convert a UTC datetime to the given timezone name (e.g. 'US/Pacific')."""
    import pytz
    # Attach UTC tzinfo, then convert to the requested zone
    return utc_dt.replace(tzinfo=pytz.utc).astimezone(pytz.timezone(tz))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strptime_fuzzy(s):
    """Fuzzy date string parsing via dateutil.

    Note: missing fields default to the current date - a bare year in the
    input yields the current month and day.
    """
    import dateutil.parser
    return dateutil.parser.parse(str(s), fuzzy=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fn_getdatetime_list(fn):
    """Extract all datetime strings from an input filename.

    Tries a cascade of timestamp patterns, from most to least specific:
    YYYYMMDD_HHMM / YYYYMMDDTHHMM, YYYYMMDDHHMM, YYYYMMDD, decimal year,
    YYYY, and USGS archive DDmonYY.

    NOTE(review): the branch structure below looks garbled - when one of
    the first three patterns matches, control reaches the USGS
    strptime('%d%b%y') branch, which would raise ValueError for those
    strings, and the final strptime_fuzzy block appears unreachable.
    The USGS branch also assigns a single datetime instead of a list,
    making the return type inconsistent. Verify against upstream
    pygeotools timelib.
    """
    #Want to split last component
    fn = os.path.split(os.path.splitext(fn)[0])[-1]
    import re
    #WV01_12JUN152223255-P1BS_R1C1-102001001B3B9800__WV01_12JUN152224050-P1BS_R1C1-102001001C555C00-DEM_4x.tif
    #Need to parse above with month name
    #Note: made this more restrictive to avoid false matches:
    #'20130304_1510_1030010020770600_1030010020CEAB00-DEM_4x'
    #This is a problem, b/c 2015/17/00:
    #WV02_20130315_10300100207D5600_1030010020151700
    #This code should be obsolete before 2019
    #Assume new filenames
    #fn = fn[0:13]
    #Use cascading re find to pull out timestamps
    #Note: Want to be less restrictive here - could have a mix of YYYYMMDD_HHMM, YYYYMMDD and YYYY in filename
    #Should probably search for all possibilities, then prune
    #NOTE: these don't include seconds in the time
    #NOTE: could have 20130304_1510__20130304__whatever in filename
    #The current approach will only catch the first datetime
    dstr = None
    out = None
    #20180101_1200 or 20180101T1200
    dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])[_T](?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]', fn)
    #201801011200
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])(?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]', fn)
    #20180101
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])(?:$|_|-)', fn)
    #This should pick up dates separated by a dash
    #dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])', fn)
    #2018.609990
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9]\.[0-9][0-9][0-9]*(?:$|_|-)', fn)
        # Strip peripheral separators picked up by the regex anchors
        dstr = [d.lstrip('_').rstrip('_') for d in dstr]
        dstr = [d.lstrip('-').rstrip('-') for d in dstr]
        out = [decyear2dt(float(s)) for s in dstr]
        # Reset so later branches don't re-process decimal-year matches
        dstr = None
    #2018
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:$|_|-)', fn)
    #This is for USGS archive filenames
    if not dstr:
        dstr = re.findall(r'[0-3][0-9][a-z][a-z][a-z][0-9][0-9]', fn)
    #This is USGS archive format
    #NOTE(review): [0] returns a single datetime, not a list like the other branches
    if dstr:
        out = [datetime.strptime(s, '%d%b%y') for s in dstr][0]
        dstr = None
    #NOTE(review): dstr was just set to None above, so this block appears unreachable
    if dstr:
        #This is a hack to remove peripheral underscores and dashes
        dstr = [d.lstrip('_').rstrip('_') for d in dstr]
        dstr = [d.lstrip('-').rstrip('-') for d in dstr]
        #This returns an empty list of nothing is found
        out = [strptime_fuzzy(s) for s in dstr]
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_t_factor(t1, t2):
    """Return the interval between two datetimes as decimal years.

    Returns None when either input is missing or the two times are equal.
    """
    if t1 is None or t2 is None or t1 == t2:
        return None
    one_year = timedelta(days=365.25)
    return abs((t2 - t1).total_seconds() / one_year.total_seconds())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_fn_list(fn_list):
    """Sort a list of filenames by the datetime embedded in each name."""
    # Pair each filename with its datetime, sort by (datetime, filename)
    paired = sorted(zip(get_dt_list(fn_list), fn_list))
    return [fn for (_, fn) in paired]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_repeat_dt(dt_list, offset_s=0.001):
    """Nudge duplicate timestamps forward so the sequence is strictly increasing.

    Needed for xarray interp, which expects monotonically increasing times.
    Operates on (and returns) the input array in place.
    """
    bump = timedelta(seconds=offset_s)
    dup = (np.diff(dt_list) == timedelta(0))
    while np.any(dup):
        # Shift the second member of each duplicate pair forward
        dt_list[dup.nonzero()[0] + 1] += bump
        dup = (np.diff(dt_list) == timedelta(0))
    return dt_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dt_list(fn_list):
    """Return an array of datetime objects extracted from each filename."""
    return np.array(list(map(fn_getdatetime, fn_list)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_closest_dt_idx(dt, dt_list):
    """Return the index of the dt_list entry closest in time to dt."""
    from pygeotools.lib import malib
    masked = malib.checkma(dt_list, fix=False)
    # argmin over the absolute time differences
    return np.abs(dt - masked).argmin()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mean_date(dt_list):
    """Calculate the mean of a list of datetimes."""
    # Work relative to the earliest time so we only sum timedeltas
    t0 = min(dt_list)
    total = sum((dt - t0 for dt in dt_list), timedelta())
    return t0 + total / len(dt_list)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def median_date(dt_list):
    """Calculate median datetime from datetime list.

    NOTE(review): the list is indexed directly without sorting (the sort is
    commented out), so the result is only the true median when dt_list is
    already time-ordered - confirm callers.
    """
    #dt_list_sort = sorted(dt_list)
    #Bugfix: use integer division - len()/2 yields a float in Python 3,
    #which raises TypeError when used as a list index
    idx = len(dt_list) // 2
    if len(dt_list) % 2 == 0:
        #Even count: average the two middle entries
        md = mean_date([dt_list[idx-1], dt_list[idx]])
    else:
        md = dt_list[idx]
    return md
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dt_cluster(dt_list, dt_thresh=16.0):
    """Find clusters of similar datetimes within datetime list """ |
    #Work in ordinal (float day) space; dt_thresh is presumably in days - TODO confirm
    if not isinstance(dt_list[0], float):
        o_list = dt2o(dt_list)
    else:
        o_list = dt_list
    o_list_sort = np.sort(o_list)
    o_list_sort_idx = np.argsort(o_list)
    d = np.diff(o_list_sort)
    #These are indices of breaks
    #Add one so each b starts a cluster
    b = np.nonzero(d > dt_thresh)[0] + 1
    #Add one to shape so we include final index
    b = np.hstack((0, b, d.shape[0] + 1))
    f_list = []
    for i in range(len(b)-1):
        #Need to subtract 1 here to give cluster bounds
        b_idx = [b[i], b[i+1]-1]
        b_dt = o_list_sort[b_idx]
        #These should be identical if input is already sorted
        b_idx_orig = o_list_sort_idx[b_idx]
        #NOTE(review): arange excludes the stop value, so 'all_*' entries
        #appear to omit the final member of each cluster - confirm intent
        all_idx = np.arange(b_idx[0], b_idx[1])
        all_sort = o_list_sort[all_idx]
        #These should be identical if input is already sorted
        all_idx_orig = o_list_sort_idx[all_idx]
        #NOTE(review): 'dict' shadows the builtin; harmless here but fragile
        dict = {}
        dict['break_indices'] = b_idx_orig
        dict['break_ts_o'] = b_dt
        dict['break_ts_dt'] = o2dt(b_dt)
        dict['all_indices'] = all_idx_orig
        dict['all_ts_o'] = all_sort
        dict['all_ts_dt'] = o2dt(all_sort)
        f_list.append(dict)
    return f_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dt2decyear(dt):
    """Convert a datetime to a decimal year (e.g. 2013.25)."""
    y = dt.year
    year_start = datetime(year=y, month=1, day=1)
    next_year_start = datetime(year=y + 1, month=1, day=1)
    # Fraction of the year elapsed, in epoch seconds
    elapsed = sinceEpoch(dt) - sinceEpoch(year_start)
    duration = sinceEpoch(next_year_start) - sinceEpoch(year_start)
    return y + elapsed / duration
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decyear2dt(t):
    """Convert a decimal year (e.g. 2013.25) to a datetime."""
    year = int(t)
    frac = t - year
    year_start = datetime(year, 1, 1)
    # Year length in seconds handles leap years correctly
    year_len_s = (year_start.replace(year=year + 1) - year_start).total_seconds()
    return year_start + timedelta(seconds=year_len_s * frac)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dt2jd(dt):
    """Convert a datetime to a Julian day number (integer, date only)."""
    # Fliegel & Van Flandern integer algorithm (proleptic Gregorian calendar)
    shift = (14 - dt.month) // 12
    yy = dt.year + 4800 - shift
    mm = dt.month + 12 * shift - 3
    jdn = dt.day + (153 * mm + 2) // 5 + 365 * yy
    jdn += yy // 4 - yy // 100 + yy // 400 - 32045
    return jdn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jd2dt(jd):
    """Convert julian date to datetime.

    Accepts a (possibly fractional) julian date; the fractional part becomes
    the time of day (an integer JD corresponds to 12:00 noon).
    """
    n = int(round(float(jd)))
    a = n + 32044
    b = (4*a + 3)//146097
    c = a - (146097*b)//4
    d = (4*c + 3)//1461
    e = c - (1461*d)//4
    m = (5*e + 2)//153
    day = e + 1 - (153*m + 2)//5
    month = m + 3 - 12*(m//10)
    #Bugfix: use integer division here - m/10 yields a float in Python 3,
    #making year a float, and datetime() rejects a float year with TypeError
    year = 100*b + d - 4800 + m//10
    #Fractional day relative to the noon-based JD epoch
    tfrac = 0.5 + float(jd) - n
    tfrac_s = 86400.0 * tfrac
    minfrac, hours = np.modf(tfrac_s / 3600.)
    secfrac, minutes = np.modf(minfrac * 60.)
    microsec, seconds = np.modf(secfrac * 60.)
    return datetime(year, month, day, int(hours), int(minutes), int(seconds), int(microsec*1E6))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gps2dt(gps_week, gps_ms):
    """Convert a GPS week number and milliseconds-of-week to a datetime."""
    # GPS epoch is 1980-01-06 00:00:00
    epoch = datetime(1980, 1, 6)
    return epoch + timedelta(weeks=gps_week) + timedelta(milliseconds=gps_ms)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disco_loop(opc, version, queue, real_out, dup_lines=False, show_bytes=False):
    """Disassemble a queue of code objects, appending any nested code object
    found in co_consts back onto the queue.

    Discovery order is first-encountered, which is not the definition-first
    order an assembler would want, but this iterative approach avoids
    recursion and uses less memory overall.
    """
    while queue:
        code = queue.popleft()
        if code.co_name not in ('<module>', '?'):
            real_out.write("\n" + format_code_info(code, version) + "\n")
        bc = Bytecode(code, opc, dup_lines=dup_lines)
        real_out.write(bc.dis(show_bytes=show_bytes) + "\n")
        # Queue up nested code objects for later disassembly
        queue.extend(const for const in code.co_consts if iscode(const))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disco_loop_asm_format(opc, version, co, real_out, fn_name_map, all_fns):
    """Produces disassembly in a format more conducive to automatic assembly by producing inner modules before they are used by outer ones. Since this is recusive, we'll use more stack space at runtime. """ |
    #Normalize the code object to a mutable cross-version representation
    if version < 3.0:
        co = code2compat(co)
    else:
        co = code3compat(co)
    co_name = co.co_name
    mapped_name = fn_name_map.get(co_name, co_name)
    #First pass: recurse into nested code objects so inner definitions are
    #emitted before this (outer) one, renaming them to unique names
    new_consts = []
    for c in co.co_consts:
        if iscode(c):
            if version < 3.0:
                c_compat = code2compat(c)
            else:
                c_compat = code3compat(c)
            disco_loop_asm_format(opc, version, c_compat, real_out,
                                  fn_name_map, all_fns)
            #Pull the basename out of the repr, e.g. "<code object <foo> at ...>"
            m = re.match(".* object <(.+)> at", str(c))
            if m:
                basename = m.group(1)
                if basename != 'module':
                    #Derive a unique name from the code bytes
                    mapped_name = code_uniquify(basename, c.co_code)
                    c_compat.co_name = mapped_name
                    c_compat.freeze()
            new_consts.append(c_compat)
        else:
            new_consts.append(c)
        pass
    co.co_consts = new_consts
    #Rename this code object if it has a bracketed name (e.g. <lambda>) or
    #clashes with a function name already emitted
    m = re.match("^<(.+)>$", co.co_name)
    if m or co_name in all_fns:
        if co_name in all_fns:
            basename = co_name
        else:
            basename = m.group(1)
        if basename != 'module':
            mapped_name = code_uniquify(basename, co.co_code)
            co_name = mapped_name
            assert mapped_name not in fn_name_map
            fn_name_map[mapped_name] = basename
            co.co_name = mapped_name
        pass
    elif co_name in fn_name_map:
        # FIXME: better would be a hash of the co_code
        mapped_name = code_uniquify(co_name, co.co_code)
        fn_name_map[mapped_name] = co_name
        co.co_name = mapped_name
        pass
    co = co.freeze()
    all_fns.add(co_name)
    #Emit header (skipped for an unnamed top-level module) then disassembly
    if co.co_name != '<module>' or co.co_filename:
        real_out.write("\n" + format_code_info(co, version, mapped_name) + "\n")
    bytecode = Bytecode(co, opc, dup_lines=True)
    real_out.write(bytecode.dis(asm_format=True) + "\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wr_long(f, x):
    """Internal; write a 32-bit int to a file in little-endian order."""
    # struct.pack('<I', ...) emits the same four little-endian bytes as the
    # previous manual shifting, and returns bytes on Python 3 / str on
    # Python 2, so a single code path replaces the PYTHON3 branching.
    # The mask keeps only the low 32 bits, matching the old per-byte masks.
    f.write(struct.pack('<I', x & 0xffffffff))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_compile(codeobject, filename, timestamp, magic):
    """Write code object as a byte-compiled file Arguments: codeobject: code object filefile: bytecode file to write timestamp: timestamp to put in file magic: Pyton bytecode magic """ |
    # Atomically write the pyc/pyo file. Issue #13146.
    # id() is used to generate a pseudo-random filename.
    path_tmp = '%s.%s' % (filename, id(filename))
    fc = None
    try:
        fc = open(path_tmp, 'wb')
        #Write a 4-byte placeholder where the magic goes; it is filled in
        #last (below) so a partially-written file is never a valid pyc
        if PYTHON3:
            fc.write(bytes([0, 0, 0, 0]))
        else:
            fc.write('\0\0\0\0')
        wr_long(fc, timestamp)
        marshal.dump(codeobject, fc)
        fc.flush()
        #Seek back and overwrite the placeholder with the real magic
        fc.seek(0, 0)
        fc.write(magic)
        fc.close()
        #Atomic replace of the destination
        os.rename(path_tmp, filename)
    except OSError:
        #Clean up the temp file on failure, then re-raise
        try:
            os.unlink(path_tmp)
        except OSError:
            pass
        raise
    finally:
        #Harmless double-close when the try block completed normally
        if fc: fc.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int2magic(magic_int):
    """Convert a magic integer (e.g. 62211) into the 4-byte magic string
    (e.g. b'\\x03\\xf3\\r\\n') stored at the start of a bytecode file.

    See also the magic2int/version dictionaries, which have precomputed
    values for known magic ints.
    """
    if sys.version_info >= (3, 0):
        cr = bytes('\r', 'utf-8')
        lf = bytes('\n', 'utf-8')
    else:
        cr, lf = '\r', '\n'
    return struct.pack('<Hcc', magic_int, cr, lf)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sysinfo2float(version_info=sys.version_info):
    """Convert a sys.versions_info-compatible list into a 'canonic' floating-point number which that can then be used to look up a magic number. Note that this can only be used for released version of C Python, not interim development versions, since we can't represent that as a floating-point number. For handling Pypy, pyston, jython, etc. and interim versions of C Python, use sysinfo2magic. """ |
    #Build e.g. '3.7.0' from (major, minor, micro)
    vers_str = '.'.join([str(v) for v in version_info[0:3]])
    if version_info[3] != 'final':
        #Append releaselevel/serial with a '.' separator
        #NOTE(review): sysinfo2magic omits this '.' - confirm the asymmetry
        #is intentional for the respective lookup-table key formats
        vers_str += '.' + ''.join([str(i) for i in version_info[3:]])
    if IS_PYPY:
        vers_str += 'pypy'
    else:
        try:
            import platform
            #Rebinds the module name to the implementation string
            platform = platform.python_implementation()
            if platform in ('Jython', 'Pyston'):
                vers_str += platform
            pass
        except ImportError:
            # Python may be too old, e.g. < 2.6 or implementation may
            # just not have platform
            pass
        except AttributeError:
            pass
    return py_str2float(vers_str)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sysinfo2magic(version_info=sys.version_info):
    """Convert a list sys.versions_info compatible list into a 'canonic' floating-point number which that can then be used to look up a magic number. Note that this can raise an exception. """ |
    # FIXME: DRY with sysinfo2float()
    #Build e.g. '3.7.0' from (major, minor, micro)
    vers_str = '.'.join([str(v) for v in version_info[0:3]])
    if version_info[3] != 'final':
        #Releaselevel/serial appended with no separator (unlike sysinfo2float)
        vers_str += ''.join([str(v) for v in version_info[3:]])
    if IS_PYPY:
        vers_str += 'pypy'
    else:
        try:
            import platform
            platform = platform.python_implementation()
            if platform in ('Jython', 'Pyston'):
                vers_str += platform
            pass
        except ImportError:
            # Python may be too old, e.g. < 2.6 or implementation may
            # just not have platform
            pass
    #Raises KeyError when vers_str is not a known release
    return magics[vers_str]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_opdata(l, from_mod, version=None, is_pypy=False):
    """Sets up a number of the structures found in Python's opcode.py. Python opcode.py routines assign attributes to modules. In order to do this in a modular way here, the local dictionary for the module is passed. """ |
    if version:
        l['python_version'] = version
    l['is_pypy'] = is_pypy
    l['cmp_op'] = cmp_op
    l['HAVE_ARGUMENT'] = HAVE_ARGUMENT
    #NOTE(review): with the default version=None this comparison raises
    #TypeError on Python 3 - callers appear to always pass a numeric
    #version; confirm
    if version <= 3.5:
        #Pre-3.6: bytecode uses variable-length instructions
        l['findlinestarts'] = findlinestarts
        l['findlabels'] = findlabels
        l['get_jump_targets'] = get_jump_targets
        l['get_jump_target_maps'] = get_jump_target_maps
    else:
        #3.6+: 2-byte "wordcode" instructions
        l['findlinestarts'] = wordcode.findlinestarts
        l['findlabels'] = wordcode.findlabels
        l['get_jump_targets'] = wordcode.get_jump_targets
        l['get_jump_target_maps'] = wordcode.get_jump_target_maps
    #Deep copies so later rm_op/add_op edits don't mutate the base module
    l['opmap'] = deepcopy(from_mod.opmap)
    l['opname'] = deepcopy(from_mod.opname)
    for field in fields2copy:
        l[field] = list(getattr(from_mod, field))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rm_op(l, name, op):
    """Remove an opcode from the opcode tables in l.

    This is used when basing a new Python release off of another one, and
    there is an opcode that is in the old release that was removed in the
    new release. We are pretty aggressive about removing traces of the op.

    Arguments:
       l: module dictionary set up by init_opdata()
       name: opcode name, e.g. 'STOP_CODE'
       op: opcode number
    """
    # opname is an array, so we need to keep the position in there,
    # replacing the entry with a placeholder.
    l['opname'][op] = '<%s>' % op
    # The original code removed 'hasname' twice; a single pass over every
    # membership table is equivalent and keeps the list in one place.
    for field in ('hasconst', 'hascompare', 'hascondition', 'hasfree',
                  'hasjabs', 'hasjrel', 'haslocal', 'hasname', 'hasnargs',
                  'hasvargs', 'nofollow'):
        if op in l[field]:
            l[field].remove(op)
    assert l['opmap'][name] == op
    del l['opmap'][name]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def opcode_check(l):
    """When the version of Python we are running happens to have the same opcode set as the opcode we are importing, we perform checks to make sure our opcode set matches exactly. """ |
    # Python 2.6 reports 2.6000000000000001
    if (abs(PYTHON_VERSION - l['python_version']) <= 0.01
            and IS_PYPY == l['is_pypy']):
        try:
            import dis
            opmap = fix_opcode_names(dis.opmap)
            # print(set(opmap.items()) - set(l['opmap'].items()))
            # print(set(l['opmap'].items()) - set(opmap.items()))
            #Both directions: the two opcode maps must be identical
            assert all(item in opmap.items() for item in l['opmap'].items())
            assert all(item in l['opmap'].items() for item in opmap.items())
        except:
            #NOTE(review): this bare except silently swallows assertion
            #failures, and the handler body (just 'import sys') looks like a
            #truncated diagnostic path - confirm against upstream
            import sys
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_opcodes(opmap):
    """Print each opcode number and name, in ascending opcode order."""
    # Invert name -> number into number -> name
    name_by_op = {code: name for name, code in opmap.items()}
    for code in sorted(name_by_op):
        print("%-3s %s" % (str(code), name_by_op[code]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretty_flags(flags):
    """Return pretty representation of code flags."""
    hex_repr = "0x%08x" % flags
    remaining = flags
    parts = []
    exhausted = True
    for bit in range(32):
        mask = 1 << bit
        if remaining & mask:
            parts.append(COMPILER_FLAG_NAMES.get(mask, hex(mask)))
            remaining ^= mask
            if not remaining:
                exhausted = False
                break
    if exhausted:
        # Either flags was 0, or bits above the 32nd remain: show leftover
        parts.append(hex(remaining))
    parts.reverse()
    return "%s (%s)" % (hex_repr, " | ".join(parts))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _try_compile(source, name):
"""Attempts to compile the given source, first as an expression and then as a statement if the first approach fails. Utility function to accept strings in functions that otherwise expect code objects """ |
try:
c = compile(source, name, 'eval')
except SyntaxError:
c = compile(source, name, 'exec')
return c |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_code_object(x):
    """Helper to handle methods, functions, generators, strings and raw code objects"""
    # Unwrap in order: bound method -> function -> generator
    for attr in ('__func__', '__code__', 'gi_code'):
        if hasattr(x, attr):
            x = getattr(x, attr)
    if isinstance(x, str):       # Source code
        x = _try_compile(x, "<disassembly>")
    if hasattr(x, 'co_code'):    # Code object
        return x
    raise TypeError("don't know how to disassemble %s objects" %
                    type(x).__name__)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.