text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_site_amplification_term(self, C, vs30):
""" Returns the site amplification term for the case in which Vs30 is used directly """ |
return C["gamma"] * np.log10(vs30 / self.CONSTS["Vref"]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_site_amplification_term(self, C, vs30):
""" Returns the site amplification given Eurocode 8 site classification """ |
f_s = np.zeros_like(vs30)
# Site class B
idx = np.logical_and(vs30 < 800.0, vs30 >= 360.0)
f_s[idx] = C["eB"]
# Site Class C
idx = np.logical_and(vs30 < 360.0, vs30 >= 180.0)
f_s[idx] = C["eC"]
# Site Class D
idx = vs30 < 180.0
f_s[idx] = C["eD"]
return f_s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mean(self, C, rup, dists, sites):
    """
    Return the mean value of ground motion; the style-of-faulting term
    is deliberately neglected in this model.
    """
    magnitude_term = self._get_magnitude_scaling_term(C, rup.mag)
    distance_term = self._get_distance_scaling_term(C, dists.rjb, rup.mag)
    site_term = self._get_site_amplification_term(C, sites.vs30)
    return magnitude_term + distance_term + site_term
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_loss_tables(dstore):
    """
    Compute the total losses by rupture and the losses by realization.

    :param dstore: a datastore with ``oqparam``, ``csm_info``,
        ``ruptures`` and ``losses_by_event`` datasets
    :returns: a pair ``(tbl, lbr)`` where ``tbl`` has shape
        (num_ruptures, L) and ``lbr`` has shape (R, L)
    """
    oq = dstore['oqparam']
    L = len(oq.loss_dt().names)  # number of loss types
    R = dstore['csm_info'].get_num_rlzs()  # number of realizations
    serials = dstore['ruptures']['serial']
    # map rupture serial -> row index in the losses-by-rupture table
    idx_by_ser = dict(zip(serials, range(len(serials))))
    tbl = numpy.zeros((len(serials), L), F32)
    lbr = numpy.zeros((R, L), F32)  # losses by rlz
    for rec in dstore['losses_by_event'].value:  # call .value for speed
        # the rupture serial is encoded in the high bits of the event ID
        idx = idx_by_ser[rec['eid'] // TWO32]
        tbl[idx] += rec['loss']
        lbr[rec['rlzi']] += rec['loss']
    return tbl, lbr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def html(header_rows):
    """Convert a list of tuples describing a table into a HTML string."""
    # every table gets a unique name from the module-level counter
    table_name = 'table%d' % next(tablecounter)
    stringified_rows = [map(str, row) for row in header_rows]
    return HtmlTable(stringified_rows, table_name).render()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_tabs(tag_ids, tag_status, tag_contents):
    """
    Return a HTML string containing all the tabs we want to display.
    Each tab gets a list item (marked '.' when complete, '!' otherwise)
    and a content div, both numbered from 1.
    """
    templ = '''
<div id="tabs">
<ul>
%s
%s
</div>'''
    list_items = []
    divs = []
    counter = 0
    for tag_id, status, tag_content in zip(tag_ids, tag_status, tag_contents):
        counter += 1
        marker = '.' if status == 'complete' else '!'
        list_items.append(
            '<li><a href="#tabs-%d">%s%s</a></li>' % (counter, tag_id, marker))
        divs.append('<div id="tabs-%d">%s</div>' % (counter, tag_content))
    return templ % ('\n'.join(list_items), '\n'.join(divs))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_report(isodate='today'):
    """
    Build a HTML report with the computations performed at the given
    isodate. Return the name of the report, which is saved in the
    current directory.

    :param isodate: 'today' or a 'YYYY-MM-DD' string
    :returns: the report file name, of the form 'jobs-YYYY-MM-DD.html'
    """
    if isodate == 'today':
        isodate = date.today()
    else:
        isodate = date(*time.strptime(isodate, '%Y-%m-%d')[:3])
    isodate1 = isodate + timedelta(1)  # +1 day
    tag_ids = []
    tag_status = []
    tag_contents = []
    # the fetcher returns an header which is stripped with [1:]
    jobs = dbcmd(
        'fetch', ALL_JOBS, isodate.isoformat(), isodate1.isoformat())
    page = '<h2>%d job(s) finished before midnight of %s</h2>' % (
        len(jobs), isodate)
    for job_id, user, status, ds_calc in jobs:
        tag_ids.append(job_id)
        tag_status.append(status)
        [stats] = dbcmd('fetch', JOB_STATS, job_id)
        (job_id, user, start_time, stop_time, status, duration) = stats
        try:
            ds = read(job_id, datadir=os.path.dirname(ds_calc))
            txt = view_fullreport('fullreport', ds)
            report = html_parts(txt)
        except Exception as exc:
            # best-effort: a failing report becomes an escaped error title
            report = dict(
                html_title='Could not generate report: %s' % cgi.escape(
                    str(exc), quote=True),
                fragment='')
        # NOTE(review): this rebinds `page`, discarding the <h2> header
        # built above (and any previous job's content) — confirm intended
        page = report['html_title']
        page += html([stats._fields, stats])
        page += report['fragment']
        tag_contents.append(page)
    page = make_tabs(tag_ids, tag_status, tag_contents) + (
        'Report last updated: %s' % datetime.now())
    fname = 'jobs-%s.html' % isodate
    with open(fname, 'w') as f:
        f.write(PAGE_TEMPLATE % page)
    return fname
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scenario_risk(riskinputs, riskmodel, param, monitor):
    """
    Core function for a scenario computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :returns:
        a dictionary with keys 'agg' (array of shape (E, L), E events,
        L loss types), 'avg' (list of tuples
        (lt_idx, rlz_idx, asset_ordinal, statistics)) and 'all_losses'
        (an AccumDict keyed by (loss_type_index, rlz_index))
    """
    E = param['E']  # number of simulated events
    L = len(riskmodel.loss_types)
    result = dict(agg=numpy.zeros((E, L), F32), avg=[],
                  all_losses=AccumDict(accum={}))
    for ri in riskinputs:
        for out in riskmodel.gen_outputs(ri, monitor, param['epspath']):
            r = out.rlzi  # realization index
            weight = param['weights'][r]
            slc = param['event_slice'](r)  # events belonging to rlz r
            for l, loss_type in enumerate(riskmodel.loss_types):
                losses = out[loss_type]
                if numpy.product(losses.shape) == 0:  # happens for all NaNs
                    continue
                # per-asset mean and stddev over the events
                stats = numpy.zeros(len(ri.assets), stat_dt)  # mean, stddev
                for a, asset in enumerate(ri.assets):
                    stats['mean'][a] = losses[a].mean()
                    stats['stddev'][a] = losses[a].std(ddof=1)
                    result['avg'].append((l, r, asset['ordinal'], stats[a]))
                # aggregate the losses over the assets, per event
                agglosses = losses.sum(axis=0)  # shape num_gmfs
                result['agg'][slc, l] += agglosses * weight
                if param['asset_loss_table']:
                    aids = ri.assets['ordinal']
                    result['all_losses'][l, r] += AccumDict(zip(aids, losses))
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _check_depth_limits(input_dict):
'''Returns the default upper and lower depth values if not in dictionary
:param input_dict:
Dictionary corresponding to the kwargs dictionary of calling function
:returns:
'upper_depth': Upper seismogenic depth (float)
'lower_depth': Lower seismogenic depth (float)
'''
if ('upper_depth' in input_dict.keys()) and input_dict['upper_depth']:
if input_dict['upper_depth'] < 0.:
raise ValueError('Upper seismogenic depth must be positive')
else:
upper_depth = input_dict['upper_depth']
else:
upper_depth = 0.0
if ('lower_depth' in input_dict.keys()) and input_dict['lower_depth']:
if input_dict['lower_depth'] < upper_depth:
raise ValueError('Lower depth must take a greater value than'
' upper depth!')
else:
lower_depth = input_dict['lower_depth']
else:
lower_depth = np.inf
return upper_depth, lower_depth |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_decimal_from_datetime(time):
    '''
    Convert a datetime object to decimal time.

    As the decimal time function requires inputs in the form of numpy
    arrays, each component of the datetime object is wrapped in a
    single-element numpy array before calling ``decimal_time``.
    '''
    # Decimal seconds from seconds + microseconds; kept as a float so
    # the sub-second part is preserved (casting to int would drop it).
    # np.float was removed from numpy >= 1.24, so use the builtin.
    temp_seconds = float(time.second) + (float(time.microsecond) / 1.0E6)
    return decimal_time(np.array([time.year], dtype=int),
                        np.array([time.month], dtype=int),
                        np.array([time.day], dtype=int),
                        np.array([time.hour], dtype=int),
                        np.array([time.minute], dtype=int),
                        np.array([temp_seconds], dtype=float))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_catalogue(self, valid_id):
    '''
    Method to post-process the catalogue based on the selection options

    :param numpy.ndarray valid_id:
        Boolean vector indicating whether each event is selected (True)
        or not (False)

    :returns:
        Catalogue of selected events as instance of
        openquake.hmtk.seismicity.catalogue.Catalogue class
    '''
    if not np.any(valid_id):
        # No events selected - create clean instance of class
        output = Catalogue()
        output.processes = self.catalogue.processes
    elif np.all(valid_id):
        # All events selected - return the full catalogue, deep-copied
        # when self.copycat is set so the original stays untouched
        if self.copycat:
            output = deepcopy(self.catalogue)
        else:
            output = self.catalogue
    else:
        # Partial selection - purge the non-selected events (in place
        # unless self.copycat requests a copy)
        if self.copycat:
            output = deepcopy(self.catalogue)
        else:
            output = self.catalogue
        output.purge_catalogue(valid_id)
    return output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_polygon(self, polygon, distance=None, **kwargs):
    '''
    Select earthquakes within polygon

    :param polygon:
        Polygon as instance of nhlib.geo.polygon.Polygon class
    :param float distance:
        Buffer distance (km) (can take negative values)
    :param kwargs:
        may contain 'upper_depth' and 'lower_depth' limits

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    '''
    if distance:
        # If a distance is specified then dilate the polyon by distance
        zone_polygon = polygon.dilate(distance)
    else:
        zone_polygon = polygon
    # Make valid all events inside depth range
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    valid_depth = np.logical_and(
        self.catalogue.data['depth'] >= upper_depth,
        self.catalogue.data['depth'] < lower_depth)
    # Events outside polygon returned to invalid assignment
    catalogue_mesh = Mesh(self.catalogue.data['longitude'],
                          self.catalogue.data['latitude'],
                          self.catalogue.data['depth'])
    valid_id = np.logical_and(valid_depth,
                              zone_polygon.intersects(catalogue_mesh))
    return self.select_catalogue(valid_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def circular_distance_from_point(self, point, distance, **kwargs):
    '''
    Select earthquakes within a distance from a Point

    :param point:
        Centre point as instance of nhlib.geo.point.Point class
    :param float distance:
        Distance (km)
    :param kwargs:
        must contain 'distance_type' ('epicentral' or hypocentral)

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    '''
    # BUG FIX: the original used `is 'epicentral'`, an identity check
    # against a string literal whose result is implementation-dependent;
    # string values must be compared with ==
    if kwargs['distance_type'] == 'epicentral':
        # epicentral distance: project both the catalogue and the
        # reference point onto the surface (depth 0)
        locations = Mesh(
            self.catalogue.data['longitude'],
            self.catalogue.data['latitude'],
            np.zeros(len(self.catalogue.data['longitude']), dtype=float))
        point = Point(point.longitude, point.latitude, 0.0)
    else:
        locations = self.catalogue.hypocentres_as_mesh()
    is_close = point.closer_than(locations, distance)
    return self.select_catalogue(is_close)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cartesian_square_centred_on_point(self, point, distance, **kwargs):
    '''
    Select earthquakes from within a square centered on a point

    :param point:
        Centre point as instance of nhlib.geo.point.Point class
    :param distance:
        Distance (km)

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        class containing only selected events
    '''
    point_surface = Point(point.longitude, point.latitude, 0.)
    # Project the four edge mid-points of the square from the centre
    north_point = point_surface.point_at(distance, 0., 0.)
    east_point = point_surface.point_at(distance, 0., 90.)
    south_point = point_surface.point_at(distance, 0., 180.)
    west_point = point_surface.point_at(distance, 0., 270.)
    is_long = np.logical_and(
        self.catalogue.data['longitude'] >= west_point.longitude,
        self.catalogue.data['longitude'] < east_point.longitude)
    # BUG FIX: np.logical_and takes only two array operands; a third
    # positional argument is the `out` parameter, so the original
    # three-argument calls silently dropped one condition. Use
    # logical_and.reduce to combine all the conditions.
    is_surface = np.logical_and.reduce([
        is_long,
        self.catalogue.data['latitude'] >= south_point.latitude,
        self.catalogue.data['latitude'] < north_point.latitude])
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    is_valid = np.logical_and.reduce([
        is_surface,
        self.catalogue.data['depth'] >= upper_depth,
        self.catalogue.data['depth'] < lower_depth])
    return self.select_catalogue(is_valid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_joyner_boore_distance(self, surface, distance, **kwargs):
    '''
    Select events within a Joyner-Boore distance of a fault

    :param surface:
        Fault surface as instance of
        nhlib.geo.surface.base.SimpleFaultSurface or as instance of
        nhlib.geo.surface.ComplexFaultSurface
    :param float distance:
        Rupture distance (km)

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    '''
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    # Joyner-Boore distance from the fault surface to each hypocentre
    rjb = surface.get_joyner_boore_distance(
        self.catalogue.hypocentres_as_mesh())
    depths = self.catalogue.data['depth']
    in_depth_range = np.logical_and(depths >= upper_depth,
                                    depths < lower_depth)
    is_valid = np.logical_and(rjb <= distance, in_depth_range)
    return self.select_catalogue(is_valid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_rupture_distance(self, surface, distance, **kwargs):
    '''
    Select events within a rupture distance from a fault surface

    :param surface:
        Fault surface as instance of nhlib.geo.surface.base.BaseSurface
    :param float distance:
        Rupture distance (km)

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    '''
    # Check for upper and lower depths
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    # Minimum (rupture) distance from the surface to each hypocentre
    rrupt = surface.get_min_distance(self.catalogue.hypocentres_as_mesh())
    depths = self.catalogue.data['depth']
    in_depth_range = np.logical_and(depths >= upper_depth,
                                    depths < lower_depth)
    is_valid = np.logical_and(rrupt <= distance, in_depth_range)
    return self.select_catalogue(is_valid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_time_period(self, start_time=None, end_time=None):
    '''
    Select earthquakes occurring within a given time period

    :param start_time:
        Earliest time (as datetime.datetime object)
    :param end_time:
        Latest time (as datetime.datetime object)

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    '''
    if not start_time:
        if not end_time:
            # No times input, therefore skip everything and return catalog
            return self.catalogue
        # earliest year in the catalogue is used as the lower bound
        start_time = np.min(self.catalogue.data['year'])
    else:
        start_time = _get_decimal_from_datetime(start_time)
    if not end_time:
        end_time = _get_decimal_from_datetime(datetime.now())
    else:
        end_time = _get_decimal_from_datetime(end_time)
    # Get decimal time values (computed once; the original computed this
    # a first time before it could possibly be used and threw it away)
    time_value = self.catalogue.get_decimal_time()
    is_valid = np.logical_and(time_value >= start_time,
                              time_value < end_time)
    return self.select_catalogue(is_valid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_depth_range(self, lower_depth=None, upper_depth=None):
    '''
    Selects events within a specified depth range

    :param float lower_depth:
        Lower depth for consideration
    :param float upper_depth:
        Upper depth for consideration

    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    '''
    # NOTE(review): `not lower_depth` is also True for 0.0, so an
    # explicit depth of 0.0 falls back to the default — confirm intended
    if not lower_depth:
        if not upper_depth:
            # No limiting depths defined - so return entire catalogue!
            return self.catalogue
        else:
            lower_depth = np.inf  # no lower bound on depth
    if not upper_depth:
        upper_depth = 0.0  # default: surface
    is_valid = np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                              self.catalogue.data['depth'] < lower_depth)
    return self.select_catalogue(is_valid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_cluster_set(self, vcl):
    """
    Split the catalogue into a dictionary mapping each cluster ID to an
    individual catalogue of the events belonging to that cluster.

    :param numpy.ndarray vcl:
        Cluster ID list
    :returns:
        Dictionary of instances of the :class:
        openquake.hmtk.seismicity.catalogue.Catalogue, where each
        instance is the catalogue of one cluster
    """
    cluster_set = {}
    # cluster IDs run from 0 (non-clustered) to the maximum in vcl
    for cluster_id in range(np.max(vcl) + 1):
        selection = np.where(vcl == cluster_id)[0]
        # deep-copy so selecting events does not mutate the original
        cluster_catalogue = deepcopy(self.catalogue)
        cluster_catalogue.select_catalogue_events(selection)
        cluster_set[cluster_id] = cluster_catalogue
    return cluster_set
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_bounding_box(self, limits):
    """
    Selects the earthquakes within a bounding box.

    :parameter limits:
        A list or a numpy array with four elements in the following
        order: min x (longitude), min y (latitude), max x (longitude),
        max y (latitude)
    :returns:
        Returns a :class:htmk.seismicity.catalogue.Catalogue` instance
    """
    longitude = self.catalogue.data['longitude']
    latitude = self.catalogue.data['latitude']
    # inside in both coordinates (bounds are inclusive on both sides)
    inside_lon = (longitude >= limits[0]) & (longitude <= limits[2])
    inside_lat = (latitude >= limits[1]) & (latitude <= limits[3])
    return self.select_catalogue(inside_lon & inside_lat)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_calc_ids(datadir=None):
    """
    Extract the available calculation IDs from the datadir, in order.
    Returns an empty list if the directory does not exist.
    """
    datadir = datadir or get_datadir()
    if not os.path.exists(datadir):
        return []
    # collect the numeric IDs of the files matching the calc pattern;
    # a set removes duplicates before sorting
    matches = (re.match(CALC_REGEX, f) for f in os.listdir(datadir))
    return sorted({int(mo.group(2)) for mo in matches if mo})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_last_calc_id(datadir=None):
    """
    Extract the latest calculation ID from the given directory.
    If none is found, return 0.
    """
    calc_ids = get_calc_ids(datadir)
    # get_calc_ids returns a sorted list, so the last entry is newest
    return calc_ids[-1] if calc_ids else 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open(self, mode):
    """
    Open the underlying .hdf5 file and the parent, if any

    :param mode: open mode passed to hdf5.File ('r', 'r+', ...)
    :raises OSError: re-raised with the file name appended
    """
    if self.hdf5 == ():  # not already open
        kw = dict(mode=mode, libver='latest')
        if mode == 'r':
            # single-writer/multiple-reader mode for concurrent readers
            kw['swmr'] = True
        try:
            self.hdf5 = hdf5.File(self.filename, **kw)
        except OSError as exc:
            # add the file name to the error message for debuggability
            raise OSError('%s in %s' % (exc, self.filename))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_dset(self, key, dtype, shape=(None,), compression=None, fillvalue=0, attrs=None):
    """
    Create a one-dimensional HDF5 dataset.

    :param key: name of the dataset
    :param dtype: dtype of the dataset (usually composite)
    :param shape: shape of the dataset, possibly extendable
    :param compression: the kind of HDF5 compression to use
    :param fillvalue: fill value for missing entries (default 0)
    :param attrs: dictionary of attributes of the dataset
    :returns: a HDF5 dataset
    """
    return hdf5.create(
        self.hdf5, key, dtype, shape, compression, fillvalue, attrs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extend(self, key, array, **attrs):
    """
    Extend the dataset associated to the given key; create it if needed

    :param key: name of the dataset
    :param array: array to store
    :param attrs: a dictionary of attributes
    :returns: the (possibly newly created) underlying dataset
    """
    try:
        dset = self.hdf5[key]
    except KeyError:
        # first call: create an extendable dataset matching the array
        dset = hdf5.create(self.hdf5, key, array.dtype,
                           shape=(None,) + array.shape[1:])
    hdf5.extend(dset, array)
    for k, v in attrs.items():
        dset.attrs[k] = v
    return dset
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, key, kw):
    """
    Update the object associated to `key` with the `kw` dictionary;
    works for LiteralAttrs objects and automatically flushes.
    """
    # fetch the stored object, or start from an empty LiteralAttrs
    obj = self[key] if key in self else hdf5.LiteralAttrs()
    vars(obj).update(kw)
    self[key] = obj
    self.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def export_path(self, relname, export_dir=None):
    """
    Return the path of the exported file: `export_dir` is prepended and
    the calculation ID is appended before the extension.

    :param relname: relative file name
    :param export_dir: export directory (if None use .export_dir)
    """
    # inner slashes are flattened to '-' to avoid creating
    # intermediate directories
    base, extension = relname.replace('/', '-').rsplit('.', 1)
    target_dir = self.export_dir if export_dir is None else export_dir
    return os.path.join(
        target_dir, '%s_%s.%s' % (base, self.calc_id, extension))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_fname(self, prefix, postfix, fmt, export_dir=None):
    """
    Build a file name from a realization, by using prefix and extension.

    :param prefix: the prefix to use
    :param postfix: the postfix to use (can be a realization object)
    :param fmt: the extension ('csv', 'xml', etc)
    :param export_dir: export directory (if None use .export_dir)
    :returns: relative pathname including the extension
    """
    if hasattr(postfix, 'sm_lt_path'):  # postfix is a realization
        fname = '%s-rlz-%03d.%s' % (prefix, postfix.ordinal, fmt)
    elif postfix:
        fname = '%s-%s.%s' % (prefix, postfix, fmt)
    else:
        fname = '%s.%s' % (prefix, fmt)
    return self.export_path(fname, export_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flush(self):
    """Flush the underlying hdf5 file and the parent datastore, if any"""
    if self.parent != ():  # the empty tuple is the "no parent" sentinel
        self.parent.flush()
    if self.hdf5:  # is open
        self.hdf5.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close the underlying hdf5 file"""
    if self.parent != ():  # the empty tuple is the "no parent" sentinel
        # flush and close the parent datastore first
        self.parent.flush()
        self.parent.close()
    if self.hdf5:  # is open
        self.hdf5.flush()
        self.hdf5.close()
        # reset to the "closed" sentinel so open() can reopen later
        self.hdf5 = ()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getsize(self, key=None):
    """
    Return the size in byte of the output associated to the given key.
    If no key is given, returns the total size of all files.
    """
    if key is None:
        # size of the whole file on disk
        return os.path.getsize(self.filename)
    # bypass any __getitem__ override to reach the raw h5py object
    return hdf5.ByteCounter.get_nbytes(
        h5py.File.__getitem__(self.hdf5, key))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def maybe_encode(value):
    """
    If value is a non-empty sequence of strings, encode it; any other
    value is returned unchanged.
    """
    # guard against empty sequences: the original indexed value[0]
    # unconditionally and raised IndexError on []
    if isinstance(value, (list, tuple)) and value and isinstance(value[0], str):
        return encode(value)
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extend(dset, array, **attrs):
    """
    Extend an extensible dataset with an array of a compatible dtype.

    :param dset: an h5py dataset
    :param array: an array of length L
    :param attrs: attributes to set on the dataset after extending
    :returns: the total length of the dataset (i.e. initial length + L)
    """
    length = len(dset)
    if len(array) == 0:
        # nothing to append; leave the dataset (and its attrs) untouched
        return length
    newlength = length + len(array)
    if array.dtype.name == 'object':  # vlen array
        shape = (newlength,) + preshape(array[0])
    else:
        shape = (newlength,) + array.shape[1:]
    dset.resize(shape)
    dset[length:newlength] = array
    for key, val in attrs.items():
        dset.attrs[key] = val
    return newlength
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extend3(filename, key, array, **attrs):
    """
    Extend an HDF5 file dataset with the given array, creating the
    dataset on first use, and return the new total length.
    """
    with h5py.File(filename) as h5:
        try:
            dset = h5[key]
        except KeyError:
            # dataset does not exist yet: build an extendable shape
            if array.dtype.name == 'object':  # vlen array
                shape = (None,) + preshape(array[0])
            else:
                shape = (None,) + array.shape[1:]
            dset = create(h5, key, array.dtype, shape)
        length = extend(dset, array)
        # renamed loop variables: the original shadowed the `key` param
        for attr_name, attr_value in attrs.items():
            dset.attrs[attr_name] = attr_value
        h5.flush()
    return length
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_nbytes(dset):
    """
    If the dataset has an attribute 'nbytes', return it. Otherwise get
    the size of the underlying array. Returns None if the dataset is
    actually a group.
    """
    attributes = dset.attrs
    if 'nbytes' in attributes:
        # the dataset stores its size explicitly
        return attributes['nbytes']
    if hasattr(dset, 'dtype'):
        # number of elements times the per-record byte size
        return dset.size * numpy.zeros(1, dset.dtype).nbytes
    # a group: no dtype and no explicit size -> implicit None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_array(values):
    """
    Decode the values which are bytestrings; other values are kept
    as they are.
    """
    def _decode(val):
        try:
            return val.decode('utf8')
        except AttributeError:  # not a bytestring
            return val
    return [_decode(val) for val in values]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def temporary(cls):
    """
    Returns a temporary hdf5 file, open for writing.
    The temporary name is stored in the .path attribute.
    It is the user responsability to remove the file when closed.
    """
    fh, path = tempfile.mkstemp(suffix='.hdf5')
    # close the OS-level handle: the file is reopened through the class
    os.close(fh)
    self = cls(path, 'w')
    self.path = path
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_vlen(self, key, data):
    """
    Save a sequence of variable-length arrays

    :param key: name of the dataset
    :param data: data to store as a list of arrays
    """
    shape = (None,) + data[0].shape[:-1]
    try:
        dset = self[key]
    except KeyError:
        # first call: create the dataset with a variable-length dtype
        vdt = h5py.special_dtype(vlen=data[0].dtype)
        dset = create(self, key, vdt, shape, fillvalue=None)
    # accumulate total byte size and total length across calls
    nbytes = dset.attrs.get('nbytes', 0)
    totlen = dset.attrs.get('totlen', 0)
    for i, val in enumerate(data):
        nbytes += val.nbytes
        totlen += len(val)
    # grow the dataset and append the arrays one by one
    length = len(dset)
    dset.resize((length + len(data),) + shape[1:])
    for i, arr in enumerate(data):
        dset[length + i] = arr
    dset.attrs['nbytes'] = nbytes
    dset.attrs['totlen'] = totlen
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_nbytes(self, key, nbytes=None):
    """
    Set the `nbytes` attribute on the HDF5 object identified by `key`.

    :param key: name of the dataset or group
    :param nbytes: if None, the size is computed from the object itself
    :returns: the size in bytes that was set
    """
    obj = super().__getitem__(key)
    if nbytes is not None:  # size set from outside
        obj.attrs['nbytes'] = nbytes
    else:  # recursively determine the size of the datagroup
        obj.attrs['nbytes'] = nbytes = ByteCounter.get_nbytes(obj)
    return nbytes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_delta(self, stds, dists):
""" Computes the additional delta to be used for the computation of the upp and low models """ |
delta = np.maximum((0.1-0.001*dists.repi), np.zeros_like(dists.repi))
return delta |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    Return the mean and standard deviations as the equal-weight (0.2)
    combination of five GMPEs. See documentation for method
    `GroundShakingIntensityModel` in
    :class:~`openquake.hazardlib.gsim.base.GSIM`
    """
    # distances: convert epicentral distance into equivalent rjb/rrup
    distsl = copy.copy(dists)
    distsl.rjb, distsl.rrup = \
        utils.get_equivalent_distances_east(rup.mag, dists.repi)
    #
    # Pezeshk et al. 2011 - Rrup
    mean1, stds1 = super().get_mean_and_stddevs(sites, rup, distsl, imt,
                                                stddev_types)
    mean1 = self.apply_correction_to_BC(mean1, imt, distsl)
    #
    # Atkinson 2008 - Rjb
    gmpe = Atkinson2008prime()
    mean2, stds2 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    #
    # Silva et al. 2002 - Rjb (single-corner saturation)
    gmpe = SilvaEtAl2002SingleCornerSaturation()
    mean4, stds4 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    mean4 = self.apply_correction_to_BC(mean4, imt, distsl)
    #
    # Silva et al. 2002 - Rjb (double-corner saturation)
    gmpe = SilvaEtAl2002DoubleCornerSaturation()
    mean5, stds5 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    mean5 = self.apply_correction_to_BC(mean5, imt, distsl)
    #
    # distances recomputed with the AB06-specific conversion
    distsl.rjb, distsl.rrup = \
        utils.get_equivalent_distances_east(rup.mag, dists.repi, ab06=True)
    #
    # Atkinson and Boore 2006 - Rrup
    gmpe = AtkinsonBoore2006Modified2011()
    mean3, stds3 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                             stddev_types)
    # Computing adjusted mean and stds: equal 0.2 weight per model
    mean_adj = mean1*0.2 + mean2*0.2 + mean3*0.2 + mean4*0.2 + mean5*0.2
    # Note that in this case we do not apply a triangular smoothing on
    # distance as explained at page 996 of Atkinson and Adams (2013)
    # for the calculation of the standard deviation
    stds_adj = np.log(np.exp(stds1)*0.2 + np.exp(stds2)*0.2 +
                      np.exp(stds3)*0.2 + np.exp(stds4)*0.2 +
                      np.exp(stds5)*0.2)
    #
    return mean_adj, stds_adj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_checked(self, state):
    """
    Sets the Widget checked state.

    :param state: New check state.
    :type state: bool
    :return: Method success.
    :rtype: bool
    """
    if not self.__checkable:
        # non-checkable widgets refuse the state change
        return False
    if state:
        self.__checked = True
        self.setPixmap(self.__active_pixmap)
    else:
        self.__checked = False
        self.setPixmap(self.__default_pixmap)
    # notify listeners of the new state
    self.toggled.emit(state)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_menu(self, menu):
    """
    Sets the Widget menu.

    :param menu: Menu.
    :type menu: QMenu
    :return: Method success.
    :rtype: bool
    """
    self.__menu = menu
    if not self.parent():
        return False
    # take the last yielded parent so actions are registered at the top
    parent = [parent for parent in umbra.ui.common.parents_walker(self)].pop()
    for action in self.__menu.actions():
        # explicit conditional instead of the original
        # `cond and parent.addAction(action)` short-circuit trick,
        # which hid the side effect behind a discarded expression
        if not action.shortcut().isEmpty():
            parent.addAction(action)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, path, default=None):
    """
    Returns given path value.

    :param path: Path name.
    :type path: unicode
    :param default: Default value if path is not found.
    :type default: object
    :return: Action.
    :rtype: QAction
    """
    try:
        return self.__getitem__(path)
    except KeyError:
        # unused `as error` binding removed; missing paths fall back
        return default
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __watch_file_system(self):
    """
    Watches the file system for paths that have been changed or
    invalidated on disk, emitting the matching notification signals.
    """

    # Iterate over a snapshot of the registry: entries may be deleted
    # while walking, which raises a RuntimeError when iterating the
    # live dictionary on Python 3.
    for path, data in list(self.__paths.items()):
        stored_modified_time, is_file = data
        try:
            if not foundations.common.path_exists(path):
                LOGGER.warning(
                    "!> {0} | '{1}' path has been invalidated and will be unregistered!".format(
                        self.__class__.__name__, path))
                del (self.__paths[path])
                if is_file:
                    self.file_invalidated.emit(path)
                else:
                    self.directory_invalidated.emit(path)
                continue
        except KeyError:
            # Another consumer unregistered the path concurrently.
            LOGGER.debug("> {0} | '{1}' path has been unregistered while iterating!".format(
                self.__class__.__name__, path))
            continue

        try:
            modified_time = self.get_path_modified_time(path)
        except OSError:
            # Path vanished between the existence check and the stat
            # call; it will be handled on the next pass.
            LOGGER.debug("> {0} | '{1}' path has been invalidated while iterating!".format(
                self.__class__.__name__, path))
            continue

        if stored_modified_time != modified_time:
            self.__paths[path] = (modified_time, os.path.isfile(path))
            LOGGER.debug("> {0} | '{1}' path has been changed!".format(self.__class__.__name__, path))
            if is_file:
                self.file_changed.emit(path)
            else:
                self.directory_changed.emit(path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_path(self, path, modified_time=None):
    """
    Registers given path.

    :param path: Path name.
    :type path: unicode
    :param modified_time: Custom modified time.
    :type modified_time: int or float
    :return: Method success.
    :rtype: bool
    """

    if not foundations.common.path_exists(path):
        raise foundations.exceptions.PathExistsError("{0} | '{1}' path doesn't exists!".format(
            self.__class__.__name__, path))

    if path in self:
        raise umbra.exceptions.PathRegistrationError("{0} | '{1}' path is already registered!".format(
            self.__class__.__name__, path))

    # Store the modification timestamp together with a flag telling
    # whether the path points at a file or a directory.
    timestamp = modified_time if modified_time is not None else self.get_path_modified_time(path)
    self.__paths[path] = (timestamp, os.path.isfile(path))
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unregister_path(self, path):
    """
    Unregisters given path.

    :param path: Path name.
    :type path: unicode
    :return: Method success.
    :rtype: bool
    """

    if path not in self:
        raise umbra.exceptions.PathExistsError("{0} | '{1}' path isn't registered!".format(
            self.__class__.__name__, path))

    del self.__paths[path]
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_path_modified_time(path):
    """
    Returns given path modification time, truncated to whole seconds.

    :param path: Path.
    :type path: unicode
    :return: Modification time (whole seconds, as a float).
    :rtype: float
    """

    # Truncate the sub-second part directly instead of round-tripping
    # through a string split: int() truncates toward zero exactly like
    # the old split-on-"." trick, and also behaves correctly for
    # timestamps large enough to be stringified in scientific notation.
    return float(int(os.path.getmtime(path)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _post_login_page(self):
    """Sign in to the Janrain authentication service."""

    # Janrain sign-in form payload.
    payload = {
        "form": "signInForm",
        "client_id": JANRAIN_CLIENT_ID,
        "redirect_uri": "https://www.fido.ca/pages/#/",
        "response_type": "token",
        "locale": "en-US",
        "userID": self.username,
        "currentPassword": self.password,
    }
    # The response body is not needed: any transport error is surfaced
    # as a PyFidoError, success is implicit.
    try:
        yield from self._session.post(LOGIN_URL,
                                      headers=self._headers,
                                      data=payload,
                                      timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not sign in")
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_token(self):
    """
    Get the access token from Janrain.

    :return: (access token, user uuid) tuple.
    :raises PyFidoError: on network failure or unexpected payload.
    """

    # HTTP request
    try:
        raw_res = yield from self._session.get(TOKEN_URL,
                                               headers=self._headers,
                                               timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not get token")
    # The endpoint answers with a JSONP-style payload: locate the JSON
    # object wrapped in parentheses.
    content = yield from raw_res.text()
    reg_res = re.search(r"\({.*}\)", content)
    if reg_res is None:
        # Error message typo fixed ("finf" -> "find").
        raise PyFidoError("Can not find token json")
    # Load data as json, stripping the surrounding parentheses.
    return_data = json.loads(reg_res.group()[1:-1])
    # Get token and uuid
    token = return_data.get('result', {}).get('accessToken')
    uuid = return_data.get('result', {}).get('userData', {}).get('uuid')
    # Check values
    if token is None or uuid is None:
        raise PyFidoError("Can not get token or uuid")
    return token, uuid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_account_number(self, token, uuid):
    """
    Fetch the Fido account number bound to the authenticated user.

    :return: account number string.
    :raises PyFidoError: on network failure or malformed answer.
    """

    payload = {"accessToken": token,
               "uuid": uuid}
    try:
        raw_res = yield from self._session.post(ACCOUNT_URL,
                                                data=payload,
                                                headers=self._headers,
                                                timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not get account number")

    # Dig the account number out of the nested json answer.
    try:
        json_content = yield from raw_res.json()
        accounts = json_content.get('getCustomerAccounts', {}).get('accounts', [{}])
        account_number = accounts[0].get('accountNumber')
    except (OSError, ValueError):
        raise PyFidoError("Bad json getting account number")

    if account_number is None:
        raise PyFidoError("Can not get account number")
    return account_number
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_balance(self, account_number):
    """
    Get the current account balance from Fido.

    :return: balance as a float.
    :raises PyFidoError: on network failure or malformed answer.
    """

    payload = {"ctn": self.username,
               "language": "en-US",
               "accountNumber": account_number}
    try:
        raw_res = yield from self._session.post(BALANCE_URL,
                                                data=payload,
                                                headers=self._headers,
                                                timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not get balance")

    try:
        json_content = yield from raw_res.json()
        balance_str = json_content.get("getAccountInfo", {}).get("balance")
    except (OSError, ValueError):
        raise PyFidoError("Can not get balance as json")

    if balance_str is None:
        raise PyFidoError("Can not get balance")

    # The balance comes back as a string; convert it explicitly.
    try:
        return float(balance_str)
    except ValueError:
        raise PyFidoError("Can not get balance as float")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_fido_dollar(self, account_number, number):
    """
    Get the current Fido dollar balance for one phone number.

    :return: Fido dollar balance as a float.
    :raises PyFidoError: on network failure or malformed answer.
    """

    # This endpoint expects a json body rather than form data.
    payload = json.dumps({"fidoDollarBalanceFormList":
                          [{"phoneNumber": number,
                            "accountNumber": account_number}]})
    headers_json = self._headers.copy()
    headers_json["Content-Type"] = "application/json;charset=UTF-8"

    try:
        raw_res = yield from self._session.post(FIDO_DOLLAR_URL,
                                                data=payload,
                                                headers=headers_json,
                                                timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not get fido dollar")

    try:
        json_content = yield from raw_res.json()
        fido_dollar_str = json_content.get("fidoDollarBalanceInfoList",
                                           [{}])[0].get("fidoDollarBalance")
    except (OSError, ValueError):
        raise PyFidoError("Can not get fido dollar as json")

    if fido_dollar_str is None:
        raise PyFidoError("Can not get fido dollar")

    try:
        return float(fido_dollar_str)
    except ValueError:
        raise PyFidoError("Can not get fido dollar")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_usage(self, account_number, number):
    """
    Get Fido usage for one phone number.

    Collects the following data:
    - talk
    - text
    - data
    Roaming data is not supported yet.

    :return: dict with ``<name>_used``, ``<name>_remaining`` and
        ``<name>_limit`` keys for every entry of ``DATA_MAP``.
    :raises PyFidoError: on network failure or malformed answer.
    """

    payload = {"ctn": number,
               "language": "en-US",
               "accountNumber": account_number}
    # Http request
    try:
        raw_res = yield from self._session.post(USAGE_URL,
                                                data=payload,
                                                headers=self._headers,
                                                timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not get usage")
    # Load answer as json
    try:
        output = yield from raw_res.json()
    except (OSError, ValueError):
        raise PyFidoError("Can not get usage as json")
    # Format data
    ret_data = {}
    for data_name, keys in DATA_MAP.items():
        key, subkey = keys
        # Loop variable renamed from "data" so it no longer shadows the
        # request payload above.
        for entry in output.get(key)[0].get('wirelessUsageSummaryInfoList'):
            if entry.get('usageSummaryType') == subkey:
                # Prepare keys:
                used_key = "{}_used".format(data_name)
                remaining_key = "{}_remaining".format(data_name)
                limit_key = "{}_limit".format(data_name)
                # Get values, guarding against missing entries which
                # would make the ">= 0" comparisons raise TypeError on
                # Python 3.
                ret_data[used_key] = entry.get('used', 0.0)
                remaining = entry.get('remaining')
                ret_data[remaining_key] = (
                    remaining if remaining is not None and remaining >= 0 else None)
                total = entry.get('total')
                ret_data[limit_key] = (
                    total if total is not None and total >= 0 else None)
    return ret_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_data(self):
    """Fetch the latest data from Fido and cache it on the instance."""

    # Establish the http session and authenticate.
    yield from self._get_httpsession()
    yield from self._post_login_page()
    # Retrieve the (token, uuid) pair and resolve the account number.
    token_uuid = yield from self._get_token()
    account_number = yield from self._get_account_number(*token_uuid)
    # Discover the phone numbers attached to the account.
    self._phone_numbers = yield from self._list_phone_numbers(account_number)
    # Account-wide balance.
    self._data['balance'] = yield from self._get_balance(account_number)
    # Per-number Fido dollar balance.
    for phone_number in self._phone_numbers:
        fido_dollar = yield from self._get_fido_dollar(account_number,
                                                       phone_number)
        self._data[phone_number] = {'fido_dollar': fido_dollar}
    # Per-number usage, merged into each per-number dict.
    for phone_number in self._phone_numbers:
        usage = yield from self._get_usage(account_number, phone_number)
        self._data[phone_number].update(usage)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(command, abort=True, capture=False, verbose=False, echo=False, stream=None, ):
    """
    Run a command locally.

    Arguments:
        command: a command to execute.
        abort: If True, a non-zero return code will trigger an exception.
        capture: If True, returns the output of the command. If False,
            returns a subprocess result.
        echo: if True, prints the command before executing it.
        verbose: If True, prints the output of the command.
        stream: If set, stdout/stderr will be redirected to the given
            stream. Ignored if `capture` is True.
    """

    # Default to the process stdout when no explicit stream is given.
    stream = stream or sys.stdout
    if echo:
        out = stream
        out.write(u'$ %s' % command)
    # Capture stdout and stderr in the same stream
    command = u'%s 2>&1' % command
    if verbose:
        # Let the child write straight to the target stream.
        out = stream
        err = stream
    else:
        # Collect the output through pipes so it can be inspected below.
        out = subprocess.PIPE
        err = subprocess.PIPE
    process = subprocess.Popen(
        command,
        shell=True,
        stdout=out,
        stderr=err,
    )
    # propagate SIGTERM to all child processes within
    # the process group. this prevents subprocesses from
    # being orphaned when the current process is terminated
    signal.signal(
        signal.SIGTERM,
        make_terminate_handler(process)
    )
    # Wait for the process to complete
    stdout, _ = process.communicate()
    stdout = stdout.strip() if stdout else ''
    # NOTE(review): the `unicode` builtin implies this module targets
    # Python 2 -- confirm before running under Python 3.
    if not isinstance(stdout, unicode):
        stdout = stdout.decode('utf-8')
    if abort and process.returncode != 0:
        # Include the captured output in the error report only when
        # there is something to show.
        message = (
            u'Error #%d running "%s"%s' % (
                process.returncode,
                command,
                ':\n====================\n'
                '%s\n'
                '====================\n' % (
                    stdout
                ) if stdout else ''
            )
        )
        raise Exception(message)
    if capture:
        return stdout
    else:
        return process
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_message(base):
    """
    Given a MailBase, this will construct a MIME part that is
    canonicalized for use with the Python email API.
    """

    ctype, ctparams = base.get_content_type()

    # Default the content type: multipart when there are child parts,
    # plain text otherwise.
    if not ctype:
        if base.parts:
            ctype = 'multipart/mixed'
        else:
            ctype = 'text/plain'

    maintype, subtype = ctype.split('/')
    is_text = maintype == 'text'
    is_multipart = maintype == 'multipart'

    # A part that carries children must be a multipart container.
    if base.parts and not is_multipart:
        raise RuntimeError(
            'Content type should be multipart, not %r' % ctype
        )

    body = base.get_body()
    ctenc = base.get_transfer_encoding()
    charset = ctparams.get('charset')

    if is_multipart:
        out = MIMEMultipart(subtype, **ctparams)
    else:
        out = MIMENonMultipart(maintype, subtype, **ctparams)

    if ctenc:
        out['Content-Transfer-Encoding'] = ctenc

    if isinstance(body, text_type):
        # Pick a charset when none was declared: sniff the cheapest one
        # for text parts, default to utf-8 otherwise.
        if not charset:
            if is_text:
                charset, _ = best_charset(body)
            else:
                charset = 'utf-8'
        if PY2:
            body = body.encode(charset)
        else:  # pragma: no cover
            body = body.encode(charset, 'surrogateescape')

    if body is not None:
        if ctenc:
            body = transfer_encode(ctenc, body)
        if not PY2:  # pragma: no cover
            body = body.decode(charset or 'ascii', 'replace')
        out.set_payload(body, charset)

    for k in base.keys():  # returned sorted
        value = base[k]
        # Skip empty header values rather than emitting blank headers.
        if not value:
            continue
        out[k] = value

    cdisp, cdisp_params = base.get_content_disposition()

    if cdisp:
        out.add_header('Content-Disposition', cdisp, **cdisp_params)

    # go through the children
    for part in base.parts:
        sub = to_message(part)
        out.attach(sub)

    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_message(self):
    """
    Returns raw email.Message instance.

    Validates message first.
    """

    self.validate()

    bodies = [(self.body, 'text/plain'), (self.html, 'text/html')]

    # Normalize each body slot to a MailBase part (or None).
    for idx, (val, content_type) in enumerate(bodies):
        if val is None:
            bodies[idx] = None
        elif isinstance(val, Attachment):
            bodies[idx] = val.to_mailbase(content_type)
        else:
            # presumed to be a textual val
            attachment = Attachment(
                data=val,
                content_type=content_type,
                transfer_encoding='quoted-printable',
                disposition='inline'
            )
            bodies[idx] = attachment.to_mailbase(content_type)
    body, html = bodies

    base = MailBase([
        ('To', ', '.join(self.recipients)),
        ('From', self.sender),
        ('Subject', self.subject),
    ])

    # base represents the outermost mime part; it will be one of the
    # following types:
    #
    # - a multipart/mixed type if there are attachments. this
    #   part will contain a single multipart/alternative type if there
    #   is both an html part and a plaintext part (the alternative part
    #   will contain both the text and html), it will contain
    #   a single text/plain part if there is only a plaintext part,
    #   or it will contain a single text/html part if there is only
    #   an html part. it will also contain N parts representing
    #   each attachment as children of the base mixed type.
    #
    # - a multipart/alternative type if there are no attachments but
    #   both an html part and a plaintext part. it will contain
    #   a single text/plain part if there is only a plaintext part,
    #   or it will contain a single text/html part if there is only
    #   an html part.
    #
    # - a text/plain type if there is only a plaintext part
    #
    # - a text/html type if there is only an html part
    if self.cc:
        base['Cc'] = ', '.join(self.cc)
    if self.extra_headers:
        base.update(dict(self.extra_headers))

    if self.attachments:
        # Attachments force a multipart/mixed envelope; the body parts
        # live in a child container.
        base.set_content_type('multipart/mixed')
        altpart = MailBase()
        base.attach_part(altpart)
    else:
        altpart = base

    if body and html:
        altpart.set_content_type('multipart/alternative')
        altpart.set_body(None)
        # Per RFC2046, HTML part comes last in multipart/alternative
        altpart.attach_part(body)
        altpart.attach_part(html)
    elif body is not None:
        altpart.merge_part(body)
    elif html is not None:
        altpart.merge_part(html)

    for attachment in self.attachments:
        attachment_mailbase = attachment.to_mailbase()
        base.attach_part(attachment_mailbase)

    return to_message(base)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_bad_headers(self):
    """
    Checks for bad headers, i.e. newlines embedded in the subject,
    sender or recipient addresses (header injection).

    :return: True when any header value contains CR or LF.
    """

    values = [self.subject, self.sender]
    values.extend(self.send_to)
    values.extend(dict(self.extra_headers).values())
    return any('\r' in value or '\n' in value for value in values)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self):
    """
    Checks if message is valid and raises appropriate exception.

    :raises InvalidMessage: when recipients, body or sender are missing.
    :raises BadHeaders: when a header value embeds a newline.
    """

    if not (self.recipients or self.cc or self.bcc):
        raise InvalidMessage("No recipients have been added")

    if not (self.body or self.html):
        raise InvalidMessage("No body has been set")

    if not self.sender:
        raise InvalidMessage("No sender address has been set")

    if self.is_bad_headers():
        raise BadHeaders
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_settings(cls, settings, prefix='mail.'):
    """
    Create a new instance of 'DebugMailer' from settings dict.

    :param settings: a settings dict-like
    :param prefix: prefix separating 'tgext.mailer' settings
    :raises ValueError: when the top level directory option is missing.
    """

    top_level_directory = (settings or {}).get(prefix + 'top_level_directory')
    if top_level_directory is None:
        raise ValueError("DebugMailer: must specify "
                         "'%stop_level_directory'" % prefix)
    return cls(top_level_directory)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _send(self, message, fail_silently=False):
"""Save message to a file for debugging """ |
seeds = '1234567890qwertyuiopasdfghjklzxcvbnm'
file_part1 = datetime.now().strftime('%Y%m%d%H%M%S')
file_part2 = ''.join(sample(seeds, 4))
filename = join(self.tld, '%s_%s.msg' % (file_part1, file_part2))
with open(filename, 'w') as fd:
fd.write(str(message.to_message())) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_settings(cls, settings, prefix='mail.'):
    """
    Create a new instance of 'Mailer' from settings dict.

    :param settings: a settings dict-like
    :param prefix: prefix separating 'tgext.mailer' settings
    """

    settings = settings or {}
    option_names = ('host', 'port', 'username',
                    'password', 'tls', 'ssl', 'keyfile',
                    'certfile', 'queue_path', 'debug', 'default_sender')
    kwarg_names = [prefix + name for name in option_names]
    prefix_length = len(prefix)
    # Keep only the recognized, prefixed settings, stripping the prefix.
    kwargs = {key[prefix_length:]: settings[key]
              for key in settings.keys() if key in kwarg_names}
    # 'tls' and 'ssl' may arrive as strings; normalize them to booleans.
    for flag in ('tls', 'ssl'):
        value = kwargs.get(flag)
        if value:
            kwargs[flag] = asbool(value)
    return cls(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_immediately(self, message, fail_silently=False):
    """
    Send a message immediately, outside the transaction manager.

    If there is a connection error to the mail server this will have to
    be handled manually. However if you pass ``fail_silently`` the
    error will be swallowed.

    :versionadded: 0.3
    :param message: a 'Message' instance.
    :param fail_silently: silently handle connection errors.
    """

    try:
        return self.smtp_mailer.send(*self._message_args(message))
    except smtplib.socket.error:
        # Swallow connection errors only when explicitly requested.
        if not fail_silently:
            raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_to_queue(self, message):
    """
    Add a message to a maildir queue.

    In order to handle this, the setting 'mail.queue_path' must be
    provided and must point to a valid maildir.

    :param message: a 'Message' instance.
    """

    if not self.queue_delivery:
        raise RuntimeError("No queue_path provided")
    return self.queue_delivery.send(*self._message_args(message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_seq(obj):
    """
    Check if an object is a sequence: anything indexable or iterable
    that is neither a string nor a dict.
    """

    if is_str(obj) or is_dict(obj):
        return False
    return hasattr(obj, "__getitem__") or hasattr(obj, "__iter__")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(dst, src):
    """
    Recursively update values in dst from src.

    Unlike the builtin dict.update() function, this method will descend
    into nested dicts, updating all nested values.

    Arguments:
        dst (dict): Destination dict.
        src (dict): Source dict.

    Returns:
        dict: dst updated with entries from src.
    """

    for key, value in src.items():
        if isinstance(value, Mapping):
            # Merge nested mappings instead of overwriting them.
            dst[key] = update(dst.get(key, {}), value)
        else:
            dst[key] = value
    return dst
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dict_values(src):
    """
    Recursively get values in dict.

    Unlike the builtin dict.values() function, this method will descend
    into nested dicts, yielding all nested values.

    Arguments:
        src (dict): Source dict.

    Returns:
        generator yielding the values.
    """

    for value in src.values():
        if isinstance(value, dict):
            # Recurse into nested dicts and flatten their values.
            for nested_value in dict_values(value):
                yield nested_value
        else:
            yield value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_path_package(thepath):
    """
    Takes a file system path and returns the module object of the
    python package the said path belongs to. If the said path can not
    be determined, it returns None.
    """

    package_name = find_path_package_name(thepath)
    if not package_name:
        return None
    # On Python 2, __import__ requires byte-string entries in fromlist.
    fromlist = b'' if six.PY2 else ''
    return __import__(package_name, globals(), locals(), [fromlist])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_path_package_name(thepath):
    """
    Takes a file system path and returns the name of the python package
    the said path belongs to. If the said path can not be determined,
    it returns None.
    """

    # Walk up the directory tree, remembering the top-most python
    # module seen so far; stop once a non-module ancestor is reached
    # after at least one module was found, or at the filesystem root.
    module_found = False
    last_module_found = None
    continue_ = True
    while continue_:
        module_found = is_path_python_module(thepath)
        next_path = path.dirname(thepath)
        # At the filesystem root dirname() stops changing the path.
        if next_path == thepath:
            continue_ = False
        if module_found:
            init_names = ['__init__%s' % suffix.lower() for suffix in _py_suffixes]
            # For an __init__ file the candidate package is its parent
            # directory, otherwise the path's own basename.
            if path.basename(thepath).lower() in init_names:
                last_module_found = path.basename(path.dirname(thepath))
            else:
                last_module_found = path.basename(thepath)
        if last_module_found and not module_found:
            # Crossed the package boundary: the previous candidate wins.
            continue_ = False
        thepath = next_path
    return last_module_found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_path_python_module(thepath):
    """
    Given a path, find out if the path is a python module or is inside
    a python module.
    """

    thepath = path.normpath(thepath)

    # A file is a python module when its extension is a python suffix.
    if path.isfile(thepath):
        extension = path.splitext(thepath)[1]
        return extension in _py_suffixes

    # A directory is a python package when it hosts an __init__ file
    # with any of the known python suffixes.
    if path.isdir(thepath):
        return any(path.isfile(path.join(thepath, '__init__%s' % suffix))
                   for suffix in _py_suffixes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_files(root, pattern):
    """
    Find all files matching the glob pattern recursively.

    :param root: string
    :param pattern: string
    :return: list of file paths relative to root
    """

    matches = []
    for dirpath, _dirnames, filenames in os.walk(root):
        matches += [os.path.join(dirpath, name)
                    for name in fnmatch.filter(filenames, pattern)]
    return matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_directories(root, pattern):
    """
    Find all directories matching the glob pattern recursively.

    :param root: string
    :param pattern: string
    :return: list of dir paths relative to root
    """

    matches = []
    for dirpath, dirnames, _filenames in os.walk(root):
        matches += [os.path.join(dirpath, name)
                    for name in fnmatch.filter(dirnames, pattern)]
    return matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def posargs_limiter(func, *args):
    """
    Takes a function and positional arguments and calls the function
    with only the number of positional arguments it is expecting.

    :param func: callable to invoke.
    :param args: candidate positional arguments.
    :return: whatever ``func`` returns.
    """

    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec when it exists, falling back for Python 2.
    argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    posargs = argspec(func)[0]
    length = len(posargs)
    if inspect.ismethod(func):
        # Bound methods implicitly receive self; don't count it.
        length -= 1
    if length == 0:
        return func()
    return func(*args[0:length])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose(*functions):
    """
    Function composition on a series of functions.

    Remember that function composition runs right-to-left:
    `f . g . h = f(g(h(x)))`. As a unix pipeline, it would be written:
    `h | g | f`.
    """

    def composed(value):
        # Apply the functions right-to-left, threading the value
        # through; with no functions this is the identity.
        for function in reversed(functions):
            value = function(value)
        return value
    return composed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def first_where(pred, iterable, default=None):
    """
    Returns the first element in an iterable that meets the given
    predicate.

    :param default: is the default value to use if the predicate
        matches none of the elements.
    """

    for item in iterable:
        if pred(item):
            return item
    return default
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partition_iter(pred, iterable):
    """
    Partitions an iterable with a predicate into two iterables, one
    with elements satisfying the predicate and one with elements that
    do not satisfy it.

    :returns: a tuple (satisfiers, unsatisfiers).
    """

    # Duplicate the stream so each output can be consumed independently.
    satisfiers_src, unsatisfiers_src = itertools.tee(iterable, 2)
    satisfiers = (item for item in satisfiers_src if pred(item))
    unsatisfiers = (item for item in unsatisfiers_src if not pred(item))
    return satisfiers, unsatisfiers
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partition_list(pred, iterable):
    """
    Partitions an iterable with a predicate into two lists, one with
    elements satisfying the predicate and one with elements that do
    not satisfy it.

    .. note: this just converts the results of partition_iter to lists
       for you; in most cases using `partition_iter` is a better option.

    :returns: a tuple (satisfiers, unsatisfiers).
    """

    satisfiers, unsatisfiers = partition_iter(pred, iterable)
    return list(satisfiers), list(unsatisfiers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_every(n, iterable):
    """
    Returns a generator that splits an iterable into n-sized chunks.
    The last chunk may have less than n elements.

    See http://stackoverflow.com/a/22919323/503377.
    """

    iterator = iter(iterable)
    chunks = (list(itertools.islice(iterator, n)) for _ in itertools.count())
    # Stop at the first empty (falsy) chunk, i.e. once the iterator is
    # exhausted.
    return itertools.takewhile(bool, chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unique(iterable, key=identity):
    """Yields all the unique values in an iterable maintaining order."""

    seen_keys = set()
    for element in iterable:
        element_key = key(element)
        if element_key in seen_keys:
            # Already yielded an element with this key: skip duplicates.
            continue
        seen_keys.add(element_key)
        yield element
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iteritems(self):
    """
    Sort and then iterate the dictionary.

    .. note:: Python 2 only -- relies on ``dict.iteritems()`` and on
       the positional ``cmp`` argument of ``sorted()``, both removed
       in Python 3.
    """

    # Sort using the instance-configured cmp/key/reverse settings.
    sorted_data = sorted(self.data.iteritems(), self.cmp, self.key,
                         self.reverse)
    for k, v in sorted_data:
        yield k, v
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _adapt_param(self, key, val):
    """
    Adapt the value if an adapter is defined for `key`; otherwise the
    value passes through untouched.

    :raises AnticipateParamError: when the adapter rejects the value.
    """

    if key not in self.param_adapters:
        return val

    try:
        return self.param_adapters[key](val)
    except (AdaptError, AdaptErrors, TypeError, ValueError) as exc:
        # Aggregate exceptions expose their sub-errors; plain ones are
        # wrapped in a single-element list.
        errors = exc.errors if hasattr(exc, 'errors') else [exc]
        raise AnticipateParamError(
            message='Input value %r for parameter `%s` does not match '
                    'anticipated type %r' % (type(val), key, self.params[key]),
            name=key,
            value=val,
            anticipated=self.params[key],
            errors=errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def input(self, *args, **kwargs):
    """
    Adapt the input and check for errors.

    Returns a tuple of adapted (args, kwargs) or raises
    AnticipateErrors wrapping every collected AnticipateParamError.
    """

    errors = []
    if args and self.arg_names:
        args = list(args)
        # Replace args inline that have adapters; izip pairs each
        # positional value with its declared parameter name.
        for i, (key, val) in enumerate(izip(self.arg_names, args)):
            try:
                args[i] = self._adapt_param(key, val)
            except AnticipateParamError as e:
                errors.append(e)
        args = tuple(args)
    if kwargs and self.params:
        # Adapt all adaptable keyword arguments in place.
        for key, val in kwargs.items():
            try:
                kwargs[key] = self._adapt_param(key, val)
            except AnticipateParamError as e:
                errors.append(e)
    if errors:
        # Surface every collected error at once rather than failing on
        # the first one.
        raise AnticipateErrors(
            message='Invalid input for %s' % self.func,
            errors=errors)
    return args, kwargs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output(self, result):
    """
    Adapts the result of a function based on the returns definition.

    :raises AnticipateErrors: when the result does not satisfy the
        declared return specification (or, in strict mode, is not None
        when nothing is declared).
    """

    if self.returns:
        try:
            return self._adapt_result(result)
        except AdaptErrors as exc:
            errors = exc.errors
        except AdaptError as exc:
            errors = [exc]
        raise AnticipateErrors(
            message='Return value %r does not match anticipated type %r'
                    % (type(result), self.returns),
            errors=errors)

    if self.strict:
        # Without a declared return type, strict mode accepts only None.
        if result is not None:
            raise AnticipateErrors(
                message='Return value %r does not match anticipated value '
                        'of None' % type(result),
                errors=None)
        return None

    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self, value):
    """
    Passes the value to FileField and resizes the image at the path the
    parent returns if needed.
    """

    cleaned_path = super(Image, self).clean(value)
    # Only touch the file when a target size is configured.
    if cleaned_path and self.size:
        self.resize_image(join(self.base_path, cleaned_path))
    return cleaned_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_dimensions(image_size, desired_size):
    """
    Return the tuple with the arguments to pass to Image.crop.

    If the image is smaller than the desired size in both dimensions,
    return None (nothing to do). Otherwise crop around the (truncated)
    center: each cropped axis starts at floor(current / 2) minus
    ceil(target / 2).

    :param image_size: (width, height) of the source image.
    :param desired_size: (width, height) wanted.
    :return: (x, y, width, height) tuple or None.
    """

    current_x, current_y = image_size
    target_x, target_y = desired_size

    if current_x < target_x and current_y < target_y:
        return None

    if current_x > target_x:
        # Center horizontally, then back off by half the target width.
        offset_x = floor(current_x / 2) - ceil(target_x / 2)
        width = target_x
    else:
        offset_x = 0
        width = current_x

    if current_y > target_y:
        # Same centering logic on the vertical axis.
        offset_y = floor(current_y / 2) - ceil(target_y / 2)
        height = target_y
    else:
        offset_y = 0
        height = current_y

    return (int(offset_x), int(offset_y), width, height)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def options(self, *args, **kwargs):
    """
    Default OPTIONS response.

    If the 'cors' option is True, will respond with an empty response
    and set the 'Access-Control-Allow-Headers' and
    'Access-Control-Allow-Methods' headers.
    """

    if getattr(options, 'cors', False):
        self.set_header('Access-Control-Allow-Headers',
                        'Content-Type, Authorization, '
                        'Accept, X-Requested-With')
        self.set_header('Access-Control-Allow-Methods',
                        'OPTIONS, TRACE, GET, HEAD, POST, '
                        'PUT, PATCH, DELETE')
    self.finish()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_error(self, status_code, **kwargs):
    """Override ``write_error`` so every error is emitted as JSON.

    :param status_code: the response's status code, e.g. 500
    """
    http_error = _get_http_error(kwargs)
    if http_error:
        payload = self._error_template(
            status_code, http_error.errors, http_error.source)
        self.finish(payload)
        return
    # Unhandled exception path.  We have to fall back on the internal
    # self._reason: newer tornado always sets it, while older versions
    # only pass a ``reason`` kwarg down from ``send_error`` without
    # storing it on the instance.
    fallback_source = kwargs.get('source', getattr(options, 'name', None))
    reason = kwargs.get('reason', self._reason)
    self.finish(self._error_template(status_code, reason, fallback_source))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _error_template(cls, status_code, errors, source=None):
    """Construct a JSON error response body.

    :param status_code: the http status code
    :param errors: string, list of error strings, or a pre-built
        ``{'errors': [...]}`` mapping
    :param source: source of the error, stamped on entries lacking one
    :returns: dictionary, e.g.
        {'status': 400,
         'errors': [{'source': 'accounts', 'message': 'errormsg1'},
                    {'source': 'accounts', 'message': 'errormsg2'}]}
    """
    # Bug fix: ``basestring`` only exists on Python 2 and made this
    # method raise NameError on Python 3; resolve the string type once.
    try:
        string_types = basestring  # noqa: F821 - Python 2
    except NameError:
        string_types = str  # Python 3
    # This handles unhandled exceptions, which arrive as a bare message.
    if isinstance(errors, string_types):
        errors_out = {'errors': [{'message': errors}]}
    elif isinstance(errors, (list, tuple)):
        errors_out = {'errors': [{'message': e} for e in errors]}
    else:
        errors_out = errors
    errors_out['status'] = status_code
    for error in errors_out['errors']:
        if not error.get('source'):
            error['source'] = source
    logging.error(json.dumps(errors_out))
    return errors_out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_json_body(self, required=None, validators=None):
    """Return the decoded JSON request body.

    :param required: optional list of keys that must be present in the
        body; a 400 HTTPError is raised if any are missing
    :param validators: optional mapping of body items to synchronous
        validation callables that return a boolean and never raise
    :raises: HTTPError
    """
    content_type = self.request.headers.get(
        'Content-Type', 'application/json')
    if 'application/json' not in content_type.split(';'):
        raise HTTPError(415, 'Content-Type should be application/json')

    raw_body = self.request.body
    if not raw_body:
        message = 'Request body is empty'
        logging.warning(message)
        raise HTTPError(400, message)

    try:
        parsed = json.loads(raw_body)
    except (ValueError, TypeError):
        message = 'Error parsing JSON'
        logging.warning(message)
        raise HTTPError(400, message)

    if required:
        _check_required(parsed, required)
    if validators:
        _validate(parsed, validators)
    return parsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_token(self, token, requested_access):
    """Check that the token bearer may access the resource.

    Coroutine (presumably decorated with ``gen.coroutine`` above this
    chunk — note the trailing ``raise Return(...)``).

    :param token: Access token
    :param requested_access: the access level the client has requested
    :returns: boolean
    """
    auth_api = API(options.url_auth,
                   auth_username=options.service_id,
                   auth_password=options.client_secret,
                   ssl_options=ssl_server_options())
    request_headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept': 'application/json',
    }
    payload = urllib.urlencode({'token': token,
                                'requested_access': requested_access})
    auth_api.auth.verify.prepare_request(headers=request_headers,
                                         request_timeout=180)
    try:
        verdict = yield auth_api.auth.verify.post(body=payload)
    except tornado.httpclient.HTTPError as ex:
        # Must become a tornado.web.HTTPError for the server to
        # handle it correctly.
        logging.exception(ex.message)
        raise HTTPError(500, 'Internal Server Error')
    raise Return(verdict['has_access'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(self):
    """Validate the provided token when OAuth verification is enabled.

    Coroutine (contains a ``yield``; presumably decorated with
    ``gen.coroutine`` above this chunk).

    :raise: HTTPError if no token is supplied or access is denied
    """
    requested_access = self.endpoint_access(self.request.method)
    use_oauth = getattr(options, 'use_oauth', None)
    if not use_oauth or requested_access is self.UNAUTHENTICATED_ACCESS:
        return
    auth_header = self.request.headers.get('Authorization', '')
    token = auth_header.split(' ')[-1]
    if not token:
        raise HTTPError(401, 'OAuth token not provided')
    has_access = yield self.verify_token(token, requested_access)
    if not has_access:
        raise HTTPError(403, "'{}' access not granted.".format(requested_access))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_fast(xmlfile, element_name, attrnames, warn=False, optional=False):
    """Parse the given attrnames from all elements with element_name.

    @Note: The element must be on its own line and the attributes must
    appear in the given order (unless *optional* is set, which matches
    each attribute independently and yields ``None`` for absent ones).
    @Example: parse_fast('plain.edg.xml', 'edge', ['id', 'speed'])
    """
    prefixedAttrnames = [_prefix_keyword(a, warn) for a in attrnames]
    if optional:
        pattern = ''.join(['<%s' % element_name] +
                          ['(\\s+%s="(?P<%s>[^"]*?)")?' % a
                           for a in zip(attrnames, prefixedAttrnames)])
    else:
        pattern = '.*'.join(['<%s' % element_name] +
                            ['%s="([^"]*)"' % attr for attr in attrnames])
    Record = namedtuple(element_name, prefixedAttrnames)
    reprog = re.compile(pattern)
    # Bug fix: the file handle was opened without ever being closed;
    # ``with`` releases it even if the consumer abandons the generator.
    with open(xmlfile) as xml_in:
        for line in xml_in:
            m = reprog.search(line)
            if m:
                if optional:
                    yield Record(**m.groupdict())
                else:
                    yield Record(*m.groups())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tracing(pattern=None, out=None):
    """Print executed lines to *out* (stdout by default).

    Context manager body (presumably decorated with
    ``@contextmanager`` above this chunk): installs a line tracer for
    the duration of the ``with`` block and always removes it on exit.
    """
    tracer = partial(trace_line, pattern)
    destination = sys.stdout if out is None else out
    with redirect_stdout(destination):
        sys.settrace(tracer)
        try:
            yield
        finally:
            # Guarantee the tracer is uninstalled even on error.
            sys.settrace(None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def override_params(opening_char='{', closing_char='}', separator_char='|'):
    """Override the module-level spin-syntax delimiter characters.

    @type opening_char: str
    @param opening_char: Opening character. Default: '{'
    @type closing_char: str
    @param closing_char: Closing character. Default: '}'
    @type separator_char: str
    @param separator_char: Separator char. Default: '|'
    """
    global char_separator, char_opening, char_closing
    char_opening, char_closing, char_separator = (
        opening_char, closing_char, separator_char)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unique(text):
    """Return one unique text generated from spin syntax.

    @type text: str
    @param text: Text written using spin syntax.
    @return: A unique text, e.g.
        'The quick red fox jumped over the lazy dog'
    @raise Exception: if the spin syntax in *text* is malformed
    """
    valid, error = _is_correct(text)
    if not valid:
        raise Exception(error)
    results = []
    _all_unique_texts(text, results)
    return results[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _all_unique_texts(text, final):
    """Expand spin-syntax *text*, accumulating results into *final*.

    @type text: str
    @param text: Text written using spin syntax
    @type final: list
    @param final: list the resulting texts are appended to (in place)
    @return: Nothing; the result is in *final*.

    NOTE(review): each recursion resolves one innermost brace group via
    ``_choices`` and restarts the scan on the substituted text.
    """
    if char_opening not in text:
        # Fully expanded: record it once.
        if text not in final:
            final.append(text)
        return
    open_marks = []
    open_positions = []
    for pos, ch in enumerate(text):
        if ch == char_closing:
            if open_marks[-1] == char_opening:
                start = open_positions.pop()
                block_text = '' if pos == start + 1 else text[start:pos + 1]
                # Pick a combination for this group, substitute, recurse.
                chosen = next(_choices(block_text))
                _all_unique_texts(text.replace(block_text, chosen), final)
                return
        elif ch == char_opening:
            open_marks.append(ch)
            open_positions.append(pos)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_correct(text):
    """Check whether *text* uses balanced spin syntax.

    @type text: str
    @param text: Text written using spin syntax
    @rtype: tuple
    @return: ``(is_correct, error)`` — the boolean result and the error
        message ('' when correct).
    """
    error = ''
    stack = []
    for c in text:
        if c == char_opening:
            stack.append(c)
        elif c == char_closing:
            # Bug fix: the original tested ``stack.count == 0``, which
            # compares a bound method to 0 and is always False, so an
            # unmatched closing char crashed ``stack.pop()`` with
            # IndexError instead of reporting the syntax error.
            if not stack:
                error = 'Syntax incorrect. Found "}" before "{"'
                break
            last_char = stack.pop()
            if last_char != char_opening:
                error = 'Syntax incorrect. Found "}" before "{"'
                break
    if len(stack) > 0:
        error = 'Syntax incorrect. Some "{" were not closed'
    return not error, error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visit_FunctionBody(self, node):
    """Visitor for `FunctionBody` AST node.

    Executes the body's children in order; a `ReturnStatement` ends the
    body with its value, and an inner return surfacing from an
    `IfStatement`/`WhileStatement` (signalled by a non-None visit
    result) does the same.  Falls through to `NoneType()`.
    """
    for statement in node.children:
        value = self.visit(statement)
        if isinstance(statement, ReturnStatement):
            return value
        if (isinstance(statement, (IfStatement, WhileStatement))
                and value is not None):
            return value
    return NoneType()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def visit_WhileStatement(self, node):
    """Visitor for `WhileStatement` AST node.

    Re-evaluates the condition before each pass; a non-None result from
    the compound body is an inner return and ends the loop.
    """
    result = None
    while result is None and self.visit(node.condition):
        result = self.visit(node.compound)
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.