text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(what, calc_id=-1, other_id=None, webapi=False): """ Generic plotter for local and remote calculations. """
# Generic plotter for local and remote calculations (body of plot()).
# `what` has the form "<prefix>?<query>": the prefix selects the
# make_figure_* helper and the query string is validated per prefix.
if '?' not in what:
    raise SystemExit('Missing ? in %r' % what)
prefix, rest = what.split('?', 1)
# NOTE(review): membership in a string also matches substrings
# (e.g. 'hmaps' in 'source_geom hcurves hmaps uhs'); presumably the
# known prefixes cannot produce spurious matches — confirm
assert prefix in 'source_geom hcurves hmaps uhs', prefix
if prefix in 'hcurves hmaps' and 'imt=' not in rest:
    raise SystemExit('Missing imt= in %r' % what)
elif prefix == 'uhs' and 'imt=' in rest:
    raise SystemExit('Invalid IMT in %r' % what)
elif prefix in 'hcurves uhs' and 'site_id=' not in rest:
    what += '&site_id=0'  # default to the first site
if webapi:
    # remote extraction through the WebAPI
    xs = [WebExtractor(calc_id)]
    if other_id:
        xs.append(WebExtractor(other_id))
else:
    # local extraction from the datastore
    xs = [Extractor(calc_id)]
    if other_id:
        xs.append(Extractor(other_id))
make_figure = globals()['make_figure_' + prefix]
plt = make_figure(xs, what)
plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types): """ Call the get mean and stddevs of the GMPE for the respective IMT """
# Delegate to the GMPE registered for this IMT in self.kwargs
return self.kwargs[str(imt)].get_mean_and_stddevs(
    sctx, rctx, dctx, imt, stddev_types)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_ac(calc_id): """ Aggregate loss curves plotter. """
# read the hazard data dstore = util.read(calc_id) agg_curve = dstore['agg_curve-rlzs'] plt = make_figure(agg_curve) plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_stddevs(self, C, distance, stddev_types): """ Returns the total standard deviation, which is a function of distance """
# Total standard deviation as a function of distance (body of
# _get_stddevs); only StdDev.TOTAL is actually computed.
stddevs = []
for stddev_type in stddev_types:
    assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
    if stddev_type == const.StdDev.TOTAL:
        # distance-dependent sigma; s1, s2, s3 come from the
        # coefficient table C
        sigma = C["s1"] + (C["s2"] / (1.0 + ((distance / C["s3"]) ** 2.)))
        # broadcast the scalar sigma to the shape of `distance`
        stddevs.append(sigma + np.zeros_like(distance))
return stddevs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_mean(self, C, mag, rrup): """ Compute mean value according to equation 18, page 32. """
# see table 3, page 14 R1 = 90. R2 = 150. # see equation 19, page 32 m_ref = mag - 4 r1 = R1 + C['c8'] * m_ref r2 = R2 + C['c11'] * m_ref assert r1 > 0 assert r2 > 0 g0 = np.log10( np.sqrt(np.minimum(rrup, r1) ** 2 + (1 + C['c5'] * m_ref) ** 2) ) g1 = np.maximum(np.log10(rrup / r1), 0) g2 = np.maximum(np.log10(rrup / r2), 0) mean = (C['c0'] + C['c1'] * m_ref + C['c2'] * m_ref ** 2 + (C['c3'] + C['c4'] * m_ref) * g0 + (C['c6'] + C['c7'] * m_ref) * g1 + (C['c9'] + C['c10'] * m_ref) * g2) # convert from log10 to ln and units from cm/s2 to g mean = np.log((10 ** mean) * 1e-2 / g) return mean
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract(what, calc_id, webapi=True): """ Extract an output from the datastore and save it into an .hdf5 file. By default uses the WebAPI, otherwise the extraction is done locally. """
# Extract an output and save it into an .hdf5 file (body of extract);
# uses the WebAPI by default, local extraction otherwise.
with performance.Monitor('extract', measuremem=True) as mon:
    if webapi:
        obj = WebExtractor(calc_id).get(what)
    else:
        obj = Extractor(calc_id).get(what)
    # build a filesystem-safe name from the extraction key
    fname = '%s_%d.hdf5' % (what.replace('/', '-').replace('?', '-'),
                            calc_id)
    obj.save(fname)
    print('Saved', fname)
if mon.duration > 1:
    # report timings only for slow extractions
    print(mon)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_regionalisation(self, strain_model): ''' Gets the tectonic region type for every element inside the strain model :paramm strain_model: Input strain model as instance of openquake.hmtk.strain.geodetic_strain.GeodeticStrain :returns: Strain model with complete regionalisation ''' self.strain = strain_model self.strain.data['region'] = np.array( ['IPL' for _ in range(self.strain.get_number_observations())], dtype='|S13') self.strain.data['area'] = np.array( [np.nan for _ in range(self.strain.get_number_observations())]) regional_model = self.define_kreemer_regionalisation() for polygon in regional_model: self._point_in_tectonic_region(polygon) return self.strain
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def define_kreemer_regionalisation(self, north=90., south=-90., east=180., west=-180.): ''' Applies the regionalisation defined according to the regionalisation typology of Corne Kreemer ''' '''Applies the regionalisation of Kreemer (2003) :param input_file: Filename (str) of input file contraining Kreemer regionalisation :param north: Northern limit (decimal degrees)for consideration (float) :param south: Southern limit (decimal degrees)for consideration (float) :param east: Eastern limit (decimal degrees)for consideration (float) :param west: Western limit (decimal degrees)for consideration (float) :returns: List of polygons corresonding to the Kreemer cells. ''' input_data = getlines(self.filename) kreemer_polygons = [] for line_loc, line in enumerate(input_data): if '>' in line[0]: polygon_dict = {} # Get region type (char) and area (m ^ 2) from header primary_data = line[2:].rstrip('\n') primary_data = primary_data.split(' ', 1) polygon_dict['region_type'] = primary_data[0].strip(' ') polygon_dict['area'] = float(primary_data[1].strip(' ')) polygon_dict['cell'] = _build_kreemer_cell(input_data, line_loc) polygon_dict['long_lims'] = np.array([ np.min(polygon_dict['cell'][:, 0]), np.max(polygon_dict['cell'][:, 0])]) polygon_dict['lat_lims'] = np.array([ np.min(polygon_dict['cell'][:, 1]), np.max(polygon_dict['cell'][:, 1])]) polygon_dict['cell'] = None if polygon_dict['long_lims'][0] >= 180.0: polygon_dict['long_lims'] = \ polygon_dict['long_lims'] - 360.0 valid_check = [ polygon_dict['long_lims'][0] >= west, polygon_dict['long_lims'][1] <= east, polygon_dict['lat_lims'][0] >= south, polygon_dict['lat_lims'][1] <= north] if all(valid_check): kreemer_polygons.append(polygon_dict) return kreemer_polygons
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def urlextract(url, fname): """ Download and unzip an archive and extract the underlying fname """
# Download and unzip an archive, returning an open file object for
# `fname` (body of urlextract).
with urlopen(url) as f:
    data = io.BytesIO(f.read())  # buffer the whole archive in memory
with zipfile.ZipFile(data) as z:
    try:
        return z.open(fname)
    except KeyError:
        # for instance the ShakeMap ci3031111 has inside a file
        # data/verified_atlas2.0/reviewed/19920628115739/output/
        # uncertainty.xml
        # instead of just uncertainty.xml
        zinfo = z.filelist[0]
        if zinfo.filename.endswith(fname):
            return z.open(zinfo)
        else:
            raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def amplify_gmfs(imts, vs30s, gmfs): """ Amplify the ground shaking depending on the vs30s """
# Amplify the ground shaking depending on the vs30s (body of
# amplify_gmfs); gmfs is laid out as M consecutive blocks of N sites,
# hence the m * n + i flat index.
n = len(vs30s)
out = [amplify_ground_shaking(im.period, vs30s[i], gmfs[m * n + i])
       for m, im in enumerate(imts)
       for i in range(n)]
return numpy.array(out)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cholesky(spatial_cov, cross_corr): """ Decompose the spatial covariance and cross correlation matrices. :param spatial_cov: array of shape (M, N, N) :param cross_corr: array of shape (M, M) :returns: a triangular matrix of shape (M * N, M * N) """
def cholesky(spatial_cov, cross_corr):
    """
    Decompose the spatial covariance and cross correlation matrices.

    :param spatial_cov: array of shape (M, N, N)
    :param cross_corr: array of shape (M, M)
    :returns: a lower triangular matrix of shape (M * N, M * N) such
        that L @ L.T reconstructs the full covariance
    """
    M, N = spatial_cov.shape[:2]
    # Cholesky factor of each of the M spatial covariance blocks
    L = numpy.array([numpy.linalg.cholesky(spatial_cov[i])
                     for i in range(M)])
    LLT = []
    for i in range(M):
        # block row i of the full covariance: L_i L_j^T scaled by the
        # cross correlation between IMTs i and j
        row = [numpy.dot(L[i], L[j].T) * cross_corr[i, j]
               for j in range(M)]
        for j in range(N):
            singlerow = numpy.zeros(M * N)
            # NB: distinct index name here; the original shadowed the
            # outer loop variable `i`, which happened to be harmless
            # but was fragile and confusing
            for k in range(M):
                singlerow[k * N:(k + 1) * N] = row[k][j]
            LLT.append(singlerow)
    return numpy.linalg.cholesky(numpy.array(LLT))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_header(dtype): """ Convert a numpy nested dtype into a list of strings suitable as header of csv file. ['PGA:3', 'PGV:4'] ['A~PGA:3', 'A~PGV:4', 'B~PGA:3', 'B~PGV:4', 'idx:uint32'] """
header = _build_header(dtype, ()) h = [] for col in header: name = '~'.join(col[:-2]) numpytype = col[-2] shape = col[-1] coldescr = name if numpytype != 'float32' and not numpytype.startswith('|S'): coldescr += ':' + numpytype if shape: coldescr += ':' + ':'.join(map(str, shape)) h.append(coldescr) return h
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_composite_array(fname, sep=','): r""" Convert a CSV file with header into an ArrayWrapper object. [([0.1, 0.2, 0.3], [0.4, 0.5], [0.6])] """
with open(fname) as f: header = next(f) if header.startswith('#'): # the first line is a comment, skip it attrs = dict(parse_comment(header[1:])) header = next(f) else: attrs = {} transheader = htranslator.read(header.split(sep)) fields, dtype = parse_header(transheader) ts_pairs = [] # [(type, shape), ...] for name in fields: dt = dtype.fields[name][0] ts_pairs.append((dt.subdtype[0].type if dt.subdtype else dt.type, dt.shape)) col_ids = list(range(1, len(ts_pairs) + 1)) num_columns = len(col_ids) records = [] col, col_id = '', 0 for i, line in enumerate(f, 2): row = line.split(sep) if len(row) != num_columns: raise InvalidFile( 'expected %d columns, found %d in file %s, line %d' % (num_columns, len(row), fname, i)) try: record = [] for (ntype, shape), col, col_id in zip(ts_pairs, row, col_ids): record.append(_cast(col, ntype, shape, i, fname)) records.append(tuple(record)) except Exception as e: raise InvalidFile( 'Could not cast %r in file %s, line %d, column %d ' 'using %s: %s' % (col, fname, i, col_id, (ntype.__name__,) + shape, e)) return ArrayWrapper(numpy.array(records, dtype), attrs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_array(fname, sep=','): r""" Convert a CSV file without header into a numpy array of floats. [[[0.1 0.2] [0.3 0.4] [0.5 0.6]]] """
def read_array(fname, sep=','):
    r"""
    Convert a CSV file without header into a numpy array of floats.
    Each cell may contain several space-separated numbers, so the
    result is a 3-dimensional array (rows x columns x numbers).
    """
    def parse(line):
        # each `sep`-delimited cell holds one or more floats
        return [[float(tok) for tok in cell.split()]
                for cell in line.split(sep)]

    with open(fname) as f:
        return numpy.array([parse(line) for line in f])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self, names): """ Convert names into descriptions """
descrs = [] for name in names: mo = re.match(self.short_regex, name) if mo: idx = mo.lastindex # matching group index, starting from 1 suffix = self.suffix[idx - 1].replace(r':\|', ':|') descrs.append(mo.group(mo.lastindex) + suffix + name[mo.end():]) else: descrs.append(name) return descrs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, descrs): """ Convert descriptions into names """
# example: '(poe-[\d\.]+):float32' -> 'poe-[\d\.]+' names = [] for descr in descrs: mo = re.match(self.long_regex, descr) if mo: names.append(mo.group(mo.lastindex) + descr[mo.end():]) else: names.append(descr) return names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, data, fname, header=None): """ Save data on fname. :param data: numpy array or list of lists :param fname: path name :param header: header to use """
write_csv(fname, data, self.sep, self.fmt, header) self.fnames.add(getattr(fname, 'name', fname))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_block(self, data, dest): """ Save data on dest, which is file open in 'a' mode """
write_csv(dest, data, self.sep, self.fmt, 'no-header')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_site_coeffs(self, sites, imt): """ Extracts correct coefficients for each site from Table 5 on p. 208 for each site. :raises UserWarning: If vs30 is below limit for site class D, since "E- and F-type """
site_classes = self.get_nehrp_classes(sites) is_bedrock = self.is_bedrock(sites) if 'E' in site_classes: msg = ('Site class E and F not supported by %s' % type(self).__name__) warnings.warn(msg, UserWarning) a_1 = np.nan*np.ones_like(sites.vs30) a_2 = np.nan*np.ones_like(sites.vs30) sigma = np.nan*np.ones_like(sites.vs30) for key in self.COEFFS_NEHRP.keys(): indices = (site_classes == key) & ~is_bedrock a_1[indices] = self.COEFFS_NEHRP[key][imt]['a1'] a_2[indices] = self.COEFFS_NEHRP[key][imt]['a2'] sigma[indices] = self.COEFFS_NEHRP[key][imt]['sigma'] a_1[is_bedrock] = 0. a_2[is_bedrock] = 0. sigma[is_bedrock] = 0. return (a_1, a_2, sigma)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_nehrp_classes(self, sites): """ Site classification threshholds from Section 4 "Site correction coefficients" p. 205. Note that site classes E and F are not supported. """
classes = sorted(self.NEHRP_VS30_UPPER_BOUNDS.keys()) bounds = [self.NEHRP_VS30_UPPER_BOUNDS[item] for item in classes] bounds = np.reshape(np.array(bounds), (-1, 1)) vs30s = np.reshape(sites.vs30, (1, -1)) site_classes = np.choose((vs30s < bounds).sum(axis=0) - 1, classes) return site_classes.astype('object')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_valid_users(request): """" Returns a list of `users` based on groups membership. Returns a list made of a single user when it is not member of any group. """
users = [get_user(request)] if settings.LOCKDOWN and hasattr(request, 'user'): if request.user.is_authenticated: groups = request.user.groups.all() if groups: users = list(User.objects.filter(groups__in=groups) .values_list('username', flat=True)) else: # This may happen with crafted requests users = [] return users
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_acl_on(request): """ Returns `True` if ACL should be honorated, returns otherwise `False`. """
# Returns True if ACL should be honorated, False otherwise (body of
# get_acl_on).
acl_on = settings.ACL_ON
if settings.LOCKDOWN and hasattr(request, 'user'):
    # ACL is always disabled for superusers
    if request.user.is_superuser:
        acl_on = False
return acl_on
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def oq_server_context_processor(request): """ A custom context processor which allows injection of additional context variables. """
context = {} context['oq_engine_server_url'] = ('//' + request.META.get('HTTP_HOST', 'localhost:8800')) # this context var is also evaluated by the STANDALONE_APPS to identify # the running environment. Keep it as it is context['oq_engine_version'] = oqversion context['server_name'] = settings.SERVER_NAME return context
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_webserver_running(url="http://localhost:8800", max_retries=30): """ Returns True if a given URL is responding within a given timeout. """
def check_webserver_running(url="http://localhost:8800", max_retries=30):
    """
    Check if a given URL is responding.

    :param url: URL to poll with a HEAD request
    :param max_retries: maximum number of attempts, one per second
    :returns: True if the server answered 200 OK within `max_retries`
        attempts, False otherwise

    NB: the original implementation set ``success = True`` on any
    completed request (even a non-200 one) and, in that case, kept
    looping without incrementing ``retry`` — an infinite busy loop.
    It also used a bare ``except:`` which swallowed KeyboardInterrupt.
    """
    for _ in range(max_retries):
        try:
            # follow redirects so a 30x on the root still counts as up
            status = requests.head(url, allow_redirects=True).status_code
            if status == requests.codes.ok:
                return True
        except requests.RequestException:
            # server not (yet) reachable: fall through and retry
            pass
        sleep(1)
    logging.warning('Unable to connect to %s within %s retries'
                    % (url, max_retries))
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def export_csv(ekey, dstore): """ Default csv exporter for arrays stored in the output.hdf5 file :param ekey: export key :param dstore: datastore object :returns: a list with the path of the exported file """
# Default csv exporter for arrays stored in the output.hdf5 file
# (body of export_csv); returns the list of exported paths.
name = ekey[0] + '.csv'
try:
    # NOTE(review): `.value` is the legacy h5py dataset accessor,
    # removed in h5py 3.x in favour of `[()]` — confirm the pinned
    # h5py version before upgrading
    array = dstore[ekey[0]].value
except AttributeError:
    # this happens if the key correspond to a HDF5 group
    return []  # write a custom exporter in this case
if len(array.shape) == 1:  # vector: make it a single-column matrix
    array = array.reshape((len(array), 1))
return [write_csv(dstore.export_path(name), array)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def export_input_zip(ekey, dstore): """ Export the data in the `input_zip` dataset as a .zip file """
# Export the data in the `input_zip` dataset as a .zip file (body of
# export_input_zip).
dest = dstore.export_path('input.zip')
nbytes = dstore.get_attr('input/zip', 'nbytes')
zbytes = dstore['input/zip'].value
# when reading input_zip some terminating null bytes are truncated (for
# unknown reasons) therefore they must be restored
zbytes += b'\x00' * (nbytes - len(zbytes))
open(dest, 'wb').write(zbytes)
return [dest]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_point_geometry(node): """ Reads the node and returns the point geometry, upper depth and lower depth """
assert "pointGeometry" in node.tag for subnode in node.nodes: if "Point" in subnode.tag: # Position lon, lat = map(float, subnode.nodes[0].text.split()) point = Point(lon, lat) elif "upperSeismoDepth" in subnode.tag: upper_depth = float_(subnode.text) elif "lowerSeismoDepth" in subnode.tag: lower_depth = float_(subnode.text) else: # Redundent pass assert lower_depth > upper_depth return point, upper_depth, lower_depth
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_area_geometry(node): """ Reads an area geometry node and returns the polygon, upper depth and lower depth """
assert "areaGeometry" in node.tag for subnode in node.nodes: if "Polygon" in subnode.tag: crds = [float(x) for x in subnode.nodes[0].nodes[0].nodes[0].text.split()] polygon = Polygon([Point(crds[iloc], crds[iloc + 1]) for iloc in range(0, len(crds), 2)]) elif "upperSeismoDepth" in subnode.tag: upper_depth = float_(subnode.text) elif "lowerSeismoDepth" in subnode.tag: lower_depth = float_(subnode.text) else: # Redundent pass assert lower_depth > upper_depth return polygon, upper_depth, lower_depth
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_simple_fault_geometry(node): """ Reads a simple fault geometry node and returns an OpenQuake representation :returns: trace - Trace of fault as instance """
assert "simpleFaultGeometry" in node.tag for subnode in node.nodes: if "LineString" in subnode.tag: trace = linestring_node_to_line(subnode, with_depth=False) elif "dip" in subnode.tag: dip = float(subnode.text) elif "upperSeismoDepth" in subnode.tag: upper_depth = float(subnode.text) elif "lowerSeismoDepth" in subnode.tag: lower_depth = float(subnode.text) else: # Redundent pass assert lower_depth > upper_depth return trace, dip, upper_depth, lower_depth
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_complex_fault_geometry(node): """ Reads a complex fault geometry node and returns an """
assert "complexFaultGeometry" in node.tag intermediate_edges = [] for subnode in node.nodes: if "faultTopEdge" in subnode.tag: top_edge = linestring_node_to_line(subnode.nodes[0], with_depth=True) elif "intermediateEdge" in subnode.tag: int_edge = linestring_node_to_line(subnode.nodes[0], with_depth=True) intermediate_edges.append(int_edge) elif "faultBottomEdge" in subnode.tag: bottom_edge = linestring_node_to_line(subnode.nodes[0], with_depth=True) else: # Redundent pass return [top_edge] + intermediate_edges + [bottom_edge]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_mfd(node, taglist): """ Reads the node to return a magnitude frequency distribution """
if "incrementalMFD" in taglist: mfd = node_to_evenly_discretized( node.nodes[taglist.index("incrementalMFD")]) elif "truncGutenbergRichterMFD" in taglist: mfd = node_to_truncated_gr( node.nodes[taglist.index("truncGutenbergRichterMFD")]) else: mfd = None return mfd
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_nodal_planes(node): """ Parses the nodal plane distribution to a PMF """
# Parse the nodal plane distribution into a PMF (body of
# node_to_nodal_planes); returns None when the node is empty or when
# any plane has a missing/empty attribute.
if not len(node):
    return None
npd_pmf = []
for plane in node.nodes:
    if not all(plane.attrib[key] for key in plane.attrib):
        # One plane fails - return None
        return None
    npd = NodalPlane(float(plane.attrib["strike"]),
                     float(plane.attrib["dip"]),
                     float(plane.attrib["rake"]))
    # PMF items are (probability, value) pairs
    npd_pmf.append((float(plane.attrib["probability"]), npd))
return PMF(npd_pmf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_to_hdd(node): """ Parses the node to a hpyocentral depth distribution PMF """
if not len(node): return None hdds = [] for subnode in node.nodes: if not all([subnode.attrib[key] for key in ["depth", "probability"]]): return None hdds.append((float(subnode.attrib["probability"]), float(subnode.attrib["depth"]))) return PMF(hdds)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_mean(self, C, g, mag, hypo_depth, rrup, vs30, pga_rock, imt): """ Compute mean according to equation 1, page 1706. """
if hypo_depth > 100: hypo_depth = 100 delta = 0.00724 * 10 ** (0.507 * mag) R = np.sqrt(rrup ** 2 + delta ** 2) s_amp = self._compute_soil_amplification(C, vs30, pga_rock, imt) mean = ( # 1st term C['c1'] + C['c2'] * mag + # 2nd term C['c3'] * hypo_depth + # 3rd term C['c4'] * R - # 4th term g * np.log10(R) + # 5th, 6th and 7th terms s_amp ) return mean
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_soil_linear_factor(cls, pga_rock, imt): """ Compute soil linear factor as explained in paragraph 'Functional Form', page 1706. """
# Soil linear factor, paragraph 'Functional Form', page 1706 (body of
# _compute_soil_linear_factor). The masks below partition pga_rock
# into <=100, (100, 500) and >=500 ranges.
if imt.period >= 1:
    # long periods: no nonlinearity, factor is 1 everywhere
    return np.ones_like(pga_rock)
else:
    sl = np.zeros_like(pga_rock)
    pga_between_100_500 = (pga_rock > 100) & (pga_rock < 500)
    pga_greater_equal_500 = pga_rock >= 500
    is_SA_between_05_1 = 0.5 < imt.period < 1
    is_SA_less_equal_05 = imt.period <= 0.5
    if is_SA_between_05_1:
        sl[pga_between_100_500] = (1 - (1. / imt.period - 1) *
                                   (pga_rock[pga_between_100_500] -
                                    100) / 400)
        sl[pga_greater_equal_500] = 1 - (1. / imt.period - 1)
    # NB: period == 0 (PGA) already satisfies period <= 0.5, so the
    # extra check looks redundant; kept as-is
    if is_SA_less_equal_05 or imt.period == 0:
        sl[pga_between_100_500] = (1 -
                                   (pga_rock[pga_between_100_500] -
                                    100) / 400)
    sl[pga_rock <= 100] = 1
    return sl
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_stddevs(self, C, mag, stddev_types, sites): """ Return standard deviation as defined on page 29 in equation 8a,b,c and 9. """
num_sites = sites.vs30.size sigma_intra = np.zeros(num_sites) # interevent stddev tau = sigma_intra + C['tau'] # intraevent std (equations 8a-8c page 29) if mag < 5.0: sigma_intra += C['sigmaM6'] - C['sigSlope'] elif 5.0 <= mag < 7.0: sigma_intra += C['sigmaM6'] + C['sigSlope'] * (mag - 6) else: sigma_intra += C['sigmaM6'] + C['sigSlope'] std = [] for stddev_type in stddev_types: if stddev_type == const.StdDev.TOTAL: # equation 9 page 29 std += [np.sqrt(sigma_intra**2 + tau**2)] elif stddev_type == const.StdDev.INTRA_EVENT: std.append(sigma_intra) elif stddev_type == const.StdDev.INTER_EVENT: std.append(tau) return std
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_deltas(self, sites): """ Return delta's for equation 4 delta_C = 1 for site class C, 0 otherwise delta_D = 1 for site class D, 0 otherwise """
siteclass = sites.siteclass delta_C = np.zeros_like(siteclass, dtype=np.float) delta_C[siteclass == b'C'] = 1 delta_D = np.zeros_like(siteclass, dtype=np.float) delta_D[siteclass == b'D'] = 1 return delta_C, delta_D
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gen_outputs(self, riskinput, monitor, epspath=None, hazard=None): """ Group the assets per taxonomy and compute the outputs by using the underlying riskmodels. Yield one output per realization. :param riskinput: a RiskInput instance :param monitor: a monitor object used to measure the performance """
self.monitor = monitor hazard_getter = riskinput.hazard_getter if hazard is None: with monitor('getting hazard'): hazard_getter.init() hazard = hazard_getter.get_hazard() sids = hazard_getter.sids assert len(sids) == 1 with monitor('computing risk', measuremem=False): # this approach is slow for event_based_risk since a lot of # small arrays are passed (one per realization) instead of # a long array with all realizations; ebrisk does the right # thing since it calls get_output directly assets_by_taxo = get_assets_by_taxo(riskinput.assets, epspath) for rlzi, haz in sorted(hazard[sids[0]].items()): out = self.get_output(assets_by_taxo, haz, rlzi) yield out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_available_mfds():
    '''
    Returns a dictionary (built in sorted key order) with the
    available MFD classes — subclasses of BaseMFDfromSlip found in the
    openquake.hmtk.faults.mfd package — keyed by class name.

    NB: the original docstring said "GSIM classes", but the code below
    clearly collects magnitude-frequency distribution classes.
    '''
    mfds = {}
    # scan every .py module in this package directory
    for fname in os.listdir(os.path.dirname(__file__)):
        if fname.endswith('.py'):
            modname, _ext = os.path.splitext(fname)
            mod = importlib.import_module(
                'openquake.hmtk.faults.mfd.' + modname)
            # collect any BaseMFDfromSlip subclass defined or imported
            # by the module
            for cls in mod.__dict__.values():
                if inspect.isclass(cls) and issubclass(cls,
                                                       BaseMFDfromSlip):
                    mfds[cls.__name__] = cls
    return dict((k, mfds[k]) for k in sorted(mfds))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_config(config, data):
    '''
    Ensure the configuration dictionary carries all required
    parameters, filling in defaults where they are missing or empty.

    :param dict config: configuration dictionary (updated in place)
    :param data: catalogue-like mapping with a 'magnitude' array
    :returns: the completed configuration dictionary
    '''
    if not config.get('tolerance'):
        config['tolerance'] = 1E-5
    if not config.get('maximum_iterations', None):
        config['maximum_iterations'] = 1000
    # the minimum magnitude cannot be below the smallest observation
    smallest_mag = np.min(data['magnitude'])
    if config.get('input_mmin', 0) < smallest_mag:
        config['input_mmin'] = smallest_mag
    # avoid a null b-value, which would break later computations
    if fabs(config['b-value']) < 1E-7:
        config['b-value'] = 1E-7
    return config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disagg_outputs(value): """ Validate disaggregation outputs. For instance ['TRT', 'Mag_Dist'] ['TRT', 'Mag_Dist'] """
# Validate disaggregation outputs (body of disagg_outputs); accepts
# both comma- and space-separated lists and returns them as a list.
values = value.replace(',', ' ').split()
for val in values:
    # valid names are the keys of the disagg.pmf_map registry
    if val not in disagg.pmf_map:
        raise ValueError('Invalid disagg output: %s' % val)
return values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gsim(value): """ Convert a string in TOML format into a GSIM instance [BooreAtkinson2011] """
if not value.startswith('['): # assume the GSIM name value = '[%s]' % value [(gsim_name, kwargs)] = toml.loads(value).items() minimum_distance = float(kwargs.pop('minimum_distance', 0)) if gsim_name == 'FromFile': return FromFile() try: gsim_class = registry[gsim_name] except KeyError: raise ValueError('Unknown GSIM: %s' % gsim_name) gs = gsim_class(**kwargs) gs._toml = '\n'.join(line.strip() for line in value.splitlines()) gs.minimum_distance = minimum_distance return gs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compose(*validators): """ Implement composition of validators. For instance """
def compose(*validators):
    """
    Implement composition of validators: compose(f, g)(x) == f(g(x)).
    """
    def composed_validator(value):
        result = value
        # apply the validators right-to-left, like function composition
        for vld in validators[::-1]:
            result = vld(result)
        return result
    composed_validator.__name__ = 'compose(%s)' % ','.join(
        v.__name__ for v in validators)
    return composed_validator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def utf8(value): r""" Check that the string is UTF-8. Returns a decoded string. Traceback (most recent call last): """
def utf8(value):
    r"""
    Decode *value* from UTF-8 when it is a bytestring, otherwise
    return it unchanged; raise ValueError on invalid UTF-8 input.
    """
    if not isinstance(value, bytes):
        return value
    try:
        return value.decode('utf-8')
    except Exception:
        raise ValueError('Not UTF-8: %r' % value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def coordinates(value): """ Convert a non-empty string into a list of lon-lat coordinates. Traceback (most recent call last): ValueError: Empty list of coordinates: '' [(1.1, 1.2, 0.0)] [(1.1, 1.2, 0.0), (2.2, 2.3, 0.0)] [(1.1, 1.2, -0.4), (2.2, 2.3, -0.5)] Traceback (most recent call last): ValueError: Found overlapping site #2, 0 0 -1 """
def coordinates(value):
    """
    Convert a non-empty string into a list of lon-lat coordinates,
    raising ValueError on empty input or duplicated (lon, lat) pairs.
    """
    if not value.strip():
        raise ValueError('Empty list of coordinates: %r' % value)
    seen = set()
    result = []
    for i, chunk in enumerate(value.split(','), 1):
        pnt = point(chunk)
        lonlat = pnt[:2]
        # two sites with the same (lon, lat) are not allowed
        if lonlat in seen:
            raise ValueError("Found overlapping site #%d, %s" % (i, chunk))
        seen.add(lonlat)
        result.append(pnt)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wkt_polygon(value): """ Convert a string with a comma separated list of coordinates into a WKT polygon, by closing the ring. """
def wkt_polygon(value):
    """
    Convert a string with a comma separated list of coordinates into
    a WKT polygon, by closing the ring.
    """
    ring = ['%s %s' % (lon, lat) for lon, lat, dep in coordinates(value)]
    # close the linear ring by repeating the first coordinate at the end
    ring.append(ring[0])
    return 'POLYGON((%s))' % ', '.join(ring)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_levels(imls, imt, min_iml=1E-10): """ Raise a ValueError if the given levels are invalid. :param imls: a list of intensity measure and levels :param imt: the intensity measure type :param min_iml: minimum intensity measure level (default 1E-10) Traceback (most recent call last): ValueError: No imls for PGA: [] Traceback (most recent call last): ValueError: The imls for PGA are not sorted: [0.2, 0.1] Traceback (most recent call last): ValueError: Found duplicated levels for PGA: [0.2, 0.2] """
def check_levels(imls, imt, min_iml=1E-10):
    """
    Raise a ValueError if the given levels are invalid; when the first
    level is zero, replace it in place with `min_iml`.

    :param imls: a list of intensity measure levels
    :param imt: the intensity measure type
    :param min_iml: minimum intensity measure level (default 1E-10)
    """
    if len(imls) < 1:
        raise ValueError('No imls for %s: %s' % (imt, imls))
    if imls != sorted(imls):
        raise ValueError('The imls for %s are not sorted: %s' % (imt, imls))
    if len(distinct(imls)) < len(imls):
        raise ValueError("Found duplicated levels for %s: %s" % (imt, imls))
    if imls[0] == 0:
        # a zero first level must be replaced with the cutoff, which
        # must stay strictly below the second level
        if imls[1] <= min_iml:
            raise ValueError("The min_iml %s=%s is larger than the second "
                             "level for %s" % (imt, min_iml, imls))
        imls[0] = min_iml
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pmf(value): """ Convert a string into a Probability Mass Function. :param value: a sequence of probabilities summing up to 1 (no commas) :returns: [(0.157, 0), (0.843, 1)] """
def pmf(value):
    """
    Convert a string into a Probability Mass Function.

    :param value:
        a sequence of probabilities summing up to 1 (no commas)
    :returns:
        a list of pairs [(probability, index), ...]
    """
    probs = probabilities(value)
    # sum the already-parsed probabilities instead of re-parsing the
    # original string a second time with float()
    if abs(1. - sum(probs)) > 1e-12:
        raise ValueError('The probabilities %s do not sum up to 1!' % value)
    return [(p, i) for i, p in enumerate(probs)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_weights(nodes_with_a_weight): """ Ensure that the sum of the values is 1 :param nodes_with_a_weight: a list of Node objects with a weight attribute """
def check_weights(nodes_with_a_weight):
    """
    Ensure that the sum of the values is 1

    :param nodes_with_a_weight:
        a list of Node objects with a weight attribute
    """
    weights = []
    for node in nodes_with_a_weight:
        weights.append(node['weight'])
    # the weights must sum up to 1 within the module precision
    if abs(sum(weights) - 1.) > PRECISION:
        raise ValueError('The weights do not sum up to 1: %s' % weights)
    return nodes_with_a_weight
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ab_values(value): """ a and b values of the GR magnitude-scaling relation. a is a positive float, b is just a float. """
def ab_values(value):
    """
    a and b values of the GR magnitude-scaling relation.
    a is a positive float, b is just a float.
    """
    a_str, b_str = value.split()
    # a must be strictly positive, b is unconstrained
    return positivefloat(a_str), float_(b_str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def site_param(dic): """ Convert a dictionary site_model_param -> string into a dictionary of valid casted site parameters. """
def site_param(dic):
    """
    Convert a dictionary site_model_param -> string into a dictionary
    of valid casted site parameters.
    """
    out = {}
    for key, val in dic.items():
        if key == 'vs30Type':
            # avoid "Unrecognized parameter vs30Type"
            out['vs30measured'] = val == 'measured'
        elif key in site.site_param_dt:
            out[key] = val
        else:
            raise ValueError('Unrecognized parameter %s' % key)
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(cls, dic): """ Convert a dictionary name->string into a dictionary name->value by converting the string. If the name does not correspond to a known parameter, just ignore it and print a warning. """
def check(cls, dic):
    """
    Convert a dictionary name->string into a dictionary name->value
    by converting the string. If the name does not correspond to a
    known parameter, just ignore it and print a warning.
    """
    _missing = object()  # sentinel, so a None-valued attribute is kept
    out = {}
    for name, text in dic.items():
        param = getattr(cls, name, _missing)
        if param is _missing:
            logging.warning('Ignored unknown parameter %s', name)
        else:
            out[name] = param.validator(text)
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_(cls, dic): """ Build a new ParamSet from a dictionary of string-valued parameters which are assumed to be already valid. """
def from_(cls, dic):
    """
    Build a new ParamSet from a dictionary of string-valued parameters
    which are assumed to be already valid.
    """
    # bypass __init__: the parameters are already validated
    new = cls.__new__(cls)
    for name, text in dic.items():
        setattr(new, name, ast.literal_eval(text))
    return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self): """ Apply the `is_valid` methods to self and possibly raise a ValueError. """
# it is important to have the validator applied in a fixed order valids = [getattr(self, valid) for valid in sorted(dir(self.__class__)) if valid.startswith('is_valid_')] for is_valid in valids: if not is_valid(): docstring = '\n'.join( line.strip() for line in is_valid.__doc__.splitlines()) doc = docstring.format(**vars(self)) raise ValueError(doc)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_magnitude_scaling_term(self, C, mag): """ Returns the magnitude scaling term defined in equation 3 """
if mag < 6.75: return C["a1_lo"] + C["a2_lo"] * mag + C["a3"] *\ ((8.5 - mag) ** 2.0) else: return C["a1_hi"] + C["a2_hi"] * mag + C["a3"] *\ ((8.5 - mag) ** 2.0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_distance_scaling_term(self, C, mag, rrup): """ Returns the magnitude dependent distance scaling term """
if mag < 6.75: mag_factor = -(C["b1_lo"] + C["b2_lo"] * mag) else: mag_factor = -(C["b1_hi"] + C["b2_hi"] * mag) return mag_factor * np.log(rrup + 10.0) + (C["gamma"] * rrup)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_base_term(self, C, rup, dists): """ Compute and return base model term, that is the first term in equation 1, page 74. The calculation of this term is explained in paragraph 'Base Model', page 75. """
c1 = self.CONSTS['c1']
# distance measure including the near-source saturation term c4
R = np.sqrt(dists.rrup ** 2 + self.CONSTS['c4'] ** 2)
base_term = (C['a1'] +
             C['a8'] * ((8.5 - rup.mag) ** 2) +
             (C['a2'] + self.CONSTS['a3'] * (rup.mag - c1)) *
             np.log(R))
# the magnitude scaling changes slope at the hinge magnitude c1
if rup.mag <= c1:
    return base_term + self.CONSTS['a4'] * (rup.mag - c1)
else:
    return base_term + self.CONSTS['a5'] * (rup.mag - c1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_site_response_term(self, C, imt, sites, pga1100): """ Compute and return site response model term, that is the fifth term in equation 1, page 74. """
site_resp_term = np.zeros_like(sites.vs30)
vs30_star, _ = self._compute_vs30_star_factor(imt, sites.vs30)
vlin, c, n = C['VLIN'], self.CONSTS['c'], self.CONSTS['n']
a10, b = C['a10'], C['b']
# soft sites (vs30 below vlin): nonlinear amplification depending on
# the rock PGA (pga1100)
idx = sites.vs30 < vlin
arg = vs30_star[idx] / vlin
site_resp_term[idx] = (a10 * np.log(arg) -
                       b * np.log(pga1100[idx] + c) +
                       b * np.log(pga1100[idx] + c * (arg ** n)))
# stiff sites (vs30 at or above vlin): linear amplification
idx = sites.vs30 >= vlin
site_resp_term[idx] = (a10 + b * n) * np.log(vs30_star[idx] / vlin)
return site_resp_term
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_hanging_wall_term(self, C, dists, rup): """ Compute and return hanging wall model term, that is the sixth term in equation 1, page 74. The calculation of this term is explained in paragraph 'Hanging-Wall Model', page 77. """
if rup.dip == 90.0:
    # vertical fault: no hanging-wall effect at all
    return np.zeros_like(dists.rx)
else:
    # Fhw flags the sites on the hanging-wall side (rx > 0)
    idx = dists.rx > 0
    Fhw = np.zeros_like(dists.rx)
    Fhw[idx] = 1
    # equation 8, page 77
    T1 = np.zeros_like(dists.rx)
    idx1 = (dists.rjb < 30.0) & (idx)
    T1[idx1] = 1.0 - dists.rjb[idx1] / 30.0
    # equation 9, page 77
    T2 = np.ones_like(dists.rx)
    idx2 = ((dists.rx <= rup.width * np.cos(np.radians(rup.dip))) &
            (idx))
    T2[idx2] = (0.5 + dists.rx[idx2] /
                (2 * rup.width * np.cos(np.radians(rup.dip))))
    # equation 10, page 78
    T3 = np.ones_like(dists.rx)
    idx3 = (dists.rx < rup.ztor) & (idx)
    T3[idx3] = dists.rx[idx3] / rup.ztor
    # equation 11, page 78: magnitude taper between M6 and M7
    if rup.mag <= 6.0:
        T4 = 0.0
    elif rup.mag > 6 and rup.mag < 7:
        T4 = rup.mag - 6
    else:
        T4 = 1.0
    # equation 5, in AS08_NGA_errata.pdf: dip taper above 30 degrees
    if rup.dip >= 30:
        T5 = 1.0 - (rup.dip - 30.0) / 60.0
    else:
        T5 = 1.0
    return Fhw * C['a14'] * T1 * T2 * T3 * T4 * T5
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_top_of_rupture_depth_term(self, C, rup): """ Compute and return top of rupture depth term, that is the seventh term in equation 1, page 74. The calculation of this term is explained in paragraph 'Depth-to-Top of Rupture Model', page 78. """
if rup.ztor >= 10.0: return C['a16'] else: return C['a16'] * rup.ztor / 10.0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_large_distance_term(self, C, dists, rup): """ Compute and return large distance model term, that is the 8-th term in equation 1, page 74. The calculation of this term is explained in paragraph 'Large Distance Model', page 78. """
# equation 15, page 79 if rup.mag < 5.5: T6 = 1.0 elif rup.mag >= 5.5 and rup.mag <= 6.5: T6 = 0.5 * (6.5 - rup.mag) + 0.5 else: T6 = 0.5 # equation 14, page 79 large_distance_term = np.zeros_like(dists.rrup) idx = dists.rrup >= 100.0 large_distance_term[idx] = C['a18'] * (dists.rrup[idx] - 100.0) * T6 return large_distance_term
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_soil_depth_term(self, C, imt, z1pt0, vs30): """ Compute and return soil depth model term, that is the 9-th term in equation 1, page 74. The calculation of this term is explained in paragraph 'Soil Depth Model', page 79. """
a21 = self._compute_a21_factor(C, imt, z1pt0, vs30)
a22 = self._compute_a22_factor(imt)
median_z1pt0 = self._compute_median_z1pt0(vs30)
# depth scaling relative to the median z1.0 for the given vs30
soil_depth_term = a21 * np.log((z1pt0 + self.CONSTS['c2']) /
                               (median_z1pt0 + self.CONSTS['c2']))
# additional deep-soil correction for z1.0 >= 200 m
idx = z1pt0 >= 200
soil_depth_term[idx] += a22 * np.log(z1pt0[idx] / 200)
return soil_depth_term
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_stddevs(self, C, C_PGA, pga1100, rup, sites, stddev_types): """ Return standard deviations as described in paragraph 'Equations for standard deviation', page 81. """
std_intra = self._compute_intra_event_std(C, C_PGA, pga1100, rup.mag,
                                          sites.vs30, sites.vs30measured)
std_inter = self._compute_inter_event_std(C, C_PGA, pga1100, rup.mag,
                                          sites.vs30)
stddevs = []
for stddev_type in stddev_types:
    assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
    if stddev_type == const.StdDev.TOTAL:
        # total std: intra and inter components added in quadrature
        stddevs.append(np.sqrt(std_intra ** 2 + std_inter ** 2))
    elif stddev_type == const.StdDev.INTRA_EVENT:
        stddevs.append(std_intra)
    elif stddev_type == const.StdDev.INTER_EVENT:
        stddevs.append(std_inter)
return stddevs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_inter_event_std(self, C, C_PGA, pga1100, mag, vs30): """ Compute inter event standard deviation, equation 25, page 82. """
tau_0 = self._compute_std_0(C['s3'], C['s4'], mag)
tau_b_pga = self._compute_std_0(C_PGA['s3'], C_PGA['s4'], mag)
# partial derivative of the site amplification w.r.t. the rock PGA
delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30)
# combine the period-specific and PGA inter-event terms, accounting
# for their correlation through C['rho'] (equation 25, page 82)
std_inter = np.sqrt(tau_0 ** 2 + (delta_amp ** 2) * (tau_b_pga ** 2) +
                    2 * delta_amp * tau_0 * tau_b_pga * C['rho'])
return std_inter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_sigma_b(self, C, mag, vs30measured): """ Equation 23, page 81. """
sigma_0 = self._compute_sigma_0(C, mag, vs30measured) sigma_amp = self.CONSTS['sigma_amp'] return np.sqrt(sigma_0 ** 2 - sigma_amp ** 2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_sigma_0(self, C, mag, vs30measured): """ Equation 27, page 82. """
s1 = np.zeros_like(vs30measured, dtype=float)
s2 = np.zeros_like(vs30measured, dtype=float)
# use the 'measured' coefficients where vs30 was actually measured
idx = vs30measured == 1
s1[idx] = C['s1mea']
s2[idx] = C['s2mea']
# and the 'estimated' coefficients everywhere else
idx = vs30measured == 0
s1[idx] = C['s1est']
s2[idx] = C['s2est']
return self._compute_std_0(s1, s2, mag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_std_0(self, c1, c2, mag): """ Common part of equations 27 and 28, pag 82. """
if mag < 5: return c1 elif mag >= 5 and mag <= 7: return c1 + (c2 - c1) * (mag - 5) / 2 else: return c2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_a21_factor(self, C, imt, z1pt0, vs30): """ Compute and return a21 factor, equation 18, page 80. """
e2 = self._compute_e2_factor(imt, vs30)
a21 = e2.copy()
vs30_star, v1 = self._compute_vs30_star_factor(imt, vs30)
median_z1pt0 = self._compute_median_z1pt0(vs30)
numerator = ((C['a10'] + C['b'] * self.CONSTS['n']) *
             np.log(vs30_star / np.min([v1, 1000])))
denominator = np.log((z1pt0 + self.CONSTS['c2']) /
                     (median_z1pt0 + self.CONSTS['c2']))
# cap a21 so that the total soil depth term cannot become negative
idx = numerator + e2 * denominator < 0
a21[idx] = - numerator[idx] / denominator[idx]
# no depth correction at all for hard-rock sites
idx = vs30 >= 1000
a21[idx] = 0.0
return a21
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_vs30_star_factor(self, imt, vs30): """ Compute and return vs30 star factor, equation 5, page 77. """
v1 = self._compute_v1_factor(imt) vs30_star = vs30.copy() vs30_star[vs30_star >= v1] = v1 return vs30_star, v1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_v1_factor(self, imt): """ Compute and return v1 factor, equation 6, page 77. """
if imt.name == "SA": t = imt.period if t <= 0.50: v1 = 1500.0 elif t > 0.50 and t <= 1.0: v1 = np.exp(8.0 - 0.795 * np.log(t / 0.21)) elif t > 1.0 and t < 2.0: v1 = np.exp(6.76 - 0.297 * np.log(t)) else: v1 = 700.0 elif imt.name == "PGA": v1 = 1500.0 else: # this is for PGV v1 = 862.0 return v1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_e2_factor(self, imt, vs30): """ Compute and return e2 factor, equation 19, page 80. """
e2 = np.zeros_like(vs30)
# map PGV/PGA to nominal periods (1 s and 0 s respectively)
if imt.name == "PGV":
    period = 1
elif imt.name == "PGA":
    period = 0
else:
    period = imt.period
if period < 0.35:
    # short periods: no e2 correction at all
    return e2
else:
    # the correction applies only to soil sites (vs30 <= 1000)
    idx = vs30 <= 1000
    if period >= 0.35 and period <= 2.0:
        e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
                   np.log(period / 0.35))
    elif period > 2.0:
        # the period dependence saturates at 2 s
        e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
                   np.log(2.0 / 0.35))
return e2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_a22_factor(self, imt): """ Compute and return the a22 factor, equation 20, page 80. """
if imt.name == 'PGV': return 0.0 period = imt.period if period < 2.0: return 0.0 else: return 0.0625 * (period - 2.0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_mean(self, C, A1, A2, A3, A4, A5, A6, mag, hypo_depth, rrup, mean, idx): """ Compute mean for subduction interface events, as explained in table 2, page 67. """
# in-place update of the mean for the subset of sites selected by idx;
# mag and hypo_depth are scalar rupture properties, rrup is per-site
mean[idx] = (A1 + A2 * mag + C['C1'] + C['C2'] * (A3 - mag) ** 3 +
             C['C3'] * np.log(rrup[idx] + A4 * np.exp(A5 * mag)) +
             A6 * hypo_depth)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_std(self, C, mag, stddevs, idx): """ Compute total standard deviation, as explained in table 2, page 67. """
if mag > 8.0: mag = 8.0 for stddev in stddevs: stddev[idx] += C['C4'] + C['C5'] * mag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_basemap(self):
    '''
    Creates the map according to the input configuration
    '''
    if self.config['min_lon'] >= self.config['max_lon']:
        raise ValueError('Upper limit of long is smaller than lower limit')
    # BUG FIX: this second check used to test the longitude bounds twice;
    # it must validate the latitude bounds instead
    if self.config['min_lat'] >= self.config['max_lat']:
        raise ValueError('Upper limit of lat is smaller than lower limit')
    # Corners of the map
    lowcrnrlat = self.config['min_lat']
    lowcrnrlon = self.config['min_lon']
    uppcrnrlat = self.config['max_lat']
    uppcrnrlon = self.config['max_lon']
    if 'resolution' not in self.config.keys():
        self.config['resolution'] = 'l'
    # Centre of the map
    lat0 = lowcrnrlat + ((uppcrnrlat - lowcrnrlat) / 2)
    lon0 = lowcrnrlon + ((uppcrnrlon - lowcrnrlon) / 2)
    # Portrait or landscape depending on the aspect ratio of the bounds
    if (uppcrnrlat - lowcrnrlat) >= (uppcrnrlon - lowcrnrlon):
        fig_aspect = PORTRAIT_ASPECT
    else:
        fig_aspect = LANDSCAPE_ASPECT
    if self.ax is None:
        self.fig, self.ax = plt.subplots(figsize=fig_aspect,
                                         facecolor='w', edgecolor='k')
    else:
        self.fig = self.ax.get_figure()
    if self.title:
        self.ax.set_title(self.title, fontsize=16)
    parallels = np.arange(-90., 90., self.lat_lon_spacing)
    meridians = np.arange(0., 360., self.lat_lon_spacing)
    # Build Map
    # Do not import Basemap at top level since it's an optional feature
    # and it would break doctests
    from mpl_toolkits.basemap import Basemap
    self.m = Basemap(
        llcrnrlon=lowcrnrlon, llcrnrlat=lowcrnrlat,
        urcrnrlon=uppcrnrlon, urcrnrlat=uppcrnrlat,
        projection='stere', resolution=self.config['resolution'],
        area_thresh=1000.0, lat_0=lat0, lon_0=lon0, ax=self.ax)
    self.m.drawcountries()
    self.m.drawmapboundary()
    self.m.drawcoastlines()
    self.m.drawstates()
    self.m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=12)
    self.m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=12)
    self.m.fillcontinents(color='wheat')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def savemap(self, filename, filetype='png', papertype="a4"): """ Save the figure """
def savemap(self, filename, filetype='png', papertype="a4"):
    """
    Save the figure to `filename` using the instance dpi setting.
    """
    self.fig.savefig(filename,
                     dpi=self.dpi,
                     format=filetype,
                     papertype=papertype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_source_model( self, model, area_border='k-', border_width=1.0, point_marker='ks', point_size=2.0, overlay=False, min_depth=0., max_depth=None, alpha=1.0): """ Adds a source model to the map :param model: Source model of mixed typologies as instance of :class: openquake.hmtk.sources.source_model.mtkSourceModel """
def add_source_model(self, model, area_border='k-', border_width=1.0,
                     point_marker='ks', point_size=2.0, overlay=False,
                     min_depth=0., max_depth=None, alpha=1.0):
    """
    Adds a source model to the map

    :param model:
        Source model of mixed typologies as instance of :class:
        openquake.hmtk.sources.source_model.mtkSourceModel
    """
    # (typology, plotting callable) pairs, checked in order
    dispatch = [
        (mtkAreaSource,
         lambda s: self._plot_area_source(s, area_border, border_width)),
        (mtkPointSource,
         lambda s: self._plot_point_source(s, point_marker, point_size)),
        (mtkComplexFaultSource,
         lambda s: self._plot_complex_fault(s, area_border, border_width,
                                            min_depth, max_depth, alpha)),
        (mtkSimpleFaultSource,
         lambda s: self._plot_simple_fault(s, area_border, border_width)),
    ]
    for src in model.sources:
        for typology, plot in dispatch:
            if isinstance(src, typology):
                plot(src)
                break
        # unknown typologies are silently skipped
    if not overlay:
        plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_colour_scaled_points(self, longitude, latitude, data, shape='s', alpha=1.0, size=20, norm=None, overlay=False): """ Overlays a set of points on a map with a fixed size but colour scaled according to the data :param np.ndarray longitude: Longitude :param np.ndarray latitude: Latitude :param np.ndarray data: Data for plotting :param str shape: Marker style :param float alpha: Sets the transparency of the marker (0 for transparent, 1 opaque) :param int size: Marker size :param norm: Normalisation as instance of :class: matplotlib.colors.Normalize """
def add_colour_scaled_points(self, longitude, latitude, data, shape='s',
                             alpha=1.0, size=20, norm=None, overlay=False):
    """
    Overlays a set of points on a map with a fixed size but colour
    scaled according to the data; see the class docstring for the
    parameter descriptions.
    """
    if norm is None:
        # default normalisation spanning the data range
        norm = Normalize(vmin=np.min(data), vmax=np.max(data))
    x, y = self.m(longitude, latitude)
    mappable = self.m.scatter(x, y,
                              marker=shape,
                              s=size,
                              c=data,
                              norm=norm,
                              alpha=alpha,
                              linewidths=0.0,
                              zorder=4)
    self.m.colorbar(mappable=mappable, fig=self.fig, ax=self.ax)
    if not overlay:
        plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_size_scaled_points( self, longitude, latitude, data, shape='o', logplot=False, alpha=1.0, colour='b', smin=2.0, sscale=2.0, overlay=False): """ Plots a set of points with size scaled according to the data :param bool logplot: Choose to scale according to the logarithm (base 10) of the data :param float smin: Minimum scale size :param float sscale: Scaling factor """
def add_size_scaled_points(self, longitude, latitude, data, shape='o',
                           logplot=False, alpha=1.0, colour='b',
                           smin=2.0, sscale=2.0, overlay=False):
    """
    Plots a set of points with size scaled according to the data

    :param bool logplot:
        Choose to scale according to the logarithm (base 10) of the data
    :param float smin: Minimum scale size
    :param float sscale: Scaling factor
    """
    # optionally work with the log10 of the data (copy: do not mutate)
    values = np.log10(data.copy()) if logplot else data
    x, y = self.m(longitude, latitude)
    self.m.scatter(x, y,
                   marker=shape,
                   s=(smin + values ** sscale),
                   c=colour,
                   alpha=alpha,
                   zorder=2)
    if not overlay:
        plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_catalogue_cluster(self, catalogue, vcl, flagvector, cluster_id=None, overlay=True): """ Creates a plot of a catalogue showing where particular clusters exist """
def add_catalogue_cluster(self, catalogue, vcl, flagvector,
                          cluster_id=None, overlay=True):
    """
    Creates a plot of a catalogue showing where particular clusters exist
    """
    # Background: grey markers scaled by magnitude for the whole catalogue
    self.add_size_scaled_points(catalogue.data['longitude'],
                                catalogue.data['latitude'],
                                catalogue.data['magnitude'],
                                shape="o", alpha=0.8,
                                colour=(0.5, 0.5, 0.5),
                                smin=1.0, sscale=1.5,
                                overlay=True)
    if cluster_id is None:
        # No cluster selected: just highlight the mainshocks in red
        idx = flagvector == 0
        self.add_size_scaled_points(catalogue.data['longitude'][idx],
                                    catalogue.data['latitude'][idx],
                                    catalogue.data['magnitude'][idx],
                                    shape="o", colour="r",
                                    smin=1.0, sscale=1.5,
                                    overlay=overlay)
        return
    # BUG FIX: collections.Iterable was removed in Python 3.10; the
    # abstract base classes live in collections.abc
    from collections.abc import Iterable
    if not isinstance(cluster_id, Iterable):
        cluster_id = [cluster_id]
    for iloc, clid in enumerate(cluster_id):
        if iloc == (len(cluster_id) - 1):
            # On last iteration set overlay to function overlay
            temp_overlay = overlay
        else:
            temp_overlay = True
        idx = vcl == clid
        self.add_size_scaled_points(
            catalogue.data["longitude"][idx],
            catalogue.data["latitude"][idx],
            catalogue.data["magnitude"][idx],
            shape="o",
            colour=DISSIMILAR_COLOURLIST[(iloc + 1) % NCOLS],
            smin=1.0, sscale=1.5,
            overlay=temp_overlay)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_stddevs(self, C, mag, stddev_types, num_sites): """ Return standard deviation as defined in eq.11 page 319. """
std = C['c16'] + np.zeros(num_sites) if mag < 7.4: std -= 0.07 * mag else: std -= 0.518 # only the 'total' standard deviation is supported, therefore the # std is always the same for all types stddevs = [std for _ in stddev_types] return stddevs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distinct(keys): """ Return the distinct keys in order. """
def distinct(keys):
    """
    Return the distinct keys in order.
    """
    seen = set()
    result = []
    for key in keys:
        if key in seen:
            continue  # duplicate: keep only the first occurrence
        seen.add(key)
        result.append(key)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_in_blocks(sequence, hint, weight=lambda item: 1, key=nokey): """ Split the `sequence` in a number of WeightedSequences close to `hint`. :param sequence: a finite sequence of items :param hint: an integer suggesting the number of subsequences to generate :param weight: a function returning the weigth of a given item :param key: a function returning the key of a given item The WeightedSequences are of homogeneous key and they try to be balanced in weight. For instance [<WeightedSequence ['A', 'B'], weight=2>, <WeightedSequence ['C', 'D'], weight=2>, <WeightedSequence ['E'], weight=1>] """
def split_in_blocks(sequence, hint, weight=lambda item: 1, key=nokey):
    """
    Split the `sequence` in a number of WeightedSequences close to `hint`.

    :param sequence: a finite sequence of items
    :param hint: an integer suggesting the number of subsequences to generate
    :param weight: a function returning the weigth of a given item
    :param key: a function returning the key of a given item
    """
    if isinstance(sequence, int):
        return split_in_slices(sequence, hint)
    if hint in (0, 1):
        if key is nokey:
            # no splitting at all
            return [sequence]
        # otherwise split by key only
        return list(groupby(sequence, key).values())
    # sort by (key, weight) so the blocks are homogeneous and balanced
    items = sorted(sequence, key=lambda item: (key(item), weight(item)))
    assert hint > 0, hint
    assert len(items) > 0, len(items)
    total_weight = float(sum(weight(item) for item in items))
    return block_splitter(items, math.ceil(total_weight / hint), weight, key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gettemp(content=None, dir=None, prefix="tmp", suffix="tmp"): """Create temporary file with the given content. Please note: the temporary file must be deleted by the caller. :param string content: the content to write to the temporary file. :param string dir: directory where the file should be created :param string prefix: file name prefix :param string suffix: file name suffix :returns: a string with the path to the temporary file """
def gettemp(content=None, dir=None, prefix="tmp", suffix="tmp"):
    """Create temporary file with the given content.

    Please note: the temporary file must be deleted by the caller.

    :param string content: the content to write to the temporary file.
    :param string dir: directory where the file should be created
    :param string prefix: file name prefix
    :param string suffix: file name suffix
    :returns: a string with the path to the temporary file
    """
    if dir is not None:
        if not os.path.exists(dir):
            os.makedirs(dir)
    fd, path = tempfile.mkstemp(dir=dir, prefix=prefix, suffix=suffix)
    _tmp_paths.append(path)
    if content:
        if hasattr(content, 'encode'):
            content = content.encode('utf8')
        with os.fdopen(fd, "wb") as fh:
            fh.write(content)
    else:
        # BUG FIX: the descriptor returned by mkstemp was leaked when
        # no content was given; close it explicitly
        os.close(fd)
    return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def removetmp(): """ Remove the temporary files created by gettemp """
def removetmp():
    """
    Remove the temporary files created by gettemp
    """
    for path in _tmp_paths:
        if not os.path.exists(path):
            continue  # already removed
        try:
            os.remove(path)
        except PermissionError:
            # e.g. on Windows, when the file is still in use
            pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_in_process(code, *args): """ Run in an external process the given Python code and return the output as a Python object. If there are arguments, then code is taken as a template and traditional string interpolation is performed. :param code: string or template describing Python code :param args: arguments to be used for interpolation :returns: the output of the process, as a Python object """
def run_in_process(code, *args):
    """
    Run in an external process the given Python code and return the
    output as a Python object. If there are arguments, then code is
    taken as a template and traditional string interpolation is performed.

    :param code: string or template describing Python code
    :param args: arguments to be used for interpolation
    :returns: the output of the process, as a Python object
    """
    command = code % args if args else code
    try:
        out = subprocess.check_output([sys.executable, '-c', command])
    except subprocess.CalledProcessError as exc:
        # show the failing code on stderr for easier debugging
        print(exc.cmd[-1], file=sys.stderr)
        raise
    return eval(out, {}, {}) if out else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_all(module_or_package): """ If `module_or_package` is a module, just import it; if it is a package, recursively imports all the modules it contains. Returns the names of the modules that were imported as a set. The set can be empty if the modules were already in sys.modules. """
def import_all(module_or_package):
    """
    If `module_or_package` is a module, just import it; if it is a
    package, recursively imports all the modules it contains. Returns
    the names of the modules that were imported as a set. The set can
    be empty if the modules were already in sys.modules.
    """
    before = set(sys.modules)
    top = importlib.import_module(module_or_package)
    if hasattr(top, '__path__'):  # a package: walk and import everything
        [pkg_path] = top.__path__
        n = len(pkg_path)
        for cwd, dirs, files in os.walk(pkg_path):
            # only directories containing __init__.py are subpackages
            if any(os.path.basename(f) == '__init__.py' for f in files):
                for f in files:
                    if f.endswith('.py'):
                        # PKGPATH/subpackage/module.py -> subpackage.module
                        # works at any level of nesting
                        modname = (module_or_package +
                                   cwd[n:].replace(os.sep, '.') + '.' +
                                   os.path.basename(f[:-3]))
                        importlib.import_module(modname)
    return set(sys.modules) - before
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_array(array, **kw): """ Extract a subarray by filtering on the given keyword arguments """
def get_array(array, **kw):
    """
    Extract a subarray by filtering on the given keyword arguments
    """
    # apply one boolean mask per (field, value) pair, progressively
    for field, expected in kw.items():
        array = array[array[field] == expected]
    return array
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def not_equal(array_or_none1, array_or_none2): """ Compare two arrays that can also be None or have different shapes and returns a boolean. True True True """
def not_equal(array_or_none1, array_or_none2):
    """
    Compare two arrays that can also be None or have different shapes
    and returns a boolean.
    """
    if array_or_none1 is None:
        # equal only when both are None
        return array_or_none2 is not None
    if array_or_none2 is None:
        return True
    if array_or_none1.shape != array_or_none2.shape:
        return True
    return (array_or_none1 != array_or_none2).any()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def humansize(nbytes, suffixes=('B', 'KB', 'MB', 'GB', 'TB', 'PB')): """ Return file size in a human-friendly format """
def humansize(nbytes, suffixes=('B', 'KB', 'MB', 'GB', 'TB', 'PB')):
    """
    Return file size in a human-friendly format
    """
    if nbytes == 0:
        return '0 B'
    size = nbytes
    rank = 0
    # divide by 1024 until the value fits the current suffix
    while size >= 1024 and rank < len(suffixes) - 1:
        size /= 1024.
        rank += 1
    # drop trailing zeros and a possible trailing dot
    digits = ('%.2f' % size).rstrip('0').rstrip('.')
    return '%s %s' % (digits, suffixes[rank])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deprecated(func, msg='', *args, **kw): """ A family of decorators to mark deprecated functions. :param msg: the message to print the first time the deprecated function is used. Here is an example of usage: Notice that if the function is called several time, the deprecation warning will be displayed only the first time. """
def deprecated(func, msg='', *args, **kw):
    """
    A family of decorators to mark deprecated functions.

    :param msg: the message to print the first time the
                deprecated function is used.

    The deprecation warning is emitted only on the first call;
    subsequent calls just increment the `called` counter.
    """
    full_msg = '%s.%s has been deprecated. %s' % (
        func.__module__, func.__name__, msg)
    if not hasattr(func, 'called'):
        # first call ever: warn once and initialize the counter
        warnings.warn(full_msg, DeprecationWarning, stacklevel=2)
        func.called = 0
    func.called += 1
    return func(*args, **kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_histogram(counts, nbins, seed): """ Distribute a total number of counts on a set of bins homogenously. array([1, 0]) array([28, 18, 17, 19, 18]) array([2043, 2015, 2050, 1930, 1962]) """
def random_histogram(counts, nbins, seed):
    """
    Distribute a total number of counts on a set of bins homogenously.
    """
    # deterministic given the seed
    numpy.random.seed(seed)
    samples = numpy.random.random(counts)
    hist, _edges = numpy.histogram(samples, nbins, (0, 1))
    return hist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def safeprint(*args, **kwargs): """ Convert and print characters using the proper encoding """
def safeprint(*args, **kwargs):
    """
    Convert and print characters using the proper encoding
    """
    # when stdout is redirected to a file, python 2 uses ascii for the
    # writer; python 3 uses what is configured in the system (i.e. 'utf-8')
    # if sys.stdout is replaced by a StringIO instance, Python 2 does not
    # have an attribute 'encoding', and we assume ascii in that case
    encoding = getattr(sys.stdout, 'encoding', None) or 'ascii'
    safe_args = [s.encode('utf-8').decode(encoding, 'ignore') for s in args]
    return print(*safe_args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def zipfiles(fnames, archive, mode='w', log=lambda msg: None, cleanup=False): """ Build a zip archive from the given file names. :param fnames: list of path names :param archive: path of the archive """
def zipfiles(fnames, archive, mode='w', log=lambda msg: None, cleanup=False):
    """
    Build a zip archive from the given file names.

    :param fnames: list of path names
    :param archive: path of the archive
    """
    # strip the common directory prefix from the archived names
    prefix = len(os.path.commonprefix([os.path.dirname(f) for f in fnames]))
    with zipfile.ZipFile(
            archive, mode, zipfile.ZIP_DEFLATED, allowZip64=True) as z:
        for fname in fnames:
            log('Archiving %s' % fname)
            z.write(fname, fname[prefix:])
            if cleanup:  # remove the zipped file
                os.remove(fname)
    log('Generated %s' % archive)
    return archive
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def println(msg): """ Convenience function to print messages on a single line in the terminal """
def println(msg):
    """
    Convenience function to print messages on a single line in the terminal
    """
    stream = sys.stdout
    stream.write(msg)
    stream.flush()
    # move the cursor back with backspaces so the next message overwrites
    stream.write('\x08' * len(msg))
    stream.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def warn(msg, *args): """ Print a warning on stderr """
def warn(msg, *args):
    """
    Print a warning on stderr
    """
    # interpolate only when extra arguments are given
    text = msg % args if args else msg
    sys.stderr.write('WARNING: ' + text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(self, i, item_weight): """ Insert an item with the given weight in the sequence """
def insert(self, i, item_weight):
    """
    Insert an item with the given weight in the sequence
    """
    item, weight = item_weight
    # keep the running total weight in sync with the sequence
    self.weight += weight
    self._seq.insert(i, item)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, *keys): """ Return a decorator registering a new implementation for the CallableDict for the given keys. """
def add(self, *keys):
    """
    Return a decorator registering a new implementation for the
    CallableDict for the given keys.
    """
    def decorator(func):
        # register the same implementation under every key
        for k in keys:
            self[k] = func
        return func
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_magnitude_scaling_term(self, C, mag): """ Returns the magnitude scaling term of the GMPE described in equation 3 """
dmag = mag - self.CONSTS["Mh"] if mag < self.CONSTS["Mh"]: return C["e1"] + (C["b1"] * dmag) + (C["b2"] * (dmag ** 2.0)) else: return C["e1"] + (C["b3"] * dmag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_distance_scaling_term(self, C, rval, mag): """ Returns the distance scaling term of the GMPE described in equation 2 """
r_adj = np.sqrt(rval ** 2.0 + C["h"] ** 2.0) return ( (C["c1"] + C["c2"] * (mag - self.CONSTS["Mref"])) * np.log10(r_adj / self.CONSTS["Rref"]) - (C["c3"] * (r_adj - self.CONSTS["Rref"])))