text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(array, cutoff):
    """Compute the natural logarithm of an array, with a cutoff on the
    small values.

    :param array: a numpy array (left unmodified)
    :param cutoff: minimum value; entries below it are raised to the
        cutoff before taking the logarithm
    :returns: a numpy array of logarithms
    """
    # numpy.clip copies and applies the lower bound in one step; unlike
    # the copy-and-assign original it also keeps a float cutoff exact
    # when the input array has an integer dtype
    return numpy.log(numpy.clip(array, cutoff, None))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose_arrays(a1, a2, firstfield='etag'):
    """Compose composite arrays by generating an extended datatype
    containing all the fields.

    The two arrays must have the same length.

    :param a1: a one-dimensional array, composite or not
    :param a2: a second array, composite or two-dimensional
    :param firstfield: field name used to wrap `a1` when it is not composite
    :returns: a composite (structured) numpy array with all the fields
    """
    assert len(a1) == len(a2), (len(a1), len(a2))
    if a1.dtype.names is None and len(a1.shape) == 1:
        # the first array is not composite, but it is one-dimensional:
        # wrap it into a structured array with a single field
        a1 = numpy.array(a1, numpy.dtype([(firstfield, a1.dtype)]))
    fields1 = [(f, a1.dtype.fields[f][0]) for f in a1.dtype.names]
    if a2.dtype.names is None:  # the second array is not composite
        assert len(a2.shape) == 2, a2.shape
        width = a2.shape[1]
        # generate synthetic field names value0, value1, ... per column
        fields2 = [('value%d' % i, a2.dtype) for i in range(width)]
        composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2))
        for f1 in dict(fields1):
            composite[f1] = a1[f1]
        for i in range(width):
            composite['value%d' % i] = a2[:, i]
        return composite
    # both arrays are composite: merge the two sets of fields
    fields2 = [(f, a2.dtype.fields[f][0]) for f in a2.dtype.names]
    composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2))
    for f1 in dict(fields1):
        composite[f1] = a1[f1]
    for f2 in dict(fields2):
        composite[f2] = a2[f2]
    return composite
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_script(upgrade, conn, dry_run=True, debug=True):
    """An utility to debug upgrade scripts written in Python.

    :param upgrade: upgrade procedure
    :param conn: a DB API 2 connection
    :param dry_run: if True, do not change the database
    :param debug: if True, print the queries which are executed
    """
    conn = WrappedConnection(conn, debug=debug)
    try:
        upgrade(conn)
    except Exception:
        # any failure aborts the transaction, then re-raises
        conn.rollback()
        raise
    else:
        if dry_run:
            # debugging mode: undo the changes even on success
            conn.rollback()
        else:
            conn.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_sql_script(conn, fname):
    """Apply the given SQL script to the database.

    :param conn: a DB API 2 connection
    :param fname: full path to the creation script
    """
    # close the file handle deterministically (the original leaked it)
    with open(fname) as f:
        sql = f.read()
    try:
        # we cannot use conn.executescript which is non transactional
        for query in sql.split('\n\n'):
            conn.execute(query)
    except Exception:
        logging.error('Error executing %s' % fname)
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upgrade_db(conn, pkg_name='openquake.server.db.schema.upgrades', skip_versions=()):
    """Upgrade a database by running several scripts in a single transaction.

    :param conn: a DB API 2 connection
    :param str pkg_name: the name of the package with the upgrade scripts
    :param list skip_versions: the versions to skip
    :returns: the version numbers of the new scripts applied the database
    """
    upgrader = UpgradeManager.instance(conn, pkg_name)
    t0 = time.time()
    # run the upgrade scripts
    try:
        versions_applied = upgrader.upgrade(conn, skip_versions)
    except BaseException:
        # rollback on *any* failure (including KeyboardInterrupt) and
        # re-raise; explicit BaseException replaces the bare `except:`
        conn.rollback()
        raise
    else:
        conn.commit()
    dt = time.time() - t0
    logging.info('Upgrade completed in %s seconds', dt)
    return versions_applied
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, templ, *args):
    """Run a SQL query (or query template) and return the cursor used.

    :param templ: a query or query template
    :param args: the arguments (or the empty tuple)
    :returns: the DB API 2 cursor used to run the query
    """
    cursor = self._conn.cursor()
    # interpolate the arguments into the template before executing
    sql = cursor.mogrify(templ, args)
    if self.debug:
        print(sql)
    cursor.execute(sql)
    return cursor
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install_versioning(self, conn):
    """Create the version table into an already populated database
    and insert the base script.

    :param conn: a DB API 2 connection
    """
    logging.info('Creating the versioning table %s', self.version_table)
    conn.executescript(CREATE_VERSIONING % self.version_table)
    # record the base (first) script as already applied
    self._insert_script(self.read_scripts()[0], conn)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init(self, conn):
    """Create the version table and run the base script on an empty
    database.

    :param conn: a DB API 2 connection
    """
    base = self.read_scripts()[0]['fname']
    logging.info('Creating the initial schema from %s', base)
    apply_sql_script(conn, os.path.join(self.upgrade_dir, base))
    # now that the schema exists, create and populate the version table
    self.install_versioning(conn)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upgrade(self, conn, skip_versions=()):
    """Upgrade the database from the current version to the maximum
    version in the upgrade scripts.

    :param conn: a DBAPI 2 connection
    :param skip_versions: the versions to skip
    :returns: the version numbers of the scripts applied (possibly empty)
    """
    applied_versions = self.get_db_versions(conn)
    self.starting_version = max(applied_versions)
    # skip both the versions already in the db and the explicit ones
    versions_to_skip = sorted(applied_versions | set(skip_versions))
    new_scripts = self.read_scripts(None, None, versions_to_skip)
    if not new_scripts:
        # the database is already up to date
        return []
    self.ending_version = max(script['version'] for script in new_scripts)
    return self._upgrade(conn, new_scripts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_db_versions(self, conn):
    """Return all the versions stored in the database as a set.

    :param conn: a DB API 2 connection
    :raises VersioningNotInstalled: if the version table is missing
    """
    curs = conn.cursor()
    query = 'select version from {}'.format(self.version_table)
    try:
        curs.execute(query)
        return set(version for version, in curs.fetchall())
    except Exception as exc:
        # chain the original error instead of swallowing it with a bare
        # `except:`, so the root cause stays visible in the traceback
        raise VersioningNotInstalled('Run oq engine --upgrade-db') from exc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_scripts(self, minversion=None, maxversion=None, skip_versions=()):
    """Extract the upgrade scripts from a directory as a list of
    dictionaries, ordered by version.

    :param minversion: the minimum version to consider
    :param maxversion: the maximum version to consider
    :param skip_versions: the versions to skip
    :raises DuplicatedVersion: if two scripts share the same version
    """
    scripts = []
    versions = {}  # a script is unique per version
    for scriptname in sorted(os.listdir(self.upgrade_dir)):
        match = self.parse_script_name(scriptname)
        if match:
            version = match['version']
            if version in skip_versions:
                continue  # do not collect scripts already applied
            elif minversion and version <= minversion:
                continue  # do not collect versions too old
            elif maxversion and version > maxversion:
                continue  # do not collect versions too new
            try:
                previousname = versions[version]
            except KeyError:  # no previous script with the same version
                scripts.append(match)
                versions[version] = scriptname
            else:
                raise DuplicatedVersion(
                    'Duplicated versions {%s,%s}' %
                    (scriptname, previousname))
    return scripts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_upgrade_scripts(self):
    """Extract the OpenQuake upgrade scripts from the links in the
    GitHub page, yielding the dictionaries parsed from the script names.
    """
    # raw string avoids the invalid-escape warning on \s
    link_pattern = r'>\s*{0}\s*<'.format(self.pattern[1:-1])
    # urlopen().read() returns bytes in Python 3: decode before matching,
    # otherwise re.finditer raises a TypeError on the str pattern
    page = urllib.request.urlopen(self.upgrades_url).read().decode('utf-8')
    for mo in re.finditer(link_pattern, page):
        scriptname = mo.group(0)[1:-1].strip()
        yield self.parse_script_name(scriptname)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def average_azimuth(self):
    """Calculate and return weighted average azimuth of all line's
    segments in decimal degrees.

    Uses formula from
    http://en.wikipedia.org/wiki/Mean_of_circular_quantities
    """
    if len(self.points) == 2:
        # single segment: its azimuth is the answer
        return self.points[0].azimuth(self.points[1])
    lons = numpy.array([point.longitude for point in self.points])
    lats = numpy.array([point.latitude for point in self.points])
    azimuths = geodetic.azimuth(lons[:-1], lats[:-1], lons[1:], lats[1:])
    distances = geodetic.geodetic_distance(lons[:-1], lats[:-1],
                                           lons[1:], lats[1:])
    azimuths = numpy.radians(azimuths)
    # convert polar coordinates to Cartesian ones and calculate
    # the average coordinate of each component; the segment lengths
    # act as the weights
    avg_x = numpy.mean(distances * numpy.sin(azimuths))
    avg_y = numpy.mean(distances * numpy.cos(azimuths))
    # find the mean azimuth from that mean vector
    azimuth = numpy.degrees(numpy.arctan2(avg_x, avg_y))
    if azimuth < 0:
        # normalize to the [0, 360) range
        azimuth += 360
    return azimuth
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resample(self, section_length):
    """Resample this line into sections of length ``section_length``.

    The first point in the resampled line corresponds to the first
    point in the original line. Each line segment is split into
    sections of length equal to ``section_length``, so the length of
    the result is an exact multiple of ``section_length`` and may be
    smaller or greater than the original length (corners may be cut).

    :param section_length:
        The length of the section, in km.
    :type section_length:
        float
    :returns:
        A new line resampled into sections based on the given length.
    :rtype:
        An instance of :class:`Line`
    """
    if len(self.points) < 2:
        return Line(self.points)
    resampled_points = []
    # 1. Resample the first section. 2. Loop over the remaining points
    # in the line and resample the remaining sections.
    # 3. Extend the list with the resampled points, except the first one
    # (because it's already contained in the previous set of
    # resampled points).
    resampled_points.extend(
        self.points[0].equally_spaced_points(self.points[1],
                                             section_length)
    )
    # Skip the first point, it's already resampled
    for i in range(2, len(self.points)):
        points = resampled_points[-1].equally_spaced_points(
            self.points[i], section_length
        )
        resampled_points.extend(points[1:])
    return Line(resampled_points)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_length(self):
    """Return the total length of the line, in km, as the sum of the
    lengths of all its segments.

    :returns: Total length in km.
    """
    # pair each point with its predecessor and add up the distances
    return sum(second.distance(first)
               for first, second in zip(self.points, self.points[1:]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resample_to_num_points(self, num_points):
    """Resample the line to a specified number of points.

    :param num_points:
        Integer number of points the resulting line should have.
    :returns:
        A new line with that many points as requested.
    """
    assert len(self.points) > 1, "can not resample the line of one point"
    section_length = self.get_length() / (num_points - 1)
    resampled_points = [self.points[0]]
    segment = 0
    acc_length = 0
    last_segment_length = 0
    for i in range(num_points - 1):
        # target distance of the next resampled point from the start
        tot_length = (i + 1) * section_length
        # advance along the original segments until the accumulated
        # length reaches the target distance
        while tot_length > acc_length and segment < len(self.points) - 1:
            last_segment_length = self.points[segment].distance(
                self.points[segment + 1]
            )
            acc_length += last_segment_length
            segment += 1
        p1, p2 = self.points[segment - 1:segment + 1]
        # distance of the target point from the start of the segment
        offset = tot_length - (acc_length - last_segment_length)
        if offset < 1e-5:
            # forward geodetic transformations for very small distances
            # are very inefficient (and also unneeded). if target point
            # is just 1 cm away from original (non-resampled) line vertex,
            # don't even bother doing geodetic calculations.
            resampled = p1
        else:
            resampled = p1.equally_spaced_points(p2, offset)[1]
        resampled_points.append(resampled)
    return Line(resampled_points)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cena_tau(imt, mag, params):
    """Return the inter-event standard deviation, tau, for the CENA case.

    :param imt: intensity measure type (only its ``name`` is read)
    :param mag: magnitude
    :param params: dictionary of coefficients keyed by "PGV" and "SA"
    """
    coeffs = params["PGV"] if imt.name == "PGV" else params["SA"]
    # constant above M6.5, piecewise interpolation below
    if mag > 6.5:
        return coeffs["tau3"]
    if mag > 5.5:
        return ITPL(mag, coeffs["tau3"], coeffs["tau2"], 5.5, 1.0)
    if mag > 5.0:
        return ITPL(mag, coeffs["tau2"], coeffs["tau1"], 5.0, 0.5)
    return coeffs["tau1"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tau_at_quantile(mean, stddev, quantile):
    """Return the value of tau at a given quantile in the form of a
    dictionary organised by intensity measure.

    :param mean: mean tau model (dict of dicts)
    :param stddev: standard deviation model (dict of dicts)
    :param quantile: the quantile, or None for the mean model
    """
    if quantile is None:
        # no quantile requested: return a fresh copy of the mean model
        return {imt: {key: mean[imt][key] for key in mean[imt]}
                for imt in mean}
    return {imt: {key: _at_percentile(mean[imt][key], stddev[imt][key],
                                      quantile)
                  for key in mean[imt]}
            for imt in mean}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_stddevs(self, mag, imt, stddev_types, num_sites):
    """Return the standard deviations for either the ergodic or
    non-ergodic models.

    :param mag: the magnitude
    :param imt: the intensity measure type
    :param stddev_types: list of requested standard deviation types
    :param num_sites: length of each returned array
    :returns: a list of arrays, one per requested type
    """
    tau = self._get_tau(imt, mag)
    phi = self._get_phi(imt, mag)
    # total sigma combines inter- and intra-event terms in quadrature
    sigma = np.sqrt(tau ** 2. + phi ** 2.)
    stddevs = []
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stddev_type == const.StdDev.TOTAL:
            # `+ np.zeros(num_sites)` broadcasts the scalar to an array
            stddevs.append(sigma + np.zeros(num_sites))
        elif stddev_type == const.StdDev.INTRA_EVENT:
            stddevs.append(phi + np.zeros(num_sites))
        elif stddev_type == const.StdDev.INTER_EVENT:
            stddevs.append(tau + np.zeros(num_sites))
    return stddevs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_stddevs(self, mag, imt, stddev_types, num_sites):
    """Return the total standard deviation, one array per requested
    TOTAL standard deviation type (other types are ignored)."""
    stddevs = []
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stddev_type == const.StdDev.TOTAL:
            sigma = self._get_total_sigma(imt, mag)
            # broadcast the scalar sigma to one value per site
            stddevs.append(sigma + np.zeros(num_sites))
    return stddevs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_sigma_at_quantile(self, sigma_quantile):
    """Calculate the total standard deviation at the specified quantile
    and store it into ``self.SIGMA`` as a CoeffsTable.

    :param sigma_quantile:
        the quantile at which to compute sigma, or None for the mean
    """
    # Mean tau is found in self.TAU. Get the variance in tau
    tau_std = TAU_SETUP[self.tau_model]["STD"]
    # Mean phiss is found in self.PHI_SS. Get the variance in phi
    phi_std = deepcopy(self.PHI_SS.sa_coeffs)
    phi_std.update(self.PHI_SS.non_sa_coeffs)
    for key in phi_std:
        phi_std[key] = {"a": PHI_SETUP[self.phi_model][key]["var_a"],
                        "b": PHI_SETUP[self.phi_model][key]["var_b"]}
    if self.ergodic:
        # IMT list should be taken from the PHI_S2SS_MODEL
        imt_list = list(
            PHI_S2SS_MODEL[self.phi_s2ss_model].non_sa_coeffs.keys())
        imt_list += \
            list(PHI_S2SS_MODEL[self.phi_s2ss_model].sa_coeffs.keys())
    else:
        imt_list = phi_std.keys()
    phi_std = CoeffsTable(sa_damping=5, table=phi_std)
    tau_bar, tau_std = self._get_tau_vector(self.TAU, tau_std, imt_list)
    phi_bar, phi_std = self._get_phi_vector(self.PHI_SS, phi_std, imt_list)
    sigma = {}
    # Calculate the total standard deviation
    for imt in imt_list:
        sigma[imt] = {}
        for i, key in enumerate(self.tau_keys):
            # Calculates the expected standard deviation (quadrature)
            sigma_bar = np.sqrt(tau_bar[imt][i] ** 2. +
                                phi_bar[imt][i] ** 2.)
            # Calculates the variance in the standard deviation
            sigma_std = np.sqrt(tau_std[imt][i] ** 2. +
                                phi_std[imt][i] ** 2.)
            # The keys swap from tau to sigma
            new_key = key.replace("tau", "sigma")
            if sigma_quantile is not None:
                sigma[imt][new_key] = \
                    _at_percentile(sigma_bar, sigma_std, sigma_quantile)
            else:
                sigma[imt][new_key] = sigma_bar
            self.tau_keys[i] = new_key
    self.SIGMA = CoeffsTable(sa_damping=5, table=sigma)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
    """Return the vectors of mean and variance of tau values
    corresponding to the specific model, as two dictionaries keyed by
    intensity measure type.

    Also sets ``self.magnitude_limits`` and ``self.tau_keys`` from the
    model configuration.
    """
    self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
    self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
    t_bar = {}
    t_std = {}
    for imt in imt_list:
        t_bar[imt] = []
        t_std[imt] = []
        # evaluate tau at each magnitude limit of the model
        for mag, key in zip(self.magnitude_limits, self.tau_keys):
            t_bar[imt].append(
                TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
            t_std[imt].append(
                TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
    return t_bar, t_std
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_phi_vector(self, phi_mean, phi_std, imt_list):
    """Return the vectors of mean and variance of phi values
    corresponding to the specific model, as two dictionaries keyed by
    intensity measure type."""
    p_bar = {}
    p_std = {}
    for imt in imt_list:
        p_bar[imt] = []
        p_std[imt] = []
        for mag in self.magnitude_limits:
            phi_ss_mean = get_phi_ss(imt, mag, phi_mean)
            phi_ss_std = get_phi_ss(imt, mag, phi_std)
            if self.ergodic:
                # Add on the phi_s2ss term according to Eqs. 5.15 and 5.16
                # of Al Atik (2015)
                phi_ss_mean = np.sqrt(
                    phi_ss_mean ** 2. +
                    PHI_S2SS_MODEL[self.phi_s2ss_model][imt]["mean"] ** 2.
                )
                phi_ss_std = np.sqrt(
                    phi_ss_std ** 2. +
                    PHI_S2SS_MODEL[self.phi_s2ss_model][imt]["var"] ** 2.
                )
            p_bar[imt].append(phi_ss_mean)
            p_std[imt].append(phi_ss_std)
    return p_bar, p_std
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_total_sigma(self, imt, mag):
    """Return the estimated total standard deviation for a given
    intensity measure type and magnitude, interpolating between the
    model's magnitude limits when needed."""
    C = self.SIGMA[imt]
    if mag <= self.magnitude_limits[0]:
        # The CENA constant model is always returned here
        return C[self.tau_keys[0]]
    elif mag > self.magnitude_limits[-1]:
        # above the largest magnitude limit: use the last value
        return C[self.tau_keys[-1]]
    else:
        # Needs interpolation between the two bracketing limits
        for i in range(len(self.tau_keys) - 1):
            l_m = self.magnitude_limits[i]
            u_m = self.magnitude_limits[i + 1]
            if mag > l_m and mag <= u_m:
                return ITPL(mag,
                            C[self.tau_keys[i + 1]],
                            C[self.tau_keys[i]],
                            l_m,
                            u_m - l_m)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_hcurves_and_means(dstore):
    """Extract hazard curves from the datastore and compute their means.

    :param dstore: a datastore instance
    :returns: curves_by_rlz, mean_curves
    """
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    getter = getters.PmapGetter(dstore, rlzs_assoc)
    pmaps = getter.get_pmaps()
    # map each realization to its probability map; the mean curves are
    # read directly from the datastore
    return dict(zip(getter.rlzs, pmaps)), dstore['hcurves/mean']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_mean(self, C, mag, rhypo, hypo_depth, mean, idx):
""" Compute mean value according to equations 10 and 11 page 226. """ |
mean[idx] = (C['C1'] + C['C2'] * mag + C['C3'] * np.log(rhypo[idx] +
C['C4'] * np.exp(C['C5'] * mag)) + C['C6'] * hypo_depth) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_std(self, C, stddevs, idx):
""" Compute total standard deviation, see tables 3 and 4, pages 227 and 228. """ |
for stddev in stddevs:
stddev[idx] += C['sigma'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_config(config):
    """Check config file inputs and overwrite bad values with the defaults.

    :param dict config: configuration settings; must contain the key
        'number_earthquakes'
    :returns: the (possibly updated) config dictionary
    :raises ValueError: if an essential key is missing
    """
    essential_keys = ['number_earthquakes']
    for key in essential_keys:
        if key not in config:
            # NB: fixed typo in the original message ("configuation")
            raise ValueError('For Kijko Nonparametric Gaussian the key %s '
                             'needs to be set in the configuration' % key)
    # fall back to sensible defaults for missing/invalid optional keys
    if config.get('tolerance', 0.0) <= 0.0:
        config['tolerance'] = 0.05
    if config.get('maximum_iterations', 0) < 1:
        config['maximum_iterations'] = 100
    if config.get('number_samples', 0) < 2:
        config['number_samples'] = 51
    return config
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _get_exponential_spaced_values(mmin, mmax, number_samples):
'''
Function to return a set of exponentially spaced values between mmin and
mmax
:param float mmin:
Minimum value
:param float mmax:
Maximum value
:param float number_samples:
Number of exponentially spaced samples
:return np.ndarray:
Set of 'number_samples' exponentially spaced values
'''
lhs = np.exp(mmin) + np.arange(0., number_samples - 1., 1.) *\
((np.exp(mmax) - np.exp(mmin)) / (number_samples - 1.))
magval = np.hstack([lhs, np.exp(mmax)])
return np.log(magval) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dbcmd(action, *args):
    """A dispatcher to the database server.

    :param action: database action to perform
    :param args: arguments
    :returns: the result of the action (unwrapped if it is a Result)
    """
    global sock
    if sock is None:
        # lazily open a single REQ socket towards the dbserver
        sock = zeromq.Socket(
            'tcp://%s:%s' % (config.dbserver.host, DBSERVER_PORT),
            zeromq.zmq.REQ, 'connect').__enter__()
        # the socket will be closed when the calculation ends
    res = sock.send((action,) + args)
    if isinstance(res, parallel.Result):
        return res.get()
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_log_record(self, record):
""" Massage a log record before emitting it. Intended to be used by the custom log handlers defined in this module. """ |
if not hasattr(record, 'hostname'):
record.hostname = '-'
if not hasattr(record, 'job_id'):
record.job_id = self.job_id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(job_id, log_level='info', log_file=None):
    """Context manager adding and removing log handlers.

    :param job_id: ID of the current job
    :param log_level: one of debug, info, warn, error, critical
    :param log_file: log file path (if None, logs on stdout only)
    """
    handlers = [LogDatabaseHandler(job_id)]  # log on db always
    if log_file is None:
        # add a StreamHandler if not already there
        if not any(h for h in logging.root.handlers
                   if isinstance(h, logging.StreamHandler)):
            handlers.append(LogStreamHandler(job_id))
    else:
        handlers.append(LogFileHandler(job_id, log_file))
    for handler in handlers:
        logging.root.addHandler(handler)
    init(job_id, LEVELS.get(log_level, logging.WARNING))
    try:
        yield
    finally:
        # sanity check to make sure that the logging on file is working
        if (log_file and log_file != os.devnull and
                os.path.getsize(log_file) == 0):
            # `Logger.warn` is a deprecated alias: use `warning`
            logging.root.warning('The log file %s is empty!?' % log_file)
        for handler in handlers:
            logging.root.removeHandler(handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_median_area(self, mag, rake):
    """Return the median area as a function of magnitude only (the
    ``rake`` argument is accepted for interface compatibility but not
    used)."""
    # strike slip scaling relation for rupture length
    rupture_length = 10.0 ** (-2.57 + 0.62 * mag)
    seismogenic_width = 20.0
    # square rupture until the seismogenic width is saturated, then
    # the area grows linearly with length
    if rupture_length < seismogenic_width:
        return rupture_length ** 2.
    return rupture_length * seismogenic_width
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _construct_surface(lons, lats, upper_depth, lower_depth):
    """Utility method that constructs and returns a simple fault surface
    with top edge specified by `lons` and `lats` and extending vertically
    from `upper_depth` to `lower_depth`.

    The underlying mesh is built by repeating the same coordinates
    (`lons` and `lats`) at the two specified depth levels.
    """
    depths = np.array([
        np.zeros_like(lons) + upper_depth,
        np.zeros_like(lats) + lower_depth
    ])
    # duplicate the top-edge coordinates at both depth levels
    mesh = RectangularMesh(
        np.tile(lons, (2, 1)), np.tile(lats, (2, 1)), depths
    )
    return SimpleFaultSurface(mesh)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_min_distance_to_sub_trench(lons, lats):
    """Compute and return the minimum distance between the subduction
    trench and the points specified by 'lons' and 'lats'.

    The trench is modelled as a vertical
    :class:`openquake.hazardlib.geo.SimpleFaultSurface` extending from 0
    to 10 km depth; the 10 km value is arbitrary given that the distance
    calculation depends only on the top edge depth. The absolute value
    of the Rx distance is returned.
    """
    trench = _construct_surface(SUB_TRENCH_LONS, SUB_TRENCH_LATS, 0., 10.)
    sites = Mesh(lons, lats, None)
    return np.abs(trench.get_rx_distance(sites))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_min_distance_to_volcanic_front(lons, lats):
    """Compute and return the minimum distance between the volcanic front
    and the points specified by 'lons' and 'lats'.

    Distance is negative if a point is located east of the volcanic
    front, positive otherwise; unlike
    :meth:`_get_min_distance_to_sub_trench` the absolute value is NOT
    taken.
    """
    vf = _construct_surface(VOLCANIC_FRONT_LONS, VOLCANIC_FRONT_LATS, 0., 10.)
    sites = Mesh(lons, lats, None)
    return vf.get_rx_distance(sites)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_subduction_trench_correction(mean, x_tr, H, rrup, imt):
""" Implement equation for subduction trench correction as described in equation 3.5.2-1, page 3-148 of "Technical Reports on National Seismic Hazard Maps for Japan" """ |
if imt.name == 'PGV':
V1 = 10 ** ((-4.021e-5 * x_tr + 9.905e-3) * (H - 30))
V2 = np.maximum(1., (10 ** (-0.012)) * ((rrup / 300.) ** 2.064))
corr = V2
if H > 30:
corr *= V1
else:
V2 = np.maximum(1., (10 ** (+0.13)) * ((rrup / 300.) ** 3.2))
corr = V2
if H > 30:
V1 = 10 ** ((-8.1e-5 * x_tr + 2.0e-2) * (H - 30))
corr *= V1
return np.log(np.exp(mean) * corr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_volcanic_front_correction(mean, x_vf, H, imt):
""" Implement equation for volcanic front correction as described in equation 3.5.2.-2, page 3-149 of "Technical Reports on National Seismic Hazard Maps for Japan" """ |
V1 = np.zeros_like(x_vf)
if imt.name == 'PGV':
idx = x_vf <= 75
V1[idx] = 4.28e-5 * x_vf[idx] * (H - 30)
idx = x_vf > 75
V1[idx] = 3.21e-3 * (H - 30)
V1 = 10 ** V1
else:
idx = x_vf <= 75
V1[idx] = 7.06e-5 * x_vf[idx] * (H - 30)
idx = x_vf > 75
V1[idx] = 5.30e-3 * (H - 30)
V1 = 10 ** V1
return np.log(np.exp(mean) * V1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """Implement equation 3.5.1-1 page 148 for the mean value and
    equation 3.5.5-2 page 151 for the total standard deviation.

    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # d is an additive term of the mean equation, null for this model
    mean = self._get_mean(imt, rup.mag, rup.hypo_depth, dists.rrup, d=0)
    stddevs = self._get_stddevs(stddev_types, dists.rrup)
    # scale the mean by the vs30-dependent site amplification factor
    mean = self._apply_amplification_factor(mean, sites.vs30)
    return mean, stddevs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mean(self, imt, mag, hypo_depth, rrup, d):
    """Return the mean value as defined in equation 3.5.1-1 page 148.

    :param d: additive adjustment term of the equation
    """
    # clip magnitude at 8.3 as per note at page 3-36 in table Table 3.3.2-6
    # in "Technical Reports on National Seismic Hazard Maps for Japan"
    mag = min(mag, 8.3)
    if imt.name == 'PGV':
        mean = (
            0.58 * mag +
            0.0038 * hypo_depth +
            d -
            1.29 -
            np.log10(rrup + 0.0028 * 10 ** (0.5 * mag)) -
            0.002 * rrup
        )
    else:
        mean = (
            0.50 * mag +
            0.0043 * hypo_depth +
            d +
            0.61 -
            np.log10(rrup + 0.0055 * 10 ** (0.5 * mag)) -
            0.003 * rrup
        )
    # NOTE(review): the division by g*100 looks like a cm/s^2 -> g unit
    # conversion for the SA/PGA case -- confirm against the reference
    mean = np.log10(10**(mean)/(g*100))
    return mean
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_stddevs(self, stddev_types, rrup):
""" Return standard deviations as defined in equation 3.5.5-2 page 151 """ |
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
std = np.zeros_like(rrup)
std[rrup <= 20] = 0.23
idx = (rrup > 20) & (rrup <= 30)
std[idx] = 0.23 - 0.03 * np.log10(rrup[idx] / 20) / np.log10(30. / 20.)
std[rrup > 30] = 0.20
# convert from log10 to ln
std = np.log(10 ** std)
return [std for stddev_type in stddev_types] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_stddevs(self, stddev_types, pgv):
""" Return standard deviations as defined in equation 3.5.5-1 page 151 """ |
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
std = np.zeros_like(pgv)
std[pgv <= 25] = 0.20
idx = (pgv > 25) & (pgv <= 50)
std[idx] = 0.20 - 0.05 * (pgv[idx] - 25) / 25
std[pgv > 50] = 0.15
# convert from log10 to ln
std = np.log(10 ** std)
return [std for stddev_type in stddev_types] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_memory(calc_id=-1):
    """Plot the memory occupation of each task of the given calculation.

    :param calc_id: calculation ID (default -1, presumably the latest
        calculation -- semantics defined by util.read)
    """
    dstore = util.read(calc_id)
    plots = []
    for task_name in dstore['task_info']:
        # memory occupation in GB, one value per task invocation
        mem = dstore['task_info/' + task_name]['mem_gb']
        plots.append((task_name, mem))
    plt = make_figure(plots)
    plt.show()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_to_array(pmap, nsites, imtls, inner_idx=0):
    """
    Convert the probability map into a composite array with header.

    :param pmap: probability map
    :param nsites: total number of sites
    :param imtls: a DictArray with IMT and levels
    :param inner_idx: index into the innermost dimension of the curves
    :returns: a composite array of length nsites
    """
    # build the export dtype, of the form PGA-0.1, PGA-0.2 ...
    fields = [('%s-%s' % (imt, iml), F32)
              for imt, imls in imtls.items() for iml in imls]
    curves = numpy.zeros(nsites, numpy.dtype(fields))
    for sid, pcurve in pmap.items():
        row = curves[sid]
        # the level index runs over all (imt, iml) pairs in field order
        for lvl, (field_name, _dt) in enumerate(fields):
            row[field_name] = pcurve.array[lvl, inner_idx]
    return curves
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_hazard_maps(curves, imls, poes):
    """
    Given a set of hazard curve poes, interpolate a hazard map at the
    specified ``poe``.

    :param curves:
        2D array of floats; each row contains the PoEs (Probabilities
        of Exceedance) corresponding to ``imls`` for one geographical
        location.
    :param imls:
        Intensity Measure Levels associated with these hazard
        ``curves``; array-like of floats.
    :param poes:
        Value(s) on which to interpolate a hazard map from the input
        ``curves``; array-like or scalar (for a single PoE).
    :returns:
        An array of shape N x P, where N is the number of curves and P
        the number of poes.
    """
    poes = numpy.array(poes)
    if poes.ndim == 0:
        poes = poes.reshape(1)  # scalar passed in: make a 1-element array
    if curves.ndim == 1:
        curves = curves.reshape((1,) + curves.shape)  # single site: 1 x L
    num_levels = curves.shape[1]
    if num_levels != len(imls):
        raise ValueError('The curves have %d levels, %d were passed' %
                         (num_levels, len(imls)))
    result = []
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # avoid RuntimeWarning: divide by zero encountered in log
        # happening in the classical_tiling tests
        log_imls = numpy.log(numpy.array(imls[::-1]))
        for curve in curves:
            # the reversed hazard curve, with too small poes clamped
            # to EPSILON so that the log below stays finite
            cutoff = [max(poe, EPSILON) for poe in curve[::-1]]
            row = []
            for poe in poes:
                if poe > cutoff[-1]:  # above the greatest poe in the curve
                    # extrapolate the iml to zero as per
                    # https://bugs.launchpad.net/oq-engine/+bug/1292093
                    # a consequence is that if all poes are zero any
                    # poe > 0 is big and the hmap goes automatically to 0
                    row.append(0)
                else:
                    # exp-log interpolation, to reduce numerical errors
                    # see https://bugs.launchpad.net/oq-engine/+bug/1252770
                    row.append(numpy.exp(numpy.interp(
                        numpy.log(poe), numpy.log(cutoff), log_imls)))
            result.append(row)
    return numpy.array(result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_hmap(pmap, imtls, poes):
    """
    Compute the hazard maps associated to the passed probability map.

    :param pmap: hazard curves in the form of a ProbabilityMap
    :param imtls: DictArray with M intensity measure types
    :param poes: P PoEs where to compute the maps
    :returns: a ProbabilityMap with size (N, M, P)
    """
    M, P = len(imtls), len(poes)
    hmap = probability_map.ProbabilityMap.build(M, P, pmap, dtype=F32)
    if len(pmap) == 0:
        return hmap  # no sites, empty hazard map
    for m, imt in enumerate(imtls):
        # one row of levels per site, for this IMT only
        curves = numpy.array([pmap[sid].array[imtls(imt), 0]
                              for sid in pmap.sids])
        hazmaps = compute_hazard_maps(curves, imtls[imt], poes)  # (N, P)
        for sid, row in zip(pmap.sids, hazmaps):
            hmap[sid].array[m] = row
    return hmap
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_uhs(hmap, info):
    """
    Make Uniform Hazard Spectra curves for each location.

    :param hmap: array of shape (N, M, P)
    :param info: a dictionary with keys poes, imtls, uhs_dt
    :returns: a composite array containing uniform hazard spectra
    """
    uhs = numpy.zeros(len(hmap), info['uhs_dt'])
    # UHS are defined only for the spectral ordinates (PGA, SA)
    spectral = [(m, imt) for m, imt in enumerate(info['imtls'])
                if imt.startswith(('PGA', 'SA'))]
    for p, poe in enumerate(info['poes']):
        for m, imt in spectral:
            uhs[str(poe)][imt] = hmap[:, m, p]
    return uhs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_array(self, ebruptures):
    """
    Convert a list of ebruptures into an array of dtype RuptureData.dt.

    For each rupture the context maker parameters are added to the
    rupture object first, then a flat record is built with serial,
    source index, number of occurrences, rate, magnitude, middle
    point, strike, dip, rake, a WKT MULTIPOLYGON of the surface
    boundaries and the extra parameters listed in ``self.params``.
    """
    data = []
    for ebr in ebruptures:
        rup = ebr.rupture
        self.cmaker.add_rup_params(rup)
        # extra parameters, in the order given by self.params
        ruptparams = tuple(getattr(rup, param) for param in self.params)
        point = rup.surface.get_middle_point()
        multi_lons, multi_lats = rup.surface.get_surface_boundaries()
        # one '((lon lat,...))' group per surface boundary
        bounds = ','.join('((%s))' % ','.join(
            '%.5f %.5f' % (lon, lat) for lon, lat in zip(lons, lats))
            for lons, lats in zip(multi_lons, multi_lats))
        try:
            rate = ebr.rupture.occurrence_rate
        except AttributeError:  # for nonparametric sources
            rate = numpy.nan
        data.append(
            (ebr.serial, ebr.srcidx, ebr.n_occ, rate,
             rup.mag, point.x, point.y, point.z, rup.surface.get_strike(),
             rup.surface.get_dip(), rup.rake,
             'MULTIPOLYGON(%s)' % decode(bounds)) + ruptparams)
    return numpy.array(data, self.dt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, rup_array):
    """
    Store the ruptures in array format.
    """
    self.nruptures += len(rup_array)
    # shift the geometry indices by the number of points already stored
    offset = len(self.datastore['rupgeoms'])
    for field in ('gidx1', 'gidx2'):
        rup_array.array[field] += offset
    stored = self.datastore.get_attr('ruptures', 'nbytes', 0)
    self.datastore.extend(
        'ruptures', rup_array, nbytes=stored + rup_array.nbytes)
    self.datastore.extend('rupgeoms', rup_array.geom)
    # TODO: PMFs for nonparametric ruptures are not stored
    self.datastore.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """
    Save information about the rupture codes as attributes of the
    'ruptures' dataset.
    """
    if 'ruptures' not in self.datastore:  # for UCERF
        return
    attrs = {}
    for code in numpy.unique(self.datastore['ruptures']['code']):
        # map each code to the space-joined class names it stands for
        names = [cls.__name__ for cls in BaseRupture.types[code]]
        attrs['code_%d' % code] = ' '.join(names)
    self.datastore.set_attrs('ruptures', **attrs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_nonlinear_magnitude_term(self, C, mag):
""" Computes the non-linear magnitude term """ |
return self._compute_linear_magnitude_term(C, mag) +\
C["b3"] * ((mag - 7.0) ** 2.) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_magnitude_distance_term(self, C, rjb, mag):
""" Returns the magntude dependent distance term """ |
rval = np.sqrt(rjb ** 2. + C["h"] ** 2.)
return (C["b4"] + C["b5"] * (mag - 4.5)) * np.log(rval) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_bnl(self, C_AMP, vs30):
""" Gets the nonlinear term, given by equation 8 of Atkinson & Boore 2006 """ |
# Default case 8d
bnl = np.zeros_like(vs30)
if np.all(vs30 >= self.CONSTS["Vref"]):
return bnl
# Case 8a
bnl[vs30 < self.CONSTS["v1"]] = C_AMP["b1sa"]
# Cade 8b
idx = np.logical_and(vs30 > self.CONSTS["v1"],
vs30 <= self.CONSTS["v2"])
if np.any(idx):
bnl[idx] = (C_AMP["b1sa"] - C_AMP["b2sa"]) *\
(np.log(vs30[idx] / self.CONSTS["v2"]) /
np.log(self.CONSTS["v1"] / self.CONSTS["v2"])) + C_AMP["b2sa"]
# Case 8c
idx = np.logical_and(vs30 > self.CONSTS["v2"],
vs30 < self.CONSTS["Vref"])
if np.any(idx):
bnl[idx] = C_AMP["b2sa"] *\
np.log(vs30[idx] / self.CONSTS["Vref"]) /\
np.log(self.CONSTS["v2"] / self.CONSTS["Vref"])
return bnl |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_stddevs(self, C, stddev_types, stddev_shape):
    """
    Return the standard deviations given in Table 2, broadcast to
    ``stddev_shape``.
    """
    zeros = np.zeros(stddev_shape)
    stddevs = []
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stddev_type == const.StdDev.TOTAL:
            stddevs.append(zeros + C["sigtot"])
        elif stddev_type == const.StdDev.INTRA_EVENT:
            stddevs.append(zeros + C['sig2'])
        elif stddev_type == const.StdDev.INTER_EVENT:
            stddevs.append(zeros + C['sig1'])
    return stddevs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tidy(fnames):
    """
    Reformat a NRML file in a canonical form. That also means reducing
    the precision of the floats to a standard value. If the file is
    invalid, a clear error message is shown.

    :param fnames: NRML file names; a ``.bak`` copy of each is kept
    """
    import shutil
    for fname in fnames:
        try:
            node = nrml.read(fname)
        except ValueError as err:
            print(err)
            return
        # keep a backup copy before rewriting the file in place;
        # shutil.copyfile closes its handles, unlike the previous
        # open(fname, 'rb').read() which leaked a file object
        shutil.copyfile(fname, fname + '.bak')
        with open(fname, 'wb') as f:
            # make sure the xmlns i.e. the NRML version is unchanged
            nrml.write(node.nodes, f, writers.FIVEDIGITS, xmlns=node['xmlns'])
        print('Reformatted %s, original left in %s.bak' % (fname, fname))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_hdf5(input):
    """
    Convert .xml and .npz files to .hdf5 files.
    """
    with performance.Monitor('to_hdf5') as mon:
        for fname in input:
            if fname.endswith('.npz'):
                converted = convert_npz_hdf5(fname, fname[:-3] + 'hdf5')
            elif fname.endswith('.xml'):  # for source model files
                converted = convert_xml_hdf5(fname, fname[:-3] + 'hdf5')
            else:  # unknown extension, skip
                continue
            print('Generated %s' % converted)
    print(mon)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_rup_array(ebruptures, srcfilter=nofilter):
    """
    Convert a list of EBRuptures into a numpy composite array, by
    filtering out the ruptures far away from every site.

    :param ebruptures: a list of EBRupture objects
    :param srcfilter: a filter with an ``integration_distance`` and a
        ``close_sids`` method (no filtering by default)
    :returns: an empty tuple if all ruptures were discarded, otherwise
        an ArrayWrapper of dtype ``rupture_dt`` carrying the
        concatenated geometries in ``geom`` and the total ``nbytes``
    """
    if not BaseRupture._code:
        BaseRupture.init()  # initialize rupture codes
    rups = []
    geoms = []
    nbytes = 0
    offset = 0
    for ebrupture in ebruptures:
        rup = ebrupture.rupture
        mesh = surface_to_array(rup.surface)  # array of shape (3, sy, sz)
        sy, sz = mesh.shape[1:]  # sanity checks
        assert sy < TWO16, 'Too many multisurfaces: %d' % sy
        assert sz < TWO16, 'The rupture mesh spacing is too small'
        points = mesh.reshape(3, -1).T   # shape (n, 3)
        # bounding box of the rupture, used for the distance filtering
        minlon = points[:, 0].min()
        minlat = points[:, 1].min()
        maxlon = points[:, 0].max()
        maxlat = points[:, 1].max()
        # discard the rupture if no site is close to its bounding box
        if srcfilter.integration_distance and len(srcfilter.close_sids(
                (minlon, minlat, maxlon, maxlat),
                rup.tectonic_region_type, rup.mag)) == 0:
            continue
        hypo = rup.hypocenter.x, rup.hypocenter.y, rup.hypocenter.z
        rate = getattr(rup, 'occurrence_rate', numpy.nan)
        # this rupture owns the slice [offset, offset + len(points))
        # of the concatenated geometry array
        tup = (ebrupture.serial, ebrupture.srcidx, ebrupture.grp_id,
               rup.code, ebrupture.n_occ, rup.mag, rup.rake, rate,
               minlon, minlat, maxlon, maxlat,
               hypo, offset, offset + len(points), sy, sz)
        offset += len(points)
        rups.append(tup)
        geoms.append(numpy.array([tuple(p) for p in points], point3d))
        nbytes += rupture_dt.itemsize + mesh.nbytes
    if not rups:
        return ()
    dic = dict(geom=numpy.concatenate(geoms), nbytes=nbytes)
    # TODO: PMFs for nonparametric ruptures are not converted
    return hdf5.ArrayWrapper(numpy.array(rups, rupture_dt), dic)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_cluster(sources, srcfilter, num_ses, param):
    """
    Sample the ruptures generated by a cluster of sources.

    :param sources:
        A sequence of sources of the same group
    :param srcfilter:
        A callable yielding (source, sites) pairs
    :param num_ses:
        Number of stochastic event sets
    :param param:
        A dictionary of additional parameters including
        ses_per_logic_tree_path
    :returns:
        a tuple (eb_ruptures, calc_times, eff_ruptures, grp_id)
    """
    eb_ruptures = []
    numpy.random.seed(sources[0].serial)
    [grp_id] = set(src.src_group_id for src in sources)
    # AccumDict of arrays with 3 elements weight, nsites, calc_time
    calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
    # Set the parameters required to compute the number of occurrences
    # of the group of sources
    # assert param['oqparam'].number_of_logic_tree_samples > 0
    samples = getattr(sources[0], 'samples', 1)
    # NOTE(review): no default is passed, so this raises AttributeError
    # if the group has no temporal occurrence model -- confirm intended
    tom = getattr(sources, 'temporal_occurrence_model')
    rate = tom.occurrence_rate
    time_span = tom.time_span
    # Note that using a single time interval corresponding to the product
    # of the investigation time and the number of realisations as we do
    # here is admitted only in the case of a time-independent model
    grp_num_occ = numpy.random.poisson(rate * time_span * samples *
                                       num_ses)
    # Now we process the sources included in the group. Possible cases:
    # * The group is a cluster. In this case we choose one rupture per each
    #   source; uncertainty in the ruptures can be handled in this case
    #   using mutually exclusive ruptures (note that this is admitted
    #   only for nons-parametric sources).
    # * The group contains mutually exclusive sources. In this case we
    #   choose one source and then one rupture from this source.
    rup_counter = {}
    rup_data = {}
    eff_ruptures = 0
    for rlz_num in range(grp_num_occ):
        if sources.cluster:
            for src, _sites in srcfilter(sources):
                # Sum Ruptures
                if rlz_num == 0:
                    eff_ruptures += src.num_ruptures
                # Track calculation time
                t0 = time.time()
                rup = src.get_one_rupture()
                # The problem here is that we do not know a-priori the
                # number of occurrences of a given rupture.
                if src.id not in rup_counter:
                    rup_counter[src.id] = {}
                    rup_data[src.id] = {}
                if rup.idx not in rup_counter[src.id]:
                    rup_counter[src.id][rup.idx] = 1
                    rup_data[src.id][rup.idx] = [rup, src.id, grp_id]
                else:
                    rup_counter[src.id][rup.idx] += 1
                # Store info
                dt = time.time() - t0
                calc_times[src.id] += numpy.array([len(rup_data[src.id]),
                                                   src.nsites, dt])
        elif param['src_interdep'] == 'mutex':
            # NOTE(review): mutex sources are unsupported; exit(0)
            # terminates the whole process silently -- confirm intended
            print('Not yet implemented')
            exit(0)
    # Create event based ruptures
    for src_key in rup_data:
        for rup_key in rup_data[src_key]:
            dat = rup_data[src_key][rup_key]
            cnt = rup_counter[src_key][rup_key]
            ebr = EBRupture(dat[0], dat[1], dat[2], cnt, samples)
            eb_ruptures.append(ebr)
    return eb_ruptures, calc_times, eff_ruptures, grp_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_sites(calc_id=-1):
    """
    Plot the sites
    """
    # NB: matplotlib is imported inside since it is a costly import
    import matplotlib.pyplot as plt
    dstore = util.read(calc_id)
    sitecol = dstore['sitecol']
    lons, lats = sitecol.lons, sitecol.lats
    # normalize longitudes crossing the international date line
    if len(lons) > 1 and cross_idl(*lons):
        lons %= 360
    fig, axes = plt.subplots()
    axes.grid(True)
    if 'site_model' in dstore:
        site_model = dstore['site_model']
        sm_lons, sm_lats = site_model['lon'], site_model['lat']
        if len(sm_lons) > 1 and cross_idl(*sm_lons):
            sm_lons %= 360
        plt.scatter(sm_lons, sm_lats, marker='.', color='orange')
    plt.scatter(lons, lats, marker='+')
    plt.show()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_distance_term(self, C, rrup, backarc):
""" Returns the distance scaling term, which varies depending on whether the site is in the forearc or the backarc """ |
# Geometric attenuation function
distance_scale = -np.log10(np.sqrt(rrup ** 2 + 3600.0))
# Anelastic attenuation in the backarc
distance_scale[backarc] += (C["c2"] * rrup[backarc])
# Anelastic Attenuation in the forearc
idx = np.logical_not(backarc)
distance_scale[idx] += (C["c1"] * rrup[idx])
return distance_scale |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_scaling_term(self, C, rrup):
""" Applies the Cascadia correction factor from Table 2 and the positive correction factor given on Page 567 """ |
a_f = 0.15 + 0.0007 * rrup
a_f[a_f > 0.35] = 0.35
return C["af"] + a_f |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_mean(self, C, mag, rjb):
""" Compute mean value, see table 2. """ |
m1 = 6.4
r1 = 50.
h = 6.
R = np.sqrt(rjb ** 2 + h ** 2)
R1 = np.sqrt(r1 ** 2 + h ** 2)
less_r1 = rjb < r1
ge_r1 = rjb >= r1
mean = (C['c1'] + C['c4'] * (mag - m1) * np.log(R) + C['c5'] * rjb +
C['c8'] * (8.5 - mag) ** 2)
mean[less_r1] += C['c3'] * np.log(R[less_r1])
mean[ge_r1] += (C['c3'] * np.log(R1) +
C['c6'] * (np.log(R[ge_r1]) - np.log(R1)))
if mag < m1:
mean += C['c2'] * (mag - m1)
else:
mean += C['c7'] * (mag - m1)
return mean |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_weichert_factor(beta, cmag, cyear, end_year):
    '''
    Get the Weichert adjustment factor for each of the magnitude bins.

    :param float beta:
        Beta value of the Gutenberg & Richter parameter (b * log(10.))
    :param np.ndarray cmag:
        Magnitude values of the completeness table
    :param np.ndarray cyear:
        Year values of the completeness table
    :param float end_year:
        Last year for consideration in the catalogue
    :returns:
        Weichert adjustment factor (float) and the mid-points of the
        completeness bins (None for a single-valued table)
    '''
    if len(cmag) == 1:
        # Single completeness value, so the Weichert factor is simply
        # the inverse of the duration of the complete period
        return 1.0 / (end_year - cyear[0] + 1), None
    # mid-points of the completeness bins; the original code requires
    # equally-sized magnitude bins
    midpoints = (cmag[1:] + cmag[:-1]) / 2.
    cval = np.hstack([midpoints, cmag[-1] + (midpoints[-1] - cmag[-2])])
    weights = np.exp(-beta * cval)
    t_f = sum(weights) / sum((end_year - cyear + 1) * weights)
    return t_f, cval
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_even_magnitude_completeness(completeness_table, catalogue=None):
    '''
    To make the magnitudes evenly spaced, render to a constant 0.1
    magnitude unit.

    :param np.ndarray completeness_table:
        Completeness table in format [[year, mag]]
    :param catalogue:
        Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
    :returns:
        Tuple (re-sampled completeness table, 0.1)
    '''
    # maximum catalogue magnitude rounded down to one decimal
    mmax = np.floor(10. * np.max(catalogue.data['magnitude'])) / 10.
    check_completeness_table(completeness_table, catalogue)
    # append a closing magnitude (mmax + 0.1) and repeat the last year
    # so that the final completeness interval is bounded
    cmag = np.hstack([completeness_table[:, 1], mmax + 0.1])
    cyear = np.hstack([completeness_table[:, 0], completeness_table[-1, 0]])
    if np.shape(completeness_table)[0] == 1:
        # Simple single-valued table
        return completeness_table, 0.1
    for iloc in range(0, len(cmag) - 1):
        # magnitudes covered by this completeness interval, 0.1 spaced
        mrange = np.arange(np.floor(10. * cmag[iloc]) / 10.,
                           (np.ceil(10. * cmag[iloc + 1]) / 10.),
                           0.1)
        temp_table = np.column_stack([
            cyear[iloc] * np.ones(len(mrange), dtype=float),
            mrange])
        if iloc == 0:
            completeness_table = np.copy(temp_table)
        else:
            completeness_table = np.vstack([completeness_table,
                                            temp_table])
    # completeness_table = np.vstack([completeness_table,
    #                                np.array([[cyear[-1], cmag[-1]]])])
    return completeness_table, 0.1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unique(objects, key=None):
    """
    Raise a ValueError if there is a duplicated object, otherwise
    return the objects as they are.

    :param objects: a sequence of comparable objects
    :param key: optional function extracting the comparison key
    :raises ValueError: if two objects share the same key
    """
    dupl = []
    # NB: the sort must use the same key as the grouping, otherwise
    # objects with equal keys may not be adjacent in the sorted
    # sequence and the duplicates would be missed by groupby
    for obj, group in itertools.groupby(sorted(objects, key=key), key):
        if sum(1 for _ in group) > 1:
            dupl.append(obj)
    if dupl:
        raise ValueError('Found duplicates %s' % dupl)
    return objects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample(weighted_objects, num_samples, seed):
    """
    Take random samples of a sequence of weighted objects.

    :param weighted_objects:
        A finite sequence of objects with a `.weight` attribute.
        The weights must sum up to 1.
    :param num_samples:
        The number of samples to return
    :param seed:
        A random seed
    :return:
        A subsequence of the original sequence with `num_samples`
        elements
    """
    probabilities = []
    for obj in weighted_objects:
        w = obj.weight
        # .weight may be a plain float or a record with a 'weight' field
        probabilities.append(w if isinstance(w, float) else w['weight'])
    numpy.random.seed(seed)
    chosen = numpy.random.choice(
        len(probabilities), num_samples, p=probabilities)
    # NB: returning an array would break things
    return [weighted_objects[i] for i in chosen]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_info(smlt):
    """
    Given a path to a source model logic tree, collect all of the path
    names to the source models it contains and build

    1. a dictionary source model branch ID -> paths
    2. a dictionary source model branch ID -> source IDs in
       applyToSources

    :param smlt: source model logic tree file
    :returns: an Info namedtuple containing the two dictionaries
    """
    root = nrml.read(smlt)
    try:
        blevels = root.logicTree
    except Exception:
        raise InvalidFile('%s is not a valid source_model_logic_tree_file'
                          % smlt)
    paths = collections.defaultdict(set)  # branchID -> paths
    applytosources = collections.defaultdict(list)  # branchID -> source IDs
    for blevel in blevels:
        for branchset in blevel:
            if 'applyToSources' in branchset.attrib:
                applytosources[branchset['branchSetID']].extend(
                    branchset['applyToSources'].split())
            for branch in branchset:
                with node.context(smlt, branch):
                    fnames = unique(branch.uncertaintyModel.text.split())
                    paths[branch['branchID']].update(get_paths(smlt, fnames))
    return Info({br_id: sorted(ps) for br_id, ps in paths.items()},
                applytosources)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toml(uncertainty):
    """
    Convert an uncertainty node into a TOML string.
    """
    lines = [uncertainty.text.strip()]
    if not lines[0].startswith('['):  # a bare GSIM name was passed
        lines[0] = '[%s]' % lines[0]
    for key, raw in uncertainty.attrib.items():
        try:
            value = ast.literal_eval(raw)
        except ValueError:
            value = repr(raw)  # non-literal values stay quoted strings
        lines.append('%s = %s' % (key, value))
    return '\n'.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name(self):
    """
    Compact representation for the names: one or two names are kept
    as they are, longer lists are abbreviated as 'first ... last'.
    """
    parts = self.names.split()
    if len(parts) == 1:
        return parts[0]
    if len(parts) == 2:
        return ' '.join(parts)
    return ' '.join([parts[0], '...', parts[-1]])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_skeleton(self):
    """
    Return an empty copy of the source model, i.e. without sources,
    but with the proper attributes for each SourceGroup contained
    within.
    """
    def _hollow(grp):
        # shallow copy of the group with the sources removed
        clone = copy.copy(grp)
        clone.sources = []
        return clone
    groups = [_hollow(grp) for grp in self.src_groups]
    return self.__class__(self.names, self.weight, self.path, groups,
                          self.num_gsim_paths, self.ordinal, self.samples)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enumerate_paths(self):
    """
    Generate all possible paths starting from this branch set.

    :returns:
        Generator of two-item tuples. Each tuple contains the weight
        of the path (the product of the weights of all the path's
        branches) and the list of the path's :class:`Branch` objects,
        from root to leaf. The weights of all paths sum up to 1.0.
    """
    for nested in self._enumerate_paths([]):
        branches = []
        total_weight = 1.0
        # unwind the nested (parent, branch) pairs down to the root
        while nested:
            nested, branch = nested
            total_weight *= branch.weight
            branches.append(branch)
        yield total_weight, branches[::-1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_source(self, source):
    # pylint: disable=R0911,R0912
    """
    Apply filters to ``source`` and return ``True`` if the
    uncertainty should be applied to it.

    :returns: False as soon as one filter does not match,
        True when all filters pass
    """
    for key, value in self.filters.items():
        if key == 'applyToTectonicRegionType':
            if value != source.tectonic_region_type:
                return False
        elif key == 'applyToSourceType':
            if value == 'area':
                if not isinstance(source, ohs.AreaSource):
                    return False
            elif value == 'point':
                # area source extends point source, so an explicit
                # exclusion is needed to match plain point sources only
                if (not isinstance(source, ohs.PointSource)
                        or isinstance(source, ohs.AreaSource)):
                    return False
            elif value == 'simpleFault':
                if not isinstance(source, ohs.SimpleFaultSource):
                    return False
            elif value == 'complexFault':
                if not isinstance(source, ohs.ComplexFaultSource):
                    return False
            elif value == 'characteristicFault':
                if not isinstance(source, ohs.CharacteristicFaultSource):
                    return False
            else:
                raise AssertionError("unknown source type '%s'" % value)
        elif key == 'applyToSources':
            # NB: source may be falsy here, in which case the
            # source-id check is skipped
            if source and source.source_id not in value:
                return False
        else:
            raise AssertionError("unknown filter '%s'" % key)
    # All filters pass, return True.
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_uncertainty_to_geometry(self, source, value):
""" Modify ``source`` geometry with the uncertainty value ``value`` """ |
if self.uncertainty_type == 'simpleFaultDipRelative':
source.modify('adjust_dip', dict(increment=value))
elif self.uncertainty_type == 'simpleFaultDipAbsolute':
source.modify('set_dip', dict(dip=value))
elif self.uncertainty_type == 'simpleFaultGeometryAbsolute':
trace, usd, lsd, dip, spacing = value
source.modify(
'set_geometry',
dict(fault_trace=trace, upper_seismogenic_depth=usd,
lower_seismogenic_depth=lsd, dip=dip, spacing=spacing))
elif self.uncertainty_type == 'complexFaultGeometryAbsolute':
edges, spacing = value
source.modify('set_geometry', dict(edges=edges, spacing=spacing))
elif self.uncertainty_type == 'characteristicFaultGeometryAbsolute':
source.modify('set_geometry', dict(surface=value)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_uncertainty_to_mfd(self, mfd, value):
""" Modify ``mfd`` object with uncertainty value ``value``. """ |
if self.uncertainty_type == 'abGRAbsolute':
a, b = value
mfd.modify('set_ab', dict(a_val=a, b_val=b))
elif self.uncertainty_type == 'bGRRelative':
mfd.modify('increment_b', dict(value=value))
elif self.uncertainty_type == 'maxMagGRRelative':
mfd.modify('increment_max_mag', dict(value=value))
elif self.uncertainty_type == 'maxMagGRAbsolute':
mfd.modify('set_max_mag', dict(value=value))
elif self.uncertainty_type == 'incrementalMFDAbsolute':
min_mag, bin_width, occur_rates = value
mfd.modify('set_mfd', dict(min_mag=min_mag, bin_width=bin_width,
occurrence_rates=occur_rates)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_source_models(self, gsim_lt):
    """
    Yield the underlying LtSourceModel, multiple times if there is
    sampling.
    """
    if self.num_samples:
        num_gsim_paths = 1
    else:
        num_gsim_paths = gsim_lt.get_num_paths()
    for ordinal, rlz in enumerate(self):
        yield LtSourceModel(
            rlz.value, rlz.weight, ('b1',), [], num_gsim_paths, ordinal, 1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_each_source(self):
    """
    True if there is an applyToSources for each source.
    """
    applyto = self.info.applytosources
    # falsy when there are no applyToSources at all
    return applyto and applyto == self.source_ids
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_tree(self, tree_node, validate):
    """
    Parse the whole tree and point the ``root_branchset`` attribute
    to the tree's root.
    """
    self.info = collect_info(self.filename)
    self.source_ids = collections.defaultdict(list)
    started = time.time()
    for depth, level_node in enumerate(tree_node.nodes):
        self.parse_branchinglevel(level_node, depth, validate)
    elapsed = time.time() - started
    if validate:
        logging.info('Validated %s in %.2f seconds',
                     os.path.basename(self.filename), elapsed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_branchinglevel(self, branchinglevel_node, depth, validate):
    """
    Parse one branching level.

    :param branchinglevel_node:
        ``etree.Element`` object with tag "logicTreeBranchingLevel".
    :param depth:
        The sequential number of this branching level, based on 0.
    :param validate:
        Whether or not the branching level, its branchsets and their
        branches should be validated.

    Enumerates the children branchsets and calls
    :meth:`parse_branchset`, :meth:`parse_branches` and finally
    :meth:`apply_branchset` for each.

    Keeps track of "open ends" -- the set of branches that don't have
    any child branchset on this step of execution. After processing of
    every branching level only those branches that are listed in it
    can have child branchsets (if there is one on the next level).
    """
    new_open_ends = set()
    branchsets = branchinglevel_node.nodes
    for number, branchset_node in enumerate(branchsets):
        branchset = self.parse_branchset(branchset_node, depth, number,
                                         validate)
        self.parse_branches(branchset_node, branchset, validate)
        if self.root_branchset is None:  # not set yet
            self.num_paths = 1
            self.root_branchset = branchset
        else:
            # attach the new branchset to the open ends collected
            # from the previous level
            self.apply_branchset(branchset_node, branchset)
        for branch in branchset.branches:
            new_open_ends.add(branch)
        self.num_paths *= len(branchset.branches)
    # NB: number is the index of the last branchset; > 0 means the
    # level contained more than one branchset
    if number > 0:
        logging.warning('There is a branching level with multiple '
                        'branchsets in %s', self.filename)
    self.open_ends.clear()
    self.open_ends.update(new_open_ends)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_branches(self, branchset_node, branchset, validate):
    """
    Create and attach branches at ``branchset_node`` to ``branchset``.

    :param branchset_node:
        Same as for :meth:`parse_branchset`.
    :param branchset:
        An instance of :class:`BranchSet`.
    :param validate:
        Whether or not branches' uncertainty values should be validated.

    Checks that each branch has a valid uncertainty value and a unique
    branch ID, that the uncertainty values are not duplicated and that
    the branch weights sum up to 1.0; raises a LogicTreeError otherwise.

    :return:
        ``None``, all branches are attached to the provided branchset.
    """
    total_weight = 0
    seen_values = []
    for branchnode in branchset_node.nodes:
        weight = ~branchnode.uncertaintyWeight
        total_weight += weight
        value_node = node_from_elem(branchnode.uncertaintyModel)
        if value_node.text is not None:
            seen_values.append(value_node.text.strip())
        if validate:
            self.validate_uncertainty_value(
                value_node, branchnode, branchset)
        value = self.parse_uncertainty_value(value_node, branchset)
        branch_id = branchnode.attrib.get('branchID')
        branch = Branch(branch_id, weight, value)
        if branch_id in self.branches:
            raise LogicTreeError(
                branchnode, self.filename,
                "branchID '%s' is not unique" % branch_id)
        self.branches[branch_id] = branch
        branchset.branches.append(branch)
    if abs(total_weight - 1.0) > pmf.PRECISION:
        raise LogicTreeError(
            branchset_node, self.filename,
            "branchset weights don't sum up to 1.0")
    # TODO: add a test exercising the duplicated-values check below
    if len(seen_values) > len(set(seen_values)):
        raise LogicTreeError(
            branchset_node, self.filename,
            "there are duplicate values in uncertaintyModel: " +
            ' '.join(seen_values))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_path(self, seed):
    """
    Return the model name and a list of branch ids.

    :param seed: the seed used for the sampling
    """
    # NOTE: the same seed is passed to `sample` at every level
    branch_ids = []
    bset = self.root_branchset
    while bset is not None:
        [chosen] = sample(bset.branches, 1, seed)
        branch_ids.append(chosen.branch_id)
        bset = chosen.child_branchset
    modelname = self.root_branchset.get_branch_by_id(branch_ids[0]).value
    return modelname, branch_ids
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_simple_fault_geometry_surface(self, node):
    """
    Parse a simple fault geometry node.

    :returns:
        tuple of (fault trace as a :class:`geo.Line`, upper seismogenic
        depth, lower seismogenic depth, dip, mesh spacing)
    """
    mesh_spacing = node["spacing"]
    upper = ~node.upperSeismoDepth
    lower = ~node.lowerSeismoDepth
    dip = ~node.dip
    # build the fault trace from the 2D coordinate list
    points = [geo.Point(*xy)
              for xy in split_coords_2d(~node.LineString.posList)]
    return geo.Line(points), upper, lower, dip, mesh_spacing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_complex_fault_geometry_surface(self, node):
    """
    Parse a complex fault geometry node.

    :returns:
        tuple of (list of edge :class:`geo.Line` objects, mesh spacing)
    """
    mesh_spacing = node["spacing"]
    edges = [
        geo.Line([geo.Point(*xyz)
                  for xyz in split_coords_3d(~edge_node.LineString.posList)])
        for edge_node in node.nodes]
    return edges, mesh_spacing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_planar_geometry_surface(self, node):
    """
    Parse a planar geometry node into a :class:`geo.PlanarSurface`.
    """
    corners = []
    # order matters: PlanarSurface expects TL, TR, BR, BL
    for key in ("topLeft", "topRight", "bottomRight", "bottomLeft"):
        corner = getattr(node, key)
        corners.append(
            geo.Point(corner["lon"], corner["lat"], corner["depth"]))
    return geo.PlanarSurface.from_corner_points(*corners)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_simple_fault_geometry(self, node, _float_re):
    """
    Validate a node representation of a simple fault geometry.

    :raises LogicTreeError: if the geometry cannot be built
    """
    try:
        coords = split_coords_2d(~node.LineString.posList)
        fault_trace = geo.Line([geo.Point(*xy) for xy in coords])
    except ValueError:
        # the geometry could not be created: use an empty trace so that
        # the check below fails and points the user to the broken node
        fault_trace = []
    if not len(fault_trace):
        raise LogicTreeError(
            node, self.filename,
            "'simpleFaultGeometry' node is not valid")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_complex_fault_geometry(self, node, _float_re):
    """
    Validate a node representation of a complex fault geometry - this
    check merely verifies that the format is correct.  Conformance to
    the Aki & Richards convention is not verified here; it will raise
    an error when the surface is actually created.

    :raises LogicTreeError: if any edge or the spacing is invalid
    """
    edges_ok = []
    for edge_node in node.nodes:
        try:
            coords = split_coords_3d(edge_node.LineString.posList.text)
            edge = geo.Line([geo.Point(*xyz) for xyz in coords])
        except ValueError:
            # see the simple geometry case: an empty edge marks failure
            edge = []
        edges_ok.append(bool(len(edge)))
    # the node is valid only if the spacing is set and every edge compiled
    if not (node["spacing"] and all(edges_ok)):
        raise LogicTreeError(
            node, self.filename,
            "'complexFaultGeometry' node is not valid")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_planar_fault_geometry(self, node, _float_re):
    """
    Validate a node representation of a planar fault geometry.

    :raises LogicTreeError: if a corner coordinate or the spacing is
        out of range
    """
    spacing_ok = node["spacing"]
    for key in ["topLeft", "topRight", "bottomLeft", "bottomRight"]:
        corner = getattr(node, key)
        lon, lat, depth = corner["lon"], corner["lat"], corner["depth"]
        corner_ok = (-180.0 <= lon <= 180.0 and
                     -90.0 <= lat <= 90.0 and
                     depth >= 0.0)
        if not (corner_ok and spacing_ok):
            raise LogicTreeError(
                node, self.filename,
                "'planarFaultGeometry' node is not valid")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_uncertainties(self, branch_ids, source_group):
    """
    Parse the path through the source model logic tree and apply the
    uncertainties found along it.

    :param branch_ids:
        List of string identifiers of branches, representing the path
        through the source model logic tree.
    :param source_group:
        A group of sources.
    :return:
        A copy of the original group with modified sources (or the
        group itself if no uncertainty applies).
    """
    # walk the tree following branch_ids in order, collecting the
    # (branchset, uncertainty value) pairs to apply
    pairs = []
    ids = iter(branch_ids)
    bset = self.root_branchset
    while bset is not None:
        branch = bset.get_branch_by_id(next(ids))
        if bset.uncertainty_type != 'sourceModel':
            pairs.append((bset, branch.value))
        bset = branch.child_branchset
    if not pairs:
        return source_group  # nothing changed
    sg = copy.deepcopy(source_group)
    sg.applied_uncertainties = []
    sg.changed = numpy.zeros(len(sg.sources), int)
    for bset, value in pairs:
        for i, src in enumerate(sg.sources):
            nchanged = bset.apply_uncertainty(value, src)
            if nchanged:
                sg.changed[i] += nchanged
                sg.applied_uncertainties.append(
                    (bset.uncertainty_type, value))
    return sg
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_one(self):
    """
    Check that all the inner weights are 1 up to the precision.
    """
    for weight in self.dic.values():
        if abs(weight - 1.) >= pmf.PRECISION:
            return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_(cls, gsim):
    """
    Generate a trivial GsimLogicTree from a single GSIM instance.
    """
    # build the node tree bottom-up: branch -> branchset -> level -> tree
    model_node = N('uncertaintyModel', text=str(gsim))
    weight_node = N('uncertaintyWeight', text='1.0')
    branch = N('logicTreeBranch', {'branchID': 'b1'},
               nodes=[model_node, weight_node])
    branchset = N('logicTreeBranchSet',
                  {'applyToTectonicRegionType': '*',
                   'branchSetID': 'bs1',
                   'uncertaintyType': 'gmpeModel'},
                  nodes=[branch])
    level = N('logicTreeBranchingLevel', {'branchingLevelID': 'bl1'},
              nodes=[branchset])
    lt = N('logicTree', {'logicTreeID': 'lt1'}, nodes=[level])
    return cls(repr(gsim), ['*'], ltnode=lt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_imts(self, imts):
    """
    Make sure the IMTs are recognized by all GSIMs in the logic tree.

    :param imts: a sequence of intensity measure type strings
    :raises ValueError: if an SA period falls outside the range covered
        by a GSIM's coefficient table
    """
    sa_imts = [imt for imt in imts if imt.startswith('SA')]
    for trt in self.values:
        for gsim in self.values[trt]:
            # scan every coefficient table attached to the GSIM
            for attr in dir(gsim):
                coeffs = getattr(gsim, attr)
                if not isinstance(coeffs, CoeffsTable):
                    continue
                for imt in sa_imts:
                    try:
                        coeffs[from_string(imt)]
                    except KeyError:
                        raise ValueError(
                            '%s is out of the period range defined '
                            'for %s' % (imt, gsim))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reduce(self, trts):
    """
    Reduce the GsimLogicTree.

    :param trts: a subset of tectonic region types
    :returns: a reduced GsimLogicTree instance
    """
    # shallow-copy the instance without calling __init__
    new = object.__new__(self.__class__)
    vars(new).update(vars(self))
    if trts != {'*'}:
        # rebuild the branches, marking as effective only those whose
        # tectonic region type is in the given subset
        new.branches = [
            BranchTuple(br.trt, br.id, br.gsim, br.weight, br.trt in trts)
            for br in self.branches]
    return new
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_num_branches(self):
    """
    Return the number of effective branches for tectonic region type,
    as a dictionary.
    """
    result = {}
    grouped = itertools.groupby(self.branches, operator.attrgetter('trt'))
    for trt, group in grouped:
        result[trt] = sum(1 for br in group if br.effective)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_num_paths(self):
    """
    Return the effective number of paths in the tree.
    """
    # NB: the algorithm assumes a symmetric logic tree for the GSIMs;
    # in the future we may relax such assumption
    counts = self.get_num_branches()
    if not sum(counts.values()):
        return 0
    paths = 1
    for n in counts.values():
        if n:  # skip ineffective tectonic region types
            paths *= n
    return paths
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_faulting_style_term(Frss, pR, Fnss, pN, rake):
""" Compute SHARE faulting style adjustment term. """ |
if rake > 30.0 and rake <= 150.0:
return np.power(Frss, 1 - pR) * np.power(Fnss, -pN)
elif rake > -120.0 and rake <= -60.0:
return np.power(Frss, - pR) * np.power(Fnss, 1 - pN)
else:
return np.power(Frss, - pR) * np.power(Fnss, - pN) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_stddevs(self, C, stddev_types, mag, num_sites):
""" Return total standard deviation as for equation 35, page 1021. """ |
stddevs = []
for _ in stddev_types:
if mag < 7.16:
sigma = C['c11'] + C['c12'] * mag
elif mag >= 7.16:
sigma = C['c13']
stddevs.append(np.zeros(num_sites) + sigma)
return stddevs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_term2(self, C, mag, rrup):
""" This computes the term f2 in equation 32, page 1021 """ |
c78_factor = (C['c7'] * np.exp(C['c8'] * mag)) ** 2
R = np.sqrt(rrup ** 2 + c78_factor)
return C['c4'] * np.log(R) + (C['c5'] + C['c6'] * mag) * rrup |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_term3(self, C, rrup):
""" This computes the term f3 in equation 34, page 1021 but corrected according to the erratum. """ |
f3 = np.zeros_like(rrup)
idx_between_70_130 = (rrup > 70) & (rrup <= 130)
idx_greater_130 = rrup > 130
f3[idx_between_70_130] = (
C['c9'] * (np.log(rrup[idx_between_70_130]) - np.log(70))
)
f3[idx_greater_130] = (
C['c9'] * (np.log(rrup[idx_greater_130]) - np.log(70)) +
C['c10'] * (np.log(rrup[idx_greater_130]) - np.log(130))
)
return f3 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def time_window_cutoff(sw_time, time_cutoff):
    """
    Allows for cutting the declustering time window at a specific time,
    outside of which an event of any magnitude is no longer identified
    as a cluster.

    :param sw_time: array of time windows (in the same decimal-year/day
        unit produced by the declustering window; DAYS is a module-level
        conversion constant — defined elsewhere in this module)
    :param time_cutoff: cutoff time, divided by DAYS before clamping
    :returns: numpy array with every window clamped to the cutoff
    """
    # vectorized clamp: equivalent to the original per-element Python
    # loop (x if x <= cutoff else cutoff), but runs in C
    return np.minimum(np.asarray(sw_time), time_cutoff / DAYS)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_geometry(self, input_geometry, upper_depth, lower_depth):
    '''
    If geometry is defined as a numpy array then create instance of
    nhlib.geo.point.Point class, otherwise if already instance of class
    accept class

    :param input_geometry:
        Input geometry (point) as either
        i) instance of nhlib.geo.point.Point class
        ii) numpy.ndarray [Longitude, Latitude]

    :param float upper_depth:
        Upper seismogenic depth (km)

    :param float lower_depth:
        Lower seismogenic depth (km)
    '''
    self._check_seismogenic_depths(upper_depth, lower_depth)
    # accept an existing Point, convert an array, reject anything else
    if isinstance(input_geometry, Point):
        self.geometry = input_geometry
    elif isinstance(input_geometry, np.ndarray):
        self.geometry = Point(input_geometry[0], input_geometry[1])
    else:
        raise ValueError('Unrecognised or unsupported geometry '
                         'definition')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_catalogue(self, selector, distance, selector_type='circle',
                     distance_metric='epicentral', point_depth=None,
                     upper_eq_depth=None, lower_eq_depth=None):
    '''
    Selects the catalogue associated to the point source.

    Effectively a wrapper to the two functions select catalogue within
    a distance of the point and select catalogue within cell centred on
    point

    :param selector:
        Populated instance of :class:
        `openquake.hmtk.seismicity.selector.CatalogueSelector`
    :param float distance:
        Distance from point (km) for selection
    :param str selector_type:
        Chooses whether to select within {'circle'} or within a {'square'}.
    :param str distance_metric:
        'epicentral' or 'hypocentral' (only for 'circle' selector type)
    :param float point_depth:
        Assumed hypocentral depth of the point (only applied to 'circle'
        distance type)
    :param float upper_eq_depth:
        Upper seismogenic depth (km) (only for 'square')
    :param float lower_eq_depth:
        Lower seismogenic depth (km) (only for 'square')
    '''
    if selector.catalogue.get_number_events() < 1:
        raise ValueError('No events found in catalogue!')
    if 'square' in selector_type:
        # delegate to the cell-based selection
        self.select_catalogue_within_cell(selector,
                                          distance,
                                          upper_depth=upper_eq_depth,
                                          lower_depth=lower_eq_depth)
        return
    if 'circle' in selector_type:
        # delegate to the distance-based selection
        self.select_catalogue_within_distance(selector, distance,
                                              distance_metric, point_depth)
        return
    raise ValueError('Unrecognised selection type for point source!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.