repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _par_read | def _par_read(dirname, compressed=True):
"""
Internal write function to read a formatted parameter file.
:type dirname: str
:param dirname: Directory to read the parameter file from.
:type compressed: bool
:param compressed: Whether the directory is compressed or not.
"""
templates = []
if compressed:
arc = tarfile.open(dirname, "r:*")
members = arc.getmembers()
_parfile = [member for member in members
if member.name.split(os.sep)[-1] ==
'template_parameters.csv']
if len(_parfile) == 0:
arc.close()
raise MatchFilterError(
'No template parameter file in archive')
parfile = arc.extractfile(_parfile[0])
else:
parfile = open(dirname + '/' + 'template_parameters.csv', 'r')
for line in parfile:
t_in = Template()
for key_pair in line.rstrip().split(','):
if key_pair.split(':')[0].strip() == 'name':
t_in.__dict__[key_pair.split(':')[0].strip()] = \
key_pair.split(':')[-1].strip()
elif key_pair.split(':')[0].strip() == 'filt_order':
try:
t_in.__dict__[key_pair.split(':')[0].strip()] = \
int(key_pair.split(':')[-1])
except ValueError:
pass
else:
try:
t_in.__dict__[key_pair.split(':')[0].strip()] = \
float(key_pair.split(':')[-1])
except ValueError:
pass
templates.append(t_in)
parfile.close()
if compressed:
arc.close()
return templates | python | def _par_read(dirname, compressed=True):
"""
Internal write function to read a formatted parameter file.
:type dirname: str
:param dirname: Directory to read the parameter file from.
:type compressed: bool
:param compressed: Whether the directory is compressed or not.
"""
templates = []
if compressed:
arc = tarfile.open(dirname, "r:*")
members = arc.getmembers()
_parfile = [member for member in members
if member.name.split(os.sep)[-1] ==
'template_parameters.csv']
if len(_parfile) == 0:
arc.close()
raise MatchFilterError(
'No template parameter file in archive')
parfile = arc.extractfile(_parfile[0])
else:
parfile = open(dirname + '/' + 'template_parameters.csv', 'r')
for line in parfile:
t_in = Template()
for key_pair in line.rstrip().split(','):
if key_pair.split(':')[0].strip() == 'name':
t_in.__dict__[key_pair.split(':')[0].strip()] = \
key_pair.split(':')[-1].strip()
elif key_pair.split(':')[0].strip() == 'filt_order':
try:
t_in.__dict__[key_pair.split(':')[0].strip()] = \
int(key_pair.split(':')[-1])
except ValueError:
pass
else:
try:
t_in.__dict__[key_pair.split(':')[0].strip()] = \
float(key_pair.split(':')[-1])
except ValueError:
pass
templates.append(t_in)
parfile.close()
if compressed:
arc.close()
return templates | [
"def",
"_par_read",
"(",
"dirname",
",",
"compressed",
"=",
"True",
")",
":",
"templates",
"=",
"[",
"]",
"if",
"compressed",
":",
"arc",
"=",
"tarfile",
".",
"open",
"(",
"dirname",
",",
"\"r:*\"",
")",
"members",
"=",
"arc",
".",
"getmembers",
"(",
... | Internal write function to read a formatted parameter file.
:type dirname: str
:param dirname: Directory to read the parameter file from.
:type compressed: bool
:param compressed: Whether the directory is compressed or not. | [
"Internal",
"write",
"function",
"to",
"read",
"a",
"formatted",
"parameter",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3639-L3684 | train | 203,300 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _badpath | def _badpath(path, base):
"""
joinpath will ignore base if path is absolute.
"""
return not _resolved(os.path.join(base, path)).startswith(base) | python | def _badpath(path, base):
"""
joinpath will ignore base if path is absolute.
"""
return not _resolved(os.path.join(base, path)).startswith(base) | [
"def",
"_badpath",
"(",
"path",
",",
"base",
")",
":",
"return",
"not",
"_resolved",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"path",
")",
")",
".",
"startswith",
"(",
"base",
")"
] | joinpath will ignore base if path is absolute. | [
"joinpath",
"will",
"ignore",
"base",
"if",
"path",
"is",
"absolute",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3691-L3695 | train | 203,301 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _badlink | def _badlink(info, base):
"""
Links are interpreted relative to the directory containing the link
"""
tip = _resolved(os.path.join(base, os.path.dirname(info.name)))
return _badpath(info.linkname, base=tip) | python | def _badlink(info, base):
"""
Links are interpreted relative to the directory containing the link
"""
tip = _resolved(os.path.join(base, os.path.dirname(info.name)))
return _badpath(info.linkname, base=tip) | [
"def",
"_badlink",
"(",
"info",
",",
"base",
")",
":",
"tip",
"=",
"_resolved",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"info",
".",
"name",
")",
")",
")",
"return",
"_badpath",
"(",
"info",
... | Links are interpreted relative to the directory containing the link | [
"Links",
"are",
"interpreted",
"relative",
"to",
"the",
"directory",
"containing",
"the",
"link"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3698-L3703 | train | 203,302 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _safemembers | def _safemembers(members):
"""Check members of a tar archive for safety.
Ensure that they do not contain paths or links outside of where we
need them - this would only happen if the archive wasn't made by
eqcorrscan.
:type members: :class:`tarfile.TarFile`
:param members: an open tarfile.
"""
base = _resolved(".")
for finfo in members:
if _badpath(finfo.name, base):
print(finfo.name, "is blocked (illegal path)")
elif finfo.issym() and _badlink(finfo, base):
print(finfo.name, "is blocked: Hard link to", finfo.linkname)
elif finfo.islnk() and _badlink(finfo, base):
print(finfo.name, "is blocked: Symlink to", finfo.linkname)
else:
yield finfo | python | def _safemembers(members):
"""Check members of a tar archive for safety.
Ensure that they do not contain paths or links outside of where we
need them - this would only happen if the archive wasn't made by
eqcorrscan.
:type members: :class:`tarfile.TarFile`
:param members: an open tarfile.
"""
base = _resolved(".")
for finfo in members:
if _badpath(finfo.name, base):
print(finfo.name, "is blocked (illegal path)")
elif finfo.issym() and _badlink(finfo, base):
print(finfo.name, "is blocked: Hard link to", finfo.linkname)
elif finfo.islnk() and _badlink(finfo, base):
print(finfo.name, "is blocked: Symlink to", finfo.linkname)
else:
yield finfo | [
"def",
"_safemembers",
"(",
"members",
")",
":",
"base",
"=",
"_resolved",
"(",
"\".\"",
")",
"for",
"finfo",
"in",
"members",
":",
"if",
"_badpath",
"(",
"finfo",
".",
"name",
",",
"base",
")",
":",
"print",
"(",
"finfo",
".",
"name",
",",
"\"is blo... | Check members of a tar archive for safety.
Ensure that they do not contain paths or links outside of where we
need them - this would only happen if the archive wasn't made by
eqcorrscan.
:type members: :class:`tarfile.TarFile`
:param members: an open tarfile. | [
"Check",
"members",
"of",
"a",
"tar",
"archive",
"for",
"safety",
".",
"Ensure",
"that",
"they",
"do",
"not",
"contain",
"paths",
"or",
"links",
"outside",
"of",
"where",
"we",
"need",
"them",
"-",
"this",
"would",
"only",
"happen",
"if",
"the",
"archive... | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3706-L3725 | train | 203,303 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _write_family | def _write_family(family, filename):
"""
Write a family to a csv file.
:type family: :class:`eqcorrscan.core.match_filter.Family`
:param family: Family to write to file
:type filename: str
:param filename: File to write to.
"""
with open(filename, 'w') as f:
for detection in family.detections:
det_str = ''
for key in detection.__dict__.keys():
if key == 'event' and detection.__dict__[key] is not None:
value = str(detection.event.resource_id)
elif key in ['threshold', 'detect_val', 'threshold_input']:
value = format(detection.__dict__[key], '.32f').rstrip('0')
else:
value = str(detection.__dict__[key])
det_str += key + ': ' + value + '; '
f.write(det_str + '\n')
return | python | def _write_family(family, filename):
"""
Write a family to a csv file.
:type family: :class:`eqcorrscan.core.match_filter.Family`
:param family: Family to write to file
:type filename: str
:param filename: File to write to.
"""
with open(filename, 'w') as f:
for detection in family.detections:
det_str = ''
for key in detection.__dict__.keys():
if key == 'event' and detection.__dict__[key] is not None:
value = str(detection.event.resource_id)
elif key in ['threshold', 'detect_val', 'threshold_input']:
value = format(detection.__dict__[key], '.32f').rstrip('0')
else:
value = str(detection.__dict__[key])
det_str += key + ': ' + value + '; '
f.write(det_str + '\n')
return | [
"def",
"_write_family",
"(",
"family",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"detection",
"in",
"family",
".",
"detections",
":",
"det_str",
"=",
"''",
"for",
"key",
"in",
"detection",
".",
... | Write a family to a csv file.
:type family: :class:`eqcorrscan.core.match_filter.Family`
:param family: Family to write to file
:type filename: str
:param filename: File to write to. | [
"Write",
"a",
"family",
"to",
"a",
"csv",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3728-L3749 | train | 203,304 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _read_family | def _read_family(fname, all_cat, template):
"""
Internal function to read csv family files.
:type fname: str
:param fname: Filename
:return: list of Detection
"""
detections = []
with open(fname, 'r') as f:
for line in f:
det_dict = {}
gen_event = False
for key_pair in line.rstrip().split(';'):
key = key_pair.split(': ')[0].strip()
value = key_pair.split(': ')[-1].strip()
if key == 'event':
if len(all_cat) == 0:
gen_event = True
continue
el = [e for e in all_cat
if str(e.resource_id).split('/')[-1] == value][0]
det_dict.update({'event': el})
elif key == 'detect_time':
det_dict.update(
{'detect_time': UTCDateTime(value)})
elif key == 'chans':
det_dict.update({'chans': ast.literal_eval(value)})
elif key in ['template_name', 'typeofdet', 'id',
'threshold_type']:
det_dict.update({key: value})
elif key == 'no_chans':
det_dict.update({key: int(float(value))})
elif len(key) == 0:
continue
else:
det_dict.update({key: float(value)})
detection = Detection(**det_dict)
if gen_event:
detection._calculate_event(template=template)
detections.append(detection)
return detections | python | def _read_family(fname, all_cat, template):
"""
Internal function to read csv family files.
:type fname: str
:param fname: Filename
:return: list of Detection
"""
detections = []
with open(fname, 'r') as f:
for line in f:
det_dict = {}
gen_event = False
for key_pair in line.rstrip().split(';'):
key = key_pair.split(': ')[0].strip()
value = key_pair.split(': ')[-1].strip()
if key == 'event':
if len(all_cat) == 0:
gen_event = True
continue
el = [e for e in all_cat
if str(e.resource_id).split('/')[-1] == value][0]
det_dict.update({'event': el})
elif key == 'detect_time':
det_dict.update(
{'detect_time': UTCDateTime(value)})
elif key == 'chans':
det_dict.update({'chans': ast.literal_eval(value)})
elif key in ['template_name', 'typeofdet', 'id',
'threshold_type']:
det_dict.update({key: value})
elif key == 'no_chans':
det_dict.update({key: int(float(value))})
elif len(key) == 0:
continue
else:
det_dict.update({key: float(value)})
detection = Detection(**det_dict)
if gen_event:
detection._calculate_event(template=template)
detections.append(detection)
return detections | [
"def",
"_read_family",
"(",
"fname",
",",
"all_cat",
",",
"template",
")",
":",
"detections",
"=",
"[",
"]",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"det_dict",
"=",
"{",
"}",
"gen_event",
"=",
... | Internal function to read csv family files.
:type fname: str
:param fname: Filename
:return: list of Detection | [
"Internal",
"function",
"to",
"read",
"csv",
"family",
"files",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3752-L3793 | train | 203,305 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | read_party | def read_party(fname=None, read_detection_catalog=True):
"""
Read detections and metadata from a tar archive.
:type fname: str
:param fname:
Filename to read from, if this contains a single Family, then will
return a party of length = 1
:type read_detection_catalog: bool
:param read_detection_catalog:
Whether to read the detection catalog or not, if False, catalog
will be regenerated - for large catalogs this can be faster.
:return: :class:`eqcorrscan.core.match_filter.Party`
"""
party = Party()
party.read(filename=fname, read_detection_catalog=read_detection_catalog)
return party | python | def read_party(fname=None, read_detection_catalog=True):
"""
Read detections and metadata from a tar archive.
:type fname: str
:param fname:
Filename to read from, if this contains a single Family, then will
return a party of length = 1
:type read_detection_catalog: bool
:param read_detection_catalog:
Whether to read the detection catalog or not, if False, catalog
will be regenerated - for large catalogs this can be faster.
:return: :class:`eqcorrscan.core.match_filter.Party`
"""
party = Party()
party.read(filename=fname, read_detection_catalog=read_detection_catalog)
return party | [
"def",
"read_party",
"(",
"fname",
"=",
"None",
",",
"read_detection_catalog",
"=",
"True",
")",
":",
"party",
"=",
"Party",
"(",
")",
"party",
".",
"read",
"(",
"filename",
"=",
"fname",
",",
"read_detection_catalog",
"=",
"read_detection_catalog",
")",
"re... | Read detections and metadata from a tar archive.
:type fname: str
:param fname:
Filename to read from, if this contains a single Family, then will
return a party of length = 1
:type read_detection_catalog: bool
:param read_detection_catalog:
Whether to read the detection catalog or not, if False, catalog
will be regenerated - for large catalogs this can be faster.
:return: :class:`eqcorrscan.core.match_filter.Party` | [
"Read",
"detections",
"and",
"metadata",
"from",
"a",
"tar",
"archive",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3808-L3825 | train | 203,306 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | read_detections | def read_detections(fname):
"""
Read detections from a file to a list of Detection objects.
:type fname: str
:param fname: File to read from, must be a file written to by \
Detection.write.
:returns: list of :class:`eqcorrscan.core.match_filter.Detection`
:rtype: list
.. note::
:class:`eqcorrscan.core.match_filter.Detection`'s returned do not
contain Detection.event
"""
f = open(fname, 'r')
detections = []
for index, line in enumerate(f):
if index == 0:
continue # Skip header
if line.rstrip().split('; ')[0] == 'Template name':
continue # Skip any repeated headers
detection = line.rstrip().split('; ')
detection[1] = UTCDateTime(detection[1])
detection[2] = int(float(detection[2]))
detection[3] = ast.literal_eval(detection[3])
detection[4] = float(detection[4])
detection[5] = float(detection[5])
if len(detection) < 9:
detection.extend(['Unset', float('NaN')])
else:
detection[7] = float(detection[7])
detections.append(Detection(
template_name=detection[0], detect_time=detection[1],
no_chans=detection[2], detect_val=detection[4],
threshold=detection[5], threshold_type=detection[6],
threshold_input=detection[7], typeofdet=detection[8],
chans=detection[3]))
f.close()
return detections | python | def read_detections(fname):
"""
Read detections from a file to a list of Detection objects.
:type fname: str
:param fname: File to read from, must be a file written to by \
Detection.write.
:returns: list of :class:`eqcorrscan.core.match_filter.Detection`
:rtype: list
.. note::
:class:`eqcorrscan.core.match_filter.Detection`'s returned do not
contain Detection.event
"""
f = open(fname, 'r')
detections = []
for index, line in enumerate(f):
if index == 0:
continue # Skip header
if line.rstrip().split('; ')[0] == 'Template name':
continue # Skip any repeated headers
detection = line.rstrip().split('; ')
detection[1] = UTCDateTime(detection[1])
detection[2] = int(float(detection[2]))
detection[3] = ast.literal_eval(detection[3])
detection[4] = float(detection[4])
detection[5] = float(detection[5])
if len(detection) < 9:
detection.extend(['Unset', float('NaN')])
else:
detection[7] = float(detection[7])
detections.append(Detection(
template_name=detection[0], detect_time=detection[1],
no_chans=detection[2], detect_val=detection[4],
threshold=detection[5], threshold_type=detection[6],
threshold_input=detection[7], typeofdet=detection[8],
chans=detection[3]))
f.close()
return detections | [
"def",
"read_detections",
"(",
"fname",
")",
":",
"f",
"=",
"open",
"(",
"fname",
",",
"'r'",
")",
"detections",
"=",
"[",
"]",
"for",
"index",
",",
"line",
"in",
"enumerate",
"(",
"f",
")",
":",
"if",
"index",
"==",
"0",
":",
"continue",
"# Skip h... | Read detections from a file to a list of Detection objects.
:type fname: str
:param fname: File to read from, must be a file written to by \
Detection.write.
:returns: list of :class:`eqcorrscan.core.match_filter.Detection`
:rtype: list
.. note::
:class:`eqcorrscan.core.match_filter.Detection`'s returned do not
contain Detection.event | [
"Read",
"detections",
"from",
"a",
"file",
"to",
"a",
"list",
"of",
"Detection",
"objects",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3828-L3867 | train | 203,307 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | write_catalog | def write_catalog(detections, fname, format="QUAKEML"):
"""Write events contained within detections to a catalog file.
:type detections: list
:param detections: list of eqcorrscan.core.match_filter.Detection
:type fname: str
:param fname: Name of the file to write to
:type format: str
:param format: File format to use, see obspy.core.event.Catalog.write \
for supported formats.
"""
catalog = get_catalog(detections)
catalog.write(filename=fname, format=format) | python | def write_catalog(detections, fname, format="QUAKEML"):
"""Write events contained within detections to a catalog file.
:type detections: list
:param detections: list of eqcorrscan.core.match_filter.Detection
:type fname: str
:param fname: Name of the file to write to
:type format: str
:param format: File format to use, see obspy.core.event.Catalog.write \
for supported formats.
"""
catalog = get_catalog(detections)
catalog.write(filename=fname, format=format) | [
"def",
"write_catalog",
"(",
"detections",
",",
"fname",
",",
"format",
"=",
"\"QUAKEML\"",
")",
":",
"catalog",
"=",
"get_catalog",
"(",
"detections",
")",
"catalog",
".",
"write",
"(",
"filename",
"=",
"fname",
",",
"format",
"=",
"format",
")"
] | Write events contained within detections to a catalog file.
:type detections: list
:param detections: list of eqcorrscan.core.match_filter.Detection
:type fname: str
:param fname: Name of the file to write to
:type format: str
:param format: File format to use, see obspy.core.event.Catalog.write \
for supported formats. | [
"Write",
"events",
"contained",
"within",
"detections",
"to",
"a",
"catalog",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3884-L3896 | train | 203,308 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | extract_from_stream | def extract_from_stream(stream, detections, pad=5.0, length=30.0):
"""
Extract waveforms for a list of detections from a stream.
:type stream: obspy.core.stream.Stream
:param stream: Stream containing the detections.
:type detections: list
:param detections: list of eqcorrscan.core.match_filter.detection
:type pad: float
:param pad: Pre-detection extract time in seconds.
:type length: float
:param length: Total extracted length in seconds.
:returns:
list of :class:`obspy.core.stream.Stream`, one for each detection.
:type: list
"""
streams = []
for detection in detections:
cut_stream = Stream()
for pick in detection.event.picks:
tr = stream.select(station=pick.waveform_id.station_code,
channel=pick.waveform_id.channel_code)
if len(tr) == 0:
print('No data in stream for pick:')
print(pick)
continue
cut_stream += tr.slice(
starttime=pick.time - pad,
endtime=pick.time - pad + length).copy()
streams.append(cut_stream)
return streams | python | def extract_from_stream(stream, detections, pad=5.0, length=30.0):
"""
Extract waveforms for a list of detections from a stream.
:type stream: obspy.core.stream.Stream
:param stream: Stream containing the detections.
:type detections: list
:param detections: list of eqcorrscan.core.match_filter.detection
:type pad: float
:param pad: Pre-detection extract time in seconds.
:type length: float
:param length: Total extracted length in seconds.
:returns:
list of :class:`obspy.core.stream.Stream`, one for each detection.
:type: list
"""
streams = []
for detection in detections:
cut_stream = Stream()
for pick in detection.event.picks:
tr = stream.select(station=pick.waveform_id.station_code,
channel=pick.waveform_id.channel_code)
if len(tr) == 0:
print('No data in stream for pick:')
print(pick)
continue
cut_stream += tr.slice(
starttime=pick.time - pad,
endtime=pick.time - pad + length).copy()
streams.append(cut_stream)
return streams | [
"def",
"extract_from_stream",
"(",
"stream",
",",
"detections",
",",
"pad",
"=",
"5.0",
",",
"length",
"=",
"30.0",
")",
":",
"streams",
"=",
"[",
"]",
"for",
"detection",
"in",
"detections",
":",
"cut_stream",
"=",
"Stream",
"(",
")",
"for",
"pick",
"... | Extract waveforms for a list of detections from a stream.
:type stream: obspy.core.stream.Stream
:param stream: Stream containing the detections.
:type detections: list
:param detections: list of eqcorrscan.core.match_filter.detection
:type pad: float
:param pad: Pre-detection extract time in seconds.
:type length: float
:param length: Total extracted length in seconds.
:returns:
list of :class:`obspy.core.stream.Stream`, one for each detection.
:type: list | [
"Extract",
"waveforms",
"for",
"a",
"list",
"of",
"detections",
"from",
"a",
"stream",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3923-L3954 | train | 203,309 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | normxcorr2 | def normxcorr2(template, image):
"""
Thin wrapper to eqcorrscan.utils.correlate functions.
:type template: numpy.ndarray
:param template: Template array
:type image: numpy.ndarray
:param image:
Image to scan the template through. The order of these
matters, if you put the template after the image you will get a
reversed correlation matrix
:return:
New :class:`numpy.ndarray` of the correlation values for the
correlation of the image with the template.
:rtype: numpy.ndarray
.. note::
If your data contain gaps these must be padded with zeros before
using this function. The `eqcorrscan.utils.pre_processing` functions
will provide gap-filled data in the appropriate format. Note that if
you pad your data with zeros before filtering or resampling the gaps
will not be all zeros after filtering. This will result in the
calculation of spurious correlations in the gaps.
"""
array_xcorr = get_array_xcorr()
# Check that we have been passed numpy arrays
if type(template) != np.ndarray or type(image) != np.ndarray:
print('You have not provided numpy arrays, I will not convert them')
return 'NaN'
if len(template) > len(image):
ccc = array_xcorr(
templates=np.array([image]).astype(np.float32),
stream=template.astype(np.float32), pads=[0],
threaded=False)[0][0]
else:
ccc = array_xcorr(
templates=np.array([template]).astype(np.float32),
stream=image.astype(np.float32), pads=[0], threaded=False)[0][0]
ccc = ccc.reshape((1, len(ccc)))
return ccc | python | def normxcorr2(template, image):
"""
Thin wrapper to eqcorrscan.utils.correlate functions.
:type template: numpy.ndarray
:param template: Template array
:type image: numpy.ndarray
:param image:
Image to scan the template through. The order of these
matters, if you put the template after the image you will get a
reversed correlation matrix
:return:
New :class:`numpy.ndarray` of the correlation values for the
correlation of the image with the template.
:rtype: numpy.ndarray
.. note::
If your data contain gaps these must be padded with zeros before
using this function. The `eqcorrscan.utils.pre_processing` functions
will provide gap-filled data in the appropriate format. Note that if
you pad your data with zeros before filtering or resampling the gaps
will not be all zeros after filtering. This will result in the
calculation of spurious correlations in the gaps.
"""
array_xcorr = get_array_xcorr()
# Check that we have been passed numpy arrays
if type(template) != np.ndarray or type(image) != np.ndarray:
print('You have not provided numpy arrays, I will not convert them')
return 'NaN'
if len(template) > len(image):
ccc = array_xcorr(
templates=np.array([image]).astype(np.float32),
stream=template.astype(np.float32), pads=[0],
threaded=False)[0][0]
else:
ccc = array_xcorr(
templates=np.array([template]).astype(np.float32),
stream=image.astype(np.float32), pads=[0], threaded=False)[0][0]
ccc = ccc.reshape((1, len(ccc)))
return ccc | [
"def",
"normxcorr2",
"(",
"template",
",",
"image",
")",
":",
"array_xcorr",
"=",
"get_array_xcorr",
"(",
")",
"# Check that we have been passed numpy arrays",
"if",
"type",
"(",
"template",
")",
"!=",
"np",
".",
"ndarray",
"or",
"type",
"(",
"image",
")",
"!=... | Thin wrapper to eqcorrscan.utils.correlate functions.
:type template: numpy.ndarray
:param template: Template array
:type image: numpy.ndarray
:param image:
Image to scan the template through. The order of these
matters, if you put the template after the image you will get a
reversed correlation matrix
:return:
New :class:`numpy.ndarray` of the correlation values for the
correlation of the image with the template.
:rtype: numpy.ndarray
.. note::
If your data contain gaps these must be padded with zeros before
using this function. The `eqcorrscan.utils.pre_processing` functions
will provide gap-filled data in the appropriate format. Note that if
you pad your data with zeros before filtering or resampling the gaps
will not be all zeros after filtering. This will result in the
calculation of spurious correlations in the gaps. | [
"Thin",
"wrapper",
"to",
"eqcorrscan",
".",
"utils",
".",
"correlate",
"functions",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3957-L3997 | train | 203,310 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.select | def select(self, template_name):
"""
Select a specific family from the party.
:type template_name: str
:param template_name: Template name of Family to select from a party.
:returns: Family
"""
return [fam for fam in self.families
if fam.template.name == template_name][0] | python | def select(self, template_name):
"""
Select a specific family from the party.
:type template_name: str
:param template_name: Template name of Family to select from a party.
:returns: Family
"""
return [fam for fam in self.families
if fam.template.name == template_name][0] | [
"def",
"select",
"(",
"self",
",",
"template_name",
")",
":",
"return",
"[",
"fam",
"for",
"fam",
"in",
"self",
".",
"families",
"if",
"fam",
".",
"template",
".",
"name",
"==",
"template_name",
"]",
"[",
"0",
"]"
] | Select a specific family from the party.
:type template_name: str
:param template_name: Template name of Family to select from a party.
:returns: Family | [
"Select",
"a",
"specific",
"family",
"from",
"the",
"party",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L366-L375 | train | 203,311 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.sort | def sort(self):
"""
Sort the families by template name.
.. rubric:: Example
>>> party = Party(families=[Family(template=Template(name='b')),
... Family(template=Template(name='a'))])
>>> party[0]
Family of 0 detections from template b
>>> party.sort()[0]
Family of 0 detections from template a
"""
self.families.sort(key=lambda x: x.template.name)
return self | python | def sort(self):
"""
Sort the families by template name.
.. rubric:: Example
>>> party = Party(families=[Family(template=Template(name='b')),
... Family(template=Template(name='a'))])
>>> party[0]
Family of 0 detections from template b
>>> party.sort()[0]
Family of 0 detections from template a
"""
self.families.sort(key=lambda x: x.template.name)
return self | [
"def",
"sort",
"(",
"self",
")",
":",
"self",
".",
"families",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"template",
".",
"name",
")",
"return",
"self"
] | Sort the families by template name.
.. rubric:: Example
>>> party = Party(families=[Family(template=Template(name='b')),
... Family(template=Template(name='a'))])
>>> party[0]
Family of 0 detections from template b
>>> party.sort()[0]
Family of 0 detections from template a | [
"Sort",
"the",
"families",
"by",
"template",
"name",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L377-L392 | train | 203,312 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.filter | def filter(self, dates=None, min_dets=1):
"""
Return a new Party filtered according to conditions.
Return a new Party with only detections within a date range and
only families with a minimum number of detections.
:type dates: list of obspy.core.UTCDateTime objects
:param dates: A start and end date for the new Party
:type min_dets: int
:param min_dets: Minimum number of detections per family
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> Party().read().filter(dates=[UTCDateTime(2016, 1, 1),
... UTCDateTime(2017, 1, 1)],
... min_dets=30) # doctest: +SKIP
"""
if dates is None:
raise MatchFilterError('Need a list defining a date range')
new_party = Party()
for fam in self.families:
new_fam = Family(
template=fam.template,
detections=[det for det in fam if
dates[0] < det.detect_time < dates[1]])
if len(new_fam) >= min_dets:
new_party.families.append(new_fam)
return new_party | python | def filter(self, dates=None, min_dets=1):
"""
Return a new Party filtered according to conditions.
Return a new Party with only detections within a date range and
only families with a minimum number of detections.
:type dates: list of obspy.core.UTCDateTime objects
:param dates: A start and end date for the new Party
:type min_dets: int
:param min_dets: Minimum number of detections per family
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> Party().read().filter(dates=[UTCDateTime(2016, 1, 1),
... UTCDateTime(2017, 1, 1)],
... min_dets=30) # doctest: +SKIP
"""
if dates is None:
raise MatchFilterError('Need a list defining a date range')
new_party = Party()
for fam in self.families:
new_fam = Family(
template=fam.template,
detections=[det for det in fam if
dates[0] < det.detect_time < dates[1]])
if len(new_fam) >= min_dets:
new_party.families.append(new_fam)
return new_party | [
"def",
"filter",
"(",
"self",
",",
"dates",
"=",
"None",
",",
"min_dets",
"=",
"1",
")",
":",
"if",
"dates",
"is",
"None",
":",
"raise",
"MatchFilterError",
"(",
"'Need a list defining a date range'",
")",
"new_party",
"=",
"Party",
"(",
")",
"for",
"fam",... | Return a new Party filtered according to conditions.
Return a new Party with only detections within a date range and
only families with a minimum number of detections.
:type dates: list of obspy.core.UTCDateTime objects
:param dates: A start and end date for the new Party
:type min_dets: int
:param min_dets: Minimum number of detections per family
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> Party().read().filter(dates=[UTCDateTime(2016, 1, 1),
... UTCDateTime(2017, 1, 1)],
... min_dets=30) # doctest: +SKIP | [
"Return",
"a",
"new",
"Party",
"filtered",
"according",
"to",
"conditions",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L394-L423 | train | 203,313 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.plot | def plot(self, plot_grouped=False, dates=None, min_dets=1, rate=False,
**kwargs):
"""
Plot the cumulative detections in time.
:type plot_grouped: bool
:param plot_grouped:
Whether to plot all families together (plot_grouped=True), or each
as a separate line.
:type dates: list
:param dates: list of obspy.core.UTCDateTime objects bounding the
plot. The first should be the start date, the last the end date.
:type min_dets: int
:param min_dets: Plot only families with this number of detections
or more.
:type rate: bool
:param rate: Whether or not to plot the daily rate of detection as
opposed to cumulative number. Only works with plot_grouped=True.
:param \**kwargs: Any other arguments accepted by
:func:`eqcorrscan.utils.plotting.cumulative_detections`
.. rubric:: Examples
Plot cumulative detections for all templates individually:
>>> Party().read().plot() # doctest: +SKIP
Plot cumulative detections for all templates grouped together:
>>> Party().read().plot(plot_grouped=True) # doctest: +SKIP
Plot the rate of detection for all templates grouped together:
>>> Party().read().plot(plot_grouped=True, rate=True) # doctest: +SKIP
Plot cumulative detections for all templates with more than five
detections between June 1st, 2012 and July 31st, 2012:
>>> from obspy import UTCDateTime
>>> Party().read().plot(dates=[UTCDateTime(2012, 6, 1),
... UTCDateTime(2012, 7, 31)],
... min_dets=5) # doctest: +SKIP
"""
all_dets = []
if dates:
new_party = self.filter(dates=dates, min_dets=min_dets)
for fam in new_party.families:
all_dets.extend(fam.detections)
else:
for fam in self.families:
all_dets.extend(fam.detections)
fig = cumulative_detections(detections=all_dets,
plot_grouped=plot_grouped,
rate=rate, **kwargs)
return fig | python | def plot(self, plot_grouped=False, dates=None, min_dets=1, rate=False,
**kwargs):
"""
Plot the cumulative detections in time.
:type plot_grouped: bool
:param plot_grouped:
Whether to plot all families together (plot_grouped=True), or each
as a separate line.
:type dates: list
:param dates: list of obspy.core.UTCDateTime objects bounding the
plot. The first should be the start date, the last the end date.
:type min_dets: int
:param min_dets: Plot only families with this number of detections
or more.
:type rate: bool
:param rate: Whether or not to plot the daily rate of detection as
opposed to cumulative number. Only works with plot_grouped=True.
:param \**kwargs: Any other arguments accepted by
:func:`eqcorrscan.utils.plotting.cumulative_detections`
.. rubric:: Examples
Plot cumulative detections for all templates individually:
>>> Party().read().plot() # doctest: +SKIP
Plot cumulative detections for all templates grouped together:
>>> Party().read().plot(plot_grouped=True) # doctest: +SKIP
Plot the rate of detection for all templates grouped together:
>>> Party().read().plot(plot_grouped=True, rate=True) # doctest: +SKIP
Plot cumulative detections for all templates with more than five
detections between June 1st, 2012 and July 31st, 2012:
>>> from obspy import UTCDateTime
>>> Party().read().plot(dates=[UTCDateTime(2012, 6, 1),
... UTCDateTime(2012, 7, 31)],
... min_dets=5) # doctest: +SKIP
"""
all_dets = []
if dates:
new_party = self.filter(dates=dates, min_dets=min_dets)
for fam in new_party.families:
all_dets.extend(fam.detections)
else:
for fam in self.families:
all_dets.extend(fam.detections)
fig = cumulative_detections(detections=all_dets,
plot_grouped=plot_grouped,
rate=rate, **kwargs)
return fig | [
"def",
"plot",
"(",
"self",
",",
"plot_grouped",
"=",
"False",
",",
"dates",
"=",
"None",
",",
"min_dets",
"=",
"1",
",",
"rate",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"all_dets",
"=",
"[",
"]",
"if",
"dates",
":",
"new_party",
"=",
"s... | Plot the cumulative detections in time.
:type plot_grouped: bool
:param plot_grouped:
Whether to plot all families together (plot_grouped=True), or each
as a separate line.
:type dates: list
:param dates: list of obspy.core.UTCDateTime objects bounding the
plot. The first should be the start date, the last the end date.
:type min_dets: int
:param min_dets: Plot only families with this number of detections
or more.
:type rate: bool
:param rate: Whether or not to plot the daily rate of detection as
opposed to cumulative number. Only works with plot_grouped=True.
:param \**kwargs: Any other arguments accepted by
:func:`eqcorrscan.utils.plotting.cumulative_detections`
.. rubric:: Examples
Plot cumulative detections for all templates individually:
>>> Party().read().plot() # doctest: +SKIP
Plot cumulative detections for all templates grouped together:
>>> Party().read().plot(plot_grouped=True) # doctest: +SKIP
Plot the rate of detection for all templates grouped together:
>>> Party().read().plot(plot_grouped=True, rate=True) # doctest: +SKIP
Plot cumulative detections for all templates with more than five
detections between June 1st, 2012 and July 31st, 2012:
>>> from obspy import UTCDateTime
>>> Party().read().plot(dates=[UTCDateTime(2012, 6, 1),
... UTCDateTime(2012, 7, 31)],
... min_dets=5) # doctest: +SKIP | [
"Plot",
"the",
"cumulative",
"detections",
"in",
"time",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L425-L480 | train | 203,314 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.rethreshold | def rethreshold(self, new_threshold, new_threshold_type='MAD'):
"""
Remove detections from the Party that are below a new threshold.
.. Note:: threshold can only be set higher.
.. Warning::
Works in place on Party.
:type new_threshold: float
:param new_threshold: New threshold level
:type new_threshold_type: str
:param new_threshold_type: Either 'MAD', 'absolute' or 'av_chan_corr'
.. rubric:: Examples
Using the MAD threshold on detections made using the MAD threshold:
>>> party = Party().read()
>>> len(party)
4
>>> party = party.rethreshold(10.0)
>>> len(party)
4
>>> # Note that all detections are self detections
Using the absolute thresholding method on the same Party:
>>> party = Party().read().rethreshold(6.0, 'absolute')
>>> len(party)
1
Using the av_chan_corr method on the same Party:
>>> party = Party().read().rethreshold(0.9, 'av_chan_corr')
>>> len(party)
4
"""
for family in self.families:
rethresh_detections = []
for d in family.detections:
if new_threshold_type == 'MAD' and d.threshold_type == 'MAD':
new_thresh = (d.threshold /
d.threshold_input) * new_threshold
elif new_threshold_type == 'MAD' and d.threshold_type != 'MAD':
raise MatchFilterError(
'Cannot recalculate MAD level, '
'use another threshold type')
elif new_threshold_type == 'absolute':
new_thresh = new_threshold
elif new_threshold_type == 'av_chan_corr':
new_thresh = new_threshold * d.no_chans
else:
raise MatchFilterError(
'new_threshold_type %s is not recognised' %
str(new_threshold_type))
if d.detect_val >= new_thresh:
d.threshold = new_thresh
d.threshold_input = new_threshold
d.threshold_type = new_threshold_type
rethresh_detections.append(d)
family.detections = rethresh_detections
return self | python | def rethreshold(self, new_threshold, new_threshold_type='MAD'):
"""
Remove detections from the Party that are below a new threshold.
.. Note:: threshold can only be set higher.
.. Warning::
Works in place on Party.
:type new_threshold: float
:param new_threshold: New threshold level
:type new_threshold_type: str
:param new_threshold_type: Either 'MAD', 'absolute' or 'av_chan_corr'
.. rubric:: Examples
Using the MAD threshold on detections made using the MAD threshold:
>>> party = Party().read()
>>> len(party)
4
>>> party = party.rethreshold(10.0)
>>> len(party)
4
>>> # Note that all detections are self detections
Using the absolute thresholding method on the same Party:
>>> party = Party().read().rethreshold(6.0, 'absolute')
>>> len(party)
1
Using the av_chan_corr method on the same Party:
>>> party = Party().read().rethreshold(0.9, 'av_chan_corr')
>>> len(party)
4
"""
for family in self.families:
rethresh_detections = []
for d in family.detections:
if new_threshold_type == 'MAD' and d.threshold_type == 'MAD':
new_thresh = (d.threshold /
d.threshold_input) * new_threshold
elif new_threshold_type == 'MAD' and d.threshold_type != 'MAD':
raise MatchFilterError(
'Cannot recalculate MAD level, '
'use another threshold type')
elif new_threshold_type == 'absolute':
new_thresh = new_threshold
elif new_threshold_type == 'av_chan_corr':
new_thresh = new_threshold * d.no_chans
else:
raise MatchFilterError(
'new_threshold_type %s is not recognised' %
str(new_threshold_type))
if d.detect_val >= new_thresh:
d.threshold = new_thresh
d.threshold_input = new_threshold
d.threshold_type = new_threshold_type
rethresh_detections.append(d)
family.detections = rethresh_detections
return self | [
"def",
"rethreshold",
"(",
"self",
",",
"new_threshold",
",",
"new_threshold_type",
"=",
"'MAD'",
")",
":",
"for",
"family",
"in",
"self",
".",
"families",
":",
"rethresh_detections",
"=",
"[",
"]",
"for",
"d",
"in",
"family",
".",
"detections",
":",
"if",... | Remove detections from the Party that are below a new threshold.
.. Note:: threshold can only be set higher.
.. Warning::
Works in place on Party.
:type new_threshold: float
:param new_threshold: New threshold level
:type new_threshold_type: str
:param new_threshold_type: Either 'MAD', 'absolute' or 'av_chan_corr'
.. rubric:: Examples
Using the MAD threshold on detections made using the MAD threshold:
>>> party = Party().read()
>>> len(party)
4
>>> party = party.rethreshold(10.0)
>>> len(party)
4
>>> # Note that all detections are self detections
Using the absolute thresholding method on the same Party:
>>> party = Party().read().rethreshold(6.0, 'absolute')
>>> len(party)
1
Using the av_chan_corr method on the same Party:
>>> party = Party().read().rethreshold(0.9, 'av_chan_corr')
>>> len(party)
4 | [
"Remove",
"detections",
"from",
"the",
"Party",
"that",
"are",
"below",
"a",
"new",
"threshold",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L482-L546 | train | 203,315 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.decluster | def decluster(self, trig_int, timing='detect', metric='avg_cor'):
"""
De-cluster a Party of detections by enforcing a detection separation.
De-clustering occurs between events detected by different (or the same)
templates. If multiple detections occur within trig_int then the
preferred detection will be determined by the metric argument. This
can be either the average single-station correlation coefficient which
is calculated as Detection.detect_val / Detection.no_chans, or the
raw cross channel correlation sum which is simply Detection.detect_val.
:type trig_int: float
:param trig_int: Minimum detection separation in seconds.
:type metric: str
:param metric: What metric to sort peaks by. Either 'avg_cor' which
takes the single station average correlation or 'cor_sum' which
takes the total correlation sum across all channels.
:type timing: str
:param timing:
Either 'detect' or 'origin' to decluster based on either the
detection time or the origin time.
.. Warning::
Works in place on object, if you need to keep the original safe
then run this on a copy of the object!
.. rubric:: Example
>>> party = Party().read()
>>> len(party)
4
>>> declustered = party.decluster(20)
>>> len(party)
3
"""
all_detections = []
for fam in self.families:
all_detections.extend(fam.detections)
if timing == 'detect':
if metric == 'avg_cor':
detect_info = [(d.detect_time, d.detect_val / d.no_chans)
for d in all_detections]
elif metric == 'cor_sum':
detect_info = [(d.detect_time, d.detect_val)
for d in all_detections]
else:
raise MatchFilterError('metric is not cor_sum or avg_cor')
elif timing == 'origin':
if metric == 'avg_cor':
detect_info = [(_get_origin(d.event).time,
d.detect_val / d.no_chans)
for d in all_detections]
elif metric == 'cor_sum':
detect_info = [(_get_origin(d.event).time, d.detect_val)
for d in all_detections]
else:
raise MatchFilterError('metric is not cor_sum or avg_cor')
else:
raise MatchFilterError('timing is not detect or origin')
min_det = sorted([d[0] for d in detect_info])[0]
detect_vals = np.array([d[1] for d in detect_info])
detect_times = np.array([
_total_microsec(d[0].datetime, min_det.datetime)
for d in detect_info])
# Trig_int must be converted from seconds to micro-seconds
peaks_out = decluster(
peaks=detect_vals, index=detect_times, trig_int=trig_int * 10 ** 6)
# Need to match both the time and the detection value
declustered_detections = []
for ind in peaks_out:
matching_time_indeces = np.where(detect_times == ind[-1])[0]
matches = matching_time_indeces[
np.where(detect_vals[matching_time_indeces] == ind[0])[0][0]]
declustered_detections.append(all_detections[matches])
# Convert this list into families
template_names = list(set([d.template_name
for d in declustered_detections]))
new_families = []
for template_name in template_names:
template = [fam.template for fam in self.families
if fam.template.name == template_name][0]
new_families.append(Family(
template=template,
detections=[d for d in declustered_detections
if d.template_name == template_name]))
self.families = new_families
return self | python | def decluster(self, trig_int, timing='detect', metric='avg_cor'):
"""
De-cluster a Party of detections by enforcing a detection separation.
De-clustering occurs between events detected by different (or the same)
templates. If multiple detections occur within trig_int then the
preferred detection will be determined by the metric argument. This
can be either the average single-station correlation coefficient which
is calculated as Detection.detect_val / Detection.no_chans, or the
raw cross channel correlation sum which is simply Detection.detect_val.
:type trig_int: float
:param trig_int: Minimum detection separation in seconds.
:type metric: str
:param metric: What metric to sort peaks by. Either 'avg_cor' which
takes the single station average correlation or 'cor_sum' which
takes the total correlation sum across all channels.
:type timing: str
:param timing:
Either 'detect' or 'origin' to decluster based on either the
detection time or the origin time.
.. Warning::
Works in place on object, if you need to keep the original safe
then run this on a copy of the object!
.. rubric:: Example
>>> party = Party().read()
>>> len(party)
4
>>> declustered = party.decluster(20)
>>> len(party)
3
"""
all_detections = []
for fam in self.families:
all_detections.extend(fam.detections)
if timing == 'detect':
if metric == 'avg_cor':
detect_info = [(d.detect_time, d.detect_val / d.no_chans)
for d in all_detections]
elif metric == 'cor_sum':
detect_info = [(d.detect_time, d.detect_val)
for d in all_detections]
else:
raise MatchFilterError('metric is not cor_sum or avg_cor')
elif timing == 'origin':
if metric == 'avg_cor':
detect_info = [(_get_origin(d.event).time,
d.detect_val / d.no_chans)
for d in all_detections]
elif metric == 'cor_sum':
detect_info = [(_get_origin(d.event).time, d.detect_val)
for d in all_detections]
else:
raise MatchFilterError('metric is not cor_sum or avg_cor')
else:
raise MatchFilterError('timing is not detect or origin')
min_det = sorted([d[0] for d in detect_info])[0]
detect_vals = np.array([d[1] for d in detect_info])
detect_times = np.array([
_total_microsec(d[0].datetime, min_det.datetime)
for d in detect_info])
# Trig_int must be converted from seconds to micro-seconds
peaks_out = decluster(
peaks=detect_vals, index=detect_times, trig_int=trig_int * 10 ** 6)
# Need to match both the time and the detection value
declustered_detections = []
for ind in peaks_out:
matching_time_indeces = np.where(detect_times == ind[-1])[0]
matches = matching_time_indeces[
np.where(detect_vals[matching_time_indeces] == ind[0])[0][0]]
declustered_detections.append(all_detections[matches])
# Convert this list into families
template_names = list(set([d.template_name
for d in declustered_detections]))
new_families = []
for template_name in template_names:
template = [fam.template for fam in self.families
if fam.template.name == template_name][0]
new_families.append(Family(
template=template,
detections=[d for d in declustered_detections
if d.template_name == template_name]))
self.families = new_families
return self | [
"def",
"decluster",
"(",
"self",
",",
"trig_int",
",",
"timing",
"=",
"'detect'",
",",
"metric",
"=",
"'avg_cor'",
")",
":",
"all_detections",
"=",
"[",
"]",
"for",
"fam",
"in",
"self",
".",
"families",
":",
"all_detections",
".",
"extend",
"(",
"fam",
... | De-cluster a Party of detections by enforcing a detection separation.
De-clustering occurs between events detected by different (or the same)
templates. If multiple detections occur within trig_int then the
preferred detection will be determined by the metric argument. This
can be either the average single-station correlation coefficient which
is calculated as Detection.detect_val / Detection.no_chans, or the
raw cross channel correlation sum which is simply Detection.detect_val.
:type trig_int: float
:param trig_int: Minimum detection separation in seconds.
:type metric: str
:param metric: What metric to sort peaks by. Either 'avg_cor' which
takes the single station average correlation or 'cor_sum' which
takes the total correlation sum across all channels.
:type timing: str
:param timing:
Either 'detect' or 'origin' to decluster based on either the
detection time or the origin time.
.. Warning::
Works in place on object, if you need to keep the original safe
then run this on a copy of the object!
.. rubric:: Example
>>> party = Party().read()
>>> len(party)
4
>>> declustered = party.decluster(20)
>>> len(party)
3 | [
"De",
"-",
"cluster",
"a",
"Party",
"of",
"detections",
"by",
"enforcing",
"a",
"detection",
"separation",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L548-L634 | train | 203,316 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.read | def read(self, filename=None, read_detection_catalog=True):
"""
Read a Party from a file.
:type filename: str
:param filename:
File to read from - can be a list of files, and can contain
wildcards.
:type read_detection_catalog: bool
:param read_detection_catalog:
Whether to read the detection catalog or not, if False, catalog
will be regenerated - for large catalogs this can be faster.
.. rubric:: Example
>>> Party().read()
Party of 4 Families.
"""
tribe = Tribe()
families = []
if filename is None:
# If there is no filename given, then read the example.
filename = os.path.join(os.path.dirname(__file__),
'..', 'tests', 'test_data',
'test_party.tgz')
if isinstance(filename, list):
filenames = []
for _filename in filename:
# Expand wildcards
filenames.extend(glob.glob(_filename))
else:
# Expand wildcards
filenames = glob.glob(filename)
for _filename in filenames:
with tarfile.open(_filename, "r:*") as arc:
temp_dir = tempfile.mkdtemp()
arc.extractall(path=temp_dir, members=_safemembers(arc))
# Read in the detections first, this way, if we read from multiple
# files then we can just read in extra templates as needed.
# Read in families here!
party_dir = glob.glob(temp_dir + os.sep + '*')[0]
tribe._read_from_folder(dirname=party_dir)
det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*"))
if len(det_cat_file) != 0 and read_detection_catalog:
try:
all_cat = read_events(det_cat_file[0])
except TypeError as e:
print(e)
pass
else:
all_cat = Catalog()
for family_file in glob.glob(join(party_dir, '*_detections.csv')):
template = [
t for t in tribe if _templates_match(t, family_file)]
family = Family(template=template[0] or Template())
new_family = True
if family.template.name in [f.template.name for f in families]:
family = [
f for f in families if
f.template.name == family.template.name][0]
new_family = False
family.detections = _read_family(
fname=family_file, all_cat=all_cat, template=template[0])
if new_family:
families.append(family)
shutil.rmtree(temp_dir)
self.families = families
return self | python | def read(self, filename=None, read_detection_catalog=True):
"""
Read a Party from a file.
:type filename: str
:param filename:
File to read from - can be a list of files, and can contain
wildcards.
:type read_detection_catalog: bool
:param read_detection_catalog:
Whether to read the detection catalog or not, if False, catalog
will be regenerated - for large catalogs this can be faster.
.. rubric:: Example
>>> Party().read()
Party of 4 Families.
"""
tribe = Tribe()
families = []
if filename is None:
# If there is no filename given, then read the example.
filename = os.path.join(os.path.dirname(__file__),
'..', 'tests', 'test_data',
'test_party.tgz')
if isinstance(filename, list):
filenames = []
for _filename in filename:
# Expand wildcards
filenames.extend(glob.glob(_filename))
else:
# Expand wildcards
filenames = glob.glob(filename)
for _filename in filenames:
with tarfile.open(_filename, "r:*") as arc:
temp_dir = tempfile.mkdtemp()
arc.extractall(path=temp_dir, members=_safemembers(arc))
# Read in the detections first, this way, if we read from multiple
# files then we can just read in extra templates as needed.
# Read in families here!
party_dir = glob.glob(temp_dir + os.sep + '*')[0]
tribe._read_from_folder(dirname=party_dir)
det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*"))
if len(det_cat_file) != 0 and read_detection_catalog:
try:
all_cat = read_events(det_cat_file[0])
except TypeError as e:
print(e)
pass
else:
all_cat = Catalog()
for family_file in glob.glob(join(party_dir, '*_detections.csv')):
template = [
t for t in tribe if _templates_match(t, family_file)]
family = Family(template=template[0] or Template())
new_family = True
if family.template.name in [f.template.name for f in families]:
family = [
f for f in families if
f.template.name == family.template.name][0]
new_family = False
family.detections = _read_family(
fname=family_file, all_cat=all_cat, template=template[0])
if new_family:
families.append(family)
shutil.rmtree(temp_dir)
self.families = families
return self | [
"def",
"read",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"read_detection_catalog",
"=",
"True",
")",
":",
"tribe",
"=",
"Tribe",
"(",
")",
"families",
"=",
"[",
"]",
"if",
"filename",
"is",
"None",
":",
"# If there is no filename given, then read the exa... | Read a Party from a file.
:type filename: str
:param filename:
File to read from - can be a list of files, and can contain
wildcards.
:type read_detection_catalog: bool
:param read_detection_catalog:
Whether to read the detection catalog or not, if False, catalog
will be regenerated - for large catalogs this can be faster.
.. rubric:: Example
>>> Party().read()
Party of 4 Families. | [
"Read",
"a",
"Party",
"from",
"a",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L743-L810 | train | 203,317 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.get_catalog | def get_catalog(self):
"""
Get an obspy catalog object from the party.
:returns: :class:`obspy.core.event.Catalog`
.. rubric:: Example
>>> party = Party().read()
>>> cat = party.get_catalog()
>>> print(len(cat))
4
"""
catalog = Catalog()
for fam in self.families:
if len(fam.catalog) != 0:
catalog.events.extend(fam.catalog.events)
return catalog | python | def get_catalog(self):
"""
Get an obspy catalog object from the party.
:returns: :class:`obspy.core.event.Catalog`
.. rubric:: Example
>>> party = Party().read()
>>> cat = party.get_catalog()
>>> print(len(cat))
4
"""
catalog = Catalog()
for fam in self.families:
if len(fam.catalog) != 0:
catalog.events.extend(fam.catalog.events)
return catalog | [
"def",
"get_catalog",
"(",
"self",
")",
":",
"catalog",
"=",
"Catalog",
"(",
")",
"for",
"fam",
"in",
"self",
".",
"families",
":",
"if",
"len",
"(",
"fam",
".",
"catalog",
")",
"!=",
"0",
":",
"catalog",
".",
"events",
".",
"extend",
"(",
"fam",
... | Get an obspy catalog object from the party.
:returns: :class:`obspy.core.event.Catalog`
.. rubric:: Example
>>> party = Party().read()
>>> cat = party.get_catalog()
>>> print(len(cat))
4 | [
"Get",
"an",
"obspy",
"catalog",
"object",
"from",
"the",
"party",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L972-L989 | train | 203,318 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Party.min_chans | def min_chans(self, min_chans):
"""
Remove detections with fewer channels used than min_chans
:type min_chans: int
:param min_chans: Minimum number of channels to allow a detection.
:return: Party
.. Note:: Works in place on Party.
.. rubric:: Example
>>> party = Party().read()
>>> print(len(party))
4
>>> party = party.min_chans(5)
>>> print(len(party))
1
"""
declustered = Party()
for family in self.families:
fam = Family(family.template)
for d in family.detections:
if d.no_chans > min_chans:
fam.detections.append(d)
declustered.families.append(fam)
self.families = declustered.families
return self | python | def min_chans(self, min_chans):
"""
Remove detections with fewer channels used than min_chans
:type min_chans: int
:param min_chans: Minimum number of channels to allow a detection.
:return: Party
.. Note:: Works in place on Party.
.. rubric:: Example
>>> party = Party().read()
>>> print(len(party))
4
>>> party = party.min_chans(5)
>>> print(len(party))
1
"""
declustered = Party()
for family in self.families:
fam = Family(family.template)
for d in family.detections:
if d.no_chans > min_chans:
fam.detections.append(d)
declustered.families.append(fam)
self.families = declustered.families
return self | [
"def",
"min_chans",
"(",
"self",
",",
"min_chans",
")",
":",
"declustered",
"=",
"Party",
"(",
")",
"for",
"family",
"in",
"self",
".",
"families",
":",
"fam",
"=",
"Family",
"(",
"family",
".",
"template",
")",
"for",
"d",
"in",
"family",
".",
"dete... | Remove detections with fewer channels used than min_chans
:type min_chans: int
:param min_chans: Minimum number of channels to allow a detection.
:return: Party
.. Note:: Works in place on Party.
.. rubric:: Example
>>> party = Party().read()
>>> print(len(party))
4
>>> party = party.min_chans(5)
>>> print(len(party))
1 | [
"Remove",
"detections",
"with",
"fewer",
"channels",
"used",
"than",
"min_chans"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L991-L1018 | train | 203,319 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Family._uniq | def _uniq(self):
"""
Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
2
"""
_detections = []
[_detections.append(d) for d in self.detections
if not _detections.count(d)]
self.detections = _detections
return self | python | def _uniq(self):
"""
Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
2
"""
_detections = []
[_detections.append(d) for d in self.detections
if not _detections.count(d)]
self.detections = _detections
return self | [
"def",
"_uniq",
"(",
"self",
")",
":",
"_detections",
"=",
"[",
"]",
"[",
"_detections",
".",
"append",
"(",
"d",
")",
"for",
"d",
"in",
"self",
".",
"detections",
"if",
"not",
"_detections",
".",
"count",
"(",
"d",
")",
"]",
"self",
".",
"detectio... | Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
2 | [
"Get",
"list",
"of",
"unique",
"detections",
".",
"Works",
"in",
"place",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1281-L1311 | train | 203,320 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Family.sort | def sort(self):
"""Sort by detection time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family[0].detect_time
UTCDateTime(1970, 1, 1, 0, 3, 20)
>>> family.sort()[0].detect_time
UTCDateTime(1970, 1, 1, 0, 0)
"""
self.detections = sorted(self.detections, key=lambda d: d.detect_time)
return self | python | def sort(self):
"""Sort by detection time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family[0].detect_time
UTCDateTime(1970, 1, 1, 0, 3, 20)
>>> family.sort()[0].detect_time
UTCDateTime(1970, 1, 1, 0, 0)
"""
self.detections = sorted(self.detections, key=lambda d: d.detect_time)
return self | [
"def",
"sort",
"(",
"self",
")",
":",
"self",
".",
"detections",
"=",
"sorted",
"(",
"self",
".",
"detections",
",",
"key",
"=",
"lambda",
"d",
":",
"d",
".",
"detect_time",
")",
"return",
"self"
] | Sort by detection time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family[0].detect_time
UTCDateTime(1970, 1, 1, 0, 3, 20)
>>> family.sort()[0].detect_time
UTCDateTime(1970, 1, 1, 0, 0) | [
"Sort",
"by",
"detection",
"time",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1313-L1338 | train | 203,321 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Family.plot | def plot(self, plot_grouped=False):
"""
Plot the cumulative number of detections in time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family.plot(plot_grouped=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core.match_filter import Family, Template
from eqcorrscan.core.match_filter import Detection
from obspy import UTCDateTime
family = Family(
template=Template(name='a'), detections=[
Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
no_chans=8, detect_val=4.2, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0),
Detection(template_name='a', detect_time=UTCDateTime(0),
no_chans=8, detect_val=4.5, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0),
Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
no_chans=8, detect_val=4.5, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0)])
family.plot(plot_grouped=True)
"""
cumulative_detections(
detections=self.detections, plot_grouped=plot_grouped) | python | def plot(self, plot_grouped=False):
"""
Plot the cumulative number of detections in time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family.plot(plot_grouped=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core.match_filter import Family, Template
from eqcorrscan.core.match_filter import Detection
from obspy import UTCDateTime
family = Family(
template=Template(name='a'), detections=[
Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
no_chans=8, detect_val=4.2, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0),
Detection(template_name='a', detect_time=UTCDateTime(0),
no_chans=8, detect_val=4.5, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0),
Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
no_chans=8, detect_val=4.5, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0)])
family.plot(plot_grouped=True)
"""
cumulative_detections(
detections=self.detections, plot_grouped=plot_grouped) | [
"def",
"plot",
"(",
"self",
",",
"plot_grouped",
"=",
"False",
")",
":",
"cumulative_detections",
"(",
"detections",
"=",
"self",
".",
"detections",
",",
"plot_grouped",
"=",
"plot_grouped",
")"
] | Plot the cumulative number of detections in time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family.plot(plot_grouped=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core.match_filter import Family, Template
from eqcorrscan.core.match_filter import Detection
from obspy import UTCDateTime
family = Family(
template=Template(name='a'), detections=[
Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
no_chans=8, detect_val=4.2, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0),
Detection(template_name='a', detect_time=UTCDateTime(0),
no_chans=8, detect_val=4.5, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0),
Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
no_chans=8, detect_val=4.5, threshold=1.2,
typeofdet='corr', threshold_type='MAD',
threshold_input=8.0)])
family.plot(plot_grouped=True) | [
"Plot",
"the",
"cumulative",
"number",
"of",
"detections",
"in",
"time",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1389-L1433 | train | 203,322 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Template.same_processing | def same_processing(self, other):
"""
Check is the templates are processed the same.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_b = template_a.copy()
>>> template_a.same_processing(template_b)
True
>>> template_b.lowcut = 5.0
>>> template_a.same_processing(template_b)
False
"""
for key in self.__dict__.keys():
if key in ['name', 'st', 'prepick', 'event', 'template_info']:
continue
if not self.__dict__[key] == other.__dict__[key]:
return False
return True | python | def same_processing(self, other):
"""
Check is the templates are processed the same.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_b = template_a.copy()
>>> template_a.same_processing(template_b)
True
>>> template_b.lowcut = 5.0
>>> template_a.same_processing(template_b)
False
"""
for key in self.__dict__.keys():
if key in ['name', 'st', 'prepick', 'event', 'template_info']:
continue
if not self.__dict__[key] == other.__dict__[key]:
return False
return True | [
"def",
"same_processing",
"(",
"self",
",",
"other",
")",
":",
"for",
"key",
"in",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"[",
"'name'",
",",
"'st'",
",",
"'prepick'",
",",
"'event'",
",",
"'template_info'",
"]",
":",... | Check is the templates are processed the same.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_b = template_a.copy()
>>> template_a.same_processing(template_b)
True
>>> template_b.lowcut = 5.0
>>> template_a.same_processing(template_b)
False | [
"Check",
"is",
"the",
"templates",
"are",
"processed",
"the",
"same",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1754-L1775 | train | 203,323 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Template.write | def write(self, filename, format='tar'):
"""
Write template.
:type filename: str
:param filename:
Filename to write to, if it already exists it will be opened and
appended to, otherwise it will be created.
:type format: str
:param format:
Format to write to, either 'tar' (to retain metadata), or any obspy
supported waveform format to just extract the waveform.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write('test_template') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_a.write('test_waveform.ms',
... format='MSEED') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
"""
if format == 'tar':
Tribe(templates=[self]).write(filename=filename)
else:
self.st.write(filename, format=format)
return self | python | def write(self, filename, format='tar'):
"""
Write template.
:type filename: str
:param filename:
Filename to write to, if it already exists it will be opened and
appended to, otherwise it will be created.
:type format: str
:param format:
Format to write to, either 'tar' (to retain metadata), or any obspy
supported waveform format to just extract the waveform.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write('test_template') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_a.write('test_waveform.ms',
... format='MSEED') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
"""
if format == 'tar':
Tribe(templates=[self]).write(filename=filename)
else:
self.st.write(filename, format=format)
return self | [
"def",
"write",
"(",
"self",
",",
"filename",
",",
"format",
"=",
"'tar'",
")",
":",
"if",
"format",
"==",
"'tar'",
":",
"Tribe",
"(",
"templates",
"=",
"[",
"self",
"]",
")",
".",
"write",
"(",
"filename",
"=",
"filename",
")",
"else",
":",
"self"... | Write template.
:type filename: str
:param filename:
Filename to write to, if it already exists it will be opened and
appended to, otherwise it will be created.
:type format: str
:param format:
Format to write to, either 'tar' (to retain metadata), or any obspy
supported waveform format to just extract the waveform.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write('test_template') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_a.write('test_waveform.ms',
... format='MSEED') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s | [
"Write",
"template",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1777-L1817 | train | 203,324 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Template.read | def read(self, filename):
"""
Read template from tar format with metadata.
:type filename: str
:param filename: Filename to read template from.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write(
... 'test_template_read') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_b = Template().read('test_template_read.tgz')
>>> template_a == template_b
True
"""
tribe = Tribe()
tribe.read(filename=filename)
if len(tribe) > 1:
raise IOError('Multiple templates in file')
for key in self.__dict__.keys():
self.__dict__[key] = tribe[0].__dict__[key]
return self | python | def read(self, filename):
"""
Read template from tar format with metadata.
:type filename: str
:param filename: Filename to read template from.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write(
... 'test_template_read') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_b = Template().read('test_template_read.tgz')
>>> template_a == template_b
True
"""
tribe = Tribe()
tribe.read(filename=filename)
if len(tribe) > 1:
raise IOError('Multiple templates in file')
for key in self.__dict__.keys():
self.__dict__[key] = tribe[0].__dict__[key]
return self | [
"def",
"read",
"(",
"self",
",",
"filename",
")",
":",
"tribe",
"=",
"Tribe",
"(",
")",
"tribe",
".",
"read",
"(",
"filename",
"=",
"filename",
")",
"if",
"len",
"(",
"tribe",
")",
">",
"1",
":",
"raise",
"IOError",
"(",
"'Multiple templates in file'",... | Read template from tar format with metadata.
:type filename: str
:param filename: Filename to read template from.
.. rubric:: Example
>>> template_a = Template(
... name='a', st=read(), lowcut=2.0, highcut=8.0, samp_rate=100,
... filt_order=4, process_length=3600, prepick=0.5)
>>> template_a.write(
... 'test_template_read') # doctest: +NORMALIZE_WHITESPACE
Template a:
3 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 100 Hz;
filter order: 4;
process length: 3600 s
>>> template_b = Template().read('test_template_read.tgz')
>>> template_a == template_b
True | [
"Read",
"template",
"from",
"tar",
"format",
"with",
"metadata",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1819-L1850 | train | 203,325 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Template.detect | def detect(self, stream, threshold, threshold_type, trig_int, plotvar,
pre_processed=False, daylong=False, parallel_process=True,
xcorr_func=None, concurrency=None, cores=None,
ignore_length=False, overlap="calculate", debug=0,
full_peaks=False):
"""
Detect using a single template within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type pre_processed: bool
:param pre_processed:
Set to True if `stream` has already undergone processing, in this
case eqcorrscan will only check that the sampling rate is correct.
Defaults to False, which will use the
:mod:`eqcorrscan.utils.pre_processing` routines to resample and
filter the continuous data.
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
preforms additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more details see
:func:`eqcorrscan.utils.correlate.register_array_xcorr`.
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`.
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calcualting cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks:
:param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
:returns: Family of detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to
the offsets in the template (e.g. if trace 2 starts 2 seconds
after trace 1 in the template then the continuous data will be
shifted by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous data
streams **may be missed**. The maximum time-period that might be
missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest using
some overlap (a few seconds, on the order of the maximum offset
in the templates) in the continous data. You will then need to
post-process the detections (which should be done anyway to remove
duplicates).
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a
given template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required
for the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
.. Note::
See tutorials for example.
"""
party = _group_detect(
templates=[self], stream=stream.copy(), threshold=threshold,
threshold_type=threshold_type, trig_int=trig_int,
plotvar=plotvar, pre_processed=pre_processed, daylong=daylong,
parallel_process=parallel_process, xcorr_func=xcorr_func,
concurrency=concurrency, cores=cores, ignore_length=ignore_length,
overlap=overlap, debug=debug, full_peaks=full_peaks)
return party[0] | python | def detect(self, stream, threshold, threshold_type, trig_int, plotvar,
pre_processed=False, daylong=False, parallel_process=True,
xcorr_func=None, concurrency=None, cores=None,
ignore_length=False, overlap="calculate", debug=0,
full_peaks=False):
"""
Detect using a single template within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type pre_processed: bool
:param pre_processed:
Set to True if `stream` has already undergone processing, in this
case eqcorrscan will only check that the sampling rate is correct.
Defaults to False, which will use the
:mod:`eqcorrscan.utils.pre_processing` routines to resample and
filter the continuous data.
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
preforms additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more details see
:func:`eqcorrscan.utils.correlate.register_array_xcorr`.
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`.
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calcualting cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks:
:param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
:returns: Family of detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to
the offsets in the template (e.g. if trace 2 starts 2 seconds
after trace 1 in the template then the continuous data will be
shifted by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous data
streams **may be missed**. The maximum time-period that might be
missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest using
some overlap (a few seconds, on the order of the maximum offset
in the templates) in the continous data. You will then need to
post-process the detections (which should be done anyway to remove
duplicates).
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a
given template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required
for the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
.. Note::
See tutorials for example.
"""
party = _group_detect(
templates=[self], stream=stream.copy(), threshold=threshold,
threshold_type=threshold_type, trig_int=trig_int,
plotvar=plotvar, pre_processed=pre_processed, daylong=daylong,
parallel_process=parallel_process, xcorr_func=xcorr_func,
concurrency=concurrency, cores=cores, ignore_length=ignore_length,
overlap=overlap, debug=debug, full_peaks=full_peaks)
return party[0] | [
"def",
"detect",
"(",
"self",
",",
"stream",
",",
"threshold",
",",
"threshold_type",
",",
"trig_int",
",",
"plotvar",
",",
"pre_processed",
"=",
"False",
",",
"daylong",
"=",
"False",
",",
"parallel_process",
"=",
"True",
",",
"xcorr_func",
"=",
"None",
"... | Detect using a single template within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type pre_processed: bool
:param pre_processed:
Set to True if `stream` has already undergone processing, in this
case eqcorrscan will only check that the sampling rate is correct.
Defaults to False, which will use the
:mod:`eqcorrscan.utils.pre_processing` routines to resample and
filter the continuous data.
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
preforms additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more details see
:func:`eqcorrscan.utils.correlate.register_array_xcorr`.
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`.
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calcualting cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks:
:param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
:returns: Family of detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to
the offsets in the template (e.g. if trace 2 starts 2 seconds
after trace 1 in the template then the continuous data will be
shifted by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous data
streams **may be missed**. The maximum time-period that might be
missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest using
some overlap (a few seconds, on the order of the maximum offset
in the templates) in the continous data. You will then need to
post-process the detections (which should be done anyway to remove
duplicates).
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a
given template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required
for the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
.. Note::
See tutorials for example. | [
"Detect",
"using",
"a",
"single",
"template",
"within",
"a",
"continuous",
"stream",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1852-L1997 | train | 203,326 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Template.construct | def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
prepick, **kwargs):
"""
Construct a template using a given method.
:param method:
Method to make the template,
see :mod:`eqcorrscan.core.template_gen` for possible methods.
:type method: str
:type name: str
:param name: Name for the template
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
.. Note::
methods `from_meta_file`, `from_seishub`, `from_client` and
`multi_template_gen` are not accommodated in this function and must
be called from Tribe.construct as these generate multiple
templates.
.. Note::
Calls functions from `eqcorrscan.core.template_gen`, see these
functions for details on what further arguments are required.
.. rubric:: Example
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = (
... os.path.dirname(eqcorrscan.__file__) + '/tests/test_data')
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> template = Template().construct(
... method='from_sac', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0, sac_files=sac_files)
>>> print(template) # doctest: +NORMALIZE_WHITESPACE
Template test:
12 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 20.0 Hz;
filter order: 4;
process length: 300.0 s
This will raise an error if the method is unsupported:
>>> template = Template().construct(
... method='from_meta_file', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: Method is not supported, use \
Tribe.construct instead.
"""
if method in ['from_meta_file', 'from_seishub', 'from_client',
'multi_template_gen']:
raise NotImplementedError('Method is not supported, '
'use Tribe.construct instead.')
streams, events, process_lengths = template_gen.template_gen(
method=method, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
return_event=True, **kwargs)
self.name = name
st = streams[0]
event = events[0]
process_length = process_lengths[0]
for tr in st:
if not np.any(tr.data.astype(np.float16)):
warnings.warn('Data are zero in float16, missing data,'
' will not use: %s' % tr.id)
st.remove(tr)
self.st = st
self.lowcut = lowcut
self.highcut = highcut
self.filt_order = filt_order
self.samp_rate = samp_rate
self.process_length = process_length
self.prepick = prepick
self.event = event
return self | python | def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
prepick, **kwargs):
"""
Construct a template using a given method.
:param method:
Method to make the template,
see :mod:`eqcorrscan.core.template_gen` for possible methods.
:type method: str
:type name: str
:param name: Name for the template
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
.. Note::
methods `from_meta_file`, `from_seishub`, `from_client` and
`multi_template_gen` are not accommodated in this function and must
be called from Tribe.construct as these generate multiple
templates.
.. Note::
Calls functions from `eqcorrscan.core.template_gen`, see these
functions for details on what further arguments are required.
.. rubric:: Example
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = (
... os.path.dirname(eqcorrscan.__file__) + '/tests/test_data')
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> template = Template().construct(
... method='from_sac', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0, sac_files=sac_files)
>>> print(template) # doctest: +NORMALIZE_WHITESPACE
Template test:
12 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 20.0 Hz;
filter order: 4;
process length: 300.0 s
This will raise an error if the method is unsupported:
>>> template = Template().construct(
... method='from_meta_file', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: Method is not supported, use \
Tribe.construct instead.
"""
if method in ['from_meta_file', 'from_seishub', 'from_client',
'multi_template_gen']:
raise NotImplementedError('Method is not supported, '
'use Tribe.construct instead.')
streams, events, process_lengths = template_gen.template_gen(
method=method, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
return_event=True, **kwargs)
self.name = name
st = streams[0]
event = events[0]
process_length = process_lengths[0]
for tr in st:
if not np.any(tr.data.astype(np.float16)):
warnings.warn('Data are zero in float16, missing data,'
' will not use: %s' % tr.id)
st.remove(tr)
self.st = st
self.lowcut = lowcut
self.highcut = highcut
self.filt_order = filt_order
self.samp_rate = samp_rate
self.process_length = process_length
self.prepick = prepick
self.event = event
return self | [
"def",
"construct",
"(",
"self",
",",
"method",
",",
"name",
",",
"lowcut",
",",
"highcut",
",",
"samp_rate",
",",
"filt_order",
",",
"prepick",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"in",
"[",
"'from_meta_file'",
",",
"'from_seishub'",
",",... | Construct a template using a given method.
:param method:
Method to make the template,
see :mod:`eqcorrscan.core.template_gen` for possible methods.
:type method: str
:type name: str
:param name: Name for the template
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
.. Note::
methods `from_meta_file`, `from_seishub`, `from_client` and
`multi_template_gen` are not accommodated in this function and must
be called from Tribe.construct as these generate multiple
templates.
.. Note::
Calls functions from `eqcorrscan.core.template_gen`, see these
functions for details on what further arguments are required.
.. rubric:: Example
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = (
... os.path.dirname(eqcorrscan.__file__) + '/tests/test_data')
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> template = Template().construct(
... method='from_sac', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0, sac_files=sac_files)
>>> print(template) # doctest: +NORMALIZE_WHITESPACE
Template test:
12 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 20.0 Hz;
filter order: 4;
process length: 300.0 s
This will raise an error if the method is unsupported:
>>> template = Template().construct(
... method='from_meta_file', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: Method is not supported, use \
Tribe.construct instead. | [
"Construct",
"a",
"template",
"using",
"a",
"given",
"method",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1999-L2093 | train | 203,327 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.sort | def sort(self):
"""
Sort the tribe, sorts by template name.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.sort()
Tribe of 3 templates
>>> tribe[0] # doctest: +NORMALIZE_WHITESPACE
Template a:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s
"""
self.templates = sorted(self.templates, key=lambda x: x.name)
return self | python | def sort(self):
"""
Sort the tribe, sorts by template name.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.sort()
Tribe of 3 templates
>>> tribe[0] # doctest: +NORMALIZE_WHITESPACE
Template a:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s
"""
self.templates = sorted(self.templates, key=lambda x: x.name)
return self | [
"def",
"sort",
"(",
"self",
")",
":",
"self",
".",
"templates",
"=",
"sorted",
"(",
"self",
".",
"templates",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"name",
")",
"return",
"self"
] | Sort the tribe, sorts by template name.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.sort()
Tribe of 3 templates
>>> tribe[0] # doctest: +NORMALIZE_WHITESPACE
Template a:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s | [
"Sort",
"the",
"tribe",
"sorts",
"by",
"template",
"name",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2238-L2258 | train | 203,328 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.select | def select(self, template_name):
"""
Select a particular template from the tribe.
:type template_name: str
:param template_name: Template name to look-up
:return: Template
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.select('b') # doctest: +NORMALIZE_WHITESPACE
Template b:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s
"""
return [t for t in self.templates if t.name == template_name][0] | python | def select(self, template_name):
"""
Select a particular template from the tribe.
:type template_name: str
:param template_name: Template name to look-up
:return: Template
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.select('b') # doctest: +NORMALIZE_WHITESPACE
Template b:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s
"""
return [t for t in self.templates if t.name == template_name][0] | [
"def",
"select",
"(",
"self",
",",
"template_name",
")",
":",
"return",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"templates",
"if",
"t",
".",
"name",
"==",
"template_name",
"]",
"[",
"0",
"]"
] | Select a particular template from the tribe.
:type template_name: str
:param template_name: Template name to look-up
:return: Template
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.select('b') # doctest: +NORMALIZE_WHITESPACE
Template b:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s | [
"Select",
"a",
"particular",
"template",
"from",
"the",
"tribe",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2260-L2281 | train | 203,329 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.remove | def remove(self, template):
"""
Remove a template from the tribe.
:type template: :class:`eqcorrscan.core.match_filter.Template`
:param template: Template to remove from tribe
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.remove(tribe.templates[0])
Tribe of 2 templates
"""
self.templates = [t for t in self.templates if t != template]
return self | python | def remove(self, template):
"""
Remove a template from the tribe.
:type template: :class:`eqcorrscan.core.match_filter.Template`
:param template: Template to remove from tribe
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.remove(tribe.templates[0])
Tribe of 2 templates
"""
self.templates = [t for t in self.templates if t != template]
return self | [
"def",
"remove",
"(",
"self",
",",
"template",
")",
":",
"self",
".",
"templates",
"=",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"templates",
"if",
"t",
"!=",
"template",
"]",
"return",
"self"
] | Remove a template from the tribe.
:type template: :class:`eqcorrscan.core.match_filter.Template`
:param template: Template to remove from tribe
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.remove(tribe.templates[0])
Tribe of 2 templates | [
"Remove",
"a",
"template",
"from",
"the",
"tribe",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2283-L2298 | train | 203,330 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.write | def write(self, filename, compress=True, catalog_format="QUAKEML"):
"""
Write the tribe to a file using tar archive formatting.
:type filename: str
:param filename:
Filename to write to, if it exists it will be appended to.
:type compress: bool
:param compress:
Whether to compress the tar archive or not, if False then will
just be files in a folder.
:type catalog_format: str
:param catalog_format:
What format to write the detection-catalog with. Only Nordic,
SC3ML, QUAKEML are supported. Note that not all information is
written for all formats (QUAKEML is the most complete, but is
slow for IO).
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
"""
if catalog_format not in CAT_EXT_MAP.keys():
raise TypeError("{0} is not supported".format(catalog_format))
if not os.path.isdir(filename):
os.makedirs(filename)
self._par_write(filename)
tribe_cat = Catalog()
for t in self.templates:
if t.event is not None:
tribe_cat.append(t.event)
if len(tribe_cat) > 0:
tribe_cat.write(
os.path.join(filename, 'tribe_cat.{0}'.format(
CAT_EXT_MAP[catalog_format])), format=catalog_format)
for template in self.templates:
template.st.write(filename + '/' + template.name + '.ms',
format='MSEED')
if compress:
with tarfile.open(filename + '.tgz', "w:gz") as tar:
tar.add(filename, arcname=os.path.basename(filename))
shutil.rmtree(filename)
return self | python | def write(self, filename, compress=True, catalog_format="QUAKEML"):
"""
Write the tribe to a file using tar archive formatting.
:type filename: str
:param filename:
Filename to write to, if it exists it will be appended to.
:type compress: bool
:param compress:
Whether to compress the tar archive or not, if False then will
just be files in a folder.
:type catalog_format: str
:param catalog_format:
What format to write the detection-catalog with. Only Nordic,
SC3ML, QUAKEML are supported. Note that not all information is
written for all formats (QUAKEML is the most complete, but is
slow for IO).
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
"""
if catalog_format not in CAT_EXT_MAP.keys():
raise TypeError("{0} is not supported".format(catalog_format))
if not os.path.isdir(filename):
os.makedirs(filename)
self._par_write(filename)
tribe_cat = Catalog()
for t in self.templates:
if t.event is not None:
tribe_cat.append(t.event)
if len(tribe_cat) > 0:
tribe_cat.write(
os.path.join(filename, 'tribe_cat.{0}'.format(
CAT_EXT_MAP[catalog_format])), format=catalog_format)
for template in self.templates:
template.st.write(filename + '/' + template.name + '.ms',
format='MSEED')
if compress:
with tarfile.open(filename + '.tgz', "w:gz") as tar:
tar.add(filename, arcname=os.path.basename(filename))
shutil.rmtree(filename)
return self | [
"def",
"write",
"(",
"self",
",",
"filename",
",",
"compress",
"=",
"True",
",",
"catalog_format",
"=",
"\"QUAKEML\"",
")",
":",
"if",
"catalog_format",
"not",
"in",
"CAT_EXT_MAP",
".",
"keys",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"{0} is not supporte... | Write the tribe to a file using tar archive formatting.
:type filename: str
:param filename:
Filename to write to, if it exists it will be appended to.
:type compress: bool
:param compress:
Whether to compress the tar archive or not, if False then will
just be files in a folder.
:type catalog_format: str
:param catalog_format:
What format to write the detection-catalog with. Only Nordic,
SC3ML, QUAKEML are supported. Note that not all information is
written for all formats (QUAKEML is the most complete, but is
slow for IO).
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates | [
"Write",
"the",
"tribe",
"to",
"a",
"file",
"using",
"tar",
"archive",
"formatting",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2313-L2357 | train | 203,331 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe._par_write | def _par_write(self, dirname):
"""
Internal write function to write a formatted parameter file.
:type dirname: str
:param dirname: Directory to write the parameter file to.
"""
filename = dirname + '/' + 'template_parameters.csv'
with open(filename, 'w') as parfile:
for template in self.templates:
for key in template.__dict__.keys():
if key not in ['st', 'event']:
parfile.write(key + ': ' +
str(template.__dict__[key]) + ', ')
parfile.write('\n')
return self | python | def _par_write(self, dirname):
"""
Internal write function to write a formatted parameter file.
:type dirname: str
:param dirname: Directory to write the parameter file to.
"""
filename = dirname + '/' + 'template_parameters.csv'
with open(filename, 'w') as parfile:
for template in self.templates:
for key in template.__dict__.keys():
if key not in ['st', 'event']:
parfile.write(key + ': ' +
str(template.__dict__[key]) + ', ')
parfile.write('\n')
return self | [
"def",
"_par_write",
"(",
"self",
",",
"dirname",
")",
":",
"filename",
"=",
"dirname",
"+",
"'/'",
"+",
"'template_parameters.csv'",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"parfile",
":",
"for",
"template",
"in",
"self",
".",
"templates",... | Internal write function to write a formatted parameter file.
:type dirname: str
:param dirname: Directory to write the parameter file to. | [
"Internal",
"write",
"function",
"to",
"write",
"a",
"formatted",
"parameter",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2359-L2374 | train | 203,332 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.read | def read(self, filename):
"""
Read a tribe of templates from a tar formatted file.
:type filename: str
:param filename: File to read templates from.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
>>> tribe_back = Tribe().read('test_tribe.tgz')
>>> tribe_back == tribe
True
"""
with tarfile.open(filename, "r:*") as arc:
temp_dir = tempfile.mkdtemp()
arc.extractall(path=temp_dir, members=_safemembers(arc))
tribe_dir = glob.glob(temp_dir + os.sep + '*')[0]
self._read_from_folder(dirname=tribe_dir)
shutil.rmtree(temp_dir)
return self | python | def read(self, filename):
"""
Read a tribe of templates from a tar formatted file.
:type filename: str
:param filename: File to read templates from.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
>>> tribe_back = Tribe().read('test_tribe.tgz')
>>> tribe_back == tribe
True
"""
with tarfile.open(filename, "r:*") as arc:
temp_dir = tempfile.mkdtemp()
arc.extractall(path=temp_dir, members=_safemembers(arc))
tribe_dir = glob.glob(temp_dir + os.sep + '*')[0]
self._read_from_folder(dirname=tribe_dir)
shutil.rmtree(temp_dir)
return self | [
"def",
"read",
"(",
"self",
",",
"filename",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"filename",
",",
"\"r:*\"",
")",
"as",
"arc",
":",
"temp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"arc",
".",
"extractall",
"(",
"path",
"=",
"temp_di... | Read a tribe of templates from a tar formatted file.
:type filename: str
:param filename: File to read templates from.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
>>> tribe_back = Tribe().read('test_tribe.tgz')
>>> tribe_back == tribe
True | [
"Read",
"a",
"tribe",
"of",
"templates",
"from",
"a",
"tar",
"formatted",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2376-L2398 | train | 203,333 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe._read_from_folder | def _read_from_folder(self, dirname):
"""
Internal folder reader.
:type dirname: str
:param dirname: Folder to read from.
"""
templates = _par_read(dirname=dirname, compressed=False)
t_files = glob.glob(dirname + os.sep + '*.ms')
tribe_cat_file = glob.glob(os.path.join(dirname, "tribe_cat.*"))
if len(tribe_cat_file) != 0:
tribe_cat = read_events(tribe_cat_file[0])
else:
tribe_cat = Catalog()
previous_template_names = [t.name for t in self.templates]
for template in templates:
if template.name in previous_template_names:
# Don't read in for templates that we already have.
continue
for event in tribe_cat:
for comment in event.comments:
if comment.text == 'eqcorrscan_template_' + template.name:
template.event = event
t_file = [t for t in t_files
if t.split(os.sep)[-1] == template.name + '.ms']
if len(t_file) == 0:
print('No waveform for template: ' + template.name)
templates.remove(template)
continue
elif len(t_file) > 1:
print('Multiple waveforms found, using: ' + t_file[0])
template.st = read(t_file[0])
self.templates.extend(templates)
return | python | def _read_from_folder(self, dirname):
"""
Internal folder reader.
:type dirname: str
:param dirname: Folder to read from.
"""
templates = _par_read(dirname=dirname, compressed=False)
t_files = glob.glob(dirname + os.sep + '*.ms')
tribe_cat_file = glob.glob(os.path.join(dirname, "tribe_cat.*"))
if len(tribe_cat_file) != 0:
tribe_cat = read_events(tribe_cat_file[0])
else:
tribe_cat = Catalog()
previous_template_names = [t.name for t in self.templates]
for template in templates:
if template.name in previous_template_names:
# Don't read in for templates that we already have.
continue
for event in tribe_cat:
for comment in event.comments:
if comment.text == 'eqcorrscan_template_' + template.name:
template.event = event
t_file = [t for t in t_files
if t.split(os.sep)[-1] == template.name + '.ms']
if len(t_file) == 0:
print('No waveform for template: ' + template.name)
templates.remove(template)
continue
elif len(t_file) > 1:
print('Multiple waveforms found, using: ' + t_file[0])
template.st = read(t_file[0])
self.templates.extend(templates)
return | [
"def",
"_read_from_folder",
"(",
"self",
",",
"dirname",
")",
":",
"templates",
"=",
"_par_read",
"(",
"dirname",
"=",
"dirname",
",",
"compressed",
"=",
"False",
")",
"t_files",
"=",
"glob",
".",
"glob",
"(",
"dirname",
"+",
"os",
".",
"sep",
"+",
"'*... | Internal folder reader.
:type dirname: str
:param dirname: Folder to read from. | [
"Internal",
"folder",
"reader",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2400-L2433 | train | 203,334 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.cluster | def cluster(self, method, **kwargs):
"""
Cluster the tribe.
Cluster templates within a tribe: returns multiple tribes each of
which could be stacked.
:type method: str
:param method:
Method of stacking, see :mod:`eqcorrscan.utils.clustering`
:return: List of tribes.
.. rubric:: Example
"""
from eqcorrscan.utils import clustering
tribes = []
func = getattr(clustering, method)
if method in ['space_cluster', 'space_time_cluster']:
cat = Catalog([t.event for t in self.templates])
groups = func(cat, **kwargs)
for group in groups:
new_tribe = Tribe()
for event in group:
new_tribe.templates.extend([t for t in self.templates
if t.event == event])
tribes.append(new_tribe)
return tribes | python | def cluster(self, method, **kwargs):
"""
Cluster the tribe.
Cluster templates within a tribe: returns multiple tribes each of
which could be stacked.
:type method: str
:param method:
Method of stacking, see :mod:`eqcorrscan.utils.clustering`
:return: List of tribes.
.. rubric:: Example
"""
from eqcorrscan.utils import clustering
tribes = []
func = getattr(clustering, method)
if method in ['space_cluster', 'space_time_cluster']:
cat = Catalog([t.event for t in self.templates])
groups = func(cat, **kwargs)
for group in groups:
new_tribe = Tribe()
for event in group:
new_tribe.templates.extend([t for t in self.templates
if t.event == event])
tribes.append(new_tribe)
return tribes | [
"def",
"cluster",
"(",
"self",
",",
"method",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"eqcorrscan",
".",
"utils",
"import",
"clustering",
"tribes",
"=",
"[",
"]",
"func",
"=",
"getattr",
"(",
"clustering",
",",
"method",
")",
"if",
"method",
"in",
... | Cluster the tribe.
Cluster templates within a tribe: returns multiple tribes each of
which could be stacked.
:type method: str
:param method:
Method of stacking, see :mod:`eqcorrscan.utils.clustering`
:return: List of tribes.
.. rubric:: Example | [
"Cluster",
"the",
"tribe",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2435-L2464 | train | 203,335 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Tribe.construct | def construct(self, method, lowcut, highcut, samp_rate, filt_order,
prepick, save_progress=False, **kwargs):
"""
Generate a Tribe of Templates.
See :mod:`eqcorrscan.core.template_gen` for available methods.
:param method: Method of Tribe generation.
:param kwargs: Arguments for the given method.
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
.. Note::
Methods: `from_contbase`, `from_sfile` and `from_sac` are not
supported by Tribe.construct and must use Template.construct.
.. Note::
The Method `multi_template_gen` is not supported because the
processing parameters for the stream are not known. Use
`from_meta_file` instead.
.. Note:: Templates will be named according to their start-time.
"""
templates, catalog, process_lengths = template_gen.template_gen(
method=method, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
return_event=True, save_progress=save_progress, **kwargs)
for template, event, process_len in zip(templates, catalog,
process_lengths):
t = Template()
for tr in template:
if not np.any(tr.data.astype(np.float16)):
warnings.warn('Data are zero in float16, missing data,'
' will not use: %s' % tr.id)
template.remove(tr)
if len(template) == 0:
print('Empty Template')
continue
t.st = template
t.name = template.sort(['starttime'])[0]. \
stats.starttime.strftime('%Y_%m_%dt%H_%M_%S')
t.lowcut = lowcut
t.highcut = highcut
t.filt_order = filt_order
t.samp_rate = samp_rate
t.process_length = process_len
t.prepick = prepick
event.comments.append(Comment(
text="eqcorrscan_template_" + t.name,
creation_info=CreationInfo(agency='eqcorrscan',
author=getpass.getuser())))
t.event = event
self.templates.append(t)
return self | python | def construct(self, method, lowcut, highcut, samp_rate, filt_order,
prepick, save_progress=False, **kwargs):
"""
Generate a Tribe of Templates.
See :mod:`eqcorrscan.core.template_gen` for available methods.
:param method: Method of Tribe generation.
:param kwargs: Arguments for the given method.
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
.. Note::
Methods: `from_contbase`, `from_sfile` and `from_sac` are not
supported by Tribe.construct and must use Template.construct.
.. Note::
The Method `multi_template_gen` is not supported because the
processing parameters for the stream are not known. Use
`from_meta_file` instead.
.. Note:: Templates will be named according to their start-time.
"""
templates, catalog, process_lengths = template_gen.template_gen(
method=method, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
return_event=True, save_progress=save_progress, **kwargs)
for template, event, process_len in zip(templates, catalog,
process_lengths):
t = Template()
for tr in template:
if not np.any(tr.data.astype(np.float16)):
warnings.warn('Data are zero in float16, missing data,'
' will not use: %s' % tr.id)
template.remove(tr)
if len(template) == 0:
print('Empty Template')
continue
t.st = template
t.name = template.sort(['starttime'])[0]. \
stats.starttime.strftime('%Y_%m_%dt%H_%M_%S')
t.lowcut = lowcut
t.highcut = highcut
t.filt_order = filt_order
t.samp_rate = samp_rate
t.process_length = process_len
t.prepick = prepick
event.comments.append(Comment(
text="eqcorrscan_template_" + t.name,
creation_info=CreationInfo(agency='eqcorrscan',
author=getpass.getuser())))
t.event = event
self.templates.append(t)
return self | [
"def",
"construct",
"(",
"self",
",",
"method",
",",
"lowcut",
",",
"highcut",
",",
"samp_rate",
",",
"filt_order",
",",
"prepick",
",",
"save_progress",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"templates",
",",
"catalog",
",",
"process_lengths",
... | Generate a Tribe of Templates.
See :mod:`eqcorrscan.core.template_gen` for available methods.
:param method: Method of Tribe generation.
:param kwargs: Arguments for the given method.
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
.. Note::
Methods: `from_contbase`, `from_sfile` and `from_sac` are not
supported by Tribe.construct and must use Template.construct.
.. Note::
The Method `multi_template_gen` is not supported because the
processing parameters for the stream are not known. Use
`from_meta_file` instead.
.. Note:: Templates will be named according to their start-time. | [
"Generate",
"a",
"Tribe",
"of",
"Templates",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L2926-L2995 | train | 203,336 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Detection.write | def write(self, fname, append=True):
"""
Write detection to csv formatted file.
Will append if append==True and file exists
:type fname: str
:param fname: Full path to file to open and write to.
:type append: bool
:param append: Set to true to append to an existing file, if True \
and file doesn't exist, will create new file and warn. If False
will overwrite old files.
"""
mode = 'w'
if append and os.path.isfile(fname):
mode = 'a'
header = '; '.join(['Template name', 'Detection time (UTC)',
'Number of channels', 'Channel list',
'Detection value', 'Threshold',
'Threshold type', 'Input threshold',
'Detection type'])
print_str = "{0}; {1}; {2}; {3}; {4}; {5}; {6}; {7}; {8}\n".format(
self.template_name, self.detect_time, self.no_chans,
self.chans, self.detect_val, self.threshold,
self.threshold_type, self.threshold_input, self.typeofdet)
with open(fname, mode) as _f:
_f.write(header + '\n') # Write a header for the file
_f.write(print_str) | python | def write(self, fname, append=True):
"""
Write detection to csv formatted file.
Will append if append==True and file exists
:type fname: str
:param fname: Full path to file to open and write to.
:type append: bool
:param append: Set to true to append to an existing file, if True \
and file doesn't exist, will create new file and warn. If False
will overwrite old files.
"""
mode = 'w'
if append and os.path.isfile(fname):
mode = 'a'
header = '; '.join(['Template name', 'Detection time (UTC)',
'Number of channels', 'Channel list',
'Detection value', 'Threshold',
'Threshold type', 'Input threshold',
'Detection type'])
print_str = "{0}; {1}; {2}; {3}; {4}; {5}; {6}; {7}; {8}\n".format(
self.template_name, self.detect_time, self.no_chans,
self.chans, self.detect_val, self.threshold,
self.threshold_type, self.threshold_input, self.typeofdet)
with open(fname, mode) as _f:
_f.write(header + '\n') # Write a header for the file
_f.write(print_str) | [
"def",
"write",
"(",
"self",
",",
"fname",
",",
"append",
"=",
"True",
")",
":",
"mode",
"=",
"'w'",
"if",
"append",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"fname",
")",
":",
"mode",
"=",
"'a'",
"header",
"=",
"'; '",
".",
"join",
"(",
"... | Write detection to csv formatted file.
Will append if append==True and file exists
:type fname: str
:param fname: Full path to file to open and write to.
:type append: bool
:param append: Set to true to append to an existing file, if True \
and file doesn't exist, will create new file and warn. If False
will overwrite old files. | [
"Write",
"detection",
"to",
"csv",
"formatted",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3139-L3166 | train | 203,337 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | Detection._calculate_event | def _calculate_event(self, template=None, template_st=None):
"""
Calculate an event for this detection using a given template.
:type template: Template
:param template: The template that made this detection
:type template_st: `obspy.core.stream.Stream`
:param template_st:
Template stream, used to calculate pick times, not needed if
template is given.
.. rubric:: Note
Works in place on Detection - over-writes previous events.
Does not correct for pre-pick.
"""
if template is not None and template.name != self.template_name:
print("Template names do not match: {0}: {1}".format(
template.name, self.template_name))
return
# Detect time must be valid QuakeML uri within resource_id.
# This will write a formatted string which is still
# readable by UTCDateTime
det_time = str(self.detect_time.strftime('%Y%m%dT%H%M%S.%f'))
ev = Event(resource_id=ResourceIdentifier(
id=self.template_name + '_' + det_time,
prefix='smi:local'))
ev.creation_info = CreationInfo(
author='EQcorrscan', creation_time=UTCDateTime())
ev.comments.append(
Comment(text='threshold={0}'.format(self.threshold)))
ev.comments.append(
Comment(text='detect_val={0}'.format(self.detect_val)))
if self.chans is not None:
ev.comments.append(
Comment(text='channels used: {0}'.format(
' '.join([str(pair) for pair in self.chans]))))
if template is not None:
template_st = template.st
min_template_tm = min(
[tr.stats.starttime for tr in template_st])
for tr in template_st:
if (tr.stats.station, tr.stats.channel) \
not in self.chans:
continue
elif tr.stats.__contains__("not_in_original"):
continue
else:
pick_time = self.detect_time + (
tr.stats.starttime - min_template_tm)
ev.picks.append(Pick(
time=pick_time, waveform_id=WaveformStreamID(
network_code=tr.stats.network,
station_code=tr.stats.station,
channel_code=tr.stats.channel,
location_code=tr.stats.location)))
self.event = ev
return | python | def _calculate_event(self, template=None, template_st=None):
"""
Calculate an event for this detection using a given template.
:type template: Template
:param template: The template that made this detection
:type template_st: `obspy.core.stream.Stream`
:param template_st:
Template stream, used to calculate pick times, not needed if
template is given.
.. rubric:: Note
Works in place on Detection - over-writes previous events.
Does not correct for pre-pick.
"""
if template is not None and template.name != self.template_name:
print("Template names do not match: {0}: {1}".format(
template.name, self.template_name))
return
# Detect time must be valid QuakeML uri within resource_id.
# This will write a formatted string which is still
# readable by UTCDateTime
det_time = str(self.detect_time.strftime('%Y%m%dT%H%M%S.%f'))
ev = Event(resource_id=ResourceIdentifier(
id=self.template_name + '_' + det_time,
prefix='smi:local'))
ev.creation_info = CreationInfo(
author='EQcorrscan', creation_time=UTCDateTime())
ev.comments.append(
Comment(text='threshold={0}'.format(self.threshold)))
ev.comments.append(
Comment(text='detect_val={0}'.format(self.detect_val)))
if self.chans is not None:
ev.comments.append(
Comment(text='channels used: {0}'.format(
' '.join([str(pair) for pair in self.chans]))))
if template is not None:
template_st = template.st
min_template_tm = min(
[tr.stats.starttime for tr in template_st])
for tr in template_st:
if (tr.stats.station, tr.stats.channel) \
not in self.chans:
continue
elif tr.stats.__contains__("not_in_original"):
continue
else:
pick_time = self.detect_time + (
tr.stats.starttime - min_template_tm)
ev.picks.append(Pick(
time=pick_time, waveform_id=WaveformStreamID(
network_code=tr.stats.network,
station_code=tr.stats.station,
channel_code=tr.stats.channel,
location_code=tr.stats.location)))
self.event = ev
return | [
"def",
"_calculate_event",
"(",
"self",
",",
"template",
"=",
"None",
",",
"template_st",
"=",
"None",
")",
":",
"if",
"template",
"is",
"not",
"None",
"and",
"template",
".",
"name",
"!=",
"self",
".",
"template_name",
":",
"print",
"(",
"\"Template names... | Calculate an event for this detection using a given template.
:type template: Template
:param template: The template that made this detection
:type template_st: `obspy.core.stream.Stream`
:param template_st:
Template stream, used to calculate pick times, not needed if
template is given.
.. rubric:: Note
Works in place on Detection - over-writes previous events.
Does not correct for pre-pick. | [
"Calculate",
"an",
"event",
"for",
"this",
"detection",
"using",
"a",
"given",
"template",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3168-L3224 | train | 203,338 |
eqcorrscan/EQcorrscan | eqcorrscan/tutorials/template_creation.py | mktemplates | def mktemplates(network_code='GEONET',
publicIDs=['2016p008122', '2016p008353', '2016p008155',
'2016p008194'], plot=True):
"""Functional wrapper to make templates"""
# We want to download some QuakeML files from the New Zealand GeoNet
# network, GeoNet currently doesn't support FDSN event queries, so we
# have to work around to download quakeml from their quakeml.geonet site.
client = Client(network_code)
# We want to download a few events from an earthquake sequence, these are
# identified by publiID numbers, given as arguments
catalog = Catalog()
for publicID in publicIDs:
if network_code == 'GEONET':
data_stream = client._download(
'http://quakeml.geonet.org.nz/quakeml/1.2/' + publicID)
data_stream.seek(0, 0)
catalog += read_events(data_stream, format="quakeml")
data_stream.close()
else:
catalog += client.get_events(
eventid=publicID, includearrivals=True)
# Lets plot the catalog to see what we have
if plot:
catalog.plot(projection='local', resolution='h')
# We don't need all the picks, lets take the information from the
# five most used stations - note that this is done to reduce computational
# costs.
catalog = filter_picks(catalog, top_n_picks=5)
# We only want the P picks in this example, but you can use others or all
# picks if you want.
for event in catalog:
for pick in event.picks:
if pick.phase_hint == 'S':
event.picks.remove(pick)
# Now we can generate the templates
templates = template_gen.template_gen(
method='from_client', catalog=catalog, client_id=network_code,
lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4, length=3.0,
prepick=0.15, swin='all', process_len=3600, debug=0, plot=plot)
# We now have a series of templates! Using Obspy's Stream.write() method we
# can save these to disk for later use. We will do that now for use in the
# following tutorials.
for i, template in enumerate(templates):
template.write('tutorial_template_' + str(i) + '.ms', format='MSEED')
# Note that this will warn you about data types. As we don't care
# at the moment, whatever obspy chooses is fine.
return | python | def mktemplates(network_code='GEONET',
publicIDs=['2016p008122', '2016p008353', '2016p008155',
'2016p008194'], plot=True):
"""Functional wrapper to make templates"""
# We want to download some QuakeML files from the New Zealand GeoNet
# network, GeoNet currently doesn't support FDSN event queries, so we
# have to work around to download quakeml from their quakeml.geonet site.
client = Client(network_code)
# We want to download a few events from an earthquake sequence, these are
# identified by publiID numbers, given as arguments
catalog = Catalog()
for publicID in publicIDs:
if network_code == 'GEONET':
data_stream = client._download(
'http://quakeml.geonet.org.nz/quakeml/1.2/' + publicID)
data_stream.seek(0, 0)
catalog += read_events(data_stream, format="quakeml")
data_stream.close()
else:
catalog += client.get_events(
eventid=publicID, includearrivals=True)
# Lets plot the catalog to see what we have
if plot:
catalog.plot(projection='local', resolution='h')
# We don't need all the picks, lets take the information from the
# five most used stations - note that this is done to reduce computational
# costs.
catalog = filter_picks(catalog, top_n_picks=5)
# We only want the P picks in this example, but you can use others or all
# picks if you want.
for event in catalog:
for pick in event.picks:
if pick.phase_hint == 'S':
event.picks.remove(pick)
# Now we can generate the templates
templates = template_gen.template_gen(
method='from_client', catalog=catalog, client_id=network_code,
lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4, length=3.0,
prepick=0.15, swin='all', process_len=3600, debug=0, plot=plot)
# We now have a series of templates! Using Obspy's Stream.write() method we
# can save these to disk for later use. We will do that now for use in the
# following tutorials.
for i, template in enumerate(templates):
template.write('tutorial_template_' + str(i) + '.ms', format='MSEED')
# Note that this will warn you about data types. As we don't care
# at the moment, whatever obspy chooses is fine.
return | [
"def",
"mktemplates",
"(",
"network_code",
"=",
"'GEONET'",
",",
"publicIDs",
"=",
"[",
"'2016p008122'",
",",
"'2016p008353'",
",",
"'2016p008155'",
",",
"'2016p008194'",
"]",
",",
"plot",
"=",
"True",
")",
":",
"# We want to download some QuakeML files from the New Z... | Functional wrapper to make templates | [
"Functional",
"wrapper",
"to",
"make",
"templates"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/tutorials/template_creation.py#L14-L66 | train | 203,339 |
eqcorrscan/EQcorrscan | eqcorrscan/core/bright_lights.py | _read_tt | def _read_tt(path, stations, phase, phaseout='S', ps_ratio=1.68,
lags_switch=True):
"""
Read in .csv files of slowness generated from Grid2Time.
Converts these data to a useful format here.
It should be noted that this can read either P or S travel-time grids, not
both at the moment.
:type path: str
:param path: The path to the .csv Grid2Time outputs
:type stations: list
:param stations: List of station names to read slowness files for.
:type phase: str
:param phase: Input phase type.
:type phaseout: str
:param phaseout: What phase to return the lagtimes in.
:type ps_ratio: float
:param ps_ratio: p to s ratio for conversion
:type lags_switch: bool
:param lags_switch:
Return lags or raw travel-times, if set to true will return lags.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
.. note::
This function currently needs comma separated grid files in
NonLinLoc format. Only certain versions of NonLinLoc write these csv
files, however it should be possible to read the binary files directly.
If you find you need this capability let us know and we can try and
implement it.
"""
# Locate the slowness file information
gridfiles = []
stations_out = []
for station in stations:
gridfiles += (glob.glob(path + '*.' + phase + '.' + station +
'.time.csv'))
if glob.glob(path + '*.' + phase + '.' + station + '*.csv'):
stations_out += [station]
# Read the files
allnodes = []
for gridfile in gridfiles:
print(' Reading slowness from: ' + gridfile)
f = open(gridfile, 'r')
grid = csv.reader(f, delimiter=str(' '))
traveltime = []
nodes = []
for row in grid:
nodes.append((float(row[0]), float(row[1]), float(row[2])))
traveltime.append(float(row[3]))
traveltime = np.array(traveltime)
if not phase == phaseout:
if phase == 'S':
traveltime = traveltime / ps_ratio
else:
traveltime = traveltime * ps_ratio
if lags_switch:
lags = traveltime - min(traveltime)
else:
lags = traveltime
if 'alllags' not in locals():
alllags = [lags]
else:
alllags = np.concatenate((alllags, [lags]), axis=0)
allnodes = nodes
# each element of allnodes should be the same as the
# other one, e.g. for each station the grid must be the
# same, hence allnodes=nodes
f.close()
alllags = np.array(alllags)
return stations_out, allnodes, alllags | python | def _read_tt(path, stations, phase, phaseout='S', ps_ratio=1.68,
lags_switch=True):
"""
Read in .csv files of slowness generated from Grid2Time.
Converts these data to a useful format here.
It should be noted that this can read either P or S travel-time grids, not
both at the moment.
:type path: str
:param path: The path to the .csv Grid2Time outputs
:type stations: list
:param stations: List of station names to read slowness files for.
:type phase: str
:param phase: Input phase type.
:type phaseout: str
:param phaseout: What phase to return the lagtimes in.
:type ps_ratio: float
:param ps_ratio: p to s ratio for conversion
:type lags_switch: bool
:param lags_switch:
Return lags or raw travel-times, if set to true will return lags.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
.. note::
This function currently needs comma separated grid files in
NonLinLoc format. Only certain versions of NonLinLoc write these csv
files, however it should be possible to read the binary files directly.
If you find you need this capability let us know and we can try and
implement it.
"""
# Locate the slowness file information
gridfiles = []
stations_out = []
for station in stations:
gridfiles += (glob.glob(path + '*.' + phase + '.' + station +
'.time.csv'))
if glob.glob(path + '*.' + phase + '.' + station + '*.csv'):
stations_out += [station]
# Read the files
allnodes = []
for gridfile in gridfiles:
print(' Reading slowness from: ' + gridfile)
f = open(gridfile, 'r')
grid = csv.reader(f, delimiter=str(' '))
traveltime = []
nodes = []
for row in grid:
nodes.append((float(row[0]), float(row[1]), float(row[2])))
traveltime.append(float(row[3]))
traveltime = np.array(traveltime)
if not phase == phaseout:
if phase == 'S':
traveltime = traveltime / ps_ratio
else:
traveltime = traveltime * ps_ratio
if lags_switch:
lags = traveltime - min(traveltime)
else:
lags = traveltime
if 'alllags' not in locals():
alllags = [lags]
else:
alllags = np.concatenate((alllags, [lags]), axis=0)
allnodes = nodes
# each element of allnodes should be the same as the
# other one, e.g. for each station the grid must be the
# same, hence allnodes=nodes
f.close()
alllags = np.array(alllags)
return stations_out, allnodes, alllags | [
"def",
"_read_tt",
"(",
"path",
",",
"stations",
",",
"phase",
",",
"phaseout",
"=",
"'S'",
",",
"ps_ratio",
"=",
"1.68",
",",
"lags_switch",
"=",
"True",
")",
":",
"# Locate the slowness file information",
"gridfiles",
"=",
"[",
"]",
"stations_out",
"=",
"[... | Read in .csv files of slowness generated from Grid2Time.
Converts these data to a useful format here.
It should be noted that this can read either P or S travel-time grids, not
both at the moment.
:type path: str
:param path: The path to the .csv Grid2Time outputs
:type stations: list
:param stations: List of station names to read slowness files for.
:type phase: str
:param phase: Input phase type.
:type phaseout: str
:param phaseout: What phase to return the lagtimes in.
:type ps_ratio: float
:param ps_ratio: p to s ratio for conversion
:type lags_switch: bool
:param lags_switch:
Return lags or raw travel-times, if set to true will return lags.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
.. note::
This function currently needs comma separated grid files in
NonLinLoc format. Only certain versions of NonLinLoc write these csv
files, however it should be possible to read the binary files directly.
If you find you need this capability let us know and we can try and
implement it. | [
"Read",
"in",
".",
"csv",
"files",
"of",
"slowness",
"generated",
"from",
"Grid2Time",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/bright_lights.py#L60-L144 | train | 203,340 |
eqcorrscan/EQcorrscan | eqcorrscan/core/bright_lights.py | _resample_grid | def _resample_grid(stations, nodes, lags, mindepth, maxdepth, corners):
"""
Resample the lagtime grid to a given volume.
For use if the grid from Grid2Time is too large or you want to run a
faster, downsampled scan.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type mindepth: float
:param mindepth: Upper limit of volume
:type maxdepth: float
:param maxdepth: Lower limit of volume
:type corners: matplotlib.path.Path
:param corners:
matplotlib Path of the corners for the 2D polygon to cut to in lat and
lon.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
"""
resamp_nodes = []
resamp_lags = []
# Cut the volume
for i, node in enumerate(nodes):
# If the node is within the volume range, keep it
if mindepth < float(node[2]) < maxdepth and\
corners.contains_point(node[0:2]):
resamp_nodes.append(node)
resamp_lags.append([lags[:, i]])
# Reshape the lags
print(np.shape(resamp_lags))
resamp_lags = np.reshape(resamp_lags, (len(resamp_lags), len(stations))).T
# Resample the nodes - they are sorted in order of size with largest long
# then largest lat, then depth.
print(' '.join(['Grid now has ', str(len(resamp_nodes)), 'nodes']))
return stations, resamp_nodes, resamp_lags | python | def _resample_grid(stations, nodes, lags, mindepth, maxdepth, corners):
"""
Resample the lagtime grid to a given volume.
For use if the grid from Grid2Time is too large or you want to run a
faster, downsampled scan.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type mindepth: float
:param mindepth: Upper limit of volume
:type maxdepth: float
:param maxdepth: Lower limit of volume
:type corners: matplotlib.path.Path
:param corners:
matplotlib Path of the corners for the 2D polygon to cut to in lat and
lon.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
"""
resamp_nodes = []
resamp_lags = []
# Cut the volume
for i, node in enumerate(nodes):
# If the node is within the volume range, keep it
if mindepth < float(node[2]) < maxdepth and\
corners.contains_point(node[0:2]):
resamp_nodes.append(node)
resamp_lags.append([lags[:, i]])
# Reshape the lags
print(np.shape(resamp_lags))
resamp_lags = np.reshape(resamp_lags, (len(resamp_lags), len(stations))).T
# Resample the nodes - they are sorted in order of size with largest long
# then largest lat, then depth.
print(' '.join(['Grid now has ', str(len(resamp_nodes)), 'nodes']))
return stations, resamp_nodes, resamp_lags | [
"def",
"_resample_grid",
"(",
"stations",
",",
"nodes",
",",
"lags",
",",
"mindepth",
",",
"maxdepth",
",",
"corners",
")",
":",
"resamp_nodes",
"=",
"[",
"]",
"resamp_lags",
"=",
"[",
"]",
"# Cut the volume",
"for",
"i",
",",
"node",
"in",
"enumerate",
... | Resample the lagtime grid to a given volume.
For use if the grid from Grid2Time is too large or you want to run a
faster, downsampled scan.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type mindepth: float
:param mindepth: Upper limit of volume
:type maxdepth: float
:param maxdepth: Lower limit of volume
:type corners: matplotlib.path.Path
:param corners:
matplotlib Path of the corners for the 2D polygon to cut to in lat and
lon.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth. | [
"Resample",
"the",
"lagtime",
"grid",
"to",
"a",
"given",
"volume",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/bright_lights.py#L147-L205 | train | 203,341 |
eqcorrscan/EQcorrscan | eqcorrscan/core/bright_lights.py | _rm_similarlags | def _rm_similarlags(stations, nodes, lags, threshold):
"""
Remove nodes that have a very similar network moveout to another node.
This function will, for each node, calculate the difference in lagtime
at each station at every node, then sum these for each node to get a
cumulative difference in network moveout. This will result in an
array of arrays with zeros on the diagonal.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type threshold: float
:param threshold: Threshold for removal in seconds
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
"""
netdif = abs((lags.T - lags.T[0]).sum(axis=1).reshape(1, len(nodes))) \
> threshold
for i in range(len(nodes)):
_netdif = abs((lags.T -
lags.T[i]).sum(axis=1).reshape(1, len(nodes)))\
> threshold
netdif = np.concatenate((netdif, _netdif), axis=0)
sys.stdout.write("\r" + str(float(i) // len(nodes) * 100) + "% \r")
sys.stdout.flush()
nodes_out = [nodes[0]]
node_indices = [0]
print("\n")
print(len(nodes))
for i in range(1, len(nodes)):
if np.all(netdif[i][node_indices]):
node_indices.append(i)
nodes_out.append(nodes[i])
lags_out = lags.T[node_indices].T
print("Removed " + str(len(nodes) - len(nodes_out)) + " duplicate nodes")
return stations, nodes_out, lags_out | python | def _rm_similarlags(stations, nodes, lags, threshold):
"""
Remove nodes that have a very similar network moveout to another node.
This function will, for each node, calculate the difference in lagtime
at each station at every node, then sum these for each node to get a
cumulative difference in network moveout. This will result in an
array of arrays with zeros on the diagonal.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type threshold: float
:param threshold: Threshold for removal in seconds
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth.
"""
netdif = abs((lags.T - lags.T[0]).sum(axis=1).reshape(1, len(nodes))) \
> threshold
for i in range(len(nodes)):
_netdif = abs((lags.T -
lags.T[i]).sum(axis=1).reshape(1, len(nodes)))\
> threshold
netdif = np.concatenate((netdif, _netdif), axis=0)
sys.stdout.write("\r" + str(float(i) // len(nodes) * 100) + "% \r")
sys.stdout.flush()
nodes_out = [nodes[0]]
node_indices = [0]
print("\n")
print(len(nodes))
for i in range(1, len(nodes)):
if np.all(netdif[i][node_indices]):
node_indices.append(i)
nodes_out.append(nodes[i])
lags_out = lags.T[node_indices].T
print("Removed " + str(len(nodes) - len(nodes_out)) + " duplicate nodes")
return stations, nodes_out, lags_out | [
"def",
"_rm_similarlags",
"(",
"stations",
",",
"nodes",
",",
"lags",
",",
"threshold",
")",
":",
"netdif",
"=",
"abs",
"(",
"(",
"lags",
".",
"T",
"-",
"lags",
".",
"T",
"[",
"0",
"]",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"reshape... | Remove nodes that have a very similar network moveout to another node.
This function will, for each node, calculate the difference in lagtime
at each station at every node, then sum these for each node to get a
cumulative difference in network moveout. This will result in an
array of arrays with zeros on the diagonal.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type threshold: float
:param threshold: Threshold for removal in seconds
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth. | [
"Remove",
"nodes",
"that",
"have",
"a",
"very",
"similar",
"network",
"moveout",
"to",
"another",
"node",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/bright_lights.py#L208-L266 | train | 203,342 |
eqcorrscan/EQcorrscan | eqcorrscan/core/bright_lights.py | _cum_net_resp | def _cum_net_resp(node_lis, instance=0):
"""
Compute the cumulative network response by reading saved energy .npy files.
:type node_lis: numpy.ndarray
:param node_lis: List of nodes (ints) to read from
:type instance: int
:param instance: Instance flag for parallel workflows, defaults to 0.
:returns: cumulative network response
:rtype: numpy.ndarray
:returns: node indices for each sample of the cumulative network response.
:rtype: list
"""
cum_net_resp = np.load('tmp' + str(instance) +
'/node_' + str(node_lis[0]) + '.npy')[0]
os.remove('tmp' + str(instance) + '/node_' + str(node_lis[0]) + '.npy')
indices = np.ones(len(cum_net_resp)) * node_lis[0]
for i in node_lis[1:]:
node_energy = np.load('tmp' + str(instance) + '/node_' +
str(i) + '.npy')[0]
updated_indices = np.argmax([cum_net_resp, node_energy], axis=0)
temp = np.array([cum_net_resp, node_energy])
cum_net_resp = np.array([temp[updated_indices[j]][j]
for j in range(len(updated_indices))])
del temp, node_energy
updated_indices[updated_indices == 1] = i
indices = updated_indices
os.remove('tmp' + str(instance) + '/node_' + str(i) + '.npy')
return cum_net_resp, indices | python | def _cum_net_resp(node_lis, instance=0):
"""
Compute the cumulative network response by reading saved energy .npy files.
:type node_lis: numpy.ndarray
:param node_lis: List of nodes (ints) to read from
:type instance: int
:param instance: Instance flag for parallel workflows, defaults to 0.
:returns: cumulative network response
:rtype: numpy.ndarray
:returns: node indices for each sample of the cumulative network response.
:rtype: list
"""
cum_net_resp = np.load('tmp' + str(instance) +
'/node_' + str(node_lis[0]) + '.npy')[0]
os.remove('tmp' + str(instance) + '/node_' + str(node_lis[0]) + '.npy')
indices = np.ones(len(cum_net_resp)) * node_lis[0]
for i in node_lis[1:]:
node_energy = np.load('tmp' + str(instance) + '/node_' +
str(i) + '.npy')[0]
updated_indices = np.argmax([cum_net_resp, node_energy], axis=0)
temp = np.array([cum_net_resp, node_energy])
cum_net_resp = np.array([temp[updated_indices[j]][j]
for j in range(len(updated_indices))])
del temp, node_energy
updated_indices[updated_indices == 1] = i
indices = updated_indices
os.remove('tmp' + str(instance) + '/node_' + str(i) + '.npy')
return cum_net_resp, indices | [
"def",
"_cum_net_resp",
"(",
"node_lis",
",",
"instance",
"=",
"0",
")",
":",
"cum_net_resp",
"=",
"np",
".",
"load",
"(",
"'tmp'",
"+",
"str",
"(",
"instance",
")",
"+",
"'/node_'",
"+",
"str",
"(",
"node_lis",
"[",
"0",
"]",
")",
"+",
"'.npy'",
"... | Compute the cumulative network response by reading saved energy .npy files.
:type node_lis: numpy.ndarray
:param node_lis: List of nodes (ints) to read from
:type instance: int
:param instance: Instance flag for parallel workflows, defaults to 0.
:returns: cumulative network response
:rtype: numpy.ndarray
:returns: node indices for each sample of the cumulative network response.
:rtype: list | [
"Compute",
"the",
"cumulative",
"network",
"response",
"by",
"reading",
"saved",
"energy",
".",
"npy",
"files",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/bright_lights.py#L385-L414 | train | 203,343 |
eqcorrscan/EQcorrscan | eqcorrscan/core/bright_lights.py | _find_detections | def _find_detections(cum_net_resp, nodes, threshold, thresh_type,
samp_rate, realstations, length):
"""
Find detections within the cumulative network response.
:type cum_net_resp: numpy.ndarray
:param cum_net_resp: Array of cumulative network response for nodes
:type nodes: list
:param nodes: Nodes associated with the source of energy in the \
cum_net_resp
:type threshold: float
:param threshold: Threshold value
:type thresh_type: str
:param thresh_type: Either MAD (Median Absolute Deviation) or abs \
(absolute) or RMS (Root Mean Squared)
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type realstations: list
:param realstations:
List of stations used to make the cumulative network response, will be
reported in the :class:`eqcorrscan.core.match_filter.Detection`
:type length: float
:param length: Maximum length of peak to look for in seconds
:returns:
Detections as :class:`eqcorrscan.core.match_filter.Detection` objects.
:rtype: list
"""
cum_net_resp = np.nan_to_num(cum_net_resp) # Force no NaNs
if np.isnan(cum_net_resp).any():
raise ValueError("Nans present")
print('Mean of data is: ' + str(np.median(cum_net_resp)))
print('RMS of data is: ' + str(np.sqrt(np.mean(np.square(cum_net_resp)))))
print('MAD of data is: ' + str(np.median(np.abs(cum_net_resp))))
if thresh_type == 'MAD':
thresh = (np.median(np.abs(cum_net_resp)) * threshold)
elif thresh_type == 'abs':
thresh = threshold
elif thresh_type == 'RMS':
thresh = _rms(cum_net_resp) * threshold
print('Threshold is set to: ' + str(thresh))
print('Max of data is: ' + str(max(cum_net_resp)))
peaks = findpeaks.find_peaks2_short(cum_net_resp, thresh,
length * samp_rate, debug=0)
detections = []
if peaks:
for peak in peaks:
node = nodes[peak[1]]
detections.append(
Detection(template_name=str(node[0]) + '_' +
str(node[1]) + '_' + str(node[2]),
detect_time=peak[1] / samp_rate,
no_chans=len(realstations), detect_val=peak[0],
threshold=thresh, typeofdet='brightness',
chans=realstations, id=str(node[0]) + '_' +
str(node[1]) + '_' + str(node[2]) +
str(peak[1] / samp_rate),
threshold_type=thresh_type,
threshold_input=threshold))
else:
detections = []
print('I have found ' + str(len(peaks)) + ' possible detections')
return detections | python | def _find_detections(cum_net_resp, nodes, threshold, thresh_type,
samp_rate, realstations, length):
"""
Find detections within the cumulative network response.
:type cum_net_resp: numpy.ndarray
:param cum_net_resp: Array of cumulative network response for nodes
:type nodes: list
:param nodes: Nodes associated with the source of energy in the \
cum_net_resp
:type threshold: float
:param threshold: Threshold value
:type thresh_type: str
:param thresh_type: Either MAD (Median Absolute Deviation) or abs \
(absolute) or RMS (Root Mean Squared)
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type realstations: list
:param realstations:
List of stations used to make the cumulative network response, will be
reported in the :class:`eqcorrscan.core.match_filter.Detection`
:type length: float
:param length: Maximum length of peak to look for in seconds
:returns:
Detections as :class:`eqcorrscan.core.match_filter.Detection` objects.
:rtype: list
"""
cum_net_resp = np.nan_to_num(cum_net_resp) # Force no NaNs
if np.isnan(cum_net_resp).any():
raise ValueError("Nans present")
print('Mean of data is: ' + str(np.median(cum_net_resp)))
print('RMS of data is: ' + str(np.sqrt(np.mean(np.square(cum_net_resp)))))
print('MAD of data is: ' + str(np.median(np.abs(cum_net_resp))))
if thresh_type == 'MAD':
thresh = (np.median(np.abs(cum_net_resp)) * threshold)
elif thresh_type == 'abs':
thresh = threshold
elif thresh_type == 'RMS':
thresh = _rms(cum_net_resp) * threshold
print('Threshold is set to: ' + str(thresh))
print('Max of data is: ' + str(max(cum_net_resp)))
peaks = findpeaks.find_peaks2_short(cum_net_resp, thresh,
length * samp_rate, debug=0)
detections = []
if peaks:
for peak in peaks:
node = nodes[peak[1]]
detections.append(
Detection(template_name=str(node[0]) + '_' +
str(node[1]) + '_' + str(node[2]),
detect_time=peak[1] / samp_rate,
no_chans=len(realstations), detect_val=peak[0],
threshold=thresh, typeofdet='brightness',
chans=realstations, id=str(node[0]) + '_' +
str(node[1]) + '_' + str(node[2]) +
str(peak[1] / samp_rate),
threshold_type=thresh_type,
threshold_input=threshold))
else:
detections = []
print('I have found ' + str(len(peaks)) + ' possible detections')
return detections | [
"def",
"_find_detections",
"(",
"cum_net_resp",
",",
"nodes",
",",
"threshold",
",",
"thresh_type",
",",
"samp_rate",
",",
"realstations",
",",
"length",
")",
":",
"cum_net_resp",
"=",
"np",
".",
"nan_to_num",
"(",
"cum_net_resp",
")",
"# Force no NaNs",
"if",
... | Find detections within the cumulative network response.
:type cum_net_resp: numpy.ndarray
:param cum_net_resp: Array of cumulative network response for nodes
:type nodes: list
:param nodes: Nodes associated with the source of energy in the \
cum_net_resp
:type threshold: float
:param threshold: Threshold value
:type thresh_type: str
:param thresh_type: Either MAD (Median Absolute Deviation) or abs \
(absolute) or RMS (Root Mean Squared)
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type realstations: list
:param realstations:
List of stations used to make the cumulative network response, will be
reported in the :class:`eqcorrscan.core.match_filter.Detection`
:type length: float
:param length: Maximum length of peak to look for in seconds
:returns:
Detections as :class:`eqcorrscan.core.match_filter.Detection` objects.
:rtype: list | [
"Find",
"detections",
"within",
"the",
"cumulative",
"network",
"response",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/bright_lights.py#L417-L479 | train | 203,344 |
eqcorrscan/EQcorrscan | eqcorrscan/core/bright_lights.py | coherence | def coherence(stream_in, stations=['all'], clip=False):
"""
Determine the average network coherence of a given template or detection.
You will want your stream to contain only signal as noise will reduce the
coherence (assuming it is incoherent random noise).
:type stream_in: obspy.core.stream.Stream
:param stream_in: The stream of seismic data you want to calculate the \
coherence for.
:type stations: list
:param stations: List of stations to use for coherence, default is all
:type clip: tuple
:param clip: Default is to use all the data given (`False`) - \
tuple of start and end in seconds from start of trace
:return: tuple of coherence and number of channels used.
:rtype: tuple
"""
stream = stream_in.copy() # Copy the data before we remove stations
# First check that all channels in stream have data of the same length
maxlen = np.max([len(tr.data) for tr in stream])
if maxlen == 0:
warnings.warn('template without data')
return 0.0, len(stream)
if not stations[0] == 'all':
for tr in stream:
if tr.stats.station not in stations:
stream.remove(tr) # Remove stations we don't want to use
for tr in stream:
if not len(tr.data) == maxlen and not len(tr.data) == 0:
warnings.warn(tr.stats.station + '.' + tr.stats.channel +
' is not the same length, padding \n' +
'Length is ' + str(len(tr.data)) + ' samples')
pad = np.zeros(maxlen - len(tr.data))
if tr.stats.starttime.hour == 0:
tr.data = np.concatenate((pad, tr.data), axis=0)
else:
tr.data = np.concatenate((tr.data, pad), axis=0)
elif len(tr.data) == 0:
tr.data = np.zeros(maxlen)
# Clip the data to the set length
if clip:
for tr in stream:
tr.trim(tr.stats.starttime + clip[0], tr.stats.starttime + clip[1])
_coherence = 0.0
# Loop through channels and generate a correlation value for each
# unique cross-channel pairing
for i in range(len(stream)):
for j in range(i + 1, len(stream)):
_coherence += np.abs(normxcorr2(stream[i].data,
stream[j].data))[0][0]
_coherence = 2 * _coherence / (len(stream) * (len(stream) - 1))
return _coherence, len(stream) | python | def coherence(stream_in, stations=['all'], clip=False):
"""
Determine the average network coherence of a given template or detection.
You will want your stream to contain only signal as noise will reduce the
coherence (assuming it is incoherent random noise).
:type stream_in: obspy.core.stream.Stream
:param stream_in: The stream of seismic data you want to calculate the \
coherence for.
:type stations: list
:param stations: List of stations to use for coherence, default is all
:type clip: tuple
:param clip: Default is to use all the data given (`False`) - \
tuple of start and end in seconds from start of trace
:return: tuple of coherence and number of channels used.
:rtype: tuple
"""
stream = stream_in.copy() # Copy the data before we remove stations
# First check that all channels in stream have data of the same length
maxlen = np.max([len(tr.data) for tr in stream])
if maxlen == 0:
warnings.warn('template without data')
return 0.0, len(stream)
if not stations[0] == 'all':
for tr in stream:
if tr.stats.station not in stations:
stream.remove(tr) # Remove stations we don't want to use
for tr in stream:
if not len(tr.data) == maxlen and not len(tr.data) == 0:
warnings.warn(tr.stats.station + '.' + tr.stats.channel +
' is not the same length, padding \n' +
'Length is ' + str(len(tr.data)) + ' samples')
pad = np.zeros(maxlen - len(tr.data))
if tr.stats.starttime.hour == 0:
tr.data = np.concatenate((pad, tr.data), axis=0)
else:
tr.data = np.concatenate((tr.data, pad), axis=0)
elif len(tr.data) == 0:
tr.data = np.zeros(maxlen)
# Clip the data to the set length
if clip:
for tr in stream:
tr.trim(tr.stats.starttime + clip[0], tr.stats.starttime + clip[1])
_coherence = 0.0
# Loop through channels and generate a correlation value for each
# unique cross-channel pairing
for i in range(len(stream)):
for j in range(i + 1, len(stream)):
_coherence += np.abs(normxcorr2(stream[i].data,
stream[j].data))[0][0]
_coherence = 2 * _coherence / (len(stream) * (len(stream) - 1))
return _coherence, len(stream) | [
"def",
"coherence",
"(",
"stream_in",
",",
"stations",
"=",
"[",
"'all'",
"]",
",",
"clip",
"=",
"False",
")",
":",
"stream",
"=",
"stream_in",
".",
"copy",
"(",
")",
"# Copy the data before we remove stations",
"# First check that all channels in stream have data of ... | Determine the average network coherence of a given template or detection.
You will want your stream to contain only signal as noise will reduce the
coherence (assuming it is incoherent random noise).
:type stream_in: obspy.core.stream.Stream
:param stream_in: The stream of seismic data you want to calculate the \
coherence for.
:type stations: list
:param stations: List of stations to use for coherence, default is all
:type clip: tuple
:param clip: Default is to use all the data given (`False`) - \
tuple of start and end in seconds from start of trace
:return: tuple of coherence and number of channels used.
:rtype: tuple | [
"Determine",
"the",
"average",
"network",
"coherence",
"of",
"a",
"given",
"template",
"or",
"detection",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/bright_lights.py#L482-L535 | train | 203,345 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | _do_ffts | def _do_ffts(detector, stream, Nc):
"""
Perform ffts on data, detector and denominator boxcar
:type detector: eqcorrscan.core.subspace.Detector
:param detector: Detector object for doing detecting
:type stream: list of obspy.core.stream.Stream
:param stream: List of streams processed according to detector
:type Nc: int
:param Nc: Number of channels in data. 1 for non-multiplexed
:return: list of time-reversed detector(s) in freq domain
:rtype: list
:return: list of squared data stream(s) in freq domain
:rtype: list
:return: list of data stream(s) in freq domain
:return: detector-length boxcar in freq domain
:rtype: numpy.ndarray
:return: length of detector
:rtype: int
:return: length of data
:rtype: int
"""
min_fftlen = int(stream[0][0].data.shape[0] +
detector.data[0].shape[0] - Nc)
fftlen = scipy.fftpack.next_fast_len(min_fftlen)
mplen = stream[0][0].data.shape[0]
ulen = detector.data[0].shape[0]
num_st_fd = [np.fft.rfft(tr.data, n=fftlen)
for tr in stream[0]]
denom_st_fd = [np.fft.rfft(np.square(tr.data), n=fftlen)
for tr in stream[0]]
# Frequency domain of boxcar
w = np.fft.rfft(np.ones(detector.data[0].shape[0]),
n=fftlen)
# This should go into the detector object as in Detex
detector_fd = []
for dat_mat in detector.data:
detector_fd.append(np.array([np.fft.rfft(col[::-1], n=fftlen)
for col in dat_mat.T]))
return detector_fd, denom_st_fd, num_st_fd, w, ulen, mplen | python | def _do_ffts(detector, stream, Nc):
"""
Perform ffts on data, detector and denominator boxcar
:type detector: eqcorrscan.core.subspace.Detector
:param detector: Detector object for doing detecting
:type stream: list of obspy.core.stream.Stream
:param stream: List of streams processed according to detector
:type Nc: int
:param Nc: Number of channels in data. 1 for non-multiplexed
:return: list of time-reversed detector(s) in freq domain
:rtype: list
:return: list of squared data stream(s) in freq domain
:rtype: list
:return: list of data stream(s) in freq domain
:return: detector-length boxcar in freq domain
:rtype: numpy.ndarray
:return: length of detector
:rtype: int
:return: length of data
:rtype: int
"""
min_fftlen = int(stream[0][0].data.shape[0] +
detector.data[0].shape[0] - Nc)
fftlen = scipy.fftpack.next_fast_len(min_fftlen)
mplen = stream[0][0].data.shape[0]
ulen = detector.data[0].shape[0]
num_st_fd = [np.fft.rfft(tr.data, n=fftlen)
for tr in stream[0]]
denom_st_fd = [np.fft.rfft(np.square(tr.data), n=fftlen)
for tr in stream[0]]
# Frequency domain of boxcar
w = np.fft.rfft(np.ones(detector.data[0].shape[0]),
n=fftlen)
# This should go into the detector object as in Detex
detector_fd = []
for dat_mat in detector.data:
detector_fd.append(np.array([np.fft.rfft(col[::-1], n=fftlen)
for col in dat_mat.T]))
return detector_fd, denom_st_fd, num_st_fd, w, ulen, mplen | [
"def",
"_do_ffts",
"(",
"detector",
",",
"stream",
",",
"Nc",
")",
":",
"min_fftlen",
"=",
"int",
"(",
"stream",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"+",
"detector",
".",
"data",
"[",
"0",
"]",
".",
"shape",
... | Perform ffts on data, detector and denominator boxcar
:type detector: eqcorrscan.core.subspace.Detector
:param detector: Detector object for doing detecting
:type stream: list of obspy.core.stream.Stream
:param stream: List of streams processed according to detector
:type Nc: int
:param Nc: Number of channels in data. 1 for non-multiplexed
:return: list of time-reversed detector(s) in freq domain
:rtype: list
:return: list of squared data stream(s) in freq domain
:rtype: list
:return: list of data stream(s) in freq domain
:return: detector-length boxcar in freq domain
:rtype: numpy.ndarray
:return: length of detector
:rtype: int
:return: length of data
:rtype: int | [
"Perform",
"ffts",
"on",
"data",
"detector",
"and",
"denominator",
"boxcar"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L584-L624 | train | 203,346 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | _det_stat_freq | def _det_stat_freq(det_freq, data_freq_sq, data_freq, w, Nc, ulen, mplen):
"""
Compute detection statistic in the frequency domain
:type det_freq: numpy.ndarray
:param det_freq: detector in freq domain
:type data_freq_sq: numpy.ndarray
:param data_freq_sq: squared data in freq domain
:type data_freq: numpy.ndarray
:param data_freq: data in freq domain
:type w: numpy.ndarray
:param w: boxcar in freq domain
:type Nc: int
:param Nc: number of channels in data stream
:type ulen: int
:param ulen: length of detector
:type mplen: int
:param mplen: length of data
:return: Array of detection statistics
:rtype: numpy.ndarray
"""
num_cor = np.multiply(det_freq, data_freq) # Numerator convolution
den_cor = np.multiply(w, data_freq_sq) # Denominator convolution
# Do inverse fft
# First and last Nt - 1 samples are invalid; clip them off
num_ifft = np.real(np.fft.irfft(num_cor))[:, ulen-1:mplen:Nc]
denominator = np.real(np.fft.irfft(den_cor))[ulen-1:mplen:Nc]
# Ratio of projected to envelope energy = det_stat across all channels
result = np.sum(np.square(num_ifft), axis=0) / denominator
return result | python | def _det_stat_freq(det_freq, data_freq_sq, data_freq, w, Nc, ulen, mplen):
"""
Compute detection statistic in the frequency domain
:type det_freq: numpy.ndarray
:param det_freq: detector in freq domain
:type data_freq_sq: numpy.ndarray
:param data_freq_sq: squared data in freq domain
:type data_freq: numpy.ndarray
:param data_freq: data in freq domain
:type w: numpy.ndarray
:param w: boxcar in freq domain
:type Nc: int
:param Nc: number of channels in data stream
:type ulen: int
:param ulen: length of detector
:type mplen: int
:param mplen: length of data
:return: Array of detection statistics
:rtype: numpy.ndarray
"""
num_cor = np.multiply(det_freq, data_freq) # Numerator convolution
den_cor = np.multiply(w, data_freq_sq) # Denominator convolution
# Do inverse fft
# First and last Nt - 1 samples are invalid; clip them off
num_ifft = np.real(np.fft.irfft(num_cor))[:, ulen-1:mplen:Nc]
denominator = np.real(np.fft.irfft(den_cor))[ulen-1:mplen:Nc]
# Ratio of projected to envelope energy = det_stat across all channels
result = np.sum(np.square(num_ifft), axis=0) / denominator
return result | [
"def",
"_det_stat_freq",
"(",
"det_freq",
",",
"data_freq_sq",
",",
"data_freq",
",",
"w",
",",
"Nc",
",",
"ulen",
",",
"mplen",
")",
":",
"num_cor",
"=",
"np",
".",
"multiply",
"(",
"det_freq",
",",
"data_freq",
")",
"# Numerator convolution",
"den_cor",
... | Compute detection statistic in the frequency domain
:type det_freq: numpy.ndarray
:param det_freq: detector in freq domain
:type data_freq_sq: numpy.ndarray
:param data_freq_sq: squared data in freq domain
:type data_freq: numpy.ndarray
:param data_freq: data in freq domain
:type w: numpy.ndarray
:param w: boxcar in freq domain
:type Nc: int
:param Nc: number of channels in data stream
:type ulen: int
:param ulen: length of detector
:type mplen: int
:param mplen: length of data
:return: Array of detection statistics
:rtype: numpy.ndarray | [
"Compute",
"detection",
"statistic",
"in",
"the",
"frequency",
"domain"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L627-L657 | train | 203,347 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | multi | def multi(stream):
"""
Internal multiplexer for multiplex_detect.
:type stream: obspy.core.stream.Stream
:param stream: Stream to multiplex
:return: trace of multiplexed data
:rtype: obspy.core.trace.Trace
.. Note: Requires all channels to be the same length.
Maps a standard multiplexed stream of seismic data to a single traces of \
multiplexed data as follows:
Input:
x = [x1, x2, x3, ...]
y = [y1, y2, y3, ...]
z = [z1, z2, z3, ...]
Output:
xyz = [x1, y1, z1, x2, y2, z2, x3, y3, z3, ...]
"""
stack = stream[0].data
for tr in stream[1:]:
stack = np.dstack(np.array([stack, tr.data]))
multiplex = stack.reshape(stack.size, )
return multiplex | python | def multi(stream):
"""
Internal multiplexer for multiplex_detect.
:type stream: obspy.core.stream.Stream
:param stream: Stream to multiplex
:return: trace of multiplexed data
:rtype: obspy.core.trace.Trace
.. Note: Requires all channels to be the same length.
Maps a standard multiplexed stream of seismic data to a single traces of \
multiplexed data as follows:
Input:
x = [x1, x2, x3, ...]
y = [y1, y2, y3, ...]
z = [z1, z2, z3, ...]
Output:
xyz = [x1, y1, z1, x2, y2, z2, x3, y3, z3, ...]
"""
stack = stream[0].data
for tr in stream[1:]:
stack = np.dstack(np.array([stack, tr.data]))
multiplex = stack.reshape(stack.size, )
return multiplex | [
"def",
"multi",
"(",
"stream",
")",
":",
"stack",
"=",
"stream",
"[",
"0",
"]",
".",
"data",
"for",
"tr",
"in",
"stream",
"[",
"1",
":",
"]",
":",
"stack",
"=",
"np",
".",
"dstack",
"(",
"np",
".",
"array",
"(",
"[",
"stack",
",",
"tr",
".",
... | Internal multiplexer for multiplex_detect.
:type stream: obspy.core.stream.Stream
:param stream: Stream to multiplex
:return: trace of multiplexed data
:rtype: obspy.core.trace.Trace
.. Note: Requires all channels to be the same length.
Maps a standard multiplexed stream of seismic data to a single traces of \
multiplexed data as follows:
Input:
x = [x1, x2, x3, ...]
y = [y1, y2, y3, ...]
z = [z1, z2, z3, ...]
Output:
xyz = [x1, y1, z1, x2, y2, z2, x3, y3, z3, ...] | [
"Internal",
"multiplexer",
"for",
"multiplex_detect",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L830-L857 | train | 203,348 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | subspace_detect | def subspace_detect(detectors, stream, threshold, trig_int, moveout=0,
min_trig=1, parallel=True, num_cores=None):
"""
Conduct subspace detection with chosen detectors.
:type detectors: list
:param detectors:
list of :class:`eqcorrscan.core.subspace.Detector` to be used
for detection.
:type stream: obspy.core.stream.Stream
:param stream: Stream to detect within.
:type threshold: float
:param threshold:
Threshold between 0 and 1 for detection, see :func:`Detector.detect`
:type trig_int: float
:param trig_int: Minimum trigger interval in seconds.
:type moveout: float
:param moveout:
Maximum allowable moveout window for non-multiplexed, network
detection. See note.
:type min_trig: int
:param min_trig:
Minimum number of stations exceeding threshold for non-multiplexed,
network detection. See note in :func:`Detector.detect`.
:type parallel: bool
:param parallel: Whether to run detectors in parallel in groups.
:type num_cores: int
:param num_cores:
How many cpu cores to use if parallel==True. If set to None (default),
will use all available cores.
:rtype: list
:return:
List of :class:`eqcorrscan.core.match_filter.Detection` detections.
.. Note::
This will loop through your detectors using their detect method.
If the detectors are multiplexed it will run groups of detectors with
the same channels at the same time.
"""
from multiprocessing import Pool, cpu_count
# First check that detector parameters are the same
parameters = []
detections = []
for detector in detectors:
parameter = (detector.lowcut, detector.highcut,
detector.filt_order, detector.sampling_rate,
detector.multiplex, detector.stachans)
if parameter not in parameters:
parameters.append(parameter)
for parameter_set in parameters:
parameter_detectors = []
for detector in detectors:
det_par = (detector.lowcut, detector.highcut, detector.filt_order,
detector.sampling_rate, detector.multiplex,
detector.stachans)
if det_par == parameter_set:
parameter_detectors.append(detector)
stream, stachans = \
_subspace_process(
streams=[stream.copy()], lowcut=parameter_set[0],
highcut=parameter_set[1], filt_order=parameter_set[2],
sampling_rate=parameter_set[3], multiplex=parameter_set[4],
stachans=parameter_set[5], parallel=True, align=False,
shift_len=None, reject=False)
if not parallel:
for detector in parameter_detectors:
detections += _detect(
detector=detector, st=stream[0], threshold=threshold,
trig_int=trig_int, moveout=moveout, min_trig=min_trig,
process=False, extract_detections=False, debug=0)
else:
if num_cores:
ncores = num_cores
else:
ncores = cpu_count()
pool = Pool(processes=ncores)
results = [pool.apply_async(
_detect, args=(detector, stream[0], threshold, trig_int,
moveout, min_trig, False, False, 0))
for detector in parameter_detectors]
pool.close()
try:
_detections = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
for d in _detections:
if isinstance(d, list):
detections += d
else:
detections.append(d)
return detections | python | def subspace_detect(detectors, stream, threshold, trig_int, moveout=0,
min_trig=1, parallel=True, num_cores=None):
"""
Conduct subspace detection with chosen detectors.
:type detectors: list
:param detectors:
list of :class:`eqcorrscan.core.subspace.Detector` to be used
for detection.
:type stream: obspy.core.stream.Stream
:param stream: Stream to detect within.
:type threshold: float
:param threshold:
Threshold between 0 and 1 for detection, see :func:`Detector.detect`
:type trig_int: float
:param trig_int: Minimum trigger interval in seconds.
:type moveout: float
:param moveout:
Maximum allowable moveout window for non-multiplexed, network
detection. See note.
:type min_trig: int
:param min_trig:
Minimum number of stations exceeding threshold for non-multiplexed,
network detection. See note in :func:`Detector.detect`.
:type parallel: bool
:param parallel: Whether to run detectors in parallel in groups.
:type num_cores: int
:param num_cores:
How many cpu cores to use if parallel==True. If set to None (default),
will use all available cores.
:rtype: list
:return:
List of :class:`eqcorrscan.core.match_filter.Detection` detections.
.. Note::
This will loop through your detectors using their detect method.
If the detectors are multiplexed it will run groups of detectors with
the same channels at the same time.
"""
from multiprocessing import Pool, cpu_count
# First check that detector parameters are the same
parameters = []
detections = []
for detector in detectors:
parameter = (detector.lowcut, detector.highcut,
detector.filt_order, detector.sampling_rate,
detector.multiplex, detector.stachans)
if parameter not in parameters:
parameters.append(parameter)
for parameter_set in parameters:
parameter_detectors = []
for detector in detectors:
det_par = (detector.lowcut, detector.highcut, detector.filt_order,
detector.sampling_rate, detector.multiplex,
detector.stachans)
if det_par == parameter_set:
parameter_detectors.append(detector)
stream, stachans = \
_subspace_process(
streams=[stream.copy()], lowcut=parameter_set[0],
highcut=parameter_set[1], filt_order=parameter_set[2],
sampling_rate=parameter_set[3], multiplex=parameter_set[4],
stachans=parameter_set[5], parallel=True, align=False,
shift_len=None, reject=False)
if not parallel:
for detector in parameter_detectors:
detections += _detect(
detector=detector, st=stream[0], threshold=threshold,
trig_int=trig_int, moveout=moveout, min_trig=min_trig,
process=False, extract_detections=False, debug=0)
else:
if num_cores:
ncores = num_cores
else:
ncores = cpu_count()
pool = Pool(processes=ncores)
results = [pool.apply_async(
_detect, args=(detector, stream[0], threshold, trig_int,
moveout, min_trig, False, False, 0))
for detector in parameter_detectors]
pool.close()
try:
_detections = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
for d in _detections:
if isinstance(d, list):
detections += d
else:
detections.append(d)
return detections | [
"def",
"subspace_detect",
"(",
"detectors",
",",
"stream",
",",
"threshold",
",",
"trig_int",
",",
"moveout",
"=",
"0",
",",
"min_trig",
"=",
"1",
",",
"parallel",
"=",
"True",
",",
"num_cores",
"=",
"None",
")",
":",
"from",
"multiprocessing",
"import",
... | Conduct subspace detection with chosen detectors.
:type detectors: list
:param detectors:
list of :class:`eqcorrscan.core.subspace.Detector` to be used
for detection.
:type stream: obspy.core.stream.Stream
:param stream: Stream to detect within.
:type threshold: float
:param threshold:
Threshold between 0 and 1 for detection, see :func:`Detector.detect`
:type trig_int: float
:param trig_int: Minimum trigger interval in seconds.
:type moveout: float
:param moveout:
Maximum allowable moveout window for non-multiplexed, network
detection. See note.
:type min_trig: int
:param min_trig:
Minimum number of stations exceeding threshold for non-multiplexed,
network detection. See note in :func:`Detector.detect`.
:type parallel: bool
:param parallel: Whether to run detectors in parallel in groups.
:type num_cores: int
:param num_cores:
How many cpu cores to use if parallel==True. If set to None (default),
will use all available cores.
:rtype: list
:return:
List of :class:`eqcorrscan.core.match_filter.Detection` detections.
.. Note::
This will loop through your detectors using their detect method.
If the detectors are multiplexed it will run groups of detectors with
the same channels at the same time. | [
"Conduct",
"subspace",
"detection",
"with",
"chosen",
"detectors",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L961-L1054 | train | 203,349 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | Detector.construct | def construct(self, streams, lowcut, highcut, filt_order,
sampling_rate, multiplex, name, align, shift_len=0,
reject=0.3, no_missed=True, plot=False):
"""
Construct a subspace detector from a list of streams, full rank.
Subspace detector will be full-rank, further functions can be used \
to select the desired dimensions.
:type streams: list
:param streams:
List of :class:`obspy.core.stream.Stream` to be used to generate
the subspace detector. These should be pre-clustered and aligned.
:type lowcut: float
:param lowcut: Lowcut in Hz, can be None to not apply filter
:type highcut: float
:param highcut: Highcut in Hz, can be None to not apply filter
:type filt_order: int
:param filt_order: Number of corners for filter.
:type sampling_rate: float
:param sampling_rate: Desired sampling rate in Hz
:type multiplex: bool
:param multiplex:
Whether to multiplex the data or not. Data are multiplexed
according to the method of Harris, see the multi function for
details.
:type name: str
:param name: Name of the detector, used for book-keeping.
:type align: bool
:param align:
Whether to align the data or not - needs to be done at some point
:type shift_len: float
:param shift_len: Maximum shift allowed for alignment in seconds.
:type reject: float
:param reject:
Minimum correlation to include traces - only used if align=True.
:type no_missed: bool
:param no_missed:
Reject streams with missed traces, defaults to True. A missing
trace from lots of events will reduce the quality of the subspace
detector if multiplexed. Only used when multi is set to True.
:type plot: bool
:param plot: Whether to plot the alignment stage or not.
.. note::
The detector will be normalized such that the data, before
computing the singular-value decomposition, will have unit energy.
e.g. We divide the amplitudes of the data by the L1 norm of the
data.
.. warning::
EQcorrscan's alignment will attempt to align over the whole data
window given. For long (more than 2s) chunks of data this can give
poor results and you might be better off using the
:func:`eqcorrscan.utils.stacking.align_traces` function externally,
focusing on a smaller window of data. To do this you would align
the data prior to running construct.
"""
self.lowcut = lowcut
self.highcut = highcut
self.filt_order = filt_order
self.sampling_rate = sampling_rate
self.name = name
self.multiplex = multiplex
# Pre-process data
p_streams, stachans = _subspace_process(
streams=copy.deepcopy(streams), lowcut=lowcut, highcut=highcut,
filt_order=filt_order, sampling_rate=sampling_rate,
multiplex=multiplex, align=align, shift_len=shift_len,
reject=reject, plot=plot, no_missed=no_missed)
# Compute the SVD, use the cluster.SVD function
u, sigma, v, svd_stachans = svd(stream_list=p_streams, full=True)
self.stachans = stachans
# self.delays = delays
self.u = u
self.v = v
self.sigma = sigma
self.data = copy.deepcopy(u) # Set the data matrix to be full rank U.
self.dimension = np.inf
return self | python | def construct(self, streams, lowcut, highcut, filt_order,
sampling_rate, multiplex, name, align, shift_len=0,
reject=0.3, no_missed=True, plot=False):
"""
Construct a subspace detector from a list of streams, full rank.
Subspace detector will be full-rank, further functions can be used \
to select the desired dimensions.
:type streams: list
:param streams:
List of :class:`obspy.core.stream.Stream` to be used to generate
the subspace detector. These should be pre-clustered and aligned.
:type lowcut: float
:param lowcut: Lowcut in Hz, can be None to not apply filter
:type highcut: float
:param highcut: Highcut in Hz, can be None to not apply filter
:type filt_order: int
:param filt_order: Number of corners for filter.
:type sampling_rate: float
:param sampling_rate: Desired sampling rate in Hz
:type multiplex: bool
:param multiplex:
Whether to multiplex the data or not. Data are multiplexed
according to the method of Harris, see the multi function for
details.
:type name: str
:param name: Name of the detector, used for book-keeping.
:type align: bool
:param align:
Whether to align the data or not - needs to be done at some point
:type shift_len: float
:param shift_len: Maximum shift allowed for alignment in seconds.
:type reject: float
:param reject:
Minimum correlation to include traces - only used if align=True.
:type no_missed: bool
:param no_missed:
Reject streams with missed traces, defaults to True. A missing
trace from lots of events will reduce the quality of the subspace
detector if multiplexed. Only used when multi is set to True.
:type plot: bool
:param plot: Whether to plot the alignment stage or not.
.. note::
The detector will be normalized such that the data, before
computing the singular-value decomposition, will have unit energy.
e.g. We divide the amplitudes of the data by the L1 norm of the
data.
.. warning::
EQcorrscan's alignment will attempt to align over the whole data
window given. For long (more than 2s) chunks of data this can give
poor results and you might be better off using the
:func:`eqcorrscan.utils.stacking.align_traces` function externally,
focusing on a smaller window of data. To do this you would align
the data prior to running construct.
"""
self.lowcut = lowcut
self.highcut = highcut
self.filt_order = filt_order
self.sampling_rate = sampling_rate
self.name = name
self.multiplex = multiplex
# Pre-process data
p_streams, stachans = _subspace_process(
streams=copy.deepcopy(streams), lowcut=lowcut, highcut=highcut,
filt_order=filt_order, sampling_rate=sampling_rate,
multiplex=multiplex, align=align, shift_len=shift_len,
reject=reject, plot=plot, no_missed=no_missed)
# Compute the SVD, use the cluster.SVD function
u, sigma, v, svd_stachans = svd(stream_list=p_streams, full=True)
self.stachans = stachans
# self.delays = delays
self.u = u
self.v = v
self.sigma = sigma
self.data = copy.deepcopy(u) # Set the data matrix to be full rank U.
self.dimension = np.inf
return self | [
"def",
"construct",
"(",
"self",
",",
"streams",
",",
"lowcut",
",",
"highcut",
",",
"filt_order",
",",
"sampling_rate",
",",
"multiplex",
",",
"name",
",",
"align",
",",
"shift_len",
"=",
"0",
",",
"reject",
"=",
"0.3",
",",
"no_missed",
"=",
"True",
... | Construct a subspace detector from a list of streams, full rank.
Subspace detector will be full-rank, further functions can be used \
to select the desired dimensions.
:type streams: list
:param streams:
List of :class:`obspy.core.stream.Stream` to be used to generate
the subspace detector. These should be pre-clustered and aligned.
:type lowcut: float
:param lowcut: Lowcut in Hz, can be None to not apply filter
:type highcut: float
:param highcut: Highcut in Hz, can be None to not apply filter
:type filt_order: int
:param filt_order: Number of corners for filter.
:type sampling_rate: float
:param sampling_rate: Desired sampling rate in Hz
:type multiplex: bool
:param multiplex:
Whether to multiplex the data or not. Data are multiplexed
according to the method of Harris, see the multi function for
details.
:type name: str
:param name: Name of the detector, used for book-keeping.
:type align: bool
:param align:
Whether to align the data or not - needs to be done at some point
:type shift_len: float
:param shift_len: Maximum shift allowed for alignment in seconds.
:type reject: float
:param reject:
Minimum correlation to include traces - only used if align=True.
:type no_missed: bool
:param no_missed:
Reject streams with missed traces, defaults to True. A missing
trace from lots of events will reduce the quality of the subspace
detector if multiplexed. Only used when multi is set to True.
:type plot: bool
:param plot: Whether to plot the alignment stage or not.
.. note::
The detector will be normalized such that the data, before
computing the singular-value decomposition, will have unit energy.
e.g. We divide the amplitudes of the data by the L1 norm of the
data.
.. warning::
EQcorrscan's alignment will attempt to align over the whole data
window given. For long (more than 2s) chunks of data this can give
poor results and you might be better off using the
:func:`eqcorrscan.utils.stacking.align_traces` function externally,
focusing on a smaller window of data. To do this you would align
the data prior to running construct. | [
"Construct",
"a",
"subspace",
"detector",
"from",
"a",
"list",
"of",
"streams",
"full",
"rank",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L137-L216 | train | 203,350 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | Detector.partition | def partition(self, dimension):
"""
Partition subspace into desired dimension.
:type dimension: int
:param dimension: Maximum dimension to use.
"""
# Take leftmost 'dimension' input basis vectors
for i, channel in enumerate(self.u):
if self.v[i].shape[1] < dimension:
raise IndexError('Channel is max dimension %s'
% self.v[i].shape[1])
self.data[i] = channel[:, 0:dimension]
self.dimension = dimension
return self | python | def partition(self, dimension):
"""
Partition subspace into desired dimension.
:type dimension: int
:param dimension: Maximum dimension to use.
"""
# Take leftmost 'dimension' input basis vectors
for i, channel in enumerate(self.u):
if self.v[i].shape[1] < dimension:
raise IndexError('Channel is max dimension %s'
% self.v[i].shape[1])
self.data[i] = channel[:, 0:dimension]
self.dimension = dimension
return self | [
"def",
"partition",
"(",
"self",
",",
"dimension",
")",
":",
"# Take leftmost 'dimension' input basis vectors",
"for",
"i",
",",
"channel",
"in",
"enumerate",
"(",
"self",
".",
"u",
")",
":",
"if",
"self",
".",
"v",
"[",
"i",
"]",
".",
"shape",
"[",
"1",... | Partition subspace into desired dimension.
:type dimension: int
:param dimension: Maximum dimension to use. | [
"Partition",
"subspace",
"into",
"desired",
"dimension",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L218-L232 | train | 203,351 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | Detector.energy_capture | def energy_capture(self, stachans='all', size=(10, 7), show=False):
"""
Calculate the average percentage energy capture for this subspace.
:return: Percentage energy capture
:rtype: float
"""
if show:
return subspace_fc_plot(detector=self, stachans=stachans,
size=size, show=show)
percent_capture = 0
if np.isinf(self.dimension):
return 100
for channel in self.sigma:
fc = np.sum(channel[0:self.dimension]) / np.sum(channel)
percent_capture += fc
else:
return 100 * (percent_capture / len(self.sigma)) | python | def energy_capture(self, stachans='all', size=(10, 7), show=False):
"""
Calculate the average percentage energy capture for this subspace.
:return: Percentage energy capture
:rtype: float
"""
if show:
return subspace_fc_plot(detector=self, stachans=stachans,
size=size, show=show)
percent_capture = 0
if np.isinf(self.dimension):
return 100
for channel in self.sigma:
fc = np.sum(channel[0:self.dimension]) / np.sum(channel)
percent_capture += fc
else:
return 100 * (percent_capture / len(self.sigma)) | [
"def",
"energy_capture",
"(",
"self",
",",
"stachans",
"=",
"'all'",
",",
"size",
"=",
"(",
"10",
",",
"7",
")",
",",
"show",
"=",
"False",
")",
":",
"if",
"show",
":",
"return",
"subspace_fc_plot",
"(",
"detector",
"=",
"self",
",",
"stachans",
"=",... | Calculate the average percentage energy capture for this subspace.
:return: Percentage energy capture
:rtype: float | [
"Calculate",
"the",
"average",
"percentage",
"energy",
"capture",
"for",
"this",
"subspace",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L234-L251 | train | 203,352 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | Detector.write | def write(self, filename):
"""
Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to.
"""
f = h5py.File(filename, "w")
# Must store eqcorrscan version number, username would be useful too.
data_group = f.create_group(name="data")
for i, data in enumerate(self.data):
dset = data_group.create_dataset(name="data_" + str(i),
shape=data.shape,
dtype=data.dtype)
dset[...] = data
data_group.attrs['length'] = len(self.data)
data_group.attrs['name'] = self.name.encode("ascii", "ignore")
data_group.attrs['sampling_rate'] = self.sampling_rate
data_group.attrs['multiplex'] = self.multiplex
data_group.attrs['lowcut'] = self.lowcut
data_group.attrs['highcut'] = self.highcut
data_group.attrs['filt_order'] = self.filt_order
data_group.attrs['dimension'] = self.dimension
data_group.attrs['user'] = getpass.getuser()
data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__)
# Convert station-channel list to something writable
ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore")
for stachan in self.stachans]
stachans = f.create_dataset(name="stachans",
shape=(len(ascii_stachans),),
dtype='S10')
stachans[...] = ascii_stachans
u_group = f.create_group("u")
for i, u in enumerate(self.u):
uset = u_group.create_dataset(name="u_" + str(i),
shape=u.shape, dtype=u.dtype)
uset[...] = u
u_group.attrs['length'] = len(self.u)
sigma_group = f.create_group("sigma")
for i, sigma in enumerate(self.sigma):
sigmaset = sigma_group.create_dataset(name="sigma_" + str(i),
shape=sigma.shape,
dtype=sigma.dtype)
sigmaset[...] = sigma
sigma_group.attrs['length'] = len(self.sigma)
v_group = f.create_group("v")
for i, v in enumerate(self.v):
vset = v_group.create_dataset(name="v_" + str(i),
shape=v.shape, dtype=v.dtype)
vset[...] = v
v_group.attrs['length'] = len(self.v)
f.flush()
f.close()
return self | python | def write(self, filename):
"""
Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to.
"""
f = h5py.File(filename, "w")
# Must store eqcorrscan version number, username would be useful too.
data_group = f.create_group(name="data")
for i, data in enumerate(self.data):
dset = data_group.create_dataset(name="data_" + str(i),
shape=data.shape,
dtype=data.dtype)
dset[...] = data
data_group.attrs['length'] = len(self.data)
data_group.attrs['name'] = self.name.encode("ascii", "ignore")
data_group.attrs['sampling_rate'] = self.sampling_rate
data_group.attrs['multiplex'] = self.multiplex
data_group.attrs['lowcut'] = self.lowcut
data_group.attrs['highcut'] = self.highcut
data_group.attrs['filt_order'] = self.filt_order
data_group.attrs['dimension'] = self.dimension
data_group.attrs['user'] = getpass.getuser()
data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__)
# Convert station-channel list to something writable
ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore")
for stachan in self.stachans]
stachans = f.create_dataset(name="stachans",
shape=(len(ascii_stachans),),
dtype='S10')
stachans[...] = ascii_stachans
u_group = f.create_group("u")
for i, u in enumerate(self.u):
uset = u_group.create_dataset(name="u_" + str(i),
shape=u.shape, dtype=u.dtype)
uset[...] = u
u_group.attrs['length'] = len(self.u)
sigma_group = f.create_group("sigma")
for i, sigma in enumerate(self.sigma):
sigmaset = sigma_group.create_dataset(name="sigma_" + str(i),
shape=sigma.shape,
dtype=sigma.dtype)
sigmaset[...] = sigma
sigma_group.attrs['length'] = len(self.sigma)
v_group = f.create_group("v")
for i, v in enumerate(self.v):
vset = v_group.create_dataset(name="v_" + str(i),
shape=v.shape, dtype=v.dtype)
vset[...] = v
v_group.attrs['length'] = len(self.v)
f.flush()
f.close()
return self | [
"def",
"write",
"(",
"self",
",",
"filename",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"filename",
",",
"\"w\"",
")",
"# Must store eqcorrscan version number, username would be useful too.",
"data_group",
"=",
"f",
".",
"create_group",
"(",
"name",
"=",
"\"... | Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to. | [
"Write",
"detector",
"to",
"a",
"file",
"-",
"uses",
"HDF5",
"file",
"format",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L322-L378 | train | 203,353 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | Detector.read | def read(self, filename):
"""
Read detector from a file, must be HDF5 format.
Reads a Detector object from an HDF5 file, usually created by \
eqcorrscan.
:type filename: str
:param filename: Filename to save the detector to.
"""
f = h5py.File(filename, "r")
self.data = []
for i in range(f['data'].attrs['length']):
self.data.append(f['data']['data_' + str(i)].value)
self.u = []
for i in range(f['u'].attrs['length']):
self.u.append(f['u']['u_' + str(i)].value)
self.sigma = []
for i in range(f['sigma'].attrs['length']):
self.sigma.append(f['sigma']['sigma_' + str(i)].value)
self.v = []
for i in range(f['v'].attrs['length']):
self.v.append(f['v']['v_' + str(i)].value)
self.stachans = [tuple(stachan.decode('ascii').split('.'))
for stachan in f['stachans'].value]
self.dimension = f['data'].attrs['dimension']
self.filt_order = f['data'].attrs['filt_order']
self.highcut = f['data'].attrs['highcut']
self.lowcut = f['data'].attrs['lowcut']
self.multiplex = bool(f['data'].attrs['multiplex'])
self.sampling_rate = f['data'].attrs['sampling_rate']
if isinstance(f['data'].attrs['name'], str):
self.name = f['data'].attrs['name']
else:
self.name = f['data'].attrs['name'].decode('ascii')
return self | python | def read(self, filename):
"""
Read detector from a file, must be HDF5 format.
Reads a Detector object from an HDF5 file, usually created by \
eqcorrscan.
:type filename: str
:param filename: Filename to save the detector to.
"""
f = h5py.File(filename, "r")
self.data = []
for i in range(f['data'].attrs['length']):
self.data.append(f['data']['data_' + str(i)].value)
self.u = []
for i in range(f['u'].attrs['length']):
self.u.append(f['u']['u_' + str(i)].value)
self.sigma = []
for i in range(f['sigma'].attrs['length']):
self.sigma.append(f['sigma']['sigma_' + str(i)].value)
self.v = []
for i in range(f['v'].attrs['length']):
self.v.append(f['v']['v_' + str(i)].value)
self.stachans = [tuple(stachan.decode('ascii').split('.'))
for stachan in f['stachans'].value]
self.dimension = f['data'].attrs['dimension']
self.filt_order = f['data'].attrs['filt_order']
self.highcut = f['data'].attrs['highcut']
self.lowcut = f['data'].attrs['lowcut']
self.multiplex = bool(f['data'].attrs['multiplex'])
self.sampling_rate = f['data'].attrs['sampling_rate']
if isinstance(f['data'].attrs['name'], str):
self.name = f['data'].attrs['name']
else:
self.name = f['data'].attrs['name'].decode('ascii')
return self | [
"def",
"read",
"(",
"self",
",",
"filename",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"filename",
",",
"\"r\"",
")",
"self",
".",
"data",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"f",
"[",
"'data'",
"]",
".",
"attrs",
"[",
"'length'"... | Read detector from a file, must be HDF5 format.
Reads a Detector object from an HDF5 file, usually created by \
eqcorrscan.
:type filename: str
:param filename: Filename to save the detector to. | [
"Read",
"detector",
"from",
"a",
"file",
"must",
"be",
"HDF5",
"format",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L380-L415 | train | 203,354 |
eqcorrscan/EQcorrscan | eqcorrscan/core/subspace.py | Detector.plot | def plot(self, stachans='all', size=(10, 7), show=True):
"""
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
for example.
"""
return subspace_detector_plot(detector=self, stachans=stachans,
size=size, show=show) | python | def plot(self, stachans='all', size=(10, 7), show=True):
"""
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
for example.
"""
return subspace_detector_plot(detector=self, stachans=stachans,
size=size, show=show) | [
"def",
"plot",
"(",
"self",
",",
"stachans",
"=",
"'all'",
",",
"size",
"=",
"(",
"10",
",",
"7",
")",
",",
"show",
"=",
"True",
")",
":",
"return",
"subspace_detector_plot",
"(",
"detector",
"=",
"self",
",",
"stachans",
"=",
"stachans",
",",
"size"... | Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:type show: bool
:param show: Whether or not to show the figure.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. Note::
See :func:`eqcorrscan.utils.plotting.subspace_detector_plot`
for example. | [
"Plot",
"the",
"output",
"basis",
"vectors",
"for",
"the",
"detector",
"at",
"the",
"given",
"dimension",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L417-L442 | train | 203,355 |
eqcorrscan/EQcorrscan | setup.py | export_symbols | def export_symbols(*path):
"""
Required for windows systems - functions defined in libutils.def.
"""
lines = open(os.path.join(*path), 'r').readlines()[2:]
return [s.strip() for s in lines if s.strip() != ''] | python | def export_symbols(*path):
"""
Required for windows systems - functions defined in libutils.def.
"""
lines = open(os.path.join(*path), 'r').readlines()[2:]
return [s.strip() for s in lines if s.strip() != ''] | [
"def",
"export_symbols",
"(",
"*",
"path",
")",
":",
"lines",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"*",
"path",
")",
",",
"'r'",
")",
".",
"readlines",
"(",
")",
"[",
"2",
":",
"]",
"return",
"[",
"s",
".",
"strip",
"(",
")"... | Required for windows systems - functions defined in libutils.def. | [
"Required",
"for",
"windows",
"systems",
"-",
"functions",
"defined",
"in",
"libutils",
".",
"def",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/setup.py#L181-L186 | train | 203,356 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | dist_calc | def dist_calc(loc1, loc2):
"""
Function to calculate the distance in km between two points.
Uses the flat Earth approximation. Better things are available for this,
like `gdal <http://www.gdal.org/>`_.
:type loc1: tuple
:param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
:type loc2: tuple
:param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
:returns: Distance between points in km.
:rtype: float
"""
R = 6371.009 # Radius of the Earth in km
dlat = np.radians(abs(loc1[0] - loc2[0]))
dlong = np.radians(abs(loc1[1] - loc2[1]))
ddepth = abs(loc1[2] - loc2[2])
mean_lat = np.radians((loc1[0] + loc2[0]) / 2)
dist = R * np.sqrt(dlat ** 2 + (np.cos(mean_lat) * dlong) ** 2)
dist = np.sqrt(dist ** 2 + ddepth ** 2)
return dist | python | def dist_calc(loc1, loc2):
"""
Function to calculate the distance in km between two points.
Uses the flat Earth approximation. Better things are available for this,
like `gdal <http://www.gdal.org/>`_.
:type loc1: tuple
:param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
:type loc2: tuple
:param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
:returns: Distance between points in km.
:rtype: float
"""
R = 6371.009 # Radius of the Earth in km
dlat = np.radians(abs(loc1[0] - loc2[0]))
dlong = np.radians(abs(loc1[1] - loc2[1]))
ddepth = abs(loc1[2] - loc2[2])
mean_lat = np.radians((loc1[0] + loc2[0]) / 2)
dist = R * np.sqrt(dlat ** 2 + (np.cos(mean_lat) * dlong) ** 2)
dist = np.sqrt(dist ** 2 + ddepth ** 2)
return dist | [
"def",
"dist_calc",
"(",
"loc1",
",",
"loc2",
")",
":",
"R",
"=",
"6371.009",
"# Radius of the Earth in km",
"dlat",
"=",
"np",
".",
"radians",
"(",
"abs",
"(",
"loc1",
"[",
"0",
"]",
"-",
"loc2",
"[",
"0",
"]",
")",
")",
"dlong",
"=",
"np",
".",
... | Function to calculate the distance in km between two points.
Uses the flat Earth approximation. Better things are available for this,
like `gdal <http://www.gdal.org/>`_.
:type loc1: tuple
:param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
:type loc2: tuple
:param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
:returns: Distance between points in km.
:rtype: float | [
"Function",
"to",
"calculate",
"the",
"distance",
"in",
"km",
"between",
"two",
"points",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L41-L63 | train | 203,357 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | calc_max_curv | def calc_max_curv(magnitudes, plotvar=False):
"""
Calculate the magnitude of completeness using the maximum curvature method.
:type magnitudes: list
:param magnitudes:
List of magnitudes from which to compute the maximum curvature which
will give an estimate of the magnitude of completeness given the
assumption of a power-law scaling.
:type plotvar: bool
:param plotvar: Turn plotting on and off
:rtype: float
:return: Magnitude at maximum curvature
.. Note:: Should be used as a guide, often under-estimates Mc.
.. rubric:: Example
>>> import numpy as np
>>> mags = []
>>> for mag in np.arange(2.5,3, 0.1):
... mags.extend([mag] * int(20000 - 10 * mag))
>>> for mag in np.arange(3,7, 0.1):
... mags.extend([mag] * int(10 ** (7 - 1 * mag)))
>>> calc_max_curv(mags, plotvar=False)
3.0
"""
counts = Counter(magnitudes)
df = np.zeros(len(counts))
mag_steps = np.zeros(len(counts))
grad = np.zeros(len(counts) - 1)
grad_points = grad.copy()
for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
mag_steps[i] = magnitude
if i > 0:
df[i] = counts[magnitude] + df[i - 1]
else:
df[i] = counts[magnitude]
for i, val in enumerate(df):
if i > 0:
grad[i - 1] = (val - df[i - 1]) / (mag_steps[i] - mag_steps[i - 1])
grad_points[i - 1] = mag_steps[i] - ((mag_steps[i] -
mag_steps[i - 1]) / 2.0)
# Need to find the second order derivative
curvature = np.zeros(len(grad) - 1)
curvature_points = curvature.copy()
for i, _grad in enumerate(grad):
if i > 0:
curvature[i - 1] = (_grad - grad[i - 1]) / (grad_points[i] -
grad_points[i - 1])
curvature_points[i - 1] = grad_points[i] - ((grad_points[i] -
grad_points[i - 1]) /
2.0)
if plotvar:
plt.scatter(mag_steps, df, c='k', label='Magnitude function')
plt.plot(mag_steps, df, c='k')
plt.scatter(grad_points, grad, c='r', label='Gradient')
plt.plot(grad_points, grad, c='r')
plt.scatter(curvature_points, curvature, c='g', label='Curvature')
plt.plot(curvature_points, curvature, c='g')
plt.legend()
plt.show()
return curvature_points[np.argmax(abs(curvature))] | python | def calc_max_curv(magnitudes, plotvar=False):
"""
Calculate the magnitude of completeness using the maximum curvature method.
:type magnitudes: list
:param magnitudes:
List of magnitudes from which to compute the maximum curvature which
will give an estimate of the magnitude of completeness given the
assumption of a power-law scaling.
:type plotvar: bool
:param plotvar: Turn plotting on and off
:rtype: float
:return: Magnitude at maximum curvature
.. Note:: Should be used as a guide, often under-estimates Mc.
.. rubric:: Example
>>> import numpy as np
>>> mags = []
>>> for mag in np.arange(2.5,3, 0.1):
... mags.extend([mag] * int(20000 - 10 * mag))
>>> for mag in np.arange(3,7, 0.1):
... mags.extend([mag] * int(10 ** (7 - 1 * mag)))
>>> calc_max_curv(mags, plotvar=False)
3.0
"""
counts = Counter(magnitudes)
df = np.zeros(len(counts))
mag_steps = np.zeros(len(counts))
grad = np.zeros(len(counts) - 1)
grad_points = grad.copy()
for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
mag_steps[i] = magnitude
if i > 0:
df[i] = counts[magnitude] + df[i - 1]
else:
df[i] = counts[magnitude]
for i, val in enumerate(df):
if i > 0:
grad[i - 1] = (val - df[i - 1]) / (mag_steps[i] - mag_steps[i - 1])
grad_points[i - 1] = mag_steps[i] - ((mag_steps[i] -
mag_steps[i - 1]) / 2.0)
# Need to find the second order derivative
curvature = np.zeros(len(grad) - 1)
curvature_points = curvature.copy()
for i, _grad in enumerate(grad):
if i > 0:
curvature[i - 1] = (_grad - grad[i - 1]) / (grad_points[i] -
grad_points[i - 1])
curvature_points[i - 1] = grad_points[i] - ((grad_points[i] -
grad_points[i - 1]) /
2.0)
if plotvar:
plt.scatter(mag_steps, df, c='k', label='Magnitude function')
plt.plot(mag_steps, df, c='k')
plt.scatter(grad_points, grad, c='r', label='Gradient')
plt.plot(grad_points, grad, c='r')
plt.scatter(curvature_points, curvature, c='g', label='Curvature')
plt.plot(curvature_points, curvature, c='g')
plt.legend()
plt.show()
return curvature_points[np.argmax(abs(curvature))] | [
"def",
"calc_max_curv",
"(",
"magnitudes",
",",
"plotvar",
"=",
"False",
")",
":",
"counts",
"=",
"Counter",
"(",
"magnitudes",
")",
"df",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"counts",
")",
")",
"mag_steps",
"=",
"np",
".",
"zeros",
"(",
"len",... | Calculate the magnitude of completeness using the maximum curvature method.
:type magnitudes: list
:param magnitudes:
List of magnitudes from which to compute the maximum curvature which
will give an estimate of the magnitude of completeness given the
assumption of a power-law scaling.
:type plotvar: bool
:param plotvar: Turn plotting on and off
:rtype: float
:return: Magnitude at maximum curvature
.. Note:: Should be used as a guide, often under-estimates Mc.
.. rubric:: Example
>>> import numpy as np
>>> mags = []
>>> for mag in np.arange(2.5,3, 0.1):
... mags.extend([mag] * int(20000 - 10 * mag))
>>> for mag in np.arange(3,7, 0.1):
... mags.extend([mag] * int(10 ** (7 - 1 * mag)))
>>> calc_max_curv(mags, plotvar=False)
3.0 | [
"Calculate",
"the",
"magnitude",
"of",
"completeness",
"using",
"the",
"maximum",
"curvature",
"method",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L66-L129 | train | 203,358 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | _sim_WA | def _sim_WA(trace, PAZ, seedresp, water_level, velocity=False):
"""
Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace`
"""
# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990
PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],
'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}
if velocity:
PAZ_WA['zeros'] = [0 + 0j, 0 + 0j]
# De-trend data
trace.detrend('simple')
# Simulate Wood Anderson
if PAZ:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=PAZ, paz_simulate=PAZ_WA,
water_level=water_level,
remove_sensitivity=True)
elif seedresp:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level, seedresp=seedresp)
else:
UserWarning('No response given to remove, will just simulate WA')
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level)
return trace | python | def _sim_WA(trace, PAZ, seedresp, water_level, velocity=False):
"""
Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace`
"""
# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990
PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],
'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}
if velocity:
PAZ_WA['zeros'] = [0 + 0j, 0 + 0j]
# De-trend data
trace.detrend('simple')
# Simulate Wood Anderson
if PAZ:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=PAZ, paz_simulate=PAZ_WA,
water_level=water_level,
remove_sensitivity=True)
elif seedresp:
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level, seedresp=seedresp)
else:
UserWarning('No response given to remove, will just simulate WA')
trace.data = seis_sim(trace.data, trace.stats.sampling_rate,
paz_remove=None, paz_simulate=PAZ_WA,
water_level=water_level)
return trace | [
"def",
"_sim_WA",
"(",
"trace",
",",
"PAZ",
",",
"seedresp",
",",
"water_level",
",",
"velocity",
"=",
"False",
")",
":",
"# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990",
"PAZ_WA",
"=",
"{",
"'poles'",
":",
"[",
"-",
"6.283",
"+",
"4.712... | Remove the instrument response from a trace and simulate a Wood-Anderson.
Returns a de-meaned, de-trended, Wood Anderson simulated trace in
its place.
Works in-place on data and will destroy your original data, copy the
trace before giving it to this function!
:type trace: obspy.core.trace.Trace
:param trace:
A standard obspy trace, generally should be given without
pre-filtering, if given with pre-filtering for use with
amplitude determination for magnitudes you will need to
worry about how you cope with the response of this filter
yourself.
:type PAZ: dict
:param PAZ:
Dictionary containing lists of poles and zeros, the gain and
the sensitivity. If unset will expect seedresp.
:type seedresp: dict
:param seedresp: Seed response information - if unset will expect PAZ.
:type water_level: int
:param water_level: Water level for the simulation.
:type velocity: bool
:param velocity:
Whether to return a velocity trace or not - velocity is non-standard
for Wood-Anderson instruments, but institutes that use seiscomp3 or
Antelope require picks in velocity.
:returns: Trace of Wood-Anderson simulated data
:rtype: :class:`obspy.core.trace.Trace` | [
"Remove",
"the",
"instrument",
"response",
"from",
"a",
"trace",
"and",
"simulate",
"a",
"Wood",
"-",
"Anderson",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L231-L287 | train | 203,359 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | _GSE2_PAZ_read | def _GSE2_PAZ_read(gsefile):
"""
Read the instrument response information from a GSE Poles and Zeros file.
Formatted for files generated by the SEISAN program RESP.
Format must be CAL2, not coded for any other format at the moment,
contact the authors to add others in.
:type gsefile: string
:param gsefile: Path to GSE file
:returns: Dictionary of poles, zeros, gain and sensitivity
:rtype: dict
"""
with open(gsefile, 'r') as f:
# First line should start with CAL2
header = f.readline()
if not header[0:4] == 'CAL2':
raise IOError('Unknown format for GSE file, only coded for CAL2')
station = header.split()[1]
channel = header.split()[2]
sensor = header.split()[3]
date = dt.datetime.strptime(header.split()[7], '%Y/%m/%d')
header = f.readline()
if not header[0:4] == 'PAZ2':
raise IOError('Unknown format for GSE file, only coded for PAZ2')
gain = float(header.split()[3]) # Measured in nm/counts
kpoles = int(header.split()[4])
kzeros = int(header.split()[5])
poles = []
for i in range(kpoles):
pole = f.readline()
poles.append(complex(float(pole.split()[0]),
float(pole.split()[1])))
zeros = []
for i in range(kzeros):
zero = f.readline()
zeros.append(complex(float(zero.split()[0]),
float(zero.split()[1])))
# Have Poles and Zeros, but need Gain and Sensitivity
# Gain should be in the DIG2 line:
for line in f:
if line[0:4] == 'DIG2':
sensitivity = float(line.split()[2])
# measured in counts/muVolt
PAZ = {'poles': poles, 'zeros': zeros, 'gain': gain,
'sensitivity': sensitivity}
return PAZ, date, station, channel, sensor | python | def _GSE2_PAZ_read(gsefile):
"""
Read the instrument response information from a GSE Poles and Zeros file.
Formatted for files generated by the SEISAN program RESP.
Format must be CAL2, not coded for any other format at the moment,
contact the authors to add others in.
:type gsefile: string
:param gsefile: Path to GSE file
:returns: Dictionary of poles, zeros, gain and sensitivity
:rtype: dict
"""
with open(gsefile, 'r') as f:
# First line should start with CAL2
header = f.readline()
if not header[0:4] == 'CAL2':
raise IOError('Unknown format for GSE file, only coded for CAL2')
station = header.split()[1]
channel = header.split()[2]
sensor = header.split()[3]
date = dt.datetime.strptime(header.split()[7], '%Y/%m/%d')
header = f.readline()
if not header[0:4] == 'PAZ2':
raise IOError('Unknown format for GSE file, only coded for PAZ2')
gain = float(header.split()[3]) # Measured in nm/counts
kpoles = int(header.split()[4])
kzeros = int(header.split()[5])
poles = []
for i in range(kpoles):
pole = f.readline()
poles.append(complex(float(pole.split()[0]),
float(pole.split()[1])))
zeros = []
for i in range(kzeros):
zero = f.readline()
zeros.append(complex(float(zero.split()[0]),
float(zero.split()[1])))
# Have Poles and Zeros, but need Gain and Sensitivity
# Gain should be in the DIG2 line:
for line in f:
if line[0:4] == 'DIG2':
sensitivity = float(line.split()[2])
# measured in counts/muVolt
PAZ = {'poles': poles, 'zeros': zeros, 'gain': gain,
'sensitivity': sensitivity}
return PAZ, date, station, channel, sensor | [
"def",
"_GSE2_PAZ_read",
"(",
"gsefile",
")",
":",
"with",
"open",
"(",
"gsefile",
",",
"'r'",
")",
"as",
"f",
":",
"# First line should start with CAL2",
"header",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"not",
"header",
"[",
"0",
":",
"4",
"]",
"=... | Read the instrument response information from a GSE Poles and Zeros file.
Formatted for files generated by the SEISAN program RESP.
Format must be CAL2, not coded for any other format at the moment,
contact the authors to add others in.
:type gsefile: string
:param gsefile: Path to GSE file
:returns: Dictionary of poles, zeros, gain and sensitivity
:rtype: dict | [
"Read",
"the",
"instrument",
"response",
"information",
"from",
"a",
"GSE",
"Poles",
"and",
"Zeros",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L328-L376 | train | 203,360 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | _find_resp | def _find_resp(station, channel, network, time, delta, directory):
"""
Helper function to find the response information.
Works for a given station and channel at a given time and return a
dictionary of poles and zeros, gain and sensitivity.
:type station: str
:param station: Station name (as in the response files)
:type channel: str
:param channel: Channel name (as in the response files)
:type network: str
:param network: Network to scan for, can be a wildcard
:type time: datetime.datetime
:param time: Date-time to look for repsonse information
:type delta: float
:param delta: Sample interval in seconds
:type directory: str
:param directory: Directory to scan for response information
:returns: dictionary of response information
:rtype: dict
"""
possible_respfiles = glob.glob(directory + os.path.sep + 'RESP.' +
network + '.' + station +
'.*.' + channel) # GeoNet RESP naming
possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' +
network + '.' + channel +
'.' + station) # RDseed RESP naming
possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' +
station + '.' + network)
# WIZARD resp naming
# GSE format, station needs to be 5 characters padded with _, channel is 4
# characters padded with _
station = str(station)
channel = str(channel)
possible_respfiles += glob.glob(directory + os.path.sep +
station.ljust(5, str('_')) +
channel[0:len(channel) - 1].
ljust(3, str('_')) +
channel[-1] + '.*_GSE')
PAZ = []
seedresp = []
for respfile in possible_respfiles:
print('Reading response from: ' + respfile)
if respfile.split(os.path.sep)[-1][0:4] == 'RESP':
# Read from a resp file
seedresp = {'filename': respfile, 'date': UTCDateTime(time),
'units': 'DIS', 'network': network, 'station': station,
'channel': channel, 'location': '*'}
try:
# Attempt to evaluate the response for this information, if not
# then this is not the correct response info!
freq_resp, freqs = evalresp(
delta, 100, seedresp['filename'], seedresp['date'],
units=seedresp['units'], freq=True,
network=seedresp['network'], station=seedresp['station'],
channel=seedresp['channel'])
except:
print('Issues with RESP file')
seedresp = []
continue
elif respfile[-3:] == 'GSE':
PAZ, pazdate, pazstation, pazchannel, pazsensor =\
_GSE2_PAZ_read(respfile)
# check that the date is good!
if pazdate >= time and pazchannel != channel and\
pazstation != station:
print('Issue with GSE file')
print('date: ' + str(pazdate) + ' channel: ' + pazchannel +
' station: ' + pazstation)
PAZ = []
else:
continue
# Check that PAZ are for the correct station, channel and date
if PAZ or seedresp:
break
if PAZ:
return PAZ
elif seedresp:
return seedresp | python | def _find_resp(station, channel, network, time, delta, directory):
"""
Helper function to find the response information.
Works for a given station and channel at a given time and return a
dictionary of poles and zeros, gain and sensitivity.
:type station: str
:param station: Station name (as in the response files)
:type channel: str
:param channel: Channel name (as in the response files)
:type network: str
:param network: Network to scan for, can be a wildcard
:type time: datetime.datetime
:param time: Date-time to look for repsonse information
:type delta: float
:param delta: Sample interval in seconds
:type directory: str
:param directory: Directory to scan for response information
:returns: dictionary of response information
:rtype: dict
"""
possible_respfiles = glob.glob(directory + os.path.sep + 'RESP.' +
network + '.' + station +
'.*.' + channel) # GeoNet RESP naming
possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' +
network + '.' + channel +
'.' + station) # RDseed RESP naming
possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' +
station + '.' + network)
# WIZARD resp naming
# GSE format, station needs to be 5 characters padded with _, channel is 4
# characters padded with _
station = str(station)
channel = str(channel)
possible_respfiles += glob.glob(directory + os.path.sep +
station.ljust(5, str('_')) +
channel[0:len(channel) - 1].
ljust(3, str('_')) +
channel[-1] + '.*_GSE')
PAZ = []
seedresp = []
for respfile in possible_respfiles:
print('Reading response from: ' + respfile)
if respfile.split(os.path.sep)[-1][0:4] == 'RESP':
# Read from a resp file
seedresp = {'filename': respfile, 'date': UTCDateTime(time),
'units': 'DIS', 'network': network, 'station': station,
'channel': channel, 'location': '*'}
try:
# Attempt to evaluate the response for this information, if not
# then this is not the correct response info!
freq_resp, freqs = evalresp(
delta, 100, seedresp['filename'], seedresp['date'],
units=seedresp['units'], freq=True,
network=seedresp['network'], station=seedresp['station'],
channel=seedresp['channel'])
except:
print('Issues with RESP file')
seedresp = []
continue
elif respfile[-3:] == 'GSE':
PAZ, pazdate, pazstation, pazchannel, pazsensor =\
_GSE2_PAZ_read(respfile)
# check that the date is good!
if pazdate >= time and pazchannel != channel and\
pazstation != station:
print('Issue with GSE file')
print('date: ' + str(pazdate) + ' channel: ' + pazchannel +
' station: ' + pazstation)
PAZ = []
else:
continue
# Check that PAZ are for the correct station, channel and date
if PAZ or seedresp:
break
if PAZ:
return PAZ
elif seedresp:
return seedresp | [
"def",
"_find_resp",
"(",
"station",
",",
"channel",
",",
"network",
",",
"time",
",",
"delta",
",",
"directory",
")",
":",
"possible_respfiles",
"=",
"glob",
".",
"glob",
"(",
"directory",
"+",
"os",
".",
"path",
".",
"sep",
"+",
"'RESP.'",
"+",
"netw... | Helper function to find the response information.
Works for a given station and channel at a given time and return a
dictionary of poles and zeros, gain and sensitivity.
:type station: str
:param station: Station name (as in the response files)
:type channel: str
:param channel: Channel name (as in the response files)
:type network: str
:param network: Network to scan for, can be a wildcard
:type time: datetime.datetime
:param time: Date-time to look for repsonse information
:type delta: float
:param delta: Sample interval in seconds
:type directory: str
:param directory: Directory to scan for response information
:returns: dictionary of response information
:rtype: dict | [
"Helper",
"function",
"to",
"find",
"the",
"response",
"information",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L379-L459 | train | 203,361 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/mag_calc.py | _pairwise | def _pairwise(iterable):
"""
Wrapper on itertools for SVD_magnitude.
"""
a, b = itertools.tee(iterable)
next(b, None)
if sys.version_info.major == 2:
return itertools.izip(a, b)
else:
return zip(a, b) | python | def _pairwise(iterable):
"""
Wrapper on itertools for SVD_magnitude.
"""
a, b = itertools.tee(iterable)
next(b, None)
if sys.version_info.major == 2:
return itertools.izip(a, b)
else:
return zip(a, b) | [
"def",
"_pairwise",
"(",
"iterable",
")",
":",
"a",
",",
"b",
"=",
"itertools",
".",
"tee",
"(",
"iterable",
")",
"next",
"(",
"b",
",",
"None",
")",
"if",
"sys",
".",
"version_info",
".",
"major",
"==",
"2",
":",
"return",
"itertools",
".",
"izip"... | Wrapper on itertools for SVD_magnitude. | [
"Wrapper",
"on",
"itertools",
"for",
"SVD_magnitude",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L462-L471 | train | 203,362 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_utils.py | filter_picks | def filter_picks(catalog, stations=None, channels=None, networks=None,
locations=None, top_n_picks=None, evaluation_mode='all'):
"""
Filter events in the catalog based on a number of parameters.
:param catalog: Catalog to filter.
:type catalog: obspy.core.event.Catalog
:param stations: List for stations to keep picks from.
:type stations: list
:param channels: List of channels to keep picks from.
:type channels: list
:param networks: List of networks to keep picks from.
:type networks: list
:param locations: List of location codes to use
:type locations: list
:param top_n_picks: Filter only the top N most used station-channel pairs.
:type top_n_picks: int
:param evaluation_mode:
To select only manual or automatic picks, or use all (default).
:type evaluation_mode: str
:return:
Filtered Catalog - if events are left with no picks, they are removed
from the catalog.
:rtype: obspy.core.event.Catalog
.. note::
Will filter first by station, then by channel, then by network, if
using top_n_picks, this will be done last, after the other filters
have been applied.
.. note::
Doesn't work in place on the catalog, your input catalog will be safe
unless you overwrite it.
.. note:: Doesn't expand wildcard characters.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.utils.catalog_utils import filter_picks
>>> from obspy import UTCDateTime
>>> client = Client('NCEDC')
>>> t1 = UTCDateTime(2004, 9, 28)
>>> t2 = t1 + 86400
>>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3,
... minlatitude=35.7, maxlatitude=36.1,
... minlongitude=-120.6, maxlongitude=-120.2,
... includearrivals=True)
>>> print(len(catalog))
12
>>> filtered_catalog = filter_picks(catalog, stations=['BMS', 'BAP',
... 'PAG', 'PAN',
... 'PBI', 'PKY',
... 'YEG', 'WOF'])
>>> print(len(filtered_catalog))
12
>>> stations = []
>>> for event in filtered_catalog:
... for pick in event.picks:
... stations.append(pick.waveform_id.station_code)
>>> print(sorted(list(set(stations))))
['BAP', 'BMS', 'PAG', 'PAN', 'PBI', 'PKY', 'WOF', 'YEG']
"""
# Don't work in place on the catalog
filtered_catalog = catalog.copy()
if stations:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.station_code in stations]
if channels:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.channel_code in channels]
if networks:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.network_code in networks]
if locations:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.location_code in locations]
if evaluation_mode == 'manual':
for event in filtered_catalog:
event.picks = [pick for pick in event.picks
if pick.evaluation_mode == 'manual']
elif evaluation_mode == 'automatic':
for event in filtered_catalog:
event.picks = [pick for pick in event.picks
if pick.evaluation_mode == 'automatic']
elif evaluation_mode != 'all':
warnings.warn('Unrecognised evaluation_mode: %s, using all picks' %
evaluation_mode)
if top_n_picks:
all_picks = []
for event in filtered_catalog:
all_picks += [(pick.waveform_id.station_code,
pick.waveform_id.channel_code)
for pick in event.picks]
counted = Counter(all_picks).most_common()
all_picks = []
# Hack around sorting the counter object: Py 2 does it differently to 3
for i in range(counted[0][1]):
highest = [item[0] for item in counted
if item[1] >= counted[0][1] - i]
# Sort them by alphabetical order in station
highest = sorted(highest, key=lambda tup: tup[0])
for stachan in highest:
if stachan not in all_picks:
all_picks.append(stachan)
if len(all_picks) > top_n_picks:
all_picks = all_picks[0:top_n_picks]
break
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if (pick.waveform_id.station_code,
pick.waveform_id.channel_code) in all_picks]
# Remove events without picks
tmp_catalog = Catalog()
for event in filtered_catalog:
if len(event.picks) > 0:
tmp_catalog.append(event)
return tmp_catalog | python | def filter_picks(catalog, stations=None, channels=None, networks=None,
locations=None, top_n_picks=None, evaluation_mode='all'):
"""
Filter events in the catalog based on a number of parameters.
:param catalog: Catalog to filter.
:type catalog: obspy.core.event.Catalog
:param stations: List for stations to keep picks from.
:type stations: list
:param channels: List of channels to keep picks from.
:type channels: list
:param networks: List of networks to keep picks from.
:type networks: list
:param locations: List of location codes to use
:type locations: list
:param top_n_picks: Filter only the top N most used station-channel pairs.
:type top_n_picks: int
:param evaluation_mode:
To select only manual or automatic picks, or use all (default).
:type evaluation_mode: str
:return:
Filtered Catalog - if events are left with no picks, they are removed
from the catalog.
:rtype: obspy.core.event.Catalog
.. note::
Will filter first by station, then by channel, then by network, if
using top_n_picks, this will be done last, after the other filters
have been applied.
.. note::
Doesn't work in place on the catalog, your input catalog will be safe
unless you overwrite it.
.. note:: Doesn't expand wildcard characters.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.utils.catalog_utils import filter_picks
>>> from obspy import UTCDateTime
>>> client = Client('NCEDC')
>>> t1 = UTCDateTime(2004, 9, 28)
>>> t2 = t1 + 86400
>>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3,
... minlatitude=35.7, maxlatitude=36.1,
... minlongitude=-120.6, maxlongitude=-120.2,
... includearrivals=True)
>>> print(len(catalog))
12
>>> filtered_catalog = filter_picks(catalog, stations=['BMS', 'BAP',
... 'PAG', 'PAN',
... 'PBI', 'PKY',
... 'YEG', 'WOF'])
>>> print(len(filtered_catalog))
12
>>> stations = []
>>> for event in filtered_catalog:
... for pick in event.picks:
... stations.append(pick.waveform_id.station_code)
>>> print(sorted(list(set(stations))))
['BAP', 'BMS', 'PAG', 'PAN', 'PBI', 'PKY', 'WOF', 'YEG']
"""
# Don't work in place on the catalog
filtered_catalog = catalog.copy()
if stations:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.station_code in stations]
if channels:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.channel_code in channels]
if networks:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.network_code in networks]
if locations:
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if pick.waveform_id.location_code in locations]
if evaluation_mode == 'manual':
for event in filtered_catalog:
event.picks = [pick for pick in event.picks
if pick.evaluation_mode == 'manual']
elif evaluation_mode == 'automatic':
for event in filtered_catalog:
event.picks = [pick for pick in event.picks
if pick.evaluation_mode == 'automatic']
elif evaluation_mode != 'all':
warnings.warn('Unrecognised evaluation_mode: %s, using all picks' %
evaluation_mode)
if top_n_picks:
all_picks = []
for event in filtered_catalog:
all_picks += [(pick.waveform_id.station_code,
pick.waveform_id.channel_code)
for pick in event.picks]
counted = Counter(all_picks).most_common()
all_picks = []
# Hack around sorting the counter object: Py 2 does it differently to 3
for i in range(counted[0][1]):
highest = [item[0] for item in counted
if item[1] >= counted[0][1] - i]
# Sort them by alphabetical order in station
highest = sorted(highest, key=lambda tup: tup[0])
for stachan in highest:
if stachan not in all_picks:
all_picks.append(stachan)
if len(all_picks) > top_n_picks:
all_picks = all_picks[0:top_n_picks]
break
for event in filtered_catalog:
if len(event.picks) == 0:
continue
event.picks = [pick for pick in event.picks
if (pick.waveform_id.station_code,
pick.waveform_id.channel_code) in all_picks]
# Remove events without picks
tmp_catalog = Catalog()
for event in filtered_catalog:
if len(event.picks) > 0:
tmp_catalog.append(event)
return tmp_catalog | [
"def",
"filter_picks",
"(",
"catalog",
",",
"stations",
"=",
"None",
",",
"channels",
"=",
"None",
",",
"networks",
"=",
"None",
",",
"locations",
"=",
"None",
",",
"top_n_picks",
"=",
"None",
",",
"evaluation_mode",
"=",
"'all'",
")",
":",
"# Don't work i... | Filter events in the catalog based on a number of parameters.
:param catalog: Catalog to filter.
:type catalog: obspy.core.event.Catalog
:param stations: List for stations to keep picks from.
:type stations: list
:param channels: List of channels to keep picks from.
:type channels: list
:param networks: List of networks to keep picks from.
:type networks: list
:param locations: List of location codes to use
:type locations: list
:param top_n_picks: Filter only the top N most used station-channel pairs.
:type top_n_picks: int
:param evaluation_mode:
To select only manual or automatic picks, or use all (default).
:type evaluation_mode: str
:return:
Filtered Catalog - if events are left with no picks, they are removed
from the catalog.
:rtype: obspy.core.event.Catalog
.. note::
Will filter first by station, then by channel, then by network, if
using top_n_picks, this will be done last, after the other filters
have been applied.
.. note::
Doesn't work in place on the catalog, your input catalog will be safe
unless you overwrite it.
.. note:: Doesn't expand wildcard characters.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.utils.catalog_utils import filter_picks
>>> from obspy import UTCDateTime
>>> client = Client('NCEDC')
>>> t1 = UTCDateTime(2004, 9, 28)
>>> t2 = t1 + 86400
>>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3,
... minlatitude=35.7, maxlatitude=36.1,
... minlongitude=-120.6, maxlongitude=-120.2,
... includearrivals=True)
>>> print(len(catalog))
12
>>> filtered_catalog = filter_picks(catalog, stations=['BMS', 'BAP',
... 'PAG', 'PAN',
... 'PBI', 'PKY',
... 'YEG', 'WOF'])
>>> print(len(filtered_catalog))
12
>>> stations = []
>>> for event in filtered_catalog:
... for pick in event.picks:
... stations.append(pick.waveform_id.station_code)
>>> print(sorted(list(set(stations))))
['BAP', 'BMS', 'PAG', 'PAN', 'PBI', 'PKY', 'WOF', 'YEG'] | [
"Filter",
"events",
"in",
"the",
"catalog",
"based",
"on",
"a",
"number",
"of",
"parameters",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_utils.py#L28-L163 | train | 203,363 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_utils.py | spatial_clip | def spatial_clip(catalog, corners, mindepth=None, maxdepth=None):
"""
Clip the catalog to a spatial box, can be irregular.
Can only be irregular in 2D, depth must be between bounds.
:type catalog: :class:`obspy.core.catalog.Catalog`
:param catalog: Catalog to clip.
:type corners: :class:`matplotlib.path.Path`
:param corners: Corners to clip the catalog to
:type mindepth: float
:param mindepth: Minimum depth for earthquakes in km.
:type maxdepth: float
:param maxdepth: Maximum depth for earthquakes in km.
.. Note::
Corners is expected to be a :class:`matplotlib.path.Path` in the form
of tuples of (lat, lon) in decimal degrees.
"""
cat_out = catalog.copy()
if mindepth is not None:
for event in cat_out:
try:
origin = _get_origin(event)
except IOError:
continue
if origin.depth < mindepth * 1000:
cat_out.events.remove(event)
if maxdepth is not None:
for event in cat_out:
try:
origin = _get_origin(event)
except IOError:
continue
if origin.depth > maxdepth * 1000:
cat_out.events.remove(event)
for event in cat_out:
try:
origin = _get_origin(event)
except IOError:
continue
if not corners.contains_point((origin.latitude, origin.longitude)):
cat_out.events.remove(event)
return cat_out | python | def spatial_clip(catalog, corners, mindepth=None, maxdepth=None):
"""
Clip the catalog to a spatial box, can be irregular.
Can only be irregular in 2D, depth must be between bounds.
:type catalog: :class:`obspy.core.catalog.Catalog`
:param catalog: Catalog to clip.
:type corners: :class:`matplotlib.path.Path`
:param corners: Corners to clip the catalog to
:type mindepth: float
:param mindepth: Minimum depth for earthquakes in km.
:type maxdepth: float
:param maxdepth: Maximum depth for earthquakes in km.
.. Note::
Corners is expected to be a :class:`matplotlib.path.Path` in the form
of tuples of (lat, lon) in decimal degrees.
"""
cat_out = catalog.copy()
if mindepth is not None:
for event in cat_out:
try:
origin = _get_origin(event)
except IOError:
continue
if origin.depth < mindepth * 1000:
cat_out.events.remove(event)
if maxdepth is not None:
for event in cat_out:
try:
origin = _get_origin(event)
except IOError:
continue
if origin.depth > maxdepth * 1000:
cat_out.events.remove(event)
for event in cat_out:
try:
origin = _get_origin(event)
except IOError:
continue
if not corners.contains_point((origin.latitude, origin.longitude)):
cat_out.events.remove(event)
return cat_out | [
"def",
"spatial_clip",
"(",
"catalog",
",",
"corners",
",",
"mindepth",
"=",
"None",
",",
"maxdepth",
"=",
"None",
")",
":",
"cat_out",
"=",
"catalog",
".",
"copy",
"(",
")",
"if",
"mindepth",
"is",
"not",
"None",
":",
"for",
"event",
"in",
"cat_out",
... | Clip the catalog to a spatial box, can be irregular.
Can only be irregular in 2D, depth must be between bounds.
:type catalog: :class:`obspy.core.catalog.Catalog`
:param catalog: Catalog to clip.
:type corners: :class:`matplotlib.path.Path`
:param corners: Corners to clip the catalog to
:type mindepth: float
:param mindepth: Minimum depth for earthquakes in km.
:type maxdepth: float
:param maxdepth: Maximum depth for earthquakes in km.
.. Note::
Corners is expected to be a :class:`matplotlib.path.Path` in the form
of tuples of (lat, lon) in decimal degrees. | [
"Clip",
"the",
"catalog",
"to",
"a",
"spatial",
"box",
"can",
"be",
"irregular",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_utils.py#L166-L209 | train | 203,364 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_utils.py | _get_origin | def _get_origin(event):
"""
Get the origin of an event.
:type event: :class:`obspy.core.event.Event`
:param event: Event to get the origin of.
:return: :class:`obspy.core.event.Origin`
"""
if event.preferred_origin() is not None:
origin = event.preferred_origin()
elif len(event.origins) > 0:
origin = event.origins[0]
else:
raise IndexError('No origin set, cannot constrain')
return origin | python | def _get_origin(event):
"""
Get the origin of an event.
:type event: :class:`obspy.core.event.Event`
:param event: Event to get the origin of.
:return: :class:`obspy.core.event.Origin`
"""
if event.preferred_origin() is not None:
origin = event.preferred_origin()
elif len(event.origins) > 0:
origin = event.origins[0]
else:
raise IndexError('No origin set, cannot constrain')
return origin | [
"def",
"_get_origin",
"(",
"event",
")",
":",
"if",
"event",
".",
"preferred_origin",
"(",
")",
"is",
"not",
"None",
":",
"origin",
"=",
"event",
".",
"preferred_origin",
"(",
")",
"elif",
"len",
"(",
"event",
".",
"origins",
")",
">",
"0",
":",
"ori... | Get the origin of an event.
:type event: :class:`obspy.core.event.Event`
:param event: Event to get the origin of.
:return: :class:`obspy.core.event.Origin` | [
"Get",
"the",
"origin",
"of",
"an",
"event",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_utils.py#L212-L226 | train | 203,365 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/parameters.py | read_parameters | def read_parameters(infile='../parameters/EQcorrscan_parameters.txt'):
"""
Read the default parameters from file.
:type infile: str
:param infile: Full path to parameter file.
:returns: parameters read from file.
:rtype: :class:`eqcorrscan.utils.parameters.EQcorrscanParameters`
"""
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import ast
f = open(infile, 'r')
print('Reading parameters with the following header:')
for line in f:
if line[0] == '#':
print(line.rstrip('\n').lstrip('\n'))
f.close()
config = ConfigParser.ConfigParser()
config.read(infile)
# Slightly tricky list reading
template_names = list(ast.literal_eval(config.get("eqcorrscan_pars",
"template_names")))
parameters = \
EQcorrscanParameters(template_names=template_names,
lowcut=config.get("eqcorrscan_pars", "lowcut"),
highcut=config.get("eqcorrscan_pars", "highcut"),
filt_order=config.get("eqcorrscan_pars",
"filt_order"),
samp_rate=config.get("eqcorrscan_pars",
"samp_rate"),
debug=config.get("eqcorrscan_pars", "debug"),
startdate=config.get("eqcorrscan_pars",
"startdate"),
enddate=config.get("eqcorrscan_pars", "enddate"),
archive=config.get("eqcorrscan_pars", "archive"),
arc_type=config.get("eqcorrscan_pars",
"arc_type"),
cores=config.get("eqcorrscan_pars", "cores"),
plotvar=config.getboolean("eqcorrscan_pars",
"plotvar"),
plotdir=config.get("eqcorrscan_pars", "plotdir"),
plot_format=config.get("eqcorrscan_pars",
"plot_format"),
tempdir=ast.literal_eval(config.
get("eqcorrscan_pars",
"tempdir")),
threshold=config.get("eqcorrscan_pars",
"threshold"),
threshold_type=config.get("eqcorrscan_pars",
"threshold_type"),
trigger_interval=config.get("eqcorrscan_pars",
"trigger_interval")
)
return parameters | python | def read_parameters(infile='../parameters/EQcorrscan_parameters.txt'):
"""
Read the default parameters from file.
:type infile: str
:param infile: Full path to parameter file.
:returns: parameters read from file.
:rtype: :class:`eqcorrscan.utils.parameters.EQcorrscanParameters`
"""
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import ast
f = open(infile, 'r')
print('Reading parameters with the following header:')
for line in f:
if line[0] == '#':
print(line.rstrip('\n').lstrip('\n'))
f.close()
config = ConfigParser.ConfigParser()
config.read(infile)
# Slightly tricky list reading
template_names = list(ast.literal_eval(config.get("eqcorrscan_pars",
"template_names")))
parameters = \
EQcorrscanParameters(template_names=template_names,
lowcut=config.get("eqcorrscan_pars", "lowcut"),
highcut=config.get("eqcorrscan_pars", "highcut"),
filt_order=config.get("eqcorrscan_pars",
"filt_order"),
samp_rate=config.get("eqcorrscan_pars",
"samp_rate"),
debug=config.get("eqcorrscan_pars", "debug"),
startdate=config.get("eqcorrscan_pars",
"startdate"),
enddate=config.get("eqcorrscan_pars", "enddate"),
archive=config.get("eqcorrscan_pars", "archive"),
arc_type=config.get("eqcorrscan_pars",
"arc_type"),
cores=config.get("eqcorrscan_pars", "cores"),
plotvar=config.getboolean("eqcorrscan_pars",
"plotvar"),
plotdir=config.get("eqcorrscan_pars", "plotdir"),
plot_format=config.get("eqcorrscan_pars",
"plot_format"),
tempdir=ast.literal_eval(config.
get("eqcorrscan_pars",
"tempdir")),
threshold=config.get("eqcorrscan_pars",
"threshold"),
threshold_type=config.get("eqcorrscan_pars",
"threshold_type"),
trigger_interval=config.get("eqcorrscan_pars",
"trigger_interval")
)
return parameters | [
"def",
"read_parameters",
"(",
"infile",
"=",
"'../parameters/EQcorrscan_parameters.txt'",
")",
":",
"try",
":",
"import",
"ConfigParser",
"except",
"ImportError",
":",
"import",
"configparser",
"as",
"ConfigParser",
"import",
"ast",
"f",
"=",
"open",
"(",
"infile",... | Read the default parameters from file.
:type infile: str
:param infile: Full path to parameter file.
:returns: parameters read from file.
:rtype: :class:`eqcorrscan.utils.parameters.EQcorrscanParameters` | [
"Read",
"the",
"default",
"parameters",
"from",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/parameters.py#L195-L253 | train | 203,366 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/parameters.py | EQcorrscanParameters.write | def write(self, outfile='../parameters/EQcorrscan_parameters.txt',
overwrite=False):
"""
Function to write the parameters to a file - user readable.
:type outfile: str
:param outfile: Full path to filename to store parameters in.
:type overwrite: bool
:param overwrite: Whether to overwrite the old file or not.
"""
outpath = os.sep.join(outfile.split(os.sep)[0:-1])
if len(outpath) > 0 and not os.path.isdir(outpath):
msg = ' '.join([os.path.join(outfile.split(os.sep)[0:-1]),
'does not exist, check path.'])
raise IOError(msg)
# Make sure that the user wants to overwrite the old parameters
if os.path.isfile(outfile) and not overwrite:
responding = True
while responding:
print(' '.join([outfile, 'exists. Overwrite? [y/N]']))
option = raw_input()
if option.upper() == 'N':
raise IOError('File exists, will not overwrite')
elif option.upper() == 'Y':
responding = False
else:
print('Must respond with y or n')
f = open(outfile, 'w')
# Write creation info.
header = ' '.join(['# User:', getpass.getuser(),
'\n# Creation date:', str(UTCDateTime()),
'\n# EQcorrscan version:',
str(eqcorrscan.__version__),
'\n\n\n'])
f.write(header)
# Write parameter info in a user readable, and parsable format.
parameters = self.__str__().split('\n')[1:]
f.write('[eqcorrscan_pars]\n')
for parameter in parameters:
f.write(parameter.lstrip() + '\n')
f.close()
print('Written parameter file: ' + outfile) | python | def write(self, outfile='../parameters/EQcorrscan_parameters.txt',
overwrite=False):
"""
Function to write the parameters to a file - user readable.
:type outfile: str
:param outfile: Full path to filename to store parameters in.
:type overwrite: bool
:param overwrite: Whether to overwrite the old file or not.
"""
outpath = os.sep.join(outfile.split(os.sep)[0:-1])
if len(outpath) > 0 and not os.path.isdir(outpath):
msg = ' '.join([os.path.join(outfile.split(os.sep)[0:-1]),
'does not exist, check path.'])
raise IOError(msg)
# Make sure that the user wants to overwrite the old parameters
if os.path.isfile(outfile) and not overwrite:
responding = True
while responding:
print(' '.join([outfile, 'exists. Overwrite? [y/N]']))
option = raw_input()
if option.upper() == 'N':
raise IOError('File exists, will not overwrite')
elif option.upper() == 'Y':
responding = False
else:
print('Must respond with y or n')
f = open(outfile, 'w')
# Write creation info.
header = ' '.join(['# User:', getpass.getuser(),
'\n# Creation date:', str(UTCDateTime()),
'\n# EQcorrscan version:',
str(eqcorrscan.__version__),
'\n\n\n'])
f.write(header)
# Write parameter info in a user readable, and parsable format.
parameters = self.__str__().split('\n')[1:]
f.write('[eqcorrscan_pars]\n')
for parameter in parameters:
f.write(parameter.lstrip() + '\n')
f.close()
print('Written parameter file: ' + outfile) | [
"def",
"write",
"(",
"self",
",",
"outfile",
"=",
"'../parameters/EQcorrscan_parameters.txt'",
",",
"overwrite",
"=",
"False",
")",
":",
"outpath",
"=",
"os",
".",
"sep",
".",
"join",
"(",
"outfile",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"0",
... | Function to write the parameters to a file - user readable.
:type outfile: str
:param outfile: Full path to filename to store parameters in.
:type overwrite: bool
:param overwrite: Whether to overwrite the old file or not. | [
"Function",
"to",
"write",
"the",
"parameters",
"to",
"a",
"file",
"-",
"user",
"readable",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/parameters.py#L151-L192 | train | 203,367 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/pre_processing.py | _check_daylong | def _check_daylong(tr):
"""
Check the data quality of the daylong file.
Check to see that the day isn't just zeros, with large steps, if it is
then the resampling will hate it.
:type tr: obspy.core.trace.Trace
:param tr: Trace to check if the data are daylong.
:return quality (simply good or bad)
:rtype: bool
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import _check_daylong
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/' +
... '2013-09-01-0410-35.DFDPC_024_00')
>>> _check_daylong(st[0])
True
"""
if len(np.nonzero(tr.data)[0]) < 0.5 * len(tr.data):
qual = False
else:
qual = True
return qual | python | def _check_daylong(tr):
"""
Check the data quality of the daylong file.
Check to see that the day isn't just zeros, with large steps, if it is
then the resampling will hate it.
:type tr: obspy.core.trace.Trace
:param tr: Trace to check if the data are daylong.
:return quality (simply good or bad)
:rtype: bool
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import _check_daylong
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/' +
... '2013-09-01-0410-35.DFDPC_024_00')
>>> _check_daylong(st[0])
True
"""
if len(np.nonzero(tr.data)[0]) < 0.5 * len(tr.data):
qual = False
else:
qual = True
return qual | [
"def",
"_check_daylong",
"(",
"tr",
")",
":",
"if",
"len",
"(",
"np",
".",
"nonzero",
"(",
"tr",
".",
"data",
")",
"[",
"0",
"]",
")",
"<",
"0.5",
"*",
"len",
"(",
"tr",
".",
"data",
")",
":",
"qual",
"=",
"False",
"else",
":",
"qual",
"=",
... | Check the data quality of the daylong file.
Check to see that the day isn't just zeros, with large steps, if it is
then the resampling will hate it.
:type tr: obspy.core.trace.Trace
:param tr: Trace to check if the data are daylong.
:return quality (simply good or bad)
:rtype: bool
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import _check_daylong
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/' +
... '2013-09-01-0410-35.DFDPC_024_00')
>>> _check_daylong(st[0])
True | [
"Check",
"the",
"data",
"quality",
"of",
"the",
"daylong",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/pre_processing.py#L27-L57 | train | 203,368 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/pre_processing.py | shortproc | def shortproc(st, lowcut, highcut, filt_order, samp_rate, debug=0,
parallel=False, num_cores=False, starttime=None, endtime=None,
seisan_chan_names=False, fill_gaps=True):
"""
Basic function to bandpass and downsample.
Works in place on data. This is employed to ensure all parts of the
data are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process
:type lowcut: float
:param lowcut: Low cut for bandpass in Hz
:type highcut: float
:param highcut: High cut for bandpass in Hz
:type filt_order: int
:param filt_order: Number of corners for bandpass filter
:type samp_rate: float
:param samp_rate: Sampling rate desired in Hz
:type debug: int
:param debug: Debug flag from 0-5, higher numbers = more output
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, for small numbers of traces
this is often slower than serial processing, defaults to False
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores available.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime:
Desired data start time, will trim to this before processing
:type endtime: obspy.core.utcdatetime.UTCDateTime
:param endtime:
Desired data end time, will trim to this before processing
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
rather than SEED convention of three) - defaults to True.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:return: Processed stream
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
If you intend to use this for processing templates you should consider
how resampling will impact your cross-correlations. Minor differences
in resampling between day-long files (which you are likely to use for
continuous detection) and shorter files will reduce your
cross-correlations!
.. rubric:: Example, bandpass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... debug=0, parallel=True, num_cores=2)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, low-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=None, highcut=9, filt_order=3,
... samp_rate=20, debug=0)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, high-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=None, filt_order=3,
... samp_rate=20, debug=0)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
"""
if isinstance(st, Trace):
tracein = True
st = Stream(st)
else:
tracein = False
# Add sanity check for filter
if highcut and highcut >= 0.5 * samp_rate:
raise IOError('Highcut must be lower than the nyquist')
if debug > 4:
parallel = False
length = None
clip = False
if starttime is not None and endtime is not None:
for tr in st:
tr.trim(starttime, endtime)
if len(tr.data) == ((endtime - starttime) *
tr.stats.sampling_rate) + 1:
tr.data = tr.data[1:len(tr.data)]
length = endtime - starttime
clip = True
elif starttime:
for tr in st:
tr.trim(starttime=starttime)
elif endtime:
for tr in st:
tr.trim(endtime=endtime)
for tr in st:
if len(tr.data) == 0:
st.remove(tr)
debug_print('No data for %s.%s after trim' %
(tr.stats.station, tr.stats.channel), 1, debug)
if parallel:
if not num_cores:
num_cores = cpu_count()
if num_cores > len(st):
num_cores = len(st)
pool = Pool(processes=num_cores)
results = [pool.apply_async(process, (tr,), {
'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order,
'samp_rate': samp_rate, 'debug': debug, 'starttime': starttime,
'clip': clip, 'seisan_chan_names': seisan_chan_names,
'fill_gaps': fill_gaps, 'length': length})
for tr in st]
pool.close()
try:
stream_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
st = Stream(stream_list)
else:
for i, tr in enumerate(st):
st[i] = process(
tr=tr, lowcut=lowcut, highcut=highcut, filt_order=filt_order,
samp_rate=samp_rate, debug=debug, starttime=starttime,
clip=clip, seisan_chan_names=seisan_chan_names,
fill_gaps=fill_gaps, length=length)
if tracein:
st.merge()
return st[0]
return st | python | def shortproc(st, lowcut, highcut, filt_order, samp_rate, debug=0,
parallel=False, num_cores=False, starttime=None, endtime=None,
seisan_chan_names=False, fill_gaps=True):
"""
Basic function to bandpass and downsample.
Works in place on data. This is employed to ensure all parts of the
data are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process
:type lowcut: float
:param lowcut: Low cut for bandpass in Hz
:type highcut: float
:param highcut: High cut for bandpass in Hz
:type filt_order: int
:param filt_order: Number of corners for bandpass filter
:type samp_rate: float
:param samp_rate: Sampling rate desired in Hz
:type debug: int
:param debug: Debug flag from 0-5, higher numbers = more output
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, for small numbers of traces
this is often slower than serial processing, defaults to False
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores available.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime:
Desired data start time, will trim to this before processing
:type endtime: obspy.core.utcdatetime.UTCDateTime
:param endtime:
Desired data end time, will trim to this before processing
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
rather than SEED convention of three) - defaults to True.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:return: Processed stream
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
If you intend to use this for processing templates you should consider
how resampling will impact your cross-correlations. Minor differences
in resampling between day-long files (which you are likely to use for
continuous detection) and shorter files will reduce your
cross-correlations!
.. rubric:: Example, bandpass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... debug=0, parallel=True, num_cores=2)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, low-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=None, highcut=9, filt_order=3,
... samp_rate=20, debug=0)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, high-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=None, filt_order=3,
... samp_rate=20, debug=0)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
"""
if isinstance(st, Trace):
tracein = True
st = Stream(st)
else:
tracein = False
# Add sanity check for filter
if highcut and highcut >= 0.5 * samp_rate:
raise IOError('Highcut must be lower than the nyquist')
if debug > 4:
parallel = False
length = None
clip = False
if starttime is not None and endtime is not None:
for tr in st:
tr.trim(starttime, endtime)
if len(tr.data) == ((endtime - starttime) *
tr.stats.sampling_rate) + 1:
tr.data = tr.data[1:len(tr.data)]
length = endtime - starttime
clip = True
elif starttime:
for tr in st:
tr.trim(starttime=starttime)
elif endtime:
for tr in st:
tr.trim(endtime=endtime)
for tr in st:
if len(tr.data) == 0:
st.remove(tr)
debug_print('No data for %s.%s after trim' %
(tr.stats.station, tr.stats.channel), 1, debug)
if parallel:
if not num_cores:
num_cores = cpu_count()
if num_cores > len(st):
num_cores = len(st)
pool = Pool(processes=num_cores)
results = [pool.apply_async(process, (tr,), {
'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order,
'samp_rate': samp_rate, 'debug': debug, 'starttime': starttime,
'clip': clip, 'seisan_chan_names': seisan_chan_names,
'fill_gaps': fill_gaps, 'length': length})
for tr in st]
pool.close()
try:
stream_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
st = Stream(stream_list)
else:
for i, tr in enumerate(st):
st[i] = process(
tr=tr, lowcut=lowcut, highcut=highcut, filt_order=filt_order,
samp_rate=samp_rate, debug=debug, starttime=starttime,
clip=clip, seisan_chan_names=seisan_chan_names,
fill_gaps=fill_gaps, length=length)
if tracein:
st.merge()
return st[0]
return st | [
"def",
"shortproc",
"(",
"st",
",",
"lowcut",
",",
"highcut",
",",
"filt_order",
",",
"samp_rate",
",",
"debug",
"=",
"0",
",",
"parallel",
"=",
"False",
",",
"num_cores",
"=",
"False",
",",
"starttime",
"=",
"None",
",",
"endtime",
"=",
"None",
",",
... | Basic function to bandpass and downsample.
Works in place on data. This is employed to ensure all parts of the
data are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process
:type lowcut: float
:param lowcut: Low cut for bandpass in Hz
:type highcut: float
:param highcut: High cut for bandpass in Hz
:type filt_order: int
:param filt_order: Number of corners for bandpass filter
:type samp_rate: float
:param samp_rate: Sampling rate desired in Hz
:type debug: int
:param debug: Debug flag from 0-5, higher numbers = more output
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, for small numbers of traces
this is often slower than serial processing, defaults to False
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores available.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime:
Desired data start time, will trim to this before processing
:type endtime: obspy.core.utcdatetime.UTCDateTime
:param endtime:
Desired data end time, will trim to this before processing
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
rather than SEED convention of three) - defaults to True.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:return: Processed stream
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
If you intend to use this for processing templates you should consider
how resampling will impact your cross-correlations. Minor differences
in resampling between day-long files (which you are likely to use for
continuous detection) and shorter files will reduce your
cross-correlations!
.. rubric:: Example, bandpass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... debug=0, parallel=True, num_cores=2)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, low-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=None, highcut=9, filt_order=3,
... samp_rate=20, debug=0)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, high-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=None, filt_order=3,
... samp_rate=20, debug=0)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples | [
"Basic",
"function",
"to",
"bandpass",
"and",
"downsample",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/pre_processing.py#L60-L226 | train | 203,369 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/pre_processing.py | dayproc | def dayproc(st, lowcut, highcut, filt_order, samp_rate, starttime, debug=0,
parallel=True, num_cores=False, ignore_length=False,
seisan_chan_names=False, fill_gaps=True):
"""
Wrapper for dayproc to parallel multiple traces in a stream.
Works in place on data. This is employed to ensure all parts of the data \
are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process (can be trace).
:type lowcut: float
:param lowcut: Low cut in Hz for bandpass.
:type highcut: float
:param highcut: High cut in Hz for bandpass.
:type filt_order: int
:param filt_order: Corners for bandpass.
:type samp_rate: float
:param samp_rate: Desired sampling rate in Hz.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Desired start-date of trace.
:type debug: int
:param debug: Debug output level from 0-5, higher numbers = more output.
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, this is often faster than
serial processing of traces: defaults to True.
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores.
:type ignore_length: bool
:param ignore_length: See warning below.
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
rather than SEED convention of three) - defaults to True.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:return: Processed stream.
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
Will fail if data are less than 19.2 hours long - this number is
arbitrary and is chosen to alert the user to the dangers of padding
to day-long, if you don't care you can ignore this error by setting
`ignore_length=True`. Use this option at your own risk! It will also
warn any-time it has to pad data - if you see strange artifacts in your
detections, check whether the data have gaps.
.. rubric:: Example
>>> import obspy
>>> if int(obspy.__version__.split('.')[0]) >= 1:
... from obspy.clients.fdsn import Client
... else:
... from obspy.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.pre_processing import dayproc
>>> client = Client('NCEDC')
>>> t1 = UTCDateTime(2012, 3, 26)
>>> t2 = t1 + 86400
>>> bulk_info = [('BP', 'JCNB', '40', 'SP1', t1, t2)]
>>> st = client.get_waveforms_bulk(bulk_info)
>>> st_keep = st.copy() # Copy the stream for later examples
>>> # Example of bandpass filtering
>>> st = dayproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
>>> # Example of lowpass filtering
>>> st = dayproc(st=st, lowcut=None, highcut=9, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
>>> # Example of highpass filtering
>>> st = dayproc(st=st, lowcut=2, highcut=None, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
"""
# Add sanity check for filter
if isinstance(st, Trace):
st = Stream(st)
tracein = True
else:
tracein = False
if highcut and highcut >= 0.5 * samp_rate:
raise IOError('Highcut must be lower than the nyquist')
if debug > 4:
parallel = False
# Set the start-time to a day start - cope with
if starttime is None:
startdates = []
for tr in st:
if abs(tr.stats.starttime - (UTCDateTime(
tr.stats.starttime.date) + 86400)) < tr.stats.delta:
# If the trace starts within 1 sample of the next day, use the
# next day as the startdate
startdates.append((tr.stats.starttime + 86400).date)
debug_print(
'{0} starts within 1 sample of the next day, using this '
'time {1}'.format(
tr.id, (tr.stats.starttime + 86400).date), 2, debug)
else:
startdates.append(tr.stats.starttime.date)
# Check that all traces start on the same date...
if not len(set(startdates)) == 1:
raise NotImplementedError('Traces start on different days')
starttime = UTCDateTime(startdates[0])
if parallel:
if not num_cores:
num_cores = cpu_count()
if num_cores > len(st):
num_cores = len(st)
pool = Pool(processes=num_cores)
results = [pool.apply_async(process, (tr,), {
'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order,
'samp_rate': samp_rate, 'debug': debug, 'starttime': starttime,
'clip': True, 'ignore_length': ignore_length, 'length': 86400,
'seisan_chan_names': seisan_chan_names, 'fill_gaps': fill_gaps})
for tr in st]
pool.close()
try:
stream_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
st = Stream(stream_list)
else:
for i, tr in enumerate(st):
st[i] = process(
tr=tr, lowcut=lowcut, highcut=highcut, filt_order=filt_order,
samp_rate=samp_rate, debug=debug, starttime=starttime,
clip=True, length=86400, ignore_length=ignore_length,
seisan_chan_names=seisan_chan_names, fill_gaps=fill_gaps)
for tr in st:
if len(tr.data) == 0:
st.remove(tr)
if tracein:
st.merge()
return st[0]
return st | python | def dayproc(st, lowcut, highcut, filt_order, samp_rate, starttime, debug=0,
parallel=True, num_cores=False, ignore_length=False,
seisan_chan_names=False, fill_gaps=True):
"""
Wrapper for dayproc to parallel multiple traces in a stream.
Works in place on data. This is employed to ensure all parts of the data \
are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process (can be trace).
:type lowcut: float
:param lowcut: Low cut in Hz for bandpass.
:type highcut: float
:param highcut: High cut in Hz for bandpass.
:type filt_order: int
:param filt_order: Corners for bandpass.
:type samp_rate: float
:param samp_rate: Desired sampling rate in Hz.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Desired start-date of trace.
:type debug: int
:param debug: Debug output level from 0-5, higher numbers = more output.
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, this is often faster than
serial processing of traces: defaults to True.
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores.
:type ignore_length: bool
:param ignore_length: See warning below.
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
rather than SEED convention of three) - defaults to True.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:return: Processed stream.
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
Will fail if data are less than 19.2 hours long - this number is
arbitrary and is chosen to alert the user to the dangers of padding
to day-long, if you don't care you can ignore this error by setting
`ignore_length=True`. Use this option at your own risk! It will also
warn any-time it has to pad data - if you see strange artifacts in your
detections, check whether the data have gaps.
.. rubric:: Example
>>> import obspy
>>> if int(obspy.__version__.split('.')[0]) >= 1:
... from obspy.clients.fdsn import Client
... else:
... from obspy.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.pre_processing import dayproc
>>> client = Client('NCEDC')
>>> t1 = UTCDateTime(2012, 3, 26)
>>> t2 = t1 + 86400
>>> bulk_info = [('BP', 'JCNB', '40', 'SP1', t1, t2)]
>>> st = client.get_waveforms_bulk(bulk_info)
>>> st_keep = st.copy() # Copy the stream for later examples
>>> # Example of bandpass filtering
>>> st = dayproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
>>> # Example of lowpass filtering
>>> st = dayproc(st=st, lowcut=None, highcut=9, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
>>> # Example of highpass filtering
>>> st = dayproc(st=st, lowcut=2, highcut=None, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
"""
# Add sanity check for filter
if isinstance(st, Trace):
st = Stream(st)
tracein = True
else:
tracein = False
if highcut and highcut >= 0.5 * samp_rate:
raise IOError('Highcut must be lower than the nyquist')
if debug > 4:
parallel = False
# Set the start-time to a day start - cope with
if starttime is None:
startdates = []
for tr in st:
if abs(tr.stats.starttime - (UTCDateTime(
tr.stats.starttime.date) + 86400)) < tr.stats.delta:
# If the trace starts within 1 sample of the next day, use the
# next day as the startdate
startdates.append((tr.stats.starttime + 86400).date)
debug_print(
'{0} starts within 1 sample of the next day, using this '
'time {1}'.format(
tr.id, (tr.stats.starttime + 86400).date), 2, debug)
else:
startdates.append(tr.stats.starttime.date)
# Check that all traces start on the same date...
if not len(set(startdates)) == 1:
raise NotImplementedError('Traces start on different days')
starttime = UTCDateTime(startdates[0])
if parallel:
if not num_cores:
num_cores = cpu_count()
if num_cores > len(st):
num_cores = len(st)
pool = Pool(processes=num_cores)
results = [pool.apply_async(process, (tr,), {
'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order,
'samp_rate': samp_rate, 'debug': debug, 'starttime': starttime,
'clip': True, 'ignore_length': ignore_length, 'length': 86400,
'seisan_chan_names': seisan_chan_names, 'fill_gaps': fill_gaps})
for tr in st]
pool.close()
try:
stream_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
st = Stream(stream_list)
else:
for i, tr in enumerate(st):
st[i] = process(
tr=tr, lowcut=lowcut, highcut=highcut, filt_order=filt_order,
samp_rate=samp_rate, debug=debug, starttime=starttime,
clip=True, length=86400, ignore_length=ignore_length,
seisan_chan_names=seisan_chan_names, fill_gaps=fill_gaps)
for tr in st:
if len(tr.data) == 0:
st.remove(tr)
if tracein:
st.merge()
return st[0]
return st | [
"def",
"dayproc",
"(",
"st",
",",
"lowcut",
",",
"highcut",
",",
"filt_order",
",",
"samp_rate",
",",
"starttime",
",",
"debug",
"=",
"0",
",",
"parallel",
"=",
"True",
",",
"num_cores",
"=",
"False",
",",
"ignore_length",
"=",
"False",
",",
"seisan_chan... | Wrapper for dayproc to parallel multiple traces in a stream.
Works in place on data. This is employed to ensure all parts of the data \
are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process (can be trace).
:type lowcut: float
:param lowcut: Low cut in Hz for bandpass.
:type highcut: float
:param highcut: High cut in Hz for bandpass.
:type filt_order: int
:param filt_order: Corners for bandpass.
:type samp_rate: float
:param samp_rate: Desired sampling rate in Hz.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Desired start-date of trace.
:type debug: int
:param debug: Debug output level from 0-5, higher numbers = more output.
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, this is often faster than
serial processing of traces: defaults to True.
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores.
:type ignore_length: bool
:param ignore_length: See warning below.
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
rather than SEED convention of three) - defaults to True.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:return: Processed stream.
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
Will fail if data are less than 19.2 hours long - this number is
arbitrary and is chosen to alert the user to the dangers of padding
to day-long, if you don't care you can ignore this error by setting
`ignore_length=True`. Use this option at your own risk! It will also
warn any-time it has to pad data - if you see strange artifacts in your
detections, check whether the data have gaps.
.. rubric:: Example
>>> import obspy
>>> if int(obspy.__version__.split('.')[0]) >= 1:
... from obspy.clients.fdsn import Client
... else:
... from obspy.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.pre_processing import dayproc
>>> client = Client('NCEDC')
>>> t1 = UTCDateTime(2012, 3, 26)
>>> t2 = t1 + 86400
>>> bulk_info = [('BP', 'JCNB', '40', 'SP1', t1, t2)]
>>> st = client.get_waveforms_bulk(bulk_info)
>>> st_keep = st.copy() # Copy the stream for later examples
>>> # Example of bandpass filtering
>>> st = dayproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
>>> # Example of lowpass filtering
>>> st = dayproc(st=st, lowcut=None, highcut=9, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
>>> # Example of highpass filtering
>>> st = dayproc(st=st, lowcut=2, highcut=None, filt_order=3, samp_rate=20,
... starttime=t1, debug=0, parallel=True, num_cores=2)
>>> print(st[0])
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples | [
"Wrapper",
"for",
"dayproc",
"to",
"parallel",
"multiple",
"traces",
"in",
"a",
"stream",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/pre_processing.py#L229-L384 | train | 203,370 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/pre_processing.py | _zero_pad_gaps | def _zero_pad_gaps(tr, gaps, fill_gaps=True):
"""
Replace padded parts of trace with zeros.
Will cut around gaps, detrend, then pad the gaps with zeros.
:type tr: :class:`osbpy.core.stream.Trace`
:param tr: A trace that has had the gaps padded
:param gaps: List of dict of start-time and end-time as UTCDateTime objects
:type gaps: list
:return: :class:`obspy.core.stream.Trace`
"""
start_in, end_in = (tr.stats.starttime, tr.stats.endtime)
for gap in gaps:
stream = Stream()
if gap['starttime'] > tr.stats.starttime:
stream += tr.slice(tr.stats.starttime, gap['starttime']).copy()
if gap['endtime'] < tr.stats.endtime:
# Note this can happen when gaps are calculated for a trace that
# is longer than `length`, e.g. gaps are calculated pre-trim.
stream += tr.slice(gap['endtime'], tr.stats.endtime).copy()
tr = stream.merge()[0]
if fill_gaps:
tr = tr.split()
tr = tr.detrend()
tr = tr.merge(fill_value=0)[0]
# Need to check length - if a gap happened overlapping the end or start
# of the trace this will be lost.
if tr.stats.starttime != start_in:
# pad with zeros
tr.data = np.concatenate(
[np.zeros(int(tr.stats.starttime - start_in)), tr.data])
tr.stats.starttime = start_in
if tr.stats.endtime != end_in:
tr.data = np.concatenate(
[tr.data, np.zeros(int(end_in - tr.stats.endtime))])
return tr | python | def _zero_pad_gaps(tr, gaps, fill_gaps=True):
"""
Replace padded parts of trace with zeros.
Will cut around gaps, detrend, then pad the gaps with zeros.
:type tr: :class:`osbpy.core.stream.Trace`
:param tr: A trace that has had the gaps padded
:param gaps: List of dict of start-time and end-time as UTCDateTime objects
:type gaps: list
:return: :class:`obspy.core.stream.Trace`
"""
start_in, end_in = (tr.stats.starttime, tr.stats.endtime)
for gap in gaps:
stream = Stream()
if gap['starttime'] > tr.stats.starttime:
stream += tr.slice(tr.stats.starttime, gap['starttime']).copy()
if gap['endtime'] < tr.stats.endtime:
# Note this can happen when gaps are calculated for a trace that
# is longer than `length`, e.g. gaps are calculated pre-trim.
stream += tr.slice(gap['endtime'], tr.stats.endtime).copy()
tr = stream.merge()[0]
if fill_gaps:
tr = tr.split()
tr = tr.detrend()
tr = tr.merge(fill_value=0)[0]
# Need to check length - if a gap happened overlapping the end or start
# of the trace this will be lost.
if tr.stats.starttime != start_in:
# pad with zeros
tr.data = np.concatenate(
[np.zeros(int(tr.stats.starttime - start_in)), tr.data])
tr.stats.starttime = start_in
if tr.stats.endtime != end_in:
tr.data = np.concatenate(
[tr.data, np.zeros(int(end_in - tr.stats.endtime))])
return tr | [
"def",
"_zero_pad_gaps",
"(",
"tr",
",",
"gaps",
",",
"fill_gaps",
"=",
"True",
")",
":",
"start_in",
",",
"end_in",
"=",
"(",
"tr",
".",
"stats",
".",
"starttime",
",",
"tr",
".",
"stats",
".",
"endtime",
")",
"for",
"gap",
"in",
"gaps",
":",
"str... | Replace padded parts of trace with zeros.
Will cut around gaps, detrend, then pad the gaps with zeros.
:type tr: :class:`osbpy.core.stream.Trace`
:param tr: A trace that has had the gaps padded
:param gaps: List of dict of start-time and end-time as UTCDateTime objects
:type gaps: list
:return: :class:`obspy.core.stream.Trace` | [
"Replace",
"padded",
"parts",
"of",
"trace",
"with",
"zeros",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/pre_processing.py#L581-L618 | train | 203,371 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/pre_processing.py | _fill_gaps | def _fill_gaps(tr):
"""
Interpolate through gaps and work-out where gaps are.
:param tr: Gappy trace (e.g. tr.data is np.ma.MaskedArray)
:type tr: `obspy.core.stream.Trace`
:return: gaps, trace, where gaps is a list of dict
"""
tr = tr.split()
gaps = tr.get_gaps()
tr = tr.detrend().merge(fill_value=0)[0]
gaps = [{'starttime': gap[4], 'endtime': gap[5]} for gap in gaps]
return gaps, tr | python | def _fill_gaps(tr):
"""
Interpolate through gaps and work-out where gaps are.
:param tr: Gappy trace (e.g. tr.data is np.ma.MaskedArray)
:type tr: `obspy.core.stream.Trace`
:return: gaps, trace, where gaps is a list of dict
"""
tr = tr.split()
gaps = tr.get_gaps()
tr = tr.detrend().merge(fill_value=0)[0]
gaps = [{'starttime': gap[4], 'endtime': gap[5]} for gap in gaps]
return gaps, tr | [
"def",
"_fill_gaps",
"(",
"tr",
")",
":",
"tr",
"=",
"tr",
".",
"split",
"(",
")",
"gaps",
"=",
"tr",
".",
"get_gaps",
"(",
")",
"tr",
"=",
"tr",
".",
"detrend",
"(",
")",
".",
"merge",
"(",
"fill_value",
"=",
"0",
")",
"[",
"0",
"]",
"gaps",... | Interpolate through gaps and work-out where gaps are.
:param tr: Gappy trace (e.g. tr.data is np.ma.MaskedArray)
:type tr: `obspy.core.stream.Trace`
:return: gaps, trace, where gaps is a list of dict | [
"Interpolate",
"through",
"gaps",
"and",
"work",
"-",
"out",
"where",
"gaps",
"are",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/pre_processing.py#L621-L634 | train | 203,372 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/findpeaks.py | find_peaks2_short | def find_peaks2_short(arr, thresh, trig_int, debug=0, starttime=False,
samp_rate=1.0, full_peaks=False):
"""
Determine peaks in an array of data above a certain threshold.
Uses a mask to remove data below threshold and finds peaks in what is left.
:type arr: numpy.ndarray
:param arr: 1-D numpy array is required
:type thresh: float
:param thresh:
The threshold below which will be considered noise and peaks will
not be found in.
:type trig_int: int
:param trig_int:
The minimum difference in samples between triggers, if multiple
peaks within this window this code will find the highest.
:type debug: int
:param debug: Optional, debug level 0-5
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime for plotting, only used if debug > 2.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.
:type full_peaks: bool
:param full_peaks:
If True, will remove the issue eluded to below, by declustering within
data-sections above the threshold, rather than just taking the peak
within that section. This will take more time. This defaults to True
for match_filter.
:return: peaks: Lists of tuples of peak values and locations.
:rtype: list
>>> import numpy as np
>>> arr = np.random.randn(100)
>>> threshold = 10
>>> arr[40] = 20
>>> arr[60] = 100
>>> find_peaks2_short(arr, threshold, 3)
[(20.0, 40), (100.0, 60)]
.. note::
peak-finding is optimised for zero-mean cross-correlation data where
fluctuations are frequent. Because of this, in certain cases some
peaks may be missed if the trig_int is short and the threshold is low.
Consider the following case:
>>> arr = np.array([1, .2, .2, .2, .2, 1, .2, .2, .2, .2, 1])
>>> find_peaks2_short(arr, thresh=.2, trig_int=3)
[(1.0, 0)]
Whereas you would expect the following:
>>> arr = np.array([1, .2, .2, .2, .2, 1, .2, .2, .2, .2, 1])
>>> find_peaks2_short(arr, thresh=.2, trig_int=3, full_peaks=True)
[(1.0, 0), (1.0, 5), (1.0, 10)]
This is rare and unlikely to happen for correlation cases, where
trigger intervals are usually large and thresholds high.
"""
if not starttime:
starttime = UTCDateTime(0)
# Set everything below the threshold to zero
image = np.copy(arr)
image = np.abs(image)
debug_print("Threshold: {0}\tMax: {1}".format(thresh, max(image)),
2, debug)
image[image < thresh] = 0
if len(image[image > thresh]) == 0:
debug_print("No values over threshold {0}".format(thresh), 0, debug)
return []
debug_print('Found {0} samples above the threshold'.format(
len(image[image > thresh])), 0, debug)
initial_peaks = []
# Find the peaks
labeled_image, number_of_objects = ndimage.label(image)
peak_slices = ndimage.find_objects(labeled_image)
for peak_slice in peak_slices:
window = arr[peak_slice[0].start: peak_slice[0].stop]
if peak_slice[0].stop - peak_slice[0].start > trig_int and full_peaks:
peaks = decluster(
peaks=window, trig_int=trig_int,
index=np.arange(peak_slice[0].start, peak_slice[0].stop))
else:
peaks = [(window[np.argmax(abs(window))],
int(peak_slice[0].start + np.argmax(abs(window))))]
initial_peaks.extend(peaks)
peaks = decluster(peaks=np.array(list(zip(*initial_peaks))[0]),
index=np.array(list(zip(*initial_peaks))[1]),
trig_int=trig_int)
if initial_peaks:
if debug >= 3:
from eqcorrscan.utils import plotting
_fname = ''.join([
'peaks_', starttime.datetime.strftime('%Y-%m-%d'), '.pdf'])
plotting.peaks_plot(
data=image, starttime=starttime, samp_rate=samp_rate,
save=True, peaks=peaks, savefile=_fname)
peaks = sorted(peaks, key=lambda time: time[1], reverse=False)
return peaks
else:
print('No peaks for you!')
return [] | python | def find_peaks2_short(arr, thresh, trig_int, debug=0, starttime=False,
samp_rate=1.0, full_peaks=False):
"""
Determine peaks in an array of data above a certain threshold.
Uses a mask to remove data below threshold and finds peaks in what is left.
:type arr: numpy.ndarray
:param arr: 1-D numpy array is required
:type thresh: float
:param thresh:
The threshold below which will be considered noise and peaks will
not be found in.
:type trig_int: int
:param trig_int:
The minimum difference in samples between triggers, if multiple
peaks within this window this code will find the highest.
:type debug: int
:param debug: Optional, debug level 0-5
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime for plotting, only used if debug > 2.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.
:type full_peaks: bool
:param full_peaks:
If True, will remove the issue eluded to below, by declustering within
data-sections above the threshold, rather than just taking the peak
within that section. This will take more time. This defaults to True
for match_filter.
:return: peaks: Lists of tuples of peak values and locations.
:rtype: list
>>> import numpy as np
>>> arr = np.random.randn(100)
>>> threshold = 10
>>> arr[40] = 20
>>> arr[60] = 100
>>> find_peaks2_short(arr, threshold, 3)
[(20.0, 40), (100.0, 60)]
.. note::
peak-finding is optimised for zero-mean cross-correlation data where
fluctuations are frequent. Because of this, in certain cases some
peaks may be missed if the trig_int is short and the threshold is low.
Consider the following case:
>>> arr = np.array([1, .2, .2, .2, .2, 1, .2, .2, .2, .2, 1])
>>> find_peaks2_short(arr, thresh=.2, trig_int=3)
[(1.0, 0)]
Whereas you would expect the following:
>>> arr = np.array([1, .2, .2, .2, .2, 1, .2, .2, .2, .2, 1])
>>> find_peaks2_short(arr, thresh=.2, trig_int=3, full_peaks=True)
[(1.0, 0), (1.0, 5), (1.0, 10)]
This is rare and unlikely to happen for correlation cases, where
trigger intervals are usually large and thresholds high.
"""
if not starttime:
starttime = UTCDateTime(0)
# Set everything below the threshold to zero
image = np.copy(arr)
image = np.abs(image)
debug_print("Threshold: {0}\tMax: {1}".format(thresh, max(image)),
2, debug)
image[image < thresh] = 0
if len(image[image > thresh]) == 0:
debug_print("No values over threshold {0}".format(thresh), 0, debug)
return []
debug_print('Found {0} samples above the threshold'.format(
len(image[image > thresh])), 0, debug)
initial_peaks = []
# Find the peaks
labeled_image, number_of_objects = ndimage.label(image)
peak_slices = ndimage.find_objects(labeled_image)
for peak_slice in peak_slices:
window = arr[peak_slice[0].start: peak_slice[0].stop]
if peak_slice[0].stop - peak_slice[0].start > trig_int and full_peaks:
peaks = decluster(
peaks=window, trig_int=trig_int,
index=np.arange(peak_slice[0].start, peak_slice[0].stop))
else:
peaks = [(window[np.argmax(abs(window))],
int(peak_slice[0].start + np.argmax(abs(window))))]
initial_peaks.extend(peaks)
peaks = decluster(peaks=np.array(list(zip(*initial_peaks))[0]),
index=np.array(list(zip(*initial_peaks))[1]),
trig_int=trig_int)
if initial_peaks:
if debug >= 3:
from eqcorrscan.utils import plotting
_fname = ''.join([
'peaks_', starttime.datetime.strftime('%Y-%m-%d'), '.pdf'])
plotting.peaks_plot(
data=image, starttime=starttime, samp_rate=samp_rate,
save=True, peaks=peaks, savefile=_fname)
peaks = sorted(peaks, key=lambda time: time[1], reverse=False)
return peaks
else:
print('No peaks for you!')
return [] | [
"def",
"find_peaks2_short",
"(",
"arr",
",",
"thresh",
",",
"trig_int",
",",
"debug",
"=",
"0",
",",
"starttime",
"=",
"False",
",",
"samp_rate",
"=",
"1.0",
",",
"full_peaks",
"=",
"False",
")",
":",
"if",
"not",
"starttime",
":",
"starttime",
"=",
"U... | Determine peaks in an array of data above a certain threshold.
Uses a mask to remove data below threshold and finds peaks in what is left.
:type arr: numpy.ndarray
:param arr: 1-D numpy array is required
:type thresh: float
:param thresh:
The threshold below which will be considered noise and peaks will
not be found in.
:type trig_int: int
:param trig_int:
The minimum difference in samples between triggers, if multiple
peaks within this window this code will find the highest.
:type debug: int
:param debug: Optional, debug level 0-5
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime for plotting, only used if debug > 2.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.
:type full_peaks: bool
:param full_peaks:
        If True, will remove the issue alluded to below, by declustering within
data-sections above the threshold, rather than just taking the peak
within that section. This will take more time. This defaults to True
for match_filter.
:return: peaks: Lists of tuples of peak values and locations.
:rtype: list
>>> import numpy as np
>>> arr = np.random.randn(100)
>>> threshold = 10
>>> arr[40] = 20
>>> arr[60] = 100
>>> find_peaks2_short(arr, threshold, 3)
[(20.0, 40), (100.0, 60)]
.. note::
peak-finding is optimised for zero-mean cross-correlation data where
fluctuations are frequent. Because of this, in certain cases some
peaks may be missed if the trig_int is short and the threshold is low.
Consider the following case:
>>> arr = np.array([1, .2, .2, .2, .2, 1, .2, .2, .2, .2, 1])
>>> find_peaks2_short(arr, thresh=.2, trig_int=3)
[(1.0, 0)]
Whereas you would expect the following:
>>> arr = np.array([1, .2, .2, .2, .2, 1, .2, .2, .2, .2, 1])
>>> find_peaks2_short(arr, thresh=.2, trig_int=3, full_peaks=True)
[(1.0, 0), (1.0, 5), (1.0, 10)]
This is rare and unlikely to happen for correlation cases, where
trigger intervals are usually large and thresholds high. | [
"Determine",
"peaks",
"in",
"an",
"array",
"of",
"data",
"above",
"a",
"certain",
"threshold",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/findpeaks.py#L65-L169 | train | 203,373 |
def multi_find_peaks(arr, thresh, trig_int, debug=0, starttime=False,
                     samp_rate=1.0, parallel=True, full_peaks=False,
                     cores=None):
    """
    Wrapper around find_peaks2_short for a set of arrays.

    :type arr: numpy.ndarray
    :param arr: 2-D numpy array is required
    :type thresh: list
    :param thresh:
        Threshold below which data are treated as noise - one threshold per
        row of `arr`.
    :type trig_int: int
    :param trig_int:
        Minimum separation in samples between triggers; only the highest
        peak within any such window is kept.
    :type debug: int
    :param debug: Optional, debug level 0-5
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Starttime for plotting, only used if debug > 2.
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.
    :type parallel: bool
    :param parallel:
        Whether to compute in parallel or not - will use multiprocessing
    :type full_peaks: bool
    :param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
    :type cores: int
    :param cores:
        Maximum number of processes to spin up for parallel peak-finding
    :returns:
        List of list of tuples of (peak, index) in same order as input arrays
    """
    if not parallel:
        # Work through the arrays one at a time in this process.
        return [
            find_peaks2_short(
                arr=sub_array, thresh=sub_thresh, trig_int=trig_int,
                debug=debug, starttime=starttime, samp_rate=samp_rate,
                full_peaks=full_peaks)
            for sub_array, sub_thresh in zip(arr, thresh)]
    # Fan out over a process pool, one job per array.
    n_workers = cores if cores is not None else arr.shape[0]
    with pool_boy(Pool=Pool, traces=n_workers) as pool:
        # starttime and samp_rate are pinned to (False, 1.0) in the workers;
        # they only affect the optional debug plotting in find_peaks2_short.
        async_results = [
            pool.apply_async(
                find_peaks2_short,
                (sub_array, sub_thresh, trig_int, debug, False, 1.0,
                 full_peaks))
            for sub_array, sub_thresh in zip(arr, thresh)]
        return [result.get() for result in async_results]
return peaks | python | def multi_find_peaks(arr, thresh, trig_int, debug=0, starttime=False,
samp_rate=1.0, parallel=True, full_peaks=False,
cores=None):
"""
Wrapper for find-peaks for multiple arrays.
:type arr: numpy.ndarray
:param arr: 2-D numpy array is required
:type thresh: list
:param thresh:
The threshold below which will be considered noise and peaks will not
be found in. One threshold per array.
:type trig_int: int
:param trig_int:
The minimum difference in samples between triggers, if multiple
peaks within this window this code will find the highest.
:type debug: int
:param debug: Optional, debug level 0-5
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime for plotting, only used if debug > 2.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.
:type parallel: bool
:param parallel:
Whether to compute in parallel or not - will use multiprocessing
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
:type cores: int
:param cores:
Maximum number of processes to spin up for parallel peak-finding
:returns:
List of list of tuples of (peak, index) in same order as input arrays
"""
peaks = []
if not parallel:
for sub_arr, arr_thresh in zip(arr, thresh):
peaks.append(find_peaks2_short(
arr=sub_arr, thresh=arr_thresh, trig_int=trig_int, debug=debug,
starttime=starttime, samp_rate=samp_rate,
full_peaks=full_peaks))
else:
if cores is None:
cores = arr.shape[0]
with pool_boy(Pool=Pool, traces=cores) as pool:
params = ((sub_arr, arr_thresh, trig_int, debug,
False, 1.0, full_peaks)
for sub_arr, arr_thresh in zip(arr, thresh))
results = [pool.apply_async(find_peaks2_short, param)
for param in params]
peaks = [res.get() for res in results]
return peaks | [
"def",
"multi_find_peaks",
"(",
"arr",
",",
"thresh",
",",
"trig_int",
",",
"debug",
"=",
"0",
",",
"starttime",
"=",
"False",
",",
"samp_rate",
"=",
"1.0",
",",
"parallel",
"=",
"True",
",",
"full_peaks",
"=",
"False",
",",
"cores",
"=",
"None",
")",
... | Wrapper for find-peaks for multiple arrays.
:type arr: numpy.ndarray
:param arr: 2-D numpy array is required
:type thresh: list
:param thresh:
The threshold below which will be considered noise and peaks will not
be found in. One threshold per array.
:type trig_int: int
:param trig_int:
The minimum difference in samples between triggers, if multiple
peaks within this window this code will find the highest.
:type debug: int
:param debug: Optional, debug level 0-5
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime for plotting, only used if debug > 2.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2.
:type parallel: bool
:param parallel:
Whether to compute in parallel or not - will use multiprocessing
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
:type cores: int
:param cores:
Maximum number of processes to spin up for parallel peak-finding
:returns:
List of list of tuples of (peak, index) in same order as input arrays | [
"Wrapper",
"for",
"find",
"-",
"peaks",
"for",
"multiple",
"arrays",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/findpeaks.py#L172-L223 | train | 203,374 |
def coin_trig(peaks, stachans, samp_rate, moveout, min_trig, trig_int):
    """
    Find network coincidence triggers within peaks of detection statistics.

    Useful for finding network detections from sets of detections on
    individual stations.

    :type peaks: list
    :param peaks: List of lists of tuples of (peak, index) for each \
        station-channel.  Index should be in samples.
    :type stachans: list
    :param stachans: List of tuples of (station, channel) in the order of \
        peaks.
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz
    :type moveout: float
    :param moveout: Allowable network moveout in seconds.
    :type min_trig: int
    :param min_trig: Minimum station-channels required to declare a trigger.
    :type trig_int: float
    :param trig_int:
        Minimum allowable time between network triggers in seconds.

    :return:
        List of tuples of (peak, index), for the earliest detected station.
    :rtype: list

    >>> peaks = [[(0.5, 100), (0.3, 800)], [(0.4, 120), (0.7, 850)]]
    >>> triggers = coin_trig(peaks, [('a', 'Z'), ('b', 'Z')], 10, 3, 2, 1)
    >>> print(triggers)
    [(0.45, 100)]
    """
    # Flatten the per-channel peak lists to (sample-index, value, seed-id).
    all_triggers = []
    for stachan, chan_peaks in zip(stachans, peaks):
        seed_id = '.'.join(stachan)
        for value, index in chan_peaks:
            all_triggers.append((index, value, seed_id))
    moveout_samples = moveout * samp_rate
    raw_coincidences = []
    for i, first in enumerate(all_triggers):
        # Count later triggers on *other* channels within the moveout window.
        n_coincident = 1
        earliest = first[0]
        summed_value = first[1]
        for later in all_triggers[i + 1:]:
            if later[2] == first[2]:
                continue
            if abs(later[0] - first[0]) > moveout_samples:
                continue
            n_coincident += 1
            if later[0] < first[0]:
                earliest = later[0]
            summed_value += later[1]
        if n_coincident >= min_trig:
            raw_coincidences.append((summed_value / n_coincident, earliest))
    if not raw_coincidences:
        return []
    # Decluster: keep only the strongest trigger within any trig_int window,
    # then report the survivors in time order.
    raw_coincidences.sort(key=lambda trig: trig[0], reverse=True)
    declustered = [raw_coincidences[0]]
    min_separation = trig_int * samp_rate
    for candidate in raw_coincidences[1:]:
        if all(abs(candidate[1] - kept[1]) >= min_separation
               for kept in declustered):
            declustered.append((candidate[0], candidate[1]))
    declustered.sort(key=lambda trig: trig[1])
    return declustered
return [] | python | def coin_trig(peaks, stachans, samp_rate, moveout, min_trig, trig_int):
"""
Find network coincidence triggers within peaks of detection statistics.
Useful for finding network detections from sets of detections on individual
stations.
:type peaks: list
:param peaks: List of lists of tuples of (peak, index) for each \
station-channel. Index should be in samples.
:type stachans: list
:param stachans: List of tuples of (station, channel) in the order of \
peaks.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type moveout: float
:param moveout: Allowable network moveout in seconds.
:type min_trig: int
:param min_trig: Minimum station-channels required to declare a trigger.
:type trig_int: float
:param trig_int:
Minimum allowable time between network triggers in seconds.
:return:
List of tuples of (peak, index), for the earliest detected station.
:rtype: list
>>> peaks = [[(0.5, 100), (0.3, 800)], [(0.4, 120), (0.7, 850)]]
>>> triggers = coin_trig(peaks, [('a', 'Z'), ('b', 'Z')], 10, 3, 2, 1)
>>> print(triggers)
[(0.45, 100)]
"""
triggers = []
for stachan, _peaks in zip(stachans, peaks):
for peak in _peaks:
trigger = (peak[1], peak[0], '.'.join(stachan))
triggers.append(trigger)
coincidence_triggers = []
for i, master in enumerate(triggers):
slaves = triggers[i + 1:]
coincidence = 1
trig_time = master[0]
trig_val = master[1]
for slave in slaves:
if abs(slave[0] - master[0]) <= (moveout * samp_rate) and \
slave[2] != master[2]:
coincidence += 1
if slave[0] < master[0]:
trig_time = slave[0]
trig_val += slave[1]
if coincidence >= min_trig:
coincidence_triggers.append((trig_val / coincidence,
trig_time))
# Sort by trigger-value, largest to smallest - remove duplicate detections
if coincidence_triggers:
coincidence_triggers.sort(key=lambda tup: tup[0], reverse=True)
output = [coincidence_triggers[0]]
for coincidence_trigger in coincidence_triggers[1:]:
add = True
for peak in output:
# If the event occurs within the trig_int time then do not add
# it, and break out of the inner loop.
if abs(coincidence_trigger[1] - peak[1]) < (trig_int *
samp_rate):
add = False
break
if add:
output.append((coincidence_trigger[0],
coincidence_trigger[1]))
output.sort(key=lambda tup: tup[1])
return output
else:
return [] | [
"def",
"coin_trig",
"(",
"peaks",
",",
"stachans",
",",
"samp_rate",
",",
"moveout",
",",
"min_trig",
",",
"trig_int",
")",
":",
"triggers",
"=",
"[",
"]",
"for",
"stachan",
",",
"_peaks",
"in",
"zip",
"(",
"stachans",
",",
"peaks",
")",
":",
"for",
... | Find network coincidence triggers within peaks of detection statistics.
Useful for finding network detections from sets of detections on individual
stations.
:type peaks: list
:param peaks: List of lists of tuples of (peak, index) for each \
station-channel. Index should be in samples.
:type stachans: list
:param stachans: List of tuples of (station, channel) in the order of \
peaks.
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type moveout: float
:param moveout: Allowable network moveout in seconds.
:type min_trig: int
:param min_trig: Minimum station-channels required to declare a trigger.
:type trig_int: float
:param trig_int:
Minimum allowable time between network triggers in seconds.
:return:
List of tuples of (peak, index), for the earliest detected station.
:rtype: list
>>> peaks = [[(0.5, 100), (0.3, 800)], [(0.4, 120), (0.7, 850)]]
>>> triggers = coin_trig(peaks, [('a', 'Z'), ('b', 'Z')], 10, 3, 2, 1)
>>> print(triggers)
[(0.45, 100)] | [
"Find",
"network",
"coincidence",
"triggers",
"within",
"peaks",
"of",
"detection",
"statistics",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/findpeaks.py#L266-L338 | train | 203,375 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/plotting.py | _finalise_figure | def _finalise_figure(fig, **kwargs): # pragma: no cover
"""
Internal function to wrap up a figure.
Possible arguments:
:type title: str
:type show: bool
:type save: bool
:type savefile: str
:type return_figure: bool
"""
title = kwargs.get("title") or None
show = kwargs.get("show") or False
save = kwargs.get("save") or False
savefile = kwargs.get("savefile") or "EQcorrscan_figure.png"
return_fig = kwargs.get("return_figure") or False
if title:
fig.suptitle(title)
if show:
fig.show()
if save:
fig.savefig(savefile)
print("Saved figure to {0}".format(savefile))
if return_fig:
return fig
return None | python | def _finalise_figure(fig, **kwargs): # pragma: no cover
"""
Internal function to wrap up a figure.
Possible arguments:
:type title: str
:type show: bool
:type save: bool
:type savefile: str
:type return_figure: bool
"""
title = kwargs.get("title") or None
show = kwargs.get("show") or False
save = kwargs.get("save") or False
savefile = kwargs.get("savefile") or "EQcorrscan_figure.png"
return_fig = kwargs.get("return_figure") or False
if title:
fig.suptitle(title)
if show:
fig.show()
if save:
fig.savefig(savefile)
print("Saved figure to {0}".format(savefile))
if return_fig:
return fig
return None | [
"def",
"_finalise_figure",
"(",
"fig",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"title",
"=",
"kwargs",
".",
"get",
"(",
"\"title\"",
")",
"or",
"None",
"show",
"=",
"kwargs",
".",
"get",
"(",
"\"show\"",
")",
"or",
"False",
"save",
"="... | Internal function to wrap up a figure.
Possible arguments:
:type title: str
:type show: bool
:type save: bool
:type savefile: str
:type return_figure: bool | [
"Internal",
"function",
"to",
"wrap",
"up",
"a",
"figure",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L34-L59 | train | 203,376 |
def chunk_data(tr, samp_rate, state='mean'):
    """
    Downsample data for plotting.

    Computes a summary statistic of the data within chunks, useful for
    plotting waveforms or cccsums - large datasets would otherwise exceed
    the plotting complexity allowed, and overflow.

    :type tr: obspy.core.trace.Trace
    :param tr: Trace to be chunked
    :type samp_rate: float
    :param samp_rate: Desired sampling rate in Hz
    :type state: str
    :param state:
        Either 'Min', 'Max', 'Mean' or 'Maxabs' (case-insensitive) to return
        one of these for the chunks. Maxabs will return the largest
        (positive or negative) for that chunk.

    :returns: :class:`obspy.core.trace.Trace`

    :raises ValueError: if `state` is not one of the supported statistics.
    """
    trout = tr.copy()  # Don't do it inplace on data
    x = np.arange(len(tr.data))
    y = tr.data
    chunksize = int(round(tr.stats.sampling_rate / samp_rate))
    # Wrap the array into a 2D array of chunks, truncating the last chunk if
    # chunksize isn't an even divisor of the total size.
    # (This part won't use _any_ additional memory)
    numchunks = int(y.size // chunksize)
    ychunks = y[:chunksize * numchunks].reshape((-1, chunksize))
    xchunks = x[:chunksize * numchunks].reshape((-1, chunksize))
    # Bug-fix: the previous implementation compared the raw string against
    # capitalised branch names, so the default 'mean' (and any lowercase
    # state) matched nothing and the data were silently returned un-chunked
    # with a falsified sampling rate.  Normalise the case first and reject
    # unknown statistics explicitly.
    state = state.capitalize()
    # Calculate the max, min, or means of chunksize-element chunks...
    if state == 'Max':
        trout.data = ychunks.max(axis=1)
    elif state == 'Min':
        trout.data = ychunks.min(axis=1)
    elif state == 'Mean':
        trout.data = ychunks.mean(axis=1)
    elif state == 'Maxabs':
        max_env = ychunks.max(axis=1)
        min_env = ychunks.min(axis=1)
        # Pick, per chunk, whichever envelope has the larger magnitude.
        indices = np.argmax(
            np.vstack([np.abs(max_env), np.abs(min_env)]), axis=0)
        stack = np.vstack([max_env, min_env]).T
        trout.data = np.array(
            [stack[i][indices[i]] for i in range(len(stack))])
    else:
        raise ValueError(
            "state must be one of 'Min', 'Max', 'Mean' or 'Maxabs', "
            "got {0}".format(state))
    xcenters = xchunks.mean(axis=1)
    # Shift the start-time to the centre of the first chunk.
    trout.stats.starttime = (
        tr.stats.starttime + xcenters[0] / tr.stats.sampling_rate)
    trout.stats.sampling_rate = samp_rate
    return trout
return trout | python | def chunk_data(tr, samp_rate, state='mean'):
"""
Downsample data for plotting.
Computes the maximum of data within chunks, useful for plotting waveforms
or cccsums, large datasets that would otherwise exceed the complexity
allowed, and overflow.
:type tr: obspy.core.trace.Trace
:param tr: Trace to be chunked
:type samp_rate: float
:param samp_rate: Desired sampling rate in Hz
:type state: str
:param state:
Either 'Min', 'Max', 'Mean' or 'Maxabs' to return one of these for the
chunks. Maxabs will return the largest (positive or negative) for
that chunk.
:returns: :class:`obspy.core.trace.Trace`
"""
trout = tr.copy() # Don't do it inplace on data
x = np.arange(len(tr.data))
y = tr.data
chunksize = int(round(tr.stats.sampling_rate / samp_rate))
# Wrap the array into a 2D array of chunks, truncating the last chunk if
# chunksize isn't an even divisor of the total size.
# (This part won't use _any_ additional memory)
numchunks = int(y.size // chunksize)
ychunks = y[:chunksize * numchunks].reshape((-1, chunksize))
xchunks = x[:chunksize * numchunks].reshape((-1, chunksize))
# Calculate the max, min, and means of chunksize-element chunks...
if state == 'Max':
trout.data = ychunks.max(axis=1)
elif state == 'Min':
trout.data = ychunks.min(axis=1)
elif state == 'Mean':
trout.data = ychunks.mean(axis=1)
elif state == 'Maxabs':
max_env = ychunks.max(axis=1)
min_env = ychunks.min(axis=1)
indeces = np.argmax(np.vstack([np.abs(max_env), np.abs(min_env)]),
axis=0)
stack = np.vstack([max_env, min_env]).T
trout.data = np.array([stack[i][indeces[i]]
for i in range(len(stack))])
xcenters = xchunks.mean(axis=1)
trout.stats.starttime = tr.stats.starttime + xcenters[0] /\
tr.stats.sampling_rate
trout.stats.sampling_rate = samp_rate
return trout | [
"def",
"chunk_data",
"(",
"tr",
",",
"samp_rate",
",",
"state",
"=",
"'mean'",
")",
":",
"trout",
"=",
"tr",
".",
"copy",
"(",
")",
"# Don't do it inplace on data",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"tr",
".",
"data",
")",
")",
"y",
"... | Downsample data for plotting.
Computes the maximum of data within chunks, useful for plotting waveforms
or cccsums, large datasets that would otherwise exceed the complexity
allowed, and overflow.
:type tr: obspy.core.trace.Trace
:param tr: Trace to be chunked
:type samp_rate: float
:param samp_rate: Desired sampling rate in Hz
:type state: str
:param state:
Either 'Min', 'Max', 'Mean' or 'Maxabs' to return one of these for the
chunks. Maxabs will return the largest (positive or negative) for
that chunk.
:returns: :class:`obspy.core.trace.Trace` | [
"Downsample",
"data",
"for",
"plotting",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L62-L113 | train | 203,377 |
def xcorr_plot(template, image, shift=None, cc=None, cc_vec=None, **kwargs):
    """
    Plot a template on top of an image, aligned at the correlation shift.

    :type template: numpy.ndarray
    :param template: Short template image
    :type image: numpy.ndarray
    :param image: Long master image
    :type shift: int
    :param shift: Shift to apply to template relative to image, in samples
    :type cc: float
    :param cc: Cross-correlation at shift
    :type cc_vec: numpy.ndarray
    :param cc_vec: Cross-correlation vector.
    :type save: bool
    :param save: Whether to save the plot or not.
    :type savefile: str
    :param savefile: File name to save to

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.plotting import xcorr_plot
    >>> from eqcorrscan.utils.stacking import align_traces
    >>> st = read().detrend('simple').filter('bandpass', freqmin=2, freqmax=15)
    >>> shifts, ccs = align_traces([st[0], st[1]], 40)
    >>> shift = shifts[1] * st[1].stats.sampling_rate
    >>> cc = ccs[1]
    >>> xcorr_plot(template=st[1].data, image=st[0].data, shift=shift,
    ...            cc=cc)  # doctest: +SKIP

    .. image:: ../../plots/xcorr_plot.png
    """
    import matplotlib.pyplot as plt
    if shift is None or cc is None:
        # Derive shift and cc from the correlation vector if not given.
        if not isinstance(cc_vec, np.ndarray):
            print('Given cc: %s and shift: %s' % (cc, shift))
            raise IOError('Must provide either cc_vec, or cc and shift')
        shift = np.abs(cc_vec).argmax()
        cc = cc_vec[shift]
    # Both series are normalised to unit peak amplitude before plotting.
    image_x = np.arange(len(image))
    plt.plot(image_x, image / abs(image).max(), 'k', lw=1.3, label='Image')
    template_x = np.arange(len(template)) + shift
    plt.plot(template_x, template / abs(template).max(), 'r', lw=1.1,
             label='Template')
    plt.title('Shift=%s, Correlation=%s' % (shift, cc))
    fig = plt.gcf()
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
return fig | python | def xcorr_plot(template, image, shift=None, cc=None, cc_vec=None, **kwargs):
"""
Plot a template overlying an image aligned by correlation.
:type template: numpy.ndarray
:param template: Short template image
:type image: numpy.ndarray
:param image: Long master image
:type shift: int
:param shift: Shift to apply to template relative to image, in samples
:type cc: float
:param cc: Cross-correlation at shift
:type cc_vec: numpy.ndarray
:param cc_vec: Cross-correlation vector.
:type save: bool
:param save: Whether to save the plot or not.
:type savefile: str
:param savefile: File name to save to
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.plotting import xcorr_plot
>>> from eqcorrscan.utils.stacking import align_traces
>>> st = read().detrend('simple').filter('bandpass', freqmin=2, freqmax=15)
>>> shifts, ccs = align_traces([st[0], st[1]], 40)
>>> shift = shifts[1] * st[1].stats.sampling_rate
>>> cc = ccs[1]
>>> xcorr_plot(template=st[1].data, image=st[0].data, shift=shift,
... cc=cc) # doctest: +SKIP
.. image:: ../../plots/xcorr_plot.png
"""
import matplotlib.pyplot as plt
if cc is None or shift is None:
if not isinstance(cc_vec, np.ndarray):
print('Given cc: %s and shift: %s' % (cc, shift))
raise IOError('Must provide either cc_vec, or cc and shift')
shift = np.abs(cc_vec).argmax()
cc = cc_vec[shift]
x = np.arange(len(image))
plt.plot(x, image / abs(image).max(), 'k', lw=1.3, label='Image')
x = np.arange(len(template)) + shift
plt.plot(x, template / abs(template).max(), 'r', lw=1.1, label='Template')
plt.title('Shift=%s, Correlation=%s' % (shift, cc))
fig = plt.gcf()
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | [
"def",
"xcorr_plot",
"(",
"template",
",",
"image",
",",
"shift",
"=",
"None",
",",
"cc",
"=",
"None",
",",
"cc_vec",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"cc",
"is",
"None",
"or"... | Plot a template overlying an image aligned by correlation.
:type template: numpy.ndarray
:param template: Short template image
:type image: numpy.ndarray
:param image: Long master image
:type shift: int
:param shift: Shift to apply to template relative to image, in samples
:type cc: float
:param cc: Cross-correlation at shift
:type cc_vec: numpy.ndarray
:param cc_vec: Cross-correlation vector.
:type save: bool
:param save: Whether to save the plot or not.
:type savefile: str
:param savefile: File name to save to
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.plotting import xcorr_plot
>>> from eqcorrscan.utils.stacking import align_traces
>>> st = read().detrend('simple').filter('bandpass', freqmin=2, freqmax=15)
>>> shifts, ccs = align_traces([st[0], st[1]], 40)
>>> shift = shifts[1] * st[1].stats.sampling_rate
>>> cc = ccs[1]
>>> xcorr_plot(template=st[1].data, image=st[0].data, shift=shift,
... cc=cc) # doctest: +SKIP
.. image:: ../../plots/xcorr_plot.png | [
"Plot",
"a",
"template",
"overlying",
"an",
"image",
"aligned",
"by",
"correlation",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L116-L165 | train | 203,378 |
def triple_plot(cccsum, cccsum_hist, trace, threshold, **kwargs):
    """
    Plot a seismogram, correlogram and histogram.

    :type cccsum: numpy.ndarray
    :param cccsum: Array of the cross-channel cross-correlation sum
    :type cccsum_hist: numpy.ndarray
    :param cccsum_hist: cccsum for histogram plotting, can be the same as \
        cccsum but included if cccsum is just an envelope.
    :type trace: obspy.core.trace.Trace
    :param trace: A sample trace from the same time as cccsum
    :type threshold: float
    :param threshold: Detection threshold within cccsum
    :type save: bool
    :param save: If True will save and not plot to screen, vice-versa if False
    :type savefile: str
    :param savefile: Path to save figure to, only required if save=True

    :raises ValueError: if cccsum and trace do not have the same length.

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.core.match_filter import normxcorr2
    >>> from eqcorrscan.utils.plotting import triple_plot
    >>> st = read()
    >>> template = st[0].copy().trim(st[0].stats.starttime + 8,
    ...                              st[0].stats.starttime + 12)
    >>> tr = st[0]
    >>> ccc = normxcorr2(template=template.data, image=tr.data)
    >>> tr.data = tr.data[0:len(ccc[0])]
    >>> triple_plot(cccsum=ccc[0], cccsum_hist=ccc[0], trace=tr,
    ...             threshold=0.8)  # doctest: +SKIP

    .. image:: ../../plots/triple_plot.png
    """
    import matplotlib.pyplot as plt
    if len(cccsum) != len(trace.data):
        print('cccsum is: ' +
              str(len(cccsum)) + ' trace is: ' + str(len(trace.data)))
        msg = ' '.join(['cccsum and trace must have the',
                        'same number of data points'])
        raise ValueError(msg)
    df = trace.stats.sampling_rate
    npts = trace.stats.npts
    # Time axis in hours after the trace start-time.
    t = np.arange(npts, dtype=np.float32) / (df * 3600)
    # Generate the subplot for the seismic data
    ax1 = plt.subplot2grid((2, 5), (0, 0), colspan=4)
    ax1.plot(t, trace.data, 'k')
    ax1.axis('tight')
    ax1.set_ylim([-15 * np.mean(np.abs(trace.data)),
                  15 * np.mean(np.abs(trace.data))])
    # Generate the subplot for the correlation sum data
    ax2 = plt.subplot2grid((2, 5), (1, 0), colspan=4, sharex=ax1)
    # Plot the threshold values
    ax2.plot([min(t), max(t)], [threshold, threshold], color='r', lw=1,
             label="Threshold")
    ax2.plot([min(t), max(t)], [-threshold, -threshold], color='r', lw=1)
    ax2.plot(t, cccsum, 'k')
    ax2.axis('tight')
    ax2.set_ylim([-1.7 * threshold, 1.7 * threshold])
    ax2.set_xlabel("Time after %s [hr]" % trace.stats.starttime.isoformat())
    # Generate a small subplot for the histogram of the cccsum data.
    # Bug-fix: the `normed` kwarg was deprecated in matplotlib 2.1 and
    # removed in 3.1 (raising TypeError); `density=True` is the equivalent.
    ax3 = plt.subplot2grid((2, 5), (1, 4), sharey=ax2)
    ax3.hist(cccsum_hist, 200, density=True, histtype='stepfilled',
             orientation='horizontal', color='black')
    ax3.set_ylim([-5, 5])
    fig = plt.gcf()
    fig.suptitle(trace.id)
    fig.canvas.draw()
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
return fig | python | def triple_plot(cccsum, cccsum_hist, trace, threshold, **kwargs):
"""
Plot a seismogram, correlogram and histogram.
:type cccsum: numpy.ndarray
:param cccsum: Array of the cross-channel cross-correlation sum
:type cccsum_hist: numpy.ndarray
:param cccsum_hist: cccsum for histogram plotting, can be the same as \
cccsum but included if cccsum is just an envelope.
:type trace: obspy.core.trace.Trace
:param trace: A sample trace from the same time as cccsum
:type threshold: float
:param threshold: Detection threshold within cccsum
:type save: bool
:param save: If True will save and not plot to screen, vice-versa if False
:type savefile: str
:param savefile: Path to save figure to, only required if save=True
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.core.match_filter import normxcorr2
>>> from eqcorrscan.utils.plotting import triple_plot
>>> st = read()
>>> template = st[0].copy().trim(st[0].stats.starttime + 8,
... st[0].stats.starttime + 12)
>>> tr = st[0]
>>> ccc = normxcorr2(template=template.data, image=tr.data)
>>> tr.data = tr.data[0:len(ccc[0])]
>>> triple_plot(cccsum=ccc[0], cccsum_hist=ccc[0], trace=tr,
... threshold=0.8) # doctest: +SKIP
.. image:: ../../plots/triple_plot.png
"""
import matplotlib.pyplot as plt
if len(cccsum) != len(trace.data):
print('cccsum is: ' +
str(len(cccsum)) + ' trace is: ' + str(len(trace.data)))
msg = ' '.join(['cccsum and trace must have the',
'same number of data points'])
raise ValueError(msg)
df = trace.stats.sampling_rate
npts = trace.stats.npts
t = np.arange(npts, dtype=np.float32) / (df * 3600)
# Generate the subplot for the seismic data
ax1 = plt.subplot2grid((2, 5), (0, 0), colspan=4)
ax1.plot(t, trace.data, 'k')
ax1.axis('tight')
ax1.set_ylim([-15 * np.mean(np.abs(trace.data)),
15 * np.mean(np.abs(trace.data))])
# Generate the subplot for the correlation sum data
ax2 = plt.subplot2grid((2, 5), (1, 0), colspan=4, sharex=ax1)
# Plot the threshold values
ax2.plot([min(t), max(t)], [threshold, threshold], color='r', lw=1,
label="Threshold")
ax2.plot([min(t), max(t)], [-threshold, -threshold], color='r', lw=1)
ax2.plot(t, cccsum, 'k')
ax2.axis('tight')
ax2.set_ylim([-1.7 * threshold, 1.7 * threshold])
ax2.set_xlabel("Time after %s [hr]" % trace.stats.starttime.isoformat())
# ax2.legend()
# Generate a small subplot for the histogram of the cccsum data
ax3 = plt.subplot2grid((2, 5), (1, 4), sharey=ax2)
ax3.hist(cccsum_hist, 200, normed=1, histtype='stepfilled',
orientation='horizontal', color='black')
ax3.set_ylim([-5, 5])
fig = plt.gcf()
fig.suptitle(trace.id)
fig.canvas.draw()
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | [
"def",
"triple_plot",
"(",
"cccsum",
",",
"cccsum_hist",
",",
"trace",
",",
"threshold",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"len",
"(",
"cccsum",
")",
"!=",
"len",
"(",
"trace",
".",
"data",
"... | Plot a seismogram, correlogram and histogram.
:type cccsum: numpy.ndarray
:param cccsum: Array of the cross-channel cross-correlation sum
:type cccsum_hist: numpy.ndarray
:param cccsum_hist: cccsum for histogram plotting, can be the same as \
cccsum but included if cccsum is just an envelope.
:type trace: obspy.core.trace.Trace
:param trace: A sample trace from the same time as cccsum
:type threshold: float
:param threshold: Detection threshold within cccsum
:type save: bool
:param save: If True will save and not plot to screen, vice-versa if False
:type savefile: str
:param savefile: Path to save figure to, only required if save=True
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.core.match_filter import normxcorr2
>>> from eqcorrscan.utils.plotting import triple_plot
>>> st = read()
>>> template = st[0].copy().trim(st[0].stats.starttime + 8,
... st[0].stats.starttime + 12)
>>> tr = st[0]
>>> ccc = normxcorr2(template=template.data, image=tr.data)
>>> tr.data = tr.data[0:len(ccc[0])]
>>> triple_plot(cccsum=ccc[0], cccsum_hist=ccc[0], trace=tr,
... threshold=0.8) # doctest: +SKIP
.. image:: ../../plots/triple_plot.png | [
"Plot",
"a",
"seismogram",
"correlogram",
"and",
"histogram",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L168-L241 | train | 203,379 |
def peaks_plot(data, starttime, samp_rate, peaks=None, **kwargs):
    """
    Plot peaks to check that the peak finding routine is running correctly.

    Used in debugging for the EQcorrscan module.

    :type data: numpy.array
    :param data: Numpy array of the data within which peaks have been found
    :type starttime: obspy.core.utcdatetime.UTCDateTime
    :param starttime: Start time for the data
    :type samp_rate: float
    :param samp_rate: Sampling rate of data in Hz
    :type peaks: list
    :param peaks:
        List of tuples of peak amplitudes and locations (amp, loc);
        defaults to a single dummy peak at (0, 0).

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example

    >>> import numpy as np
    >>> from eqcorrscan.utils import findpeaks
    >>> from eqcorrscan.utils.plotting import peaks_plot
    >>> from obspy import UTCDateTime
    >>> data = np.random.randn(200)
    >>> data[30] = 100
    >>> data[60] = 40
    >>> threshold = 10
    >>> peaks = findpeaks.find_peaks2_short(data, threshold, 3)
    >>> peaks_plot(data=data, starttime=UTCDateTime("2008001"),
    ...            samp_rate=10, peaks=peaks)  # doctest: +SKIP
    """
    import matplotlib.pyplot as plt
    # Avoid the mutable-default-argument pitfall: the dummy peak is built
    # fresh on each call rather than shared between calls.
    if peaks is None:
        peaks = [(0, 0)]
    npts = len(data)
    # Time axis in hours after the start-time.
    t = np.arange(npts, dtype=np.float32) / (samp_rate * 3600)
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.plot(t, data, 'k')
    # Label only the first point so the legend carries a single 'Peaks'
    # entry; previously the first peak was scattered twice (once for the
    # label and again inside the loop).
    ax1.scatter(peaks[0][1] / (samp_rate * 3600), abs(peaks[0][0]),
                color='r', label='Peaks')
    for peak in peaks[1:]:
        ax1.scatter(peak[1] / (samp_rate * 3600), abs(peak[0]), color='r')
    ax1.legend()
    ax1.set_xlabel("Time after %s [hr]" % starttime.isoformat())
    ax1.axis('tight')
    fig.suptitle('Peaks')
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
return fig | python | def peaks_plot(data, starttime, samp_rate, peaks=[(0, 0)], **kwargs):
"""
Plot peaks to check that the peak finding routine is running correctly.
Used in debugging for the EQcorrscan module.
:type data: numpy.array
:param data: Numpy array of the data within which peaks have been found
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Start time for the data
:type samp_rate: float
:param samp_rate: Sampling rate of data in Hz
:type peaks: list
:param peaks: List of tuples of peak locations and amplitudes (loc, amp)
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> import numpy as np
>>> from eqcorrscan.utils import findpeaks
>>> from eqcorrscan.utils.plotting import peaks_plot
>>> from obspy import UTCDateTime
>>> data = np.random.randn(200)
>>> data[30] = 100
>>> data[60] = 40
>>> threshold = 10
>>> peaks = findpeaks.find_peaks2_short(data, threshold, 3)
>>> peaks_plot(data=data, starttime=UTCDateTime("2008001"),
... samp_rate=10, peaks=peaks) # doctest: +SKIP
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from eqcorrscan.utils import findpeaks
from eqcorrscan.utils.plotting import peaks_plot
from obspy import UTCDateTime
data = np.random.randn(200)
data[30]=100
data[60]=40
threshold = 10
peaks = findpeaks.find_peaks2_short(data, threshold, 3)
peaks_plot(data=data, starttime=UTCDateTime("2008001"),
samp_rate=10, peaks=peaks)
"""
import matplotlib.pyplot as plt
npts = len(data)
t = np.arange(npts, dtype=np.float32) / (samp_rate * 3600)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(t, data, 'k')
ax1.scatter(peaks[0][1] / (samp_rate * 3600), abs(peaks[0][0]),
color='r', label='Peaks')
for peak in peaks:
ax1.scatter(peak[1] / (samp_rate * 3600), abs(peak[0]), color='r')
ax1.legend()
ax1.set_xlabel("Time after %s [hr]" % starttime.isoformat())
ax1.axis('tight')
fig.suptitle('Peaks')
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | [
"def",
"peaks_plot",
"(",
"data",
",",
"starttime",
",",
"samp_rate",
",",
"peaks",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"npts",
"=",
"len",
"(",
"data",
")... | Plot peaks to check that the peak finding routine is running correctly.
Used in debugging for the EQcorrscan module.
:type data: numpy.array
:param data: Numpy array of the data within which peaks have been found
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Start time for the data
:type samp_rate: float
:param samp_rate: Sampling rate of data in Hz
:type peaks: list
:param peaks: List of tuples of peak locations and amplitudes (loc, amp)
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> import numpy as np
>>> from eqcorrscan.utils import findpeaks
>>> from eqcorrscan.utils.plotting import peaks_plot
>>> from obspy import UTCDateTime
>>> data = np.random.randn(200)
>>> data[30] = 100
>>> data[60] = 40
>>> threshold = 10
>>> peaks = findpeaks.find_peaks2_short(data, threshold, 3)
>>> peaks_plot(data=data, starttime=UTCDateTime("2008001"),
... samp_rate=10, peaks=peaks) # doctest: +SKIP
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from eqcorrscan.utils import findpeaks
from eqcorrscan.utils.plotting import peaks_plot
from obspy import UTCDateTime
data = np.random.randn(200)
data[30]=100
data[60]=40
threshold = 10
peaks = findpeaks.find_peaks2_short(data, threshold, 3)
peaks_plot(data=data, starttime=UTCDateTime("2008001"),
samp_rate=10, peaks=peaks) | [
"Plot",
"peaks",
"to",
"check",
"that",
"the",
"peak",
"finding",
"routine",
"is",
"running",
"correctly",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L244-L305 | train | 203,380 |
def threeD_gridplot(nodes, **kwargs):
    """Plot in a series of grid points in 3D.

    :type nodes: list
    :param nodes: List of tuples of the form (lat, long, depth)

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example

    >>> from eqcorrscan.utils.plotting import threeD_gridplot
    >>> nodes = [(-43.5, 170.4, 4), (-43.3, 170.8, 12), (-43.4, 170.3, 8)]
    >>> threeD_gridplot(nodes=nodes)  # doctest: +SKIP
    """
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
    import matplotlib.pyplot as plt
    # Unpack the (lat, long, depth) tuples into three float sequences.
    lats = [float(node[0]) for node in nodes]
    longs = [float(node[1]) for node in nodes]
    depths = [float(node[2]) for node in nodes]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(lats, longs, depths)
    ax.set_ylabel("Latitude (deg)")
    ax.set_xlabel("Longitude (deg)")
    ax.set_zlabel("Depth(km)")
    # Geographic coordinates read better without scientific notation.
    ax.get_xaxis().get_major_formatter().set_scientific(False)
    ax.get_yaxis().get_major_formatter().set_scientific(False)
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
def interev_mag(times, mags, size=(10.5, 7.5), **kwargs):
    """
    Plot inter-event times against magnitude.

    :type times: list
    :param times: list of the detection times, must be sorted the same as mags
    :type mags: list
    :param mags: list of magnitudes
    :type size: tuple
    :param size: Size of figure in inches.

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example

    >>> from obspy.clients.fdsn import Client
    >>> from obspy import UTCDateTime
    >>> from eqcorrscan.utils.plotting import interev_mag
    >>> client = Client('IRIS')
    >>> t1 = UTCDateTime('2012-03-26T00:00:00')
    >>> t2 = t1 + (3 * 86400)
    >>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
    >>> magnitudes = [event.preferred_magnitude().mag for event in catalog]
    >>> times = [event.preferred_origin().time for event in catalog]
    >>> interev_mag(times, magnitudes)  # doctest: +SKIP
    """
    import matplotlib.pyplot as plt
    # Sort times and magnitudes together, ordered by time.
    info = sorted(zip(times, mags), key=lambda tup: tup[0])
    times = [x[0] for x in info]
    mags = [x[1] for x in info]
    # Make two subplots next to each other of time before and time after
    fig, axes = plt.subplots(1, 2, sharey=True, figsize=size)
    axes = axes.ravel()
    pre_times = []   # minutes since the previous event (events 1..n-1)
    post_times = []  # minutes until the next event (events 0..n-2)
    for i in range(len(times)):
        if i > 0:
            pre_times.append((times[i] - times[i - 1]) / 60)
        if i < len(times) - 1:
            post_times.append((times[i + 1] - times[i]) / 60)
    axes[0].scatter(pre_times, mags[1:])
    axes[0].set_title('Pre-event times')
    axes[0].set_ylabel('Magnitude')
    axes[0].set_xlabel('Time (Minutes)')
    plt.setp(axes[0].xaxis.get_majorticklabels(), rotation=30)
    # BUG FIX: the post-event panel previously re-plotted pre_times;
    # it must show the waiting time *after* each event (post_times was
    # computed but never used).
    axes[1].scatter(post_times, mags[:-1])
    axes[1].set_title('Post-event times')
    axes[1].set_xlabel('Time (Minutes)')
    axes[0].autoscale(enable=True, tight=True)
    axes[1].autoscale(enable=True, tight=True)
    plt.setp(axes[1].xaxis.get_majorticklabels(), rotation=30)
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
def obspy_3d_plot(inventory, catalog, size=(10.5, 7.5), **kwargs):
    """
    Plot obspy Inventory and obspy Catalog classes in three dimensions.

    :type inventory: obspy.core.inventory.inventory.Inventory
    :param inventory: Obspy inventory class containing station metadata
    :type catalog: obspy.core.event.catalog.Catalog
    :param catalog: Obspy catalog class containing event metadata
    :type size: tuple
    :param size: Size of figure in inches.

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example:

    >>> from obspy.clients.fdsn import Client
    >>> from obspy import UTCDateTime
    >>> from eqcorrscan.utils.plotting import obspy_3d_plot
    >>> client = Client('IRIS')
    >>> t1 = UTCDateTime(2012, 3, 26)
    >>> t2 = t1 + 86400
    >>> catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43,
    ...                             longitude=170, maxradius=5)
    >>> inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43,
    ...                                 longitude=170, maxradius=10)
    >>> obspy_3d_plot(inventory=inventory, catalog=catalog)  # doctest: +SKIP
    """
    # One (lat, lon, depth-km) node per event hypocenter.
    nodes = [(ev.preferred_origin().latitude,
              ev.preferred_origin().longitude,
              ev.preferred_origin().depth / 1000)
             for ev in catalog]
    # Will plot borehole instruments at elevation - depth if provided
    all_stas = []
    for network in inventory:
        for station in network:
            if len(station.channels) > 0:
                all_stas.append(
                    (station.latitude, station.longitude,
                     station.elevation / 1000 -
                     station.channels[0].depth / 1000))
            else:
                warnings.warn('No channel information attached, '
                              'setting elevation without depth')
                all_stas.append(
                    (station.latitude, station.longitude,
                     station.elevation / 1000))
    return threeD_seismplot(
        stations=all_stas, nodes=nodes, size=size, **kwargs)
def threeD_seismplot(stations, nodes, size=(10.5, 7.5), **kwargs):
    """
    Plot seismicity and stations in a 3D, movable, zoomable space.

    Uses matplotlibs Axes3D package.

    :type stations: list
    :param stations: list of one tuple per station of (lat, long, elevation),
        with up positive.
    :type nodes: list
    :param nodes: list of one tuple per event of (lat, long, depth) with down
        positive.
    :type size: tuple
    :param size: Size of figure in inches.

    :returns: :class:`matplotlib.figure.Figure`

    .. Note::
        See :func:`eqcorrscan.utils.plotting.obspy_3d_plot` for example
        output.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    def _wrap_longitudes(longitudes):
        # Cope with +/-180 longitudes by shifting negatives into 0-360.
        return [float(lon) + 360 if lon < 0 else lon for lon in longitudes]

    stalats, stalongs, staelevs = zip(*stations)
    evlats, evlongs, evdepths = zip(*nodes)
    evlongs = _wrap_longitudes(evlongs)
    stalongs = _wrap_longitudes(stalongs)
    # Depths are down-positive; negate so the z-axis reads as elevation.
    evdepths = [-1 * depth for depth in evdepths]
    fig = plt.figure(figsize=size)
    ax = Axes3D(fig)
    ax.scatter(evlats, evlongs, evdepths, marker="x", c="k",
               label='Hypocenters')
    ax.scatter(stalats, stalongs, staelevs, marker="v", c="r",
               label='Stations')
    ax.set_ylabel("Longitude (deg)")
    ax.set_xlabel("Latitude (deg)")
    ax.set_zlabel("Elevation (km)")
    ax.get_xaxis().get_major_formatter().set_scientific(False)
    ax.get_yaxis().get_major_formatter().set_scientific(False)
    plt.legend()
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
def noise_plot(signal, noise, normalise=False, **kwargs):
    """
    Plot signal and noise fourier transforms and the difference.

    :type signal: `obspy.core.stream.Stream`
    :param signal: Stream of "signal" window
    :type noise: `obspy.core.stream.Stream`
    :param noise: Stream of the "noise" window.
    :type normalise: bool
    :param normalise: Whether to normalise the data before plotting or not.

    :return: `matplotlib.pyplot.Figure`
    """
    import matplotlib.pyplot as plt

    def _matching_noise(trace):
        # Noise trace with the same seed-id as `trace`, or None if absent.
        matches = noise.select(id=trace.id)
        if len(matches) == 0:  # pragma: no cover
            return None
        return matches[0]

    # Only signal traces with a matching noise window get a row of axes.
    n_traces = sum(1 for tr in signal if _matching_noise(tr) is not None)
    fig, axes = plt.subplots(n_traces, 2, sharex=True)
    if len(signal) > 1:
        axes = axes.ravel()
    row = 0
    lines, labels = [], []
    for tr in signal:
        noise_tr = _matching_noise(tr)
        if noise_tr is None:  # pragma: no cover
            continue
        ax1, ax2 = axes[row], axes[row + 1]
        fft_len = fftpack.next_fast_len(
            max(noise_tr.stats.npts, tr.stats.npts))
        if normalise:
            signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len)
            noise_fft = fftpack.rfft(
                noise_tr.data / max(noise_tr.data), fft_len)
        else:
            signal_fft = fftpack.rfft(tr.data, fft_len)
            noise_fft = fftpack.rfft(noise_tr.data, fft_len)
        frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2)
        noise_spectrum = 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])
        signal_spectrum = 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])
        noise_line, = ax1.semilogy(frequencies, noise_spectrum, 'k',
                                   label="noise")
        signal_line, = ax1.semilogy(frequencies, signal_spectrum, 'r',
                                    label="signal")
        # Collect one legend handle per label for the figure-level legend.
        if "signal" not in labels:
            labels.append("signal")
            lines.append(signal_line)
        if "noise" not in labels:
            labels.append("noise")
            lines.append(noise_line)
        ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right')
        ax2.plot(frequencies, signal_spectrum - noise_spectrum, 'k')
        ax2.yaxis.tick_right()
        ax2.set_ylim(bottom=0)
        row += 2
    axes[-1].set_xlabel("Frequency (Hz)")
    axes[-2].set_xlabel("Frequency (Hz)")
    axes[0].set_title("Spectra")
    axes[1].set_title("Signal - noise")
    plt.figlegend(lines, labels, 'upper left')
    plt.tight_layout()
    plt.subplots_adjust(hspace=0)
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
def spec_trace(traces, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9,
               size=(10, 13), fig=None, **kwargs):
    """
    Plots seismic data with spectrogram behind.

    Takes a stream or list of traces and plots the trace with the spectra
    beneath it.

    :type traces: list
    :param traces: Traces to be plotted, can be a single
        :class:`obspy.core.stream.Stream`, or a list of
        :class:`obspy.core.trace.Trace`.
    :type cmap: str
    :param cmap:
        `Matplotlib colormap
        <http://matplotlib.org/examples/color/colormaps_reference.html>`_.
    :type wlen: float
    :param wlen: Window length for fft in seconds
    :type log: bool
    :param log: Use a log frequency scale
    :type trc: str
    :param trc: Color for the trace.
    :type tralpha: float
    :param tralpha: Opacity level for the seismogram, from transparent (0.0)
        to opaque (1.0).
    :type size: tuple
    :param size: Plot size, tuple of floats, inches
    :type fig: matplotlib.figure.Figure
    :param fig: Figure to plot onto, defaults to self generating.

    :returns: :class:`matplotlib.figure.Figure`
    """
    import matplotlib.pyplot as plt
    if isinstance(traces, Stream):
        traces.sort(['station', 'channel'])
    if not fig:
        fig = plt.figure()
    n_traces = len(traces)
    ax = None
    for i, tr in enumerate(traces):
        # Chain sharex so every subplot shares the same time axis.
        if ax is None:
            ax = fig.add_subplot(n_traces, 1, i + 1)
        else:
            ax = fig.add_subplot(n_traces, 1, i + 1, sharex=ax)
        ax1, ax2 = _spec_trace(tr, cmap=cmap, wlen=wlen, log=log, trc=trc,
                               tralpha=tralpha, axes=ax)
        ax.set_yticks([])
        if i < n_traces - 1:
            # Only the bottom subplot keeps its time tick labels.
            plt.setp(ax1.get_xticklabels(), visible=False)
        if isinstance(traces, list):
            ax.text(0.005, 0.85, "{0}::{1}".format(tr.id, tr.stats.starttime),
                    bbox=dict(facecolor='white', alpha=0.8),
                    transform=ax2.transAxes)
        elif isinstance(traces, Stream):
            ax.text(0.005, 0.85, tr.id,
                    bbox=dict(facecolor='white', alpha=0.8),
                    transform=ax2.transAxes)
        ax.text(0.005, 0.02, str(np.max(tr.data).round(1)),
                bbox=dict(facecolor='white', alpha=0.95),
                transform=ax2.transAxes)
    ax.set_xlabel('Time (s)')
    fig.subplots_adjust(hspace=0)
    fig.set_size_inches(w=size[0], h=size[1], forward=True)
    fig.text(0.04, 0.5, 'Frequency (Hz)', va='center', rotation='vertical')
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
size=(10, 13), fig=None, **kwargs):
"""
Plots seismic data with spectrogram behind.
Takes a stream or list of traces and plots the trace with the spectra
beneath it.
:type traces: list
:param traces: Traces to be plotted, can be a single
:class:`obspy.core.stream.Stream`, or a list of
:class:`obspy.core.trace.Trace`.
:type cmap: str
:param cmap:
`Matplotlib colormap
<http://matplotlib.org/examples/color/colormaps_reference.html>`_.
:type wlen: float
:param wlen: Window length for fft in seconds
:type log: bool
:param log: Use a log frequency scale
:type trc: str
:param trc: Color for the trace.
:type tralpha: float
:param tralpha: Opacity level for the seismogram, from transparent (0.0) \
to opaque (1.0).
:type size: tuple
:param size: Plot size, tuple of floats, inches
:type fig: matplotlib.figure.Figure
:param fig: Figure to plot onto, defaults to self generating.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.plotting import spec_trace
>>> st = read()
>>> spec_trace(st, trc='white') # doctest: +SKIP
.. plot::
from obspy import read
from eqcorrscan.utils.plotting import spec_trace
st = read()
spec_trace(st, trc='white')
"""
import matplotlib.pyplot as plt
if isinstance(traces, Stream):
traces.sort(['station', 'channel'])
if not fig:
fig = plt.figure()
for i, tr in enumerate(traces):
if i == 0:
ax = fig.add_subplot(len(traces), 1, i + 1)
else:
ax = fig.add_subplot(len(traces), 1, i + 1, sharex=ax)
ax1, ax2 = _spec_trace(tr, cmap=cmap, wlen=wlen, log=log, trc=trc,
tralpha=tralpha, axes=ax)
ax.set_yticks([])
if i < len(traces) - 1:
plt.setp(ax1.get_xticklabels(), visible=False)
if isinstance(traces, list):
ax.text(0.005, 0.85, "{0}::{1}".format(tr.id, tr.stats.starttime),
bbox=dict(facecolor='white', alpha=0.8),
transform=ax2.transAxes)
elif isinstance(traces, Stream):
ax.text(0.005, 0.85, tr.id,
bbox=dict(facecolor='white', alpha=0.8),
transform=ax2.transAxes)
ax.text(0.005, 0.02, str(np.max(tr.data).round(1)),
bbox=dict(facecolor='white', alpha=0.95),
transform=ax2.transAxes)
ax.set_xlabel('Time (s)')
fig.subplots_adjust(hspace=0)
fig.set_size_inches(w=size[0], h=size[1], forward=True)
fig.text(0.04, 0.5, 'Frequency (Hz)', va='center', rotation='vertical')
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | [
"def",
"spec_trace",
"(",
"traces",
",",
"cmap",
"=",
"None",
",",
"wlen",
"=",
"0.4",
",",
"log",
"=",
"False",
",",
"trc",
"=",
"'k'",
",",
"tralpha",
"=",
"0.9",
",",
"size",
"=",
"(",
"10",
",",
"13",
")",
",",
"fig",
"=",
"None",
",",
"*... | Plots seismic data with spectrogram behind.
Takes a stream or list of traces and plots the trace with the spectra
beneath it.
:type traces: list
:param traces: Traces to be plotted, can be a single
:class:`obspy.core.stream.Stream`, or a list of
:class:`obspy.core.trace.Trace`.
:type cmap: str
:param cmap:
`Matplotlib colormap
<http://matplotlib.org/examples/color/colormaps_reference.html>`_.
:type wlen: float
:param wlen: Window length for fft in seconds
:type log: bool
:param log: Use a log frequency scale
:type trc: str
:param trc: Color for the trace.
:type tralpha: float
:param tralpha: Opacity level for the seismogram, from transparent (0.0) \
to opaque (1.0).
:type size: tuple
:param size: Plot size, tuple of floats, inches
:type fig: matplotlib.figure.Figure
:param fig: Figure to plot onto, defaults to self generating.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.plotting import spec_trace
>>> st = read()
>>> spec_trace(st, trc='white') # doctest: +SKIP
.. plot::
from obspy import read
from eqcorrscan.utils.plotting import spec_trace
st = read()
spec_trace(st, trc='white') | [
"Plots",
"seismic",
"data",
"with",
"spectrogram",
"behind",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L1913-L1992 | train | 203,386 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/plotting.py | _spec_trace | def _spec_trace(trace, cmap=None, wlen=0.4, log=False, trc='k',
tralpha=0.9, size=(10, 2.5), axes=None, title=None):
"""
Function to plot a trace over that traces spectrogram.
Uses obspys spectrogram routine.
:type trace: obspy.core.trace.Trace
:param trace: trace to plot
:type cmap: str
:param cmap: [Matplotlib colormap](http://matplotlib.org/examples/color/
colormaps_reference.html)
:type wlen: float
:param wlen: Window length for fft in seconds
:type log: bool
:param log: Use a log frequency scale
:type trc: str
:param trc: Color for the trace.
:type tralpha: float
:param tralpha: Opacity level for the seismogram, from transparent (0.0) \
to opaque (1.0).
:type size: tuple
:param size: Plot size, tuple of floats, inches
:type axes: matplotlib axes
:param axes: Axes to plot onto, defaults to self generating.
:type title: str
:param title: Title for the plot.
"""
import matplotlib.pyplot as plt
if not axes:
fig = plt.figure(figsize=size)
ax1 = fig.add_subplot(111)
else:
ax1 = axes
trace.spectrogram(wlen=wlen, log=log, show=False, cmap=cmap, axes=ax1)
fig = plt.gcf()
ax2 = ax1.twinx()
y = trace.data
x = np.linspace(0, len(y) / trace.stats.sampling_rate, len(y))
ax2.plot(x, y, color=trc, linewidth=2.0, alpha=tralpha)
ax2.set_xlim(min(x), max(x))
ax2.set_ylim(min(y) * 2, max(y) * 2)
if title:
ax1.set_title(' '.join([trace.stats.station, trace.stats.channel,
trace.stats.starttime.datetime.
strftime('%Y/%m/%d %H:%M:%S')]))
if not axes:
fig.set_size_inches(size)
fig.show()
else:
return ax1, ax2 | python | def _spec_trace(trace, cmap=None, wlen=0.4, log=False, trc='k',
tralpha=0.9, size=(10, 2.5), axes=None, title=None):
"""
Function to plot a trace over that traces spectrogram.
Uses obspys spectrogram routine.
:type trace: obspy.core.trace.Trace
:param trace: trace to plot
:type cmap: str
:param cmap: [Matplotlib colormap](http://matplotlib.org/examples/color/
colormaps_reference.html)
:type wlen: float
:param wlen: Window length for fft in seconds
:type log: bool
:param log: Use a log frequency scale
:type trc: str
:param trc: Color for the trace.
:type tralpha: float
:param tralpha: Opacity level for the seismogram, from transparent (0.0) \
to opaque (1.0).
:type size: tuple
:param size: Plot size, tuple of floats, inches
:type axes: matplotlib axes
:param axes: Axes to plot onto, defaults to self generating.
:type title: str
:param title: Title for the plot.
"""
import matplotlib.pyplot as plt
if not axes:
fig = plt.figure(figsize=size)
ax1 = fig.add_subplot(111)
else:
ax1 = axes
trace.spectrogram(wlen=wlen, log=log, show=False, cmap=cmap, axes=ax1)
fig = plt.gcf()
ax2 = ax1.twinx()
y = trace.data
x = np.linspace(0, len(y) / trace.stats.sampling_rate, len(y))
ax2.plot(x, y, color=trc, linewidth=2.0, alpha=tralpha)
ax2.set_xlim(min(x), max(x))
ax2.set_ylim(min(y) * 2, max(y) * 2)
if title:
ax1.set_title(' '.join([trace.stats.station, trace.stats.channel,
trace.stats.starttime.datetime.
strftime('%Y/%m/%d %H:%M:%S')]))
if not axes:
fig.set_size_inches(size)
fig.show()
else:
return ax1, ax2 | [
"def",
"_spec_trace",
"(",
"trace",
",",
"cmap",
"=",
"None",
",",
"wlen",
"=",
"0.4",
",",
"log",
"=",
"False",
",",
"trc",
"=",
"'k'",
",",
"tralpha",
"=",
"0.9",
",",
"size",
"=",
"(",
"10",
",",
"2.5",
")",
",",
"axes",
"=",
"None",
",",
... | Function to plot a trace over that traces spectrogram.
Uses obspys spectrogram routine.
:type trace: obspy.core.trace.Trace
:param trace: trace to plot
:type cmap: str
:param cmap: [Matplotlib colormap](http://matplotlib.org/examples/color/
colormaps_reference.html)
:type wlen: float
:param wlen: Window length for fft in seconds
:type log: bool
:param log: Use a log frequency scale
:type trc: str
:param trc: Color for the trace.
:type tralpha: float
:param tralpha: Opacity level for the seismogram, from transparent (0.0) \
to opaque (1.0).
:type size: tuple
:param size: Plot size, tuple of floats, inches
:type axes: matplotlib axes
:param axes: Axes to plot onto, defaults to self generating.
:type title: str
:param title: Title for the plot. | [
"Function",
"to",
"plot",
"a",
"trace",
"over",
"that",
"traces",
"spectrogram",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L1995-L2045 | train | 203,387 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/plotting.py | subspace_detector_plot | def subspace_detector_plot(detector, stachans, size, **kwargs):
"""
Plotting for the subspace detector class.
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(
... os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
show=True)
"""
import matplotlib.pyplot as plt
if stachans == 'all' and not detector.multiplex:
stachans = detector.stachans
elif detector.multiplex:
stachans = [('multi', ' ')]
if np.isinf(detector.dimension):
msg = ' '.join(['Infinite subspace dimension. Only plotting as many',
'dimensions as events in design set'])
warnings.warn(msg)
nrows = detector.v[0].shape[1]
else:
nrows = detector.dimension
fig, axes = plt.subplots(nrows=nrows, ncols=len(stachans),
sharex=True, sharey=True, figsize=size)
x = np.arange(len(detector.u[0]), dtype=np.float32)
if detector.multiplex:
x /= len(detector.stachans) * detector.sampling_rate
else:
x /= detector.sampling_rate
for column, stachan in enumerate(stachans):
channel = detector.u[column]
for row, vector in enumerate(channel.T[0:nrows]):
if len(stachans) == 1:
if nrows == 1:
axis = axes
else:
axis = axes[row]
else:
axis = axes[row, column]
if row == 0:
axis.set_title('.'.join(stachan))
axis.plot(x, vector, 'k', linewidth=1.1)
if column == 0:
axis.set_ylabel('Basis %s' % (row + 1), rotation=0)
if row == nrows - 1:
axis.set_xlabel('Time (s)')
axis.set_yticks([])
plt.subplots_adjust(hspace=0.05)
plt.subplots_adjust(wspace=0.05)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | python | def subspace_detector_plot(detector, stachans, size, **kwargs):
"""
Plotting for the subspace detector class.
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(
... os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
show=True)
"""
import matplotlib.pyplot as plt
if stachans == 'all' and not detector.multiplex:
stachans = detector.stachans
elif detector.multiplex:
stachans = [('multi', ' ')]
if np.isinf(detector.dimension):
msg = ' '.join(['Infinite subspace dimension. Only plotting as many',
'dimensions as events in design set'])
warnings.warn(msg)
nrows = detector.v[0].shape[1]
else:
nrows = detector.dimension
fig, axes = plt.subplots(nrows=nrows, ncols=len(stachans),
sharex=True, sharey=True, figsize=size)
x = np.arange(len(detector.u[0]), dtype=np.float32)
if detector.multiplex:
x /= len(detector.stachans) * detector.sampling_rate
else:
x /= detector.sampling_rate
for column, stachan in enumerate(stachans):
channel = detector.u[column]
for row, vector in enumerate(channel.T[0:nrows]):
if len(stachans) == 1:
if nrows == 1:
axis = axes
else:
axis = axes[row]
else:
axis = axes[row, column]
if row == 0:
axis.set_title('.'.join(stachan))
axis.plot(x, vector, 'k', linewidth=1.1)
if column == 0:
axis.set_ylabel('Basis %s' % (row + 1), rotation=0)
if row == nrows - 1:
axis.set_xlabel('Time (s)')
axis.set_yticks([])
plt.subplots_adjust(hspace=0.05)
plt.subplots_adjust(wspace=0.05)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | [
"def",
"subspace_detector_plot",
"(",
"detector",
",",
"stachans",
",",
"size",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"stachans",
"==",
"'all'",
"and",
"not",
"detector",
".",
"multiplex",
":",
"stacha... | Plotting for the subspace detector class.
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(
... os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
show=True) | [
"Plotting",
"for",
"the",
"subspace",
"detector",
"class",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L2048-L2134 | train | 203,388 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/plotting.py | subspace_fc_plot | def subspace_fc_plot(detector, stachans, size, **kwargs):
"""
Plot the fractional energy capture of the detector for all events in
the design set
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_fc_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_fc_plot(detector=detector, stachans='all', size=(10, 7),
show=True)
"""
import matplotlib.pyplot as plt
if stachans == 'all' and not detector.multiplex:
stachans = detector.stachans
elif detector.multiplex:
stachans = [('multi', ' ')]
# Work out how many rows and columns are most 'square'
pfs = []
for x in range(1, len(stachans)):
if len(stachans) % x == 0:
pfs.append(x)
if stachans == [('multi', ' ')]:
ncols = 1
else:
ncols = min(pfs,
key=lambda x: abs((np.floor(np.sqrt(len(stachans))) - x)))
nrows = len(stachans) // ncols
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
sharey=True, figsize=size, squeeze=False)
for column, axis in enumerate(axes.reshape(-1)):
axis.set_title('.'.join(stachans[column]))
sig = diagsvd(detector.sigma[column], detector.u[column].shape[0],
detector.v[column].shape[0])
A = np.dot(sig, detector.v[column]) # v is v.H from scipy.svd
if detector.dimension > max(
detector.v[column].shape) or detector.dimension == np.inf:
dim = max(detector.v[column].shape) + 1
else:
dim = detector.dimension + 1
av_fc_dict = {i: [] for i in range(dim)}
for ai in A.T:
fcs = []
for j in range(dim):
av_fc_dict[j].append(float(np.dot(ai[:j].T, ai[:j])))
fcs.append(float(np.dot(ai[:j].T, ai[:j])))
axis.plot(fcs, color='grey')
avg = [np.average(_dim[1]) for _dim in av_fc_dict.items()]
axis.plot(avg, color='red', linewidth=3.)
if column % ncols == 0 or column == 0:
axis.set_ylabel('Frac. E Capture (Fc)')
if column + 1 > len(stachans) - ncols:
axis.set_xlabel('Subspace Dimension')
plt.subplots_adjust(hspace=0.2)
plt.subplots_adjust(wspace=0.2)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | python | def subspace_fc_plot(detector, stachans, size, **kwargs):
"""
Plot the fractional energy capture of the detector for all events in
the design set
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_fc_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_fc_plot(detector=detector, stachans='all', size=(10, 7),
show=True)
"""
import matplotlib.pyplot as plt
if stachans == 'all' and not detector.multiplex:
stachans = detector.stachans
elif detector.multiplex:
stachans = [('multi', ' ')]
# Work out how many rows and columns are most 'square'
pfs = []
for x in range(1, len(stachans)):
if len(stachans) % x == 0:
pfs.append(x)
if stachans == [('multi', ' ')]:
ncols = 1
else:
ncols = min(pfs,
key=lambda x: abs((np.floor(np.sqrt(len(stachans))) - x)))
nrows = len(stachans) // ncols
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
sharey=True, figsize=size, squeeze=False)
for column, axis in enumerate(axes.reshape(-1)):
axis.set_title('.'.join(stachans[column]))
sig = diagsvd(detector.sigma[column], detector.u[column].shape[0],
detector.v[column].shape[0])
A = np.dot(sig, detector.v[column]) # v is v.H from scipy.svd
if detector.dimension > max(
detector.v[column].shape) or detector.dimension == np.inf:
dim = max(detector.v[column].shape) + 1
else:
dim = detector.dimension + 1
av_fc_dict = {i: [] for i in range(dim)}
for ai in A.T:
fcs = []
for j in range(dim):
av_fc_dict[j].append(float(np.dot(ai[:j].T, ai[:j])))
fcs.append(float(np.dot(ai[:j].T, ai[:j])))
axis.plot(fcs, color='grey')
avg = [np.average(_dim[1]) for _dim in av_fc_dict.items()]
axis.plot(avg, color='red', linewidth=3.)
if column % ncols == 0 or column == 0:
axis.set_ylabel('Frac. E Capture (Fc)')
if column + 1 > len(stachans) - ncols:
axis.set_xlabel('Subspace Dimension')
plt.subplots_adjust(hspace=0.2)
plt.subplots_adjust(wspace=0.2)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | [
"def",
"subspace_fc_plot",
"(",
"detector",
",",
"stachans",
",",
"size",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"stachans",
"==",
"'all'",
"and",
"not",
"detector",
".",
"multiplex",
":",
"stachans",
... | Plot the fractional energy capture of the detector for all events in
the design set
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_fc_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_fc_plot(detector=detector, stachans='all', size=(10, 7),
show=True) | [
"Plot",
"the",
"fractional",
"energy",
"capture",
"of",
"the",
"detector",
"for",
"all",
"events",
"in",
"the",
"design",
"set"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L2137-L2224 | train | 203,389 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/plotting.py | _match_filter_plot | def _match_filter_plot(stream, cccsum, template_names, rawthresh, plotdir,
plot_format, i): # pragma: no cover
"""
Plotting function for match_filter.
:param stream: Stream to plot
:param cccsum: Cross-correlation sum to plot
:param template_names: Template names used
:param rawthresh: Threshold level
:param plotdir: Location to save plots
:param plot_format: Output plot type (e.g. png, svg, eps, pdf...)
:param i: Template index name to plot.
"""
import matplotlib.pyplot as plt
plt.ioff()
stream_plot = copy.deepcopy(stream[0])
# Downsample for plotting
stream_plot = _plotting_decimation(stream_plot, 10e5, 4)
cccsum_plot = Trace(cccsum)
cccsum_plot.stats.sampling_rate = stream[0].stats.sampling_rate
# Resample here to maintain shape better
cccsum_hist = cccsum_plot.copy()
cccsum_hist = cccsum_hist.decimate(int(stream[0].stats.
sampling_rate / 10)).data
cccsum_plot = chunk_data(cccsum_plot, 10, 'Maxabs').data
# Enforce same length
stream_plot.data = stream_plot.data[0:len(cccsum_plot)]
cccsum_plot = cccsum_plot[0:len(stream_plot.data)]
cccsum_hist = cccsum_hist[0:len(stream_plot.data)]
plot_name = (plotdir + os.sep + 'cccsum_plot_' + template_names[i] + '_' +
stream[0].stats.starttime.datetime.strftime('%Y-%m-%d') +
'.' + plot_format)
triple_plot(cccsum=cccsum_plot, cccsum_hist=cccsum_hist,
trace=stream_plot, threshold=rawthresh, save=True,
savefile=plot_name) | python | def _match_filter_plot(stream, cccsum, template_names, rawthresh, plotdir,
plot_format, i): # pragma: no cover
"""
Plotting function for match_filter.
:param stream: Stream to plot
:param cccsum: Cross-correlation sum to plot
:param template_names: Template names used
:param rawthresh: Threshold level
:param plotdir: Location to save plots
:param plot_format: Output plot type (e.g. png, svg, eps, pdf...)
:param i: Template index name to plot.
"""
import matplotlib.pyplot as plt
plt.ioff()
stream_plot = copy.deepcopy(stream[0])
# Downsample for plotting
stream_plot = _plotting_decimation(stream_plot, 10e5, 4)
cccsum_plot = Trace(cccsum)
cccsum_plot.stats.sampling_rate = stream[0].stats.sampling_rate
# Resample here to maintain shape better
cccsum_hist = cccsum_plot.copy()
cccsum_hist = cccsum_hist.decimate(int(stream[0].stats.
sampling_rate / 10)).data
cccsum_plot = chunk_data(cccsum_plot, 10, 'Maxabs').data
# Enforce same length
stream_plot.data = stream_plot.data[0:len(cccsum_plot)]
cccsum_plot = cccsum_plot[0:len(stream_plot.data)]
cccsum_hist = cccsum_hist[0:len(stream_plot.data)]
plot_name = (plotdir + os.sep + 'cccsum_plot_' + template_names[i] + '_' +
stream[0].stats.starttime.datetime.strftime('%Y-%m-%d') +
'.' + plot_format)
triple_plot(cccsum=cccsum_plot, cccsum_hist=cccsum_hist,
trace=stream_plot, threshold=rawthresh, save=True,
savefile=plot_name) | [
"def",
"_match_filter_plot",
"(",
"stream",
",",
"cccsum",
",",
"template_names",
",",
"rawthresh",
",",
"plotdir",
",",
"plot_format",
",",
"i",
")",
":",
"# pragma: no cover",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"plt",
".",
"ioff",
"(",
")"... | Plotting function for match_filter.
:param stream: Stream to plot
:param cccsum: Cross-correlation sum to plot
:param template_names: Template names used
:param rawthresh: Threshold level
:param plotdir: Location to save plots
:param plot_format: Output plot type (e.g. png, svg, eps, pdf...)
:param i: Template index name to plot. | [
"Plotting",
"function",
"for",
"match_filter",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L2227-L2261 | train | 203,390 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/plotting.py | _plotting_decimation | def _plotting_decimation(trace, max_len=10e5, decimation_step=4):
"""
Decimate data until required length reached.
:type trace: obspy.core.stream.Trace
:param trace: Trace to decimate
type max_len: int
:param max_len: Maximum length in samples
:type decimation_step: int
:param decimation_step: Decimation factor to use for each step.
:return: obspy.core.stream.Trace
.. rubric: Example
>>> from obspy import Trace
>>> import numpy as np
>>> trace = Trace(np.random.randn(1000))
>>> trace = _plotting_decimation(trace, max_len=100, decimation_step=2)
>>> print(trace.stats.npts)
63
"""
trace_len = trace.stats.npts
while trace_len > max_len:
trace.decimate(decimation_step)
trace_len = trace.stats.npts
return trace | python | def _plotting_decimation(trace, max_len=10e5, decimation_step=4):
"""
Decimate data until required length reached.
:type trace: obspy.core.stream.Trace
:param trace: Trace to decimate
type max_len: int
:param max_len: Maximum length in samples
:type decimation_step: int
:param decimation_step: Decimation factor to use for each step.
:return: obspy.core.stream.Trace
.. rubric: Example
>>> from obspy import Trace
>>> import numpy as np
>>> trace = Trace(np.random.randn(1000))
>>> trace = _plotting_decimation(trace, max_len=100, decimation_step=2)
>>> print(trace.stats.npts)
63
"""
trace_len = trace.stats.npts
while trace_len > max_len:
trace.decimate(decimation_step)
trace_len = trace.stats.npts
return trace | [
"def",
"_plotting_decimation",
"(",
"trace",
",",
"max_len",
"=",
"10e5",
",",
"decimation_step",
"=",
"4",
")",
":",
"trace_len",
"=",
"trace",
".",
"stats",
".",
"npts",
"while",
"trace_len",
">",
"max_len",
":",
"trace",
".",
"decimate",
"(",
"decimatio... | Decimate data until required length reached.
:type trace: obspy.core.stream.Trace
:param trace: Trace to decimate
type max_len: int
:param max_len: Maximum length in samples
:type decimation_step: int
:param decimation_step: Decimation factor to use for each step.
:return: obspy.core.stream.Trace
.. rubric: Example
>>> from obspy import Trace
>>> import numpy as np
>>> trace = Trace(np.random.randn(1000))
>>> trace = _plotting_decimation(trace, max_len=100, decimation_step=2)
>>> print(trace.stats.npts)
63 | [
"Decimate",
"data",
"until",
"required",
"length",
"reached",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L2264-L2290 | train | 203,391 |
eqcorrscan/EQcorrscan | eqcorrscan/doc/_ext/obspydoc.py | make_images_responsive | def make_images_responsive(app, doctree):
"""
Add Bootstrap img-responsive class to images.
"""
for fig in doctree.traverse(condition=nodes.figure):
if 'thumbnail' in fig['classes']:
continue
for img in fig.traverse(condition=nodes.image):
img['classes'].append('img-responsive') | python | def make_images_responsive(app, doctree):
"""
Add Bootstrap img-responsive class to images.
"""
for fig in doctree.traverse(condition=nodes.figure):
if 'thumbnail' in fig['classes']:
continue
for img in fig.traverse(condition=nodes.image):
img['classes'].append('img-responsive') | [
"def",
"make_images_responsive",
"(",
"app",
",",
"doctree",
")",
":",
"for",
"fig",
"in",
"doctree",
".",
"traverse",
"(",
"condition",
"=",
"nodes",
".",
"figure",
")",
":",
"if",
"'thumbnail'",
"in",
"fig",
"[",
"'classes'",
"]",
":",
"continue",
"for... | Add Bootstrap img-responsive class to images. | [
"Add",
"Bootstrap",
"img",
"-",
"responsive",
"class",
"to",
"images",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/doc/_ext/obspydoc.py#L34-L44 | train | 203,392 |
brettcannon/caniusepython3 | caniusepython3/pypi.py | _manual_overrides | def _manual_overrides(_cache_date=None):
"""Read the overrides file.
An attempt is made to read the file as it currently stands on GitHub, and
then only if that fails is the included file used.
"""
log = logging.getLogger('ciu')
request = requests.get("https://raw.githubusercontent.com/brettcannon/"
"caniusepython3/master/caniusepython3/overrides.json")
if request.status_code == 200:
log.info("Overrides loaded from GitHub and cached")
overrides = request.json()
else:
log.info("Overrides loaded from included package data and cached")
raw_bytes = pkgutil.get_data(__name__, 'overrides.json')
overrides = json.loads(raw_bytes.decode('utf-8'))
return frozenset(map(packaging.utils.canonicalize_name, overrides.keys())) | python | def _manual_overrides(_cache_date=None):
"""Read the overrides file.
An attempt is made to read the file as it currently stands on GitHub, and
then only if that fails is the included file used.
"""
log = logging.getLogger('ciu')
request = requests.get("https://raw.githubusercontent.com/brettcannon/"
"caniusepython3/master/caniusepython3/overrides.json")
if request.status_code == 200:
log.info("Overrides loaded from GitHub and cached")
overrides = request.json()
else:
log.info("Overrides loaded from included package data and cached")
raw_bytes = pkgutil.get_data(__name__, 'overrides.json')
overrides = json.loads(raw_bytes.decode('utf-8'))
return frozenset(map(packaging.utils.canonicalize_name, overrides.keys())) | [
"def",
"_manual_overrides",
"(",
"_cache_date",
"=",
"None",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'ciu'",
")",
"request",
"=",
"requests",
".",
"get",
"(",
"\"https://raw.githubusercontent.com/brettcannon/\"",
"\"caniusepython3/master/caniusepython3/... | Read the overrides file.
An attempt is made to read the file as it currently stands on GitHub, and
then only if that fails is the included file used. | [
"Read",
"the",
"overrides",
"file",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/pypi.py#L59-L75 | train | 203,393 |
brettcannon/caniusepython3 | caniusepython3/pypi.py | supports_py3 | def supports_py3(project_name):
"""Check with PyPI if a project supports Python 3."""
log = logging.getLogger("ciu")
log.info("Checking {} ...".format(project_name))
request = requests.get("https://pypi.org/pypi/{}/json".format(project_name))
if request.status_code >= 400:
log = logging.getLogger("ciu")
log.warning("problem fetching {}, assuming ported ({})".format(
project_name, request.status_code))
return True
response = request.json()
return any(c.startswith("Programming Language :: Python :: 3")
for c in response["info"]["classifiers"]) | python | def supports_py3(project_name):
"""Check with PyPI if a project supports Python 3."""
log = logging.getLogger("ciu")
log.info("Checking {} ...".format(project_name))
request = requests.get("https://pypi.org/pypi/{}/json".format(project_name))
if request.status_code >= 400:
log = logging.getLogger("ciu")
log.warning("problem fetching {}, assuming ported ({})".format(
project_name, request.status_code))
return True
response = request.json()
return any(c.startswith("Programming Language :: Python :: 3")
for c in response["info"]["classifiers"]) | [
"def",
"supports_py3",
"(",
"project_name",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"ciu\"",
")",
"log",
".",
"info",
"(",
"\"Checking {} ...\"",
".",
"format",
"(",
"project_name",
")",
")",
"request",
"=",
"requests",
".",
"get",
"(",
... | Check with PyPI if a project supports Python 3. | [
"Check",
"with",
"PyPI",
"if",
"a",
"project",
"supports",
"Python",
"3",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/pypi.py#L78-L90 | train | 203,394 |
brettcannon/caniusepython3 | caniusepython3/__init__.py | check | def check(requirements_paths=[], metadata=[], projects=[]):
"""Return True if all of the specified dependencies have been ported to Python 3.
The requirements_paths argument takes a sequence of file paths to
requirements files. The 'metadata' argument takes a sequence of strings
representing metadata. The 'projects' argument takes a sequence of project
names.
Any project that is not listed on PyPI will be considered ported.
"""
dependencies = []
dependencies.extend(projects_.projects_from_requirements(requirements_paths))
dependencies.extend(projects_.projects_from_metadata(metadata))
dependencies.extend(projects)
manual_overrides = pypi.manual_overrides()
for dependency in dependencies:
if dependency in manual_overrides:
continue
elif not pypi.supports_py3(dependency):
return False
return True | python | def check(requirements_paths=[], metadata=[], projects=[]):
"""Return True if all of the specified dependencies have been ported to Python 3.
The requirements_paths argument takes a sequence of file paths to
requirements files. The 'metadata' argument takes a sequence of strings
representing metadata. The 'projects' argument takes a sequence of project
names.
Any project that is not listed on PyPI will be considered ported.
"""
dependencies = []
dependencies.extend(projects_.projects_from_requirements(requirements_paths))
dependencies.extend(projects_.projects_from_metadata(metadata))
dependencies.extend(projects)
manual_overrides = pypi.manual_overrides()
for dependency in dependencies:
if dependency in manual_overrides:
continue
elif not pypi.supports_py3(dependency):
return False
return True | [
"def",
"check",
"(",
"requirements_paths",
"=",
"[",
"]",
",",
"metadata",
"=",
"[",
"]",
",",
"projects",
"=",
"[",
"]",
")",
":",
"dependencies",
"=",
"[",
"]",
"dependencies",
".",
"extend",
"(",
"projects_",
".",
"projects_from_requirements",
"(",
"r... | Return True if all of the specified dependencies have been ported to Python 3.
The requirements_paths argument takes a sequence of file paths to
requirements files. The 'metadata' argument takes a sequence of strings
representing metadata. The 'projects' argument takes a sequence of project
names.
Any project that is not listed on PyPI will be considered ported. | [
"Return",
"True",
"if",
"all",
"of",
"the",
"specified",
"dependencies",
"have",
"been",
"ported",
"to",
"Python",
"3",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/__init__.py#L31-L53 | train | 203,395 |
brettcannon/caniusepython3 | caniusepython3/__main__.py | projects_from_cli | def projects_from_cli(args):
"""Take arguments through the CLI can create a list of specified projects."""
description = ('Determine if a set of project dependencies will work with '
'Python 3')
parser = argparse.ArgumentParser(description=description)
req_help = 'path(s) to a pip requirements file (e.g. requirements.txt)'
parser.add_argument('--requirements', '-r', nargs='+', default=(),
help=req_help)
meta_help = 'path(s) to a PEP 426 metadata file (e.g. PKG-INFO, pydist.json)'
parser.add_argument('--metadata', '-m', nargs='+', default=(),
help=meta_help)
parser.add_argument('--projects', '-p', nargs='+', default=(),
help='name(s) of projects to test for Python 3 support')
parser.add_argument('--verbose', '-v', action='store_true',
help='verbose output (e.g. list compatibility overrides)')
parsed = parser.parse_args(args)
if not (parsed.requirements or parsed.metadata or parsed.projects):
parser.error("Missing 'requirements', 'metadata', or 'projects'")
projects = []
if parsed.verbose:
logging.getLogger('ciu').setLevel(logging.INFO)
projects.extend(projects_.projects_from_requirements(parsed.requirements))
metadata = []
for metadata_path in parsed.metadata:
with io.open(metadata_path) as file:
metadata.append(file.read())
projects.extend(projects_.projects_from_metadata(metadata))
projects.extend(map(packaging.utils.canonicalize_name, parsed.projects))
return projects | python | def projects_from_cli(args):
"""Take arguments through the CLI can create a list of specified projects."""
description = ('Determine if a set of project dependencies will work with '
'Python 3')
parser = argparse.ArgumentParser(description=description)
req_help = 'path(s) to a pip requirements file (e.g. requirements.txt)'
parser.add_argument('--requirements', '-r', nargs='+', default=(),
help=req_help)
meta_help = 'path(s) to a PEP 426 metadata file (e.g. PKG-INFO, pydist.json)'
parser.add_argument('--metadata', '-m', nargs='+', default=(),
help=meta_help)
parser.add_argument('--projects', '-p', nargs='+', default=(),
help='name(s) of projects to test for Python 3 support')
parser.add_argument('--verbose', '-v', action='store_true',
help='verbose output (e.g. list compatibility overrides)')
parsed = parser.parse_args(args)
if not (parsed.requirements or parsed.metadata or parsed.projects):
parser.error("Missing 'requirements', 'metadata', or 'projects'")
projects = []
if parsed.verbose:
logging.getLogger('ciu').setLevel(logging.INFO)
projects.extend(projects_.projects_from_requirements(parsed.requirements))
metadata = []
for metadata_path in parsed.metadata:
with io.open(metadata_path) as file:
metadata.append(file.read())
projects.extend(projects_.projects_from_metadata(metadata))
projects.extend(map(packaging.utils.canonicalize_name, parsed.projects))
return projects | [
"def",
"projects_from_cli",
"(",
"args",
")",
":",
"description",
"=",
"(",
"'Determine if a set of project dependencies will work with '",
"'Python 3'",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"req_help",
"=",... | Take arguments through the CLI can create a list of specified projects. | [
"Take",
"arguments",
"through",
"the",
"CLI",
"can",
"create",
"a",
"list",
"of",
"specified",
"projects",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/__main__.py#L33-L64 | train | 203,396 |
brettcannon/caniusepython3 | caniusepython3/__main__.py | message | def message(blockers):
"""Create a sequence of key messages based on what is blocking."""
if not blockers:
encoding = getattr(sys.stdout, 'encoding', '')
if encoding:
encoding = encoding.lower()
if encoding == 'utf-8':
# party hat
flair = "\U0001F389 "
else:
flair = ''
return [flair +
'You have 0 projects blocking you from using Python 3!']
flattened_blockers = set()
for blocker_reasons in blockers:
for blocker in blocker_reasons:
flattened_blockers.add(blocker)
need = 'You need {0} project{1} to transition to Python 3.'
formatted_need = need.format(len(flattened_blockers),
's' if len(flattened_blockers) != 1 else '')
can_port = ('Of {0} {1} project{2}, {3} {4} no direct dependencies '
'blocking {5} transition:')
formatted_can_port = can_port.format(
'those' if len(flattened_blockers) != 1 else 'that',
len(flattened_blockers),
's' if len(flattened_blockers) != 1 else '',
len(blockers),
'have' if len(blockers) != 1 else 'has',
'their' if len(blockers) != 1 else 'its')
return formatted_need, formatted_can_port | python | def message(blockers):
"""Create a sequence of key messages based on what is blocking."""
if not blockers:
encoding = getattr(sys.stdout, 'encoding', '')
if encoding:
encoding = encoding.lower()
if encoding == 'utf-8':
# party hat
flair = "\U0001F389 "
else:
flair = ''
return [flair +
'You have 0 projects blocking you from using Python 3!']
flattened_blockers = set()
for blocker_reasons in blockers:
for blocker in blocker_reasons:
flattened_blockers.add(blocker)
need = 'You need {0} project{1} to transition to Python 3.'
formatted_need = need.format(len(flattened_blockers),
's' if len(flattened_blockers) != 1 else '')
can_port = ('Of {0} {1} project{2}, {3} {4} no direct dependencies '
'blocking {5} transition:')
formatted_can_port = can_port.format(
'those' if len(flattened_blockers) != 1 else 'that',
len(flattened_blockers),
's' if len(flattened_blockers) != 1 else '',
len(blockers),
'have' if len(blockers) != 1 else 'has',
'their' if len(blockers) != 1 else 'its')
return formatted_need, formatted_can_port | [
"def",
"message",
"(",
"blockers",
")",
":",
"if",
"not",
"blockers",
":",
"encoding",
"=",
"getattr",
"(",
"sys",
".",
"stdout",
",",
"'encoding'",
",",
"''",
")",
"if",
"encoding",
":",
"encoding",
"=",
"encoding",
".",
"lower",
"(",
")",
"if",
"en... | Create a sequence of key messages based on what is blocking. | [
"Create",
"a",
"sequence",
"of",
"key",
"messages",
"based",
"on",
"what",
"is",
"blocking",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/__main__.py#L67-L96 | train | 203,397 |
brettcannon/caniusepython3 | caniusepython3/__main__.py | pprint_blockers | def pprint_blockers(blockers):
"""Pretty print blockers into a sequence of strings.
Results will be sorted by top-level project name. This means that if a
project is blocking another project then the dependent project will be
what is used in the sorting, not the project at the bottom of the
dependency graph.
"""
pprinted = []
for blocker in sorted(blockers, key=lambda x: tuple(reversed(x))):
buf = [blocker[0]]
if len(blocker) > 1:
buf.append(' (which is blocking ')
buf.append(', which is blocking '.join(blocker[1:]))
buf.append(')')
pprinted.append(''.join(buf))
return pprinted | python | def pprint_blockers(blockers):
"""Pretty print blockers into a sequence of strings.
Results will be sorted by top-level project name. This means that if a
project is blocking another project then the dependent project will be
what is used in the sorting, not the project at the bottom of the
dependency graph.
"""
pprinted = []
for blocker in sorted(blockers, key=lambda x: tuple(reversed(x))):
buf = [blocker[0]]
if len(blocker) > 1:
buf.append(' (which is blocking ')
buf.append(', which is blocking '.join(blocker[1:]))
buf.append(')')
pprinted.append(''.join(buf))
return pprinted | [
"def",
"pprint_blockers",
"(",
"blockers",
")",
":",
"pprinted",
"=",
"[",
"]",
"for",
"blocker",
"in",
"sorted",
"(",
"blockers",
",",
"key",
"=",
"lambda",
"x",
":",
"tuple",
"(",
"reversed",
"(",
"x",
")",
")",
")",
":",
"buf",
"=",
"[",
"blocke... | Pretty print blockers into a sequence of strings.
Results will be sorted by top-level project name. This means that if a
project is blocking another project then the dependent project will be
what is used in the sorting, not the project at the bottom of the
dependency graph. | [
"Pretty",
"print",
"blockers",
"into",
"a",
"sequence",
"of",
"strings",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/__main__.py#L99-L116 | train | 203,398 |
brettcannon/caniusepython3 | caniusepython3/__main__.py | check | def check(projects):
"""Check the specified projects for Python 3 compatibility."""
log = logging.getLogger('ciu')
log.info('{0} top-level projects to check'.format(len(projects)))
print('Finding and checking dependencies ...')
blockers = dependencies.blockers(projects)
print('')
for line in message(blockers):
print(line)
print('')
for line in pprint_blockers(blockers):
print(' ', line)
return len(blockers) == 0 | python | def check(projects):
"""Check the specified projects for Python 3 compatibility."""
log = logging.getLogger('ciu')
log.info('{0} top-level projects to check'.format(len(projects)))
print('Finding and checking dependencies ...')
blockers = dependencies.blockers(projects)
print('')
for line in message(blockers):
print(line)
print('')
for line in pprint_blockers(blockers):
print(' ', line)
return len(blockers) == 0 | [
"def",
"check",
"(",
"projects",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'ciu'",
")",
"log",
".",
"info",
"(",
"'{0} top-level projects to check'",
".",
"format",
"(",
"len",
"(",
"projects",
")",
")",
")",
"print",
"(",
"'Finding and chec... | Check the specified projects for Python 3 compatibility. | [
"Check",
"the",
"specified",
"projects",
"for",
"Python",
"3",
"compatibility",
"."
] | 195775d8f1891f73eb90734f3edda0c57e08dbf3 | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/__main__.py#L119-L134 | train | 203,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.