repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | WazeRouteCalculator.coords_string_parser | def coords_string_parser(self, coords):
"""Pareses the address string into coordinates to match address_to_coords return object"""
lat, lon = coords.split(',')
return {"lat": lat.strip(), "lon": lon.strip(), "bounds": {}} | python | def coords_string_parser(self, coords):
"""Pareses the address string into coordinates to match address_to_coords return object"""
lat, lon = coords.split(',')
return {"lat": lat.strip(), "lon": lon.strip(), "bounds": {}} | [
"def",
"coords_string_parser",
"(",
"self",
",",
"coords",
")",
":",
"lat",
",",
"lon",
"=",
"coords",
".",
"split",
"(",
"','",
")",
"return",
"{",
"\"lat\"",
":",
"lat",
".",
"strip",
"(",
")",
",",
"\"lon\"",
":",
"lon",
".",
"strip",
"(",
")",
... | Pareses the address string into coordinates to match address_to_coords return object | [
"Pareses",
"the",
"address",
"string",
"into",
"coordinates",
"to",
"match",
"address_to_coords",
"return",
"object"
] | 13ddb064571bb2bc0ceec51b5b317640b2bc3fb2 | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L82-L86 | train | 203,200 |
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | WazeRouteCalculator.address_to_coords | def address_to_coords(self, address):
"""Convert address to coordinates"""
base_coords = self.BASE_COORDS[self.region]
get_cord = self.COORD_SERVERS[self.region]
url_options = {
"q": address,
"lang": "eng",
"origin": "livemap",
"lat": base_coords["lat"],
"lon": base_coords["lon"]
}
response = requests.get(self.WAZE_URL + get_cord, params=url_options, headers=self.HEADERS)
for response_json in response.json():
if response_json.get('city'):
lat = response_json['location']['lat']
lon = response_json['location']['lon']
bounds = response_json['bounds'] # sometimes the coords don't match up
if bounds is not None:
bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom'])
bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right'])
else:
bounds = {}
return {"lat": lat, "lon": lon, "bounds": bounds}
raise WRCError("Cannot get coords for %s" % address) | python | def address_to_coords(self, address):
"""Convert address to coordinates"""
base_coords = self.BASE_COORDS[self.region]
get_cord = self.COORD_SERVERS[self.region]
url_options = {
"q": address,
"lang": "eng",
"origin": "livemap",
"lat": base_coords["lat"],
"lon": base_coords["lon"]
}
response = requests.get(self.WAZE_URL + get_cord, params=url_options, headers=self.HEADERS)
for response_json in response.json():
if response_json.get('city'):
lat = response_json['location']['lat']
lon = response_json['location']['lon']
bounds = response_json['bounds'] # sometimes the coords don't match up
if bounds is not None:
bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom'])
bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right'])
else:
bounds = {}
return {"lat": lat, "lon": lon, "bounds": bounds}
raise WRCError("Cannot get coords for %s" % address) | [
"def",
"address_to_coords",
"(",
"self",
",",
"address",
")",
":",
"base_coords",
"=",
"self",
".",
"BASE_COORDS",
"[",
"self",
".",
"region",
"]",
"get_cord",
"=",
"self",
".",
"COORD_SERVERS",
"[",
"self",
".",
"region",
"]",
"url_options",
"=",
"{",
"... | Convert address to coordinates | [
"Convert",
"address",
"to",
"coordinates"
] | 13ddb064571bb2bc0ceec51b5b317640b2bc3fb2 | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L88-L113 | train | 203,201 |
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | WazeRouteCalculator.get_route | def get_route(self, npaths=1, time_delta=0):
"""Get route data from waze"""
routing_server = self.ROUTING_SERVERS[self.region]
url_options = {
"from": "x:%s y:%s" % (self.start_coords["lon"], self.start_coords["lat"]),
"to": "x:%s y:%s" % (self.end_coords["lon"], self.end_coords["lat"]),
"at": time_delta,
"returnJSON": "true",
"returnGeometries": "true",
"returnInstructions": "true",
"timeout": 60000,
"nPaths": npaths,
"options": "AVOID_TRAILS:t",
}
if self.vehicle_type:
url_options["vehicleType"] = self.vehicle_type
response = requests.get(self.WAZE_URL + routing_server, params=url_options, headers=self.HEADERS)
response.encoding = 'utf-8'
response_json = self._check_response(response)
if response_json:
if 'error' in response_json:
raise WRCError(response_json.get("error"))
else:
if response_json.get("alternatives"):
return [alt['response'] for alt in response_json['alternatives']]
if npaths > 1:
return [response_json['response']]
return response_json['response']
else:
raise WRCError("empty response") | python | def get_route(self, npaths=1, time_delta=0):
"""Get route data from waze"""
routing_server = self.ROUTING_SERVERS[self.region]
url_options = {
"from": "x:%s y:%s" % (self.start_coords["lon"], self.start_coords["lat"]),
"to": "x:%s y:%s" % (self.end_coords["lon"], self.end_coords["lat"]),
"at": time_delta,
"returnJSON": "true",
"returnGeometries": "true",
"returnInstructions": "true",
"timeout": 60000,
"nPaths": npaths,
"options": "AVOID_TRAILS:t",
}
if self.vehicle_type:
url_options["vehicleType"] = self.vehicle_type
response = requests.get(self.WAZE_URL + routing_server, params=url_options, headers=self.HEADERS)
response.encoding = 'utf-8'
response_json = self._check_response(response)
if response_json:
if 'error' in response_json:
raise WRCError(response_json.get("error"))
else:
if response_json.get("alternatives"):
return [alt['response'] for alt in response_json['alternatives']]
if npaths > 1:
return [response_json['response']]
return response_json['response']
else:
raise WRCError("empty response") | [
"def",
"get_route",
"(",
"self",
",",
"npaths",
"=",
"1",
",",
"time_delta",
"=",
"0",
")",
":",
"routing_server",
"=",
"self",
".",
"ROUTING_SERVERS",
"[",
"self",
".",
"region",
"]",
"url_options",
"=",
"{",
"\"from\"",
":",
"\"x:%s y:%s\"",
"%",
"(",
... | Get route data from waze | [
"Get",
"route",
"data",
"from",
"waze"
] | 13ddb064571bb2bc0ceec51b5b317640b2bc3fb2 | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L115-L147 | train | 203,202 |
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | WazeRouteCalculator._add_up_route | def _add_up_route(self, results, real_time=True, stop_at_bounds=False):
"""Calculate route time and distance."""
start_bounds = self.start_coords['bounds']
end_bounds = self.end_coords['bounds']
def between(target, min, max):
return target > min and target < max
time = 0
distance = 0
for segment in results:
if stop_at_bounds and segment.get('path'):
x = segment['path']['x']
y = segment['path']['y']
if (
between(x, start_bounds.get('left', 0), start_bounds.get('right', 0)) or
between(x, end_bounds.get('left', 0), end_bounds.get('right', 0))
) and (
between(y, start_bounds.get('bottom', 0), start_bounds.get('top', 0)) or
between(y, end_bounds.get('bottom', 0), end_bounds.get('top', 0))
):
continue
time += segment['crossTime' if real_time else 'crossTimeWithoutRealTime']
distance += segment['length']
route_time = time / 60.0
route_distance = distance / 1000.0
return route_time, route_distance | python | def _add_up_route(self, results, real_time=True, stop_at_bounds=False):
"""Calculate route time and distance."""
start_bounds = self.start_coords['bounds']
end_bounds = self.end_coords['bounds']
def between(target, min, max):
return target > min and target < max
time = 0
distance = 0
for segment in results:
if stop_at_bounds and segment.get('path'):
x = segment['path']['x']
y = segment['path']['y']
if (
between(x, start_bounds.get('left', 0), start_bounds.get('right', 0)) or
between(x, end_bounds.get('left', 0), end_bounds.get('right', 0))
) and (
between(y, start_bounds.get('bottom', 0), start_bounds.get('top', 0)) or
between(y, end_bounds.get('bottom', 0), end_bounds.get('top', 0))
):
continue
time += segment['crossTime' if real_time else 'crossTimeWithoutRealTime']
distance += segment['length']
route_time = time / 60.0
route_distance = distance / 1000.0
return route_time, route_distance | [
"def",
"_add_up_route",
"(",
"self",
",",
"results",
",",
"real_time",
"=",
"True",
",",
"stop_at_bounds",
"=",
"False",
")",
":",
"start_bounds",
"=",
"self",
".",
"start_coords",
"[",
"'bounds'",
"]",
"end_bounds",
"=",
"self",
".",
"end_coords",
"[",
"'... | Calculate route time and distance. | [
"Calculate",
"route",
"time",
"and",
"distance",
"."
] | 13ddb064571bb2bc0ceec51b5b317640b2bc3fb2 | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L158-L185 | train | 203,203 |
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | WazeRouteCalculator.calc_route_info | def calc_route_info(self, real_time=True, stop_at_bounds=False, time_delta=0):
"""Calculate best route info."""
route = self.get_route(1, time_delta)
results = route['results']
route_time, route_distance = self._add_up_route(results, real_time=real_time, stop_at_bounds=stop_at_bounds)
self.log.info('Time %.2f minutes, distance %.2f km.', route_time, route_distance)
return route_time, route_distance | python | def calc_route_info(self, real_time=True, stop_at_bounds=False, time_delta=0):
"""Calculate best route info."""
route = self.get_route(1, time_delta)
results = route['results']
route_time, route_distance = self._add_up_route(results, real_time=real_time, stop_at_bounds=stop_at_bounds)
self.log.info('Time %.2f minutes, distance %.2f km.', route_time, route_distance)
return route_time, route_distance | [
"def",
"calc_route_info",
"(",
"self",
",",
"real_time",
"=",
"True",
",",
"stop_at_bounds",
"=",
"False",
",",
"time_delta",
"=",
"0",
")",
":",
"route",
"=",
"self",
".",
"get_route",
"(",
"1",
",",
"time_delta",
")",
"results",
"=",
"route",
"[",
"'... | Calculate best route info. | [
"Calculate",
"best",
"route",
"info",
"."
] | 13ddb064571bb2bc0ceec51b5b317640b2bc3fb2 | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L187-L194 | train | 203,204 |
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | WazeRouteCalculator.calc_all_routes_info | def calc_all_routes_info(self, npaths=3, real_time=True, stop_at_bounds=False, time_delta=0):
"""Calculate all route infos."""
routes = self.get_route(npaths, time_delta)
results = {route['routeName']: self._add_up_route(route['results'], real_time=real_time, stop_at_bounds=stop_at_bounds) for route in routes}
route_time = [route[0] for route in results.values()]
route_distance = [route[1] for route in results.values()]
self.log.info('Time %.2f - %.2f minutes, distance %.2f - %.2f km.', min(route_time), max(route_time), min(route_distance), max(route_distance))
return results | python | def calc_all_routes_info(self, npaths=3, real_time=True, stop_at_bounds=False, time_delta=0):
"""Calculate all route infos."""
routes = self.get_route(npaths, time_delta)
results = {route['routeName']: self._add_up_route(route['results'], real_time=real_time, stop_at_bounds=stop_at_bounds) for route in routes}
route_time = [route[0] for route in results.values()]
route_distance = [route[1] for route in results.values()]
self.log.info('Time %.2f - %.2f minutes, distance %.2f - %.2f km.', min(route_time), max(route_time), min(route_distance), max(route_distance))
return results | [
"def",
"calc_all_routes_info",
"(",
"self",
",",
"npaths",
"=",
"3",
",",
"real_time",
"=",
"True",
",",
"stop_at_bounds",
"=",
"False",
",",
"time_delta",
"=",
"0",
")",
":",
"routes",
"=",
"self",
".",
"get_route",
"(",
"npaths",
",",
"time_delta",
")"... | Calculate all route infos. | [
"Calculate",
"all",
"route",
"infos",
"."
] | 13ddb064571bb2bc0ceec51b5b317640b2bc3fb2 | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L196-L204 | train | 203,205 |
plivo/sharq | sharq/queue.py | SharQ._initialize | def _initialize(self):
"""Read the SharQ configuration and set appropriate
variables. Open a redis connection pool and load all
the Lua scripts.
"""
self._key_prefix = self._config.get('redis', 'key_prefix')
self._job_expire_interval = int(
self._config.get('sharq', 'job_expire_interval')
)
self._default_job_requeue_limit = int(
self._config.get('sharq', 'default_job_requeue_limit')
)
# initalize redis
redis_connection_type = self._config.get('redis', 'conn_type')
db = self._config.get('redis', 'db')
if redis_connection_type == 'unix_sock':
self._r = redis.StrictRedis(
db=db,
unix_socket_path=self._config.get('redis', 'unix_socket_path')
)
elif redis_connection_type == 'tcp_sock':
self._r = redis.StrictRedis(
db=db,
host=self._config.get('redis', 'host'),
port=self._config.get('redis', 'port')
)
self._load_lua_scripts() | python | def _initialize(self):
"""Read the SharQ configuration and set appropriate
variables. Open a redis connection pool and load all
the Lua scripts.
"""
self._key_prefix = self._config.get('redis', 'key_prefix')
self._job_expire_interval = int(
self._config.get('sharq', 'job_expire_interval')
)
self._default_job_requeue_limit = int(
self._config.get('sharq', 'default_job_requeue_limit')
)
# initalize redis
redis_connection_type = self._config.get('redis', 'conn_type')
db = self._config.get('redis', 'db')
if redis_connection_type == 'unix_sock':
self._r = redis.StrictRedis(
db=db,
unix_socket_path=self._config.get('redis', 'unix_socket_path')
)
elif redis_connection_type == 'tcp_sock':
self._r = redis.StrictRedis(
db=db,
host=self._config.get('redis', 'host'),
port=self._config.get('redis', 'port')
)
self._load_lua_scripts() | [
"def",
"_initialize",
"(",
"self",
")",
":",
"self",
".",
"_key_prefix",
"=",
"self",
".",
"_config",
".",
"get",
"(",
"'redis'",
",",
"'key_prefix'",
")",
"self",
".",
"_job_expire_interval",
"=",
"int",
"(",
"self",
".",
"_config",
".",
"get",
"(",
"... | Read the SharQ configuration and set appropriate
variables. Open a redis connection pool and load all
the Lua scripts. | [
"Read",
"the",
"SharQ",
"configuration",
"and",
"set",
"appropriate",
"variables",
".",
"Open",
"a",
"redis",
"connection",
"pool",
"and",
"load",
"all",
"the",
"Lua",
"scripts",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L33-L61 | train | 203,206 |
plivo/sharq | sharq/queue.py | SharQ._load_config | def _load_config(self):
"""Read the configuration file and load it into memory."""
self._config = ConfigParser.SafeConfigParser()
self._config.read(self.config_path) | python | def _load_config(self):
"""Read the configuration file and load it into memory."""
self._config = ConfigParser.SafeConfigParser()
self._config.read(self.config_path) | [
"def",
"_load_config",
"(",
"self",
")",
":",
"self",
".",
"_config",
"=",
"ConfigParser",
".",
"SafeConfigParser",
"(",
")",
"self",
".",
"_config",
".",
"read",
"(",
"self",
".",
"config_path",
")"
] | Read the configuration file and load it into memory. | [
"Read",
"the",
"configuration",
"file",
"and",
"load",
"it",
"into",
"memory",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L63-L66 | train | 203,207 |
plivo/sharq | sharq/queue.py | SharQ._load_lua_scripts | def _load_lua_scripts(self):
"""Loads all lua scripts required by SharQ."""
# load lua scripts
lua_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'scripts/lua'
)
with open(os.path.join(
lua_script_path,
'enqueue.lua'), 'r') as enqueue_file:
self._lua_enqueue_script = enqueue_file.read()
self._lua_enqueue = self._r.register_script(
self._lua_enqueue_script)
with open(os.path.join(
lua_script_path,
'dequeue.lua'), 'r') as dequeue_file:
self._lua_dequeue_script = dequeue_file.read()
self._lua_dequeue = self._r.register_script(
self._lua_dequeue_script)
with open(os.path.join(
lua_script_path,
'finish.lua'), 'r') as finish_file:
self._lua_finish_script = finish_file.read()
self._lua_finish = self._r.register_script(self._lua_finish_script)
with open(os.path.join(
lua_script_path,
'interval.lua'), 'r') as interval_file:
self._lua_interval_script = interval_file.read()
self._lua_interval = self._r.register_script(
self._lua_interval_script)
with open(os.path.join(
lua_script_path,
'requeue.lua'), 'r') as requeue_file:
self._lua_requeue_script = requeue_file.read()
self._lua_requeue = self._r.register_script(
self._lua_requeue_script)
with open(os.path.join(
lua_script_path,
'metrics.lua'), 'r') as metrics_file:
self._lua_metrics_script = metrics_file.read()
self._lua_metrics = self._r.register_script(
self._lua_metrics_script) | python | def _load_lua_scripts(self):
"""Loads all lua scripts required by SharQ."""
# load lua scripts
lua_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'scripts/lua'
)
with open(os.path.join(
lua_script_path,
'enqueue.lua'), 'r') as enqueue_file:
self._lua_enqueue_script = enqueue_file.read()
self._lua_enqueue = self._r.register_script(
self._lua_enqueue_script)
with open(os.path.join(
lua_script_path,
'dequeue.lua'), 'r') as dequeue_file:
self._lua_dequeue_script = dequeue_file.read()
self._lua_dequeue = self._r.register_script(
self._lua_dequeue_script)
with open(os.path.join(
lua_script_path,
'finish.lua'), 'r') as finish_file:
self._lua_finish_script = finish_file.read()
self._lua_finish = self._r.register_script(self._lua_finish_script)
with open(os.path.join(
lua_script_path,
'interval.lua'), 'r') as interval_file:
self._lua_interval_script = interval_file.read()
self._lua_interval = self._r.register_script(
self._lua_interval_script)
with open(os.path.join(
lua_script_path,
'requeue.lua'), 'r') as requeue_file:
self._lua_requeue_script = requeue_file.read()
self._lua_requeue = self._r.register_script(
self._lua_requeue_script)
with open(os.path.join(
lua_script_path,
'metrics.lua'), 'r') as metrics_file:
self._lua_metrics_script = metrics_file.read()
self._lua_metrics = self._r.register_script(
self._lua_metrics_script) | [
"def",
"_load_lua_scripts",
"(",
"self",
")",
":",
"# load lua scripts",
"lua_script_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'scripts/lua... | Loads all lua scripts required by SharQ. | [
"Loads",
"all",
"lua",
"scripts",
"required",
"by",
"SharQ",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L76-L122 | train | 203,208 |
plivo/sharq | sharq/queue.py | SharQ.enqueue | def enqueue(self, payload, interval, job_id,
queue_id, queue_type='default', requeue_limit=None):
"""Enqueues the job into the specified queue_id
of a particular queue_type
"""
# validate all the input
if not is_valid_interval(interval):
raise BadArgumentException('`interval` has an invalid value.')
if not is_valid_identifier(job_id):
raise BadArgumentException('`job_id` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
if requeue_limit is None:
requeue_limit = self._default_job_requeue_limit
if not is_valid_requeue_limit(requeue_limit):
raise BadArgumentException('`requeue_limit` has an invalid value.')
try:
serialized_payload = serialize_payload(payload)
except TypeError as e:
raise BadArgumentException(e.message)
timestamp = str(generate_epoch())
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp,
queue_id,
job_id,
'"%s"' % serialized_payload,
interval,
requeue_limit
]
self._lua_enqueue(keys=keys, args=args)
response = {
'status': 'queued'
}
return response | python | def enqueue(self, payload, interval, job_id,
queue_id, queue_type='default', requeue_limit=None):
"""Enqueues the job into the specified queue_id
of a particular queue_type
"""
# validate all the input
if not is_valid_interval(interval):
raise BadArgumentException('`interval` has an invalid value.')
if not is_valid_identifier(job_id):
raise BadArgumentException('`job_id` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
if requeue_limit is None:
requeue_limit = self._default_job_requeue_limit
if not is_valid_requeue_limit(requeue_limit):
raise BadArgumentException('`requeue_limit` has an invalid value.')
try:
serialized_payload = serialize_payload(payload)
except TypeError as e:
raise BadArgumentException(e.message)
timestamp = str(generate_epoch())
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp,
queue_id,
job_id,
'"%s"' % serialized_payload,
interval,
requeue_limit
]
self._lua_enqueue(keys=keys, args=args)
response = {
'status': 'queued'
}
return response | [
"def",
"enqueue",
"(",
"self",
",",
"payload",
",",
"interval",
",",
"job_id",
",",
"queue_id",
",",
"queue_type",
"=",
"'default'",
",",
"requeue_limit",
"=",
"None",
")",
":",
"# validate all the input",
"if",
"not",
"is_valid_interval",
"(",
"interval",
")"... | Enqueues the job into the specified queue_id
of a particular queue_type | [
"Enqueues",
"the",
"job",
"into",
"the",
"specified",
"queue_id",
"of",
"a",
"particular",
"queue_type"
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L128-L178 | train | 203,209 |
plivo/sharq | sharq/queue.py | SharQ.dequeue | def dequeue(self, queue_type='default'):
"""Dequeues a job from any of the ready queues
based on the queue_type. If no job is ready,
returns a failure status.
"""
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
timestamp = str(generate_epoch())
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp,
self._job_expire_interval
]
dequeue_response = self._lua_dequeue(keys=keys, args=args)
if len(dequeue_response) < 4:
response = {
'status': 'failure'
}
return response
queue_id, job_id, payload, requeues_remaining = dequeue_response
payload = deserialize_payload(payload[1:-1])
response = {
'status': 'success',
'queue_id': queue_id,
'job_id': job_id,
'payload': payload,
'requeues_remaining': int(requeues_remaining)
}
return response | python | def dequeue(self, queue_type='default'):
"""Dequeues a job from any of the ready queues
based on the queue_type. If no job is ready,
returns a failure status.
"""
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
timestamp = str(generate_epoch())
keys = [
self._key_prefix,
queue_type
]
args = [
timestamp,
self._job_expire_interval
]
dequeue_response = self._lua_dequeue(keys=keys, args=args)
if len(dequeue_response) < 4:
response = {
'status': 'failure'
}
return response
queue_id, job_id, payload, requeues_remaining = dequeue_response
payload = deserialize_payload(payload[1:-1])
response = {
'status': 'success',
'queue_id': queue_id,
'job_id': job_id,
'payload': payload,
'requeues_remaining': int(requeues_remaining)
}
return response | [
"def",
"dequeue",
"(",
"self",
",",
"queue_type",
"=",
"'default'",
")",
":",
"if",
"not",
"is_valid_identifier",
"(",
"queue_type",
")",
":",
"raise",
"BadArgumentException",
"(",
"'`queue_type` has an invalid value.'",
")",
"timestamp",
"=",
"str",
"(",
"generat... | Dequeues a job from any of the ready queues
based on the queue_type. If no job is ready,
returns a failure status. | [
"Dequeues",
"a",
"job",
"from",
"any",
"of",
"the",
"ready",
"queues",
"based",
"on",
"the",
"queue_type",
".",
"If",
"no",
"job",
"is",
"ready",
"returns",
"a",
"failure",
"status",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L180-L218 | train | 203,210 |
plivo/sharq | sharq/queue.py | SharQ.interval | def interval(self, interval, queue_id, queue_type='default'):
"""Updates the interval for a specific queue_id
of a particular queue type.
"""
# validate all the input
if not is_valid_interval(interval):
raise BadArgumentException('`interval` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
# generate the interval key
interval_hmap_key = '%s:interval' % self._key_prefix
interval_queue_key = '%s:%s' % (queue_type, queue_id)
keys = [
interval_hmap_key,
interval_queue_key
]
args = [
interval
]
interval_response = self._lua_interval(keys=keys, args=args)
if interval_response == 0:
# the queue with the id and type does not exist.
response = {
'status': 'failure'
}
else:
response = {
'status': 'success'
}
return response | python | def interval(self, interval, queue_id, queue_type='default'):
"""Updates the interval for a specific queue_id
of a particular queue type.
"""
# validate all the input
if not is_valid_interval(interval):
raise BadArgumentException('`interval` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
# generate the interval key
interval_hmap_key = '%s:interval' % self._key_prefix
interval_queue_key = '%s:%s' % (queue_type, queue_id)
keys = [
interval_hmap_key,
interval_queue_key
]
args = [
interval
]
interval_response = self._lua_interval(keys=keys, args=args)
if interval_response == 0:
# the queue with the id and type does not exist.
response = {
'status': 'failure'
}
else:
response = {
'status': 'success'
}
return response | [
"def",
"interval",
"(",
"self",
",",
"interval",
",",
"queue_id",
",",
"queue_type",
"=",
"'default'",
")",
":",
"# validate all the input",
"if",
"not",
"is_valid_interval",
"(",
"interval",
")",
":",
"raise",
"BadArgumentException",
"(",
"'`interval` has an invali... | Updates the interval for a specific queue_id
of a particular queue type. | [
"Updates",
"the",
"interval",
"for",
"a",
"specific",
"queue_id",
"of",
"a",
"particular",
"queue",
"type",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L257-L293 | train | 203,211 |
plivo/sharq | sharq/utils.py | is_valid_identifier | def is_valid_identifier(identifier):
"""Checks if the given identifier is valid or not. A valid
identifier may consists of the following characters with a
maximum length of 100 characters, minimum of 1 character.
Valid characters for an identifier,
- A to Z
- a to z
- 0 to 9
- _ (underscore)
- - (hypen)
"""
if not isinstance(identifier, basestring):
return False
if len(identifier) > 100 or len(identifier) < 1:
return False
condensed_form = set(list(identifier.lower()))
return condensed_form.issubset(VALID_IDENTIFIER_SET) | python | def is_valid_identifier(identifier):
"""Checks if the given identifier is valid or not. A valid
identifier may consists of the following characters with a
maximum length of 100 characters, minimum of 1 character.
Valid characters for an identifier,
- A to Z
- a to z
- 0 to 9
- _ (underscore)
- - (hypen)
"""
if not isinstance(identifier, basestring):
return False
if len(identifier) > 100 or len(identifier) < 1:
return False
condensed_form = set(list(identifier.lower()))
return condensed_form.issubset(VALID_IDENTIFIER_SET) | [
"def",
"is_valid_identifier",
"(",
"identifier",
")",
":",
"if",
"not",
"isinstance",
"(",
"identifier",
",",
"basestring",
")",
":",
"return",
"False",
"if",
"len",
"(",
"identifier",
")",
">",
"100",
"or",
"len",
"(",
"identifier",
")",
"<",
"1",
":",
... | Checks if the given identifier is valid or not. A valid
identifier may consists of the following characters with a
maximum length of 100 characters, minimum of 1 character.
Valid characters for an identifier,
- A to Z
- a to z
- 0 to 9
- _ (underscore)
- - (hypen) | [
"Checks",
"if",
"the",
"given",
"identifier",
"is",
"valid",
"or",
"not",
".",
"A",
"valid",
"identifier",
"may",
"consists",
"of",
"the",
"following",
"characters",
"with",
"a",
"maximum",
"length",
"of",
"100",
"characters",
"minimum",
"of",
"1",
"characte... | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/utils.py#L9-L28 | train | 203,212 |
plivo/sharq | sharq/utils.py | is_valid_interval | def is_valid_interval(interval):
"""Checks if the given interval is valid. A valid interval
is always a positive, non-zero integer value.
"""
if not isinstance(interval, (int, long)):
return False
if interval <= 0:
return False
return True | python | def is_valid_interval(interval):
"""Checks if the given interval is valid. A valid interval
is always a positive, non-zero integer value.
"""
if not isinstance(interval, (int, long)):
return False
if interval <= 0:
return False
return True | [
"def",
"is_valid_interval",
"(",
"interval",
")",
":",
"if",
"not",
"isinstance",
"(",
"interval",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"return",
"False",
"if",
"interval",
"<=",
"0",
":",
"return",
"False",
"return",
"True"
] | Checks if the given interval is valid. A valid interval
is always a positive, non-zero integer value. | [
"Checks",
"if",
"the",
"given",
"interval",
"is",
"valid",
".",
"A",
"valid",
"interval",
"is",
"always",
"a",
"positive",
"non",
"-",
"zero",
"integer",
"value",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/utils.py#L31-L41 | train | 203,213 |
plivo/sharq | sharq/utils.py | is_valid_requeue_limit | def is_valid_requeue_limit(requeue_limit):
"""Checks if the given requeue limit is valid.
A valid requeue limit is always greater than
or equal to -1.
"""
if not isinstance(requeue_limit, (int, long)):
return False
if requeue_limit <= -2:
return False
return True | python | def is_valid_requeue_limit(requeue_limit):
"""Checks if the given requeue limit is valid.
A valid requeue limit is always greater than
or equal to -1.
"""
if not isinstance(requeue_limit, (int, long)):
return False
if requeue_limit <= -2:
return False
return True | [
"def",
"is_valid_requeue_limit",
"(",
"requeue_limit",
")",
":",
"if",
"not",
"isinstance",
"(",
"requeue_limit",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"return",
"False",
"if",
"requeue_limit",
"<=",
"-",
"2",
":",
"return",
"False",
"return",
"True... | Checks if the given requeue limit is valid.
A valid requeue limit is always greater than
or equal to -1. | [
"Checks",
"if",
"the",
"given",
"requeue",
"limit",
"is",
"valid",
".",
"A",
"valid",
"requeue",
"limit",
"is",
"always",
"greater",
"than",
"or",
"equal",
"to",
"-",
"1",
"."
] | 32bbfbdcbbaa8e154271ffd125ac4500382f3d19 | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/utils.py#L44-L55 | train | 203,214 |
mvantellingen/localshop | src/localshop/apps/packages/pypi.py | get_search_names | def get_search_names(name):
"""Return a list of values to search on when we are looking for a package
with the given name.
This is required to search on both pyramid_debugtoolbar and
pyramid-debugtoolbar.
"""
parts = re.split('[-_.]', name)
if len(parts) == 1:
return parts
result = set()
for i in range(len(parts) - 1, 0, -1):
for s1 in '-_.':
prefix = s1.join(parts[:i])
for s2 in '-_.':
suffix = s2.join(parts[i:])
for s3 in '-_.':
result.add(s3.join([prefix, suffix]))
return list(result) | python | def get_search_names(name):
"""Return a list of values to search on when we are looking for a package
with the given name.
This is required to search on both pyramid_debugtoolbar and
pyramid-debugtoolbar.
"""
parts = re.split('[-_.]', name)
if len(parts) == 1:
return parts
result = set()
for i in range(len(parts) - 1, 0, -1):
for s1 in '-_.':
prefix = s1.join(parts[:i])
for s2 in '-_.':
suffix = s2.join(parts[i:])
for s3 in '-_.':
result.add(s3.join([prefix, suffix]))
return list(result) | [
"def",
"get_search_names",
"(",
"name",
")",
":",
"parts",
"=",
"re",
".",
"split",
"(",
"'[-_.]'",
",",
"name",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"return",
"parts",
"result",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"range",
"(... | Return a list of values to search on when we are looking for a package
with the given name.
This is required to search on both pyramid_debugtoolbar and
pyramid-debugtoolbar. | [
"Return",
"a",
"list",
"of",
"values",
"to",
"search",
"on",
"when",
"we",
"are",
"looking",
"for",
"a",
"package",
"with",
"the",
"given",
"name",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/pypi.py#L6-L26 | train | 203,215 |
mvantellingen/localshop | src/localshop/apps/packages/utils.py | alter_old_distutils_request | def alter_old_distutils_request(request: WSGIRequest):
"""Alter the request body for compatibility with older distutils clients
Due to a bug in the Python distutils library, the request post is sent
using \n as a separator instead of the \r\n that the HTTP spec demands.
This breaks the Django form parser and therefore we have to write a
custom parser.
This bug was fixed in the Python 2.7.4 and 3.4:
http://bugs.python.org/issue10510
"""
# We first need to retrieve the body before accessing POST or FILES since
# it can only be read once.
body = request.body
if request.POST or request.FILES:
return
new_body = BytesIO()
# Split the response in the various parts based on the boundary string
content_type, opts = parse_header(request.META['CONTENT_TYPE'].encode('ascii'))
parts = body.split(b'\n--' + opts['boundary'] + b'\n')
for part in parts:
if b'\n\n' not in part:
continue
headers, content = part.split(b'\n\n', 1)
if not headers:
continue
new_body.write(b'--' + opts['boundary'] + b'\r\n')
new_body.write(headers.replace(b'\n', b'\r\n'))
new_body.write(b'\r\n\r\n')
new_body.write(content)
new_body.write(b'\r\n')
new_body.write(b'--' + opts['boundary'] + b'--\r\n')
request._body = new_body.getvalue()
request.META['CONTENT_LENGTH'] = len(request._body)
# Clear out _files and _post so that the request object re-parses the body
if hasattr(request, '_files'):
delattr(request, '_files')
if hasattr(request, '_post'):
delattr(request, '_post') | python | def alter_old_distutils_request(request: WSGIRequest):
"""Alter the request body for compatibility with older distutils clients
Due to a bug in the Python distutils library, the request post is sent
using \n as a separator instead of the \r\n that the HTTP spec demands.
This breaks the Django form parser and therefore we have to write a
custom parser.
This bug was fixed in the Python 2.7.4 and 3.4:
http://bugs.python.org/issue10510
"""
# We first need to retrieve the body before accessing POST or FILES since
# it can only be read once.
body = request.body
if request.POST or request.FILES:
return
new_body = BytesIO()
# Split the response in the various parts based on the boundary string
content_type, opts = parse_header(request.META['CONTENT_TYPE'].encode('ascii'))
parts = body.split(b'\n--' + opts['boundary'] + b'\n')
for part in parts:
if b'\n\n' not in part:
continue
headers, content = part.split(b'\n\n', 1)
if not headers:
continue
new_body.write(b'--' + opts['boundary'] + b'\r\n')
new_body.write(headers.replace(b'\n', b'\r\n'))
new_body.write(b'\r\n\r\n')
new_body.write(content)
new_body.write(b'\r\n')
new_body.write(b'--' + opts['boundary'] + b'--\r\n')
request._body = new_body.getvalue()
request.META['CONTENT_LENGTH'] = len(request._body)
# Clear out _files and _post so that the request object re-parses the body
if hasattr(request, '_files'):
delattr(request, '_files')
if hasattr(request, '_post'):
delattr(request, '_post') | [
"def",
"alter_old_distutils_request",
"(",
"request",
":",
"WSGIRequest",
")",
":",
"# We first need to retrieve the body before accessing POST or FILES since",
"# it can only be read once.",
"body",
"=",
"request",
".",
"body",
"if",
"request",
".",
"POST",
"or",
"request",
... | Alter the request body for compatibility with older distutils clients
Due to a bug in the Python distutils library, the request post is sent
using \n as a separator instead of the \r\n that the HTTP spec demands.
This breaks the Django form parser and therefore we have to write a
custom parser.
This bug was fixed in the Python 2.7.4 and 3.4:
http://bugs.python.org/issue10510 | [
"Alter",
"the",
"request",
"body",
"for",
"compatibility",
"with",
"older",
"distutils",
"clients"
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/utils.py#L13-L58 | train | 203,216 |
mvantellingen/localshop | src/localshop/apps/packages/utils.py | delete_files | def delete_files(sender, **kwargs):
"""Signal callback for deleting old files when database item is deleted"""
instance = kwargs['instance']
if not hasattr(instance.distribution, 'path'):
return
if not os.path.exists(instance.distribution.path):
return
# Check if there are other instances which reference this fle
is_referenced = (
instance.__class__.objects
.filter(distribution=instance.distribution)
.exclude(pk=instance._get_pk_val())
.exists())
if is_referenced:
return
try:
instance.distribution.storage.delete(instance.distribution.path)
except Exception:
logger.exception(
'Error when trying to delete file %s of package %s:' % (
instance.pk, instance.distribution.path)) | python | def delete_files(sender, **kwargs):
"""Signal callback for deleting old files when database item is deleted"""
instance = kwargs['instance']
if not hasattr(instance.distribution, 'path'):
return
if not os.path.exists(instance.distribution.path):
return
# Check if there are other instances which reference this fle
is_referenced = (
instance.__class__.objects
.filter(distribution=instance.distribution)
.exclude(pk=instance._get_pk_val())
.exists())
if is_referenced:
return
try:
instance.distribution.storage.delete(instance.distribution.path)
except Exception:
logger.exception(
'Error when trying to delete file %s of package %s:' % (
instance.pk, instance.distribution.path)) | [
"def",
"delete_files",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"instance",
"=",
"kwargs",
"[",
"'instance'",
"]",
"if",
"not",
"hasattr",
"(",
"instance",
".",
"distribution",
",",
"'path'",
")",
":",
"return",
"if",
"not",
"os",
".",
"path",... | Signal callback for deleting old files when database item is deleted | [
"Signal",
"callback",
"for",
"deleting",
"old",
"files",
"when",
"database",
"item",
"is",
"deleted"
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/utils.py#L61-L85 | train | 203,217 |
mvantellingen/localshop | src/localshop/apps/packages/utils.py | md5_hash_file | def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest() | python | def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest() | [
"def",
"md5_hash_file",
"(",
"fh",
")",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"while",
"True",
":",
"data",
"=",
"fh",
".",
"read",
"(",
"8192",
")",
"if",
"not",
"data",
":",
"break",
"md5",
".",
"update",
"(",
"data",
")",
"return",... | Return the md5 hash of the given file-object | [
"Return",
"the",
"md5",
"hash",
"of",
"the",
"given",
"file",
"-",
"object"
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/utils.py#L88-L96 | train | 203,218 |
mvantellingen/localshop | src/localshop/apps/packages/utils.py | get_versio_versioning_scheme | def get_versio_versioning_scheme(full_class_path):
"""Return a class based on it's full path"""
module_path = '.'.join(full_class_path.split('.')[0:-1])
class_name = full_class_path.split('.')[-1]
try:
module = importlib.import_module(module_path)
except ImportError:
raise RuntimeError('Invalid specified Versio schema {}'.format(full_class_path))
try:
return getattr(module, class_name)
except AttributeError:
raise RuntimeError(
'Could not find Versio schema class {!r} inside {!r} module.'.format(
class_name, module_path)) | python | def get_versio_versioning_scheme(full_class_path):
"""Return a class based on it's full path"""
module_path = '.'.join(full_class_path.split('.')[0:-1])
class_name = full_class_path.split('.')[-1]
try:
module = importlib.import_module(module_path)
except ImportError:
raise RuntimeError('Invalid specified Versio schema {}'.format(full_class_path))
try:
return getattr(module, class_name)
except AttributeError:
raise RuntimeError(
'Could not find Versio schema class {!r} inside {!r} module.'.format(
class_name, module_path)) | [
"def",
"get_versio_versioning_scheme",
"(",
"full_class_path",
")",
":",
"module_path",
"=",
"'.'",
".",
"join",
"(",
"full_class_path",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
":",
"-",
"1",
"]",
")",
"class_name",
"=",
"full_class_path",
".",
"split",
"... | Return a class based on it's full path | [
"Return",
"a",
"class",
"based",
"on",
"it",
"s",
"full",
"path"
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/utils.py#L99-L113 | train | 203,219 |
mvantellingen/localshop | src/localshop/apps/packages/xmlrpc.py | search | def search(spec, operator='and'):
"""Implement xmlrpc search command.
This only searches through the mirrored and private packages
"""
field_map = {
'name': 'name__icontains',
'summary': 'releases__summary__icontains',
}
query_filter = None
for field, values in spec.items():
for value in values:
if field not in field_map:
continue
field_filter = Q(**{field_map[field]: value})
if not query_filter:
query_filter = field_filter
continue
if operator == 'and':
query_filter &= field_filter
else:
query_filter |= field_filter
result = []
packages = models.Package.objects.filter(query_filter).all()[:20]
for package in packages:
release = package.releases.all()[0]
result.append({
'name': package.name,
'summary': release.summary,
'version': release.version,
'_pypi_ordering': 0,
})
return result | python | def search(spec, operator='and'):
"""Implement xmlrpc search command.
This only searches through the mirrored and private packages
"""
field_map = {
'name': 'name__icontains',
'summary': 'releases__summary__icontains',
}
query_filter = None
for field, values in spec.items():
for value in values:
if field not in field_map:
continue
field_filter = Q(**{field_map[field]: value})
if not query_filter:
query_filter = field_filter
continue
if operator == 'and':
query_filter &= field_filter
else:
query_filter |= field_filter
result = []
packages = models.Package.objects.filter(query_filter).all()[:20]
for package in packages:
release = package.releases.all()[0]
result.append({
'name': package.name,
'summary': release.summary,
'version': release.version,
'_pypi_ordering': 0,
})
return result | [
"def",
"search",
"(",
"spec",
",",
"operator",
"=",
"'and'",
")",
":",
"field_map",
"=",
"{",
"'name'",
":",
"'name__icontains'",
",",
"'summary'",
":",
"'releases__summary__icontains'",
",",
"}",
"query_filter",
"=",
"None",
"for",
"field",
",",
"values",
"... | Implement xmlrpc search command.
This only searches through the mirrored and private packages | [
"Implement",
"xmlrpc",
"search",
"command",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/xmlrpc.py#L26-L63 | train | 203,220 |
mvantellingen/localshop | src/localshop/apps/permissions/utils.py | credentials_required | def credentials_required(view_func):
"""
This decorator should be used with views that need simple authentication
against Django's authentication framework.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def decorator(request, *args, **kwargs):
if settings.LOCALSHOP_USE_PROXIED_IP:
try:
ip_addr = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return HttpResponseForbidden('No permission')
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# The client's IP will be the first one.
ip_addr = ip_addr.split(",")[0].strip()
else:
ip_addr = request.META['REMOTE_ADDR']
if CIDR.objects.has_access(ip_addr, with_credentials=False):
return view_func(request, *args, **kwargs)
if not CIDR.objects.has_access(ip_addr, with_credentials=True):
return HttpResponseForbidden('No permission')
# Just return the original view because already logged in
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
user = authenticate_user(request)
if user is not None:
login(request, user)
return view_func(request, *args, **kwargs)
return HttpResponseUnauthorized(content='Authorization Required')
return decorator | python | def credentials_required(view_func):
"""
This decorator should be used with views that need simple authentication
against Django's authentication framework.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def decorator(request, *args, **kwargs):
if settings.LOCALSHOP_USE_PROXIED_IP:
try:
ip_addr = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return HttpResponseForbidden('No permission')
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# The client's IP will be the first one.
ip_addr = ip_addr.split(",")[0].strip()
else:
ip_addr = request.META['REMOTE_ADDR']
if CIDR.objects.has_access(ip_addr, with_credentials=False):
return view_func(request, *args, **kwargs)
if not CIDR.objects.has_access(ip_addr, with_credentials=True):
return HttpResponseForbidden('No permission')
# Just return the original view because already logged in
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
user = authenticate_user(request)
if user is not None:
login(request, user)
return view_func(request, *args, **kwargs)
return HttpResponseUnauthorized(content='Authorization Required')
return decorator | [
"def",
"credentials_required",
"(",
"view_func",
")",
":",
"@",
"wraps",
"(",
"view_func",
",",
"assigned",
"=",
"available_attrs",
"(",
"view_func",
")",
")",
"def",
"decorator",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",... | This decorator should be used with views that need simple authentication
against Django's authentication framework. | [
"This",
"decorator",
"should",
"be",
"used",
"with",
"views",
"that",
"need",
"simple",
"authentication",
"against",
"Django",
"s",
"authentication",
"framework",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/permissions/utils.py#L48-L83 | train | 203,221 |
mvantellingen/localshop | src/localshop/utils.py | no_duplicates | def no_duplicates(function, *args, **kwargs):
"""
Makes sure that no duplicated tasks are enqueued.
"""
@wraps(function)
def wrapper(self, *args, **kwargs):
key = generate_key(function, *args, **kwargs)
try:
function(self, *args, **kwargs)
finally:
logging.info('Removing key %s', key)
cache.delete(key)
return wrapper | python | def no_duplicates(function, *args, **kwargs):
"""
Makes sure that no duplicated tasks are enqueued.
"""
@wraps(function)
def wrapper(self, *args, **kwargs):
key = generate_key(function, *args, **kwargs)
try:
function(self, *args, **kwargs)
finally:
logging.info('Removing key %s', key)
cache.delete(key)
return wrapper | [
"def",
"no_duplicates",
"(",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"@",
"wraps",
"(",
"function",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"key",
"=",
"generate_key",
"(",
... | Makes sure that no duplicated tasks are enqueued. | [
"Makes",
"sure",
"that",
"no",
"duplicated",
"tasks",
"are",
"enqueued",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/utils.py#L25-L38 | train | 203,222 |
mvantellingen/localshop | src/localshop/apps/packages/tasks.py | download_file | def download_file(pk):
"""Download the file reference in `models.ReleaseFile` with the given pk.
"""
release_file = models.ReleaseFile.objects.get(pk=pk)
logging.info("Downloading %s", release_file.url)
proxies = None
if settings.LOCALSHOP_HTTP_PROXY:
proxies = settings.LOCALSHOP_HTTP_PROXY
response = requests.get(release_file.url, stream=True, proxies=proxies)
# Write the file to the django file field
filename = os.path.basename(release_file.url)
# Setting the size manually since Django can't figure it our from
# the raw HTTPResponse
if 'content-length' in response.headers:
size = int(response.headers['content-length'])
else:
size = len(response.content)
# Setting the content type by first looking at the response header
# and falling back to guessing it from the filename
default_content_type = 'application/octet-stream'
content_type = response.headers.get('content-type')
if content_type is None or content_type == default_content_type:
content_type = mimetypes.guess_type(filename)[0] or default_content_type
# Using Django's temporary file upload system to not risk memory
# overflows
with TemporaryUploadedFile(name=filename, size=size, charset='utf-8',
content_type=content_type) as temp_file:
temp_file.write(response.content)
temp_file.seek(0)
# Validate the md5 hash of the downloaded file
md5_hash = md5_hash_file(temp_file)
if md5_hash != release_file.md5_digest:
logging.error("MD5 hash mismatch: %s (expected: %s)" % (
md5_hash, release_file.md5_digest))
return
release_file.distribution.save(filename, temp_file)
release_file.save()
logging.info("Complete") | python | def download_file(pk):
"""Download the file reference in `models.ReleaseFile` with the given pk.
"""
release_file = models.ReleaseFile.objects.get(pk=pk)
logging.info("Downloading %s", release_file.url)
proxies = None
if settings.LOCALSHOP_HTTP_PROXY:
proxies = settings.LOCALSHOP_HTTP_PROXY
response = requests.get(release_file.url, stream=True, proxies=proxies)
# Write the file to the django file field
filename = os.path.basename(release_file.url)
# Setting the size manually since Django can't figure it our from
# the raw HTTPResponse
if 'content-length' in response.headers:
size = int(response.headers['content-length'])
else:
size = len(response.content)
# Setting the content type by first looking at the response header
# and falling back to guessing it from the filename
default_content_type = 'application/octet-stream'
content_type = response.headers.get('content-type')
if content_type is None or content_type == default_content_type:
content_type = mimetypes.guess_type(filename)[0] or default_content_type
# Using Django's temporary file upload system to not risk memory
# overflows
with TemporaryUploadedFile(name=filename, size=size, charset='utf-8',
content_type=content_type) as temp_file:
temp_file.write(response.content)
temp_file.seek(0)
# Validate the md5 hash of the downloaded file
md5_hash = md5_hash_file(temp_file)
if md5_hash != release_file.md5_digest:
logging.error("MD5 hash mismatch: %s (expected: %s)" % (
md5_hash, release_file.md5_digest))
return
release_file.distribution.save(filename, temp_file)
release_file.save()
logging.info("Complete") | [
"def",
"download_file",
"(",
"pk",
")",
":",
"release_file",
"=",
"models",
".",
"ReleaseFile",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"pk",
")",
"logging",
".",
"info",
"(",
"\"Downloading %s\"",
",",
"release_file",
".",
"url",
")",
"proxies",
"="... | Download the file reference in `models.ReleaseFile` with the given pk. | [
"Download",
"the",
"file",
"reference",
"in",
"models",
".",
"ReleaseFile",
"with",
"the",
"given",
"pk",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/tasks.py#L102-L147 | train | 203,223 |
mvantellingen/localshop | src/localshop/apps/packages/views.py | handle_register_or_upload | def handle_register_or_upload(post_data, files, user, repository):
"""Process a `register` or `upload` comment issued via distutils.
This method is called with the authenticated user.
"""
name = post_data.get('name')
version = post_data.get('version')
if settings.LOCALSHOP_VERSIONING_TYPE:
scheme = get_versio_versioning_scheme(settings.LOCALSHOP_VERSIONING_TYPE)
try:
Version(version, scheme=scheme)
except AttributeError:
response = HttpResponseBadRequest(
reason="Invalid version supplied '{!s}' for '{!s}' scheme.".format(
version, settings.LOCALSHOP_VERSIONING_TYPE))
return response
if not name or not version:
logger.info("Missing name or version for package")
return HttpResponseBadRequest('No name or version given')
try:
condition = Q()
for search_name in get_search_names(name):
condition |= Q(name__iexact=search_name)
package = repository.packages.get(condition)
# Error out when we try to override a mirror'ed package for now
# not sure what the best thing is
if not package.is_local:
return HttpResponseBadRequest(
'%s is a pypi package!' % package.name)
try:
release = package.releases.get(version=version)
except ObjectDoesNotExist:
release = None
except ObjectDoesNotExist:
package = None
release = None
# Validate the data
form = forms.ReleaseForm(post_data, instance=release)
if not form.is_valid():
return HttpResponseBadRequest(reason=form.errors.values()[0][0])
if not package:
pkg_form = forms.PackageForm(post_data, repository=repository)
if not pkg_form.is_valid():
return HttpResponseBadRequest(
reason=six.next(six.itervalues(pkg_form.errors))[0])
package = pkg_form.save()
release = form.save(commit=False)
release.package = package
release.save()
# If this is an upload action then process the uploaded file
if files:
files = {
'distribution': files['content']
}
filename = files['distribution']._name
try:
release_file = release.files.get(filename=filename)
if settings.LOCALSHOP_RELEASE_OVERWRITE is False:
message = 'That it already released, please bump version.'
return HttpResponseBadRequest(message)
except ObjectDoesNotExist:
release_file = models.ReleaseFile(
release=release, filename=filename)
form_file = forms.ReleaseFileForm(
post_data, files, instance=release_file)
if not form_file.is_valid():
return HttpResponseBadRequest('ERRORS %s' % form_file.errors)
release_file = form_file.save(commit=False)
release_file.save()
return HttpResponse() | python | def handle_register_or_upload(post_data, files, user, repository):
"""Process a `register` or `upload` comment issued via distutils.
This method is called with the authenticated user.
"""
name = post_data.get('name')
version = post_data.get('version')
if settings.LOCALSHOP_VERSIONING_TYPE:
scheme = get_versio_versioning_scheme(settings.LOCALSHOP_VERSIONING_TYPE)
try:
Version(version, scheme=scheme)
except AttributeError:
response = HttpResponseBadRequest(
reason="Invalid version supplied '{!s}' for '{!s}' scheme.".format(
version, settings.LOCALSHOP_VERSIONING_TYPE))
return response
if not name or not version:
logger.info("Missing name or version for package")
return HttpResponseBadRequest('No name or version given')
try:
condition = Q()
for search_name in get_search_names(name):
condition |= Q(name__iexact=search_name)
package = repository.packages.get(condition)
# Error out when we try to override a mirror'ed package for now
# not sure what the best thing is
if not package.is_local:
return HttpResponseBadRequest(
'%s is a pypi package!' % package.name)
try:
release = package.releases.get(version=version)
except ObjectDoesNotExist:
release = None
except ObjectDoesNotExist:
package = None
release = None
# Validate the data
form = forms.ReleaseForm(post_data, instance=release)
if not form.is_valid():
return HttpResponseBadRequest(reason=form.errors.values()[0][0])
if not package:
pkg_form = forms.PackageForm(post_data, repository=repository)
if not pkg_form.is_valid():
return HttpResponseBadRequest(
reason=six.next(six.itervalues(pkg_form.errors))[0])
package = pkg_form.save()
release = form.save(commit=False)
release.package = package
release.save()
# If this is an upload action then process the uploaded file
if files:
files = {
'distribution': files['content']
}
filename = files['distribution']._name
try:
release_file = release.files.get(filename=filename)
if settings.LOCALSHOP_RELEASE_OVERWRITE is False:
message = 'That it already released, please bump version.'
return HttpResponseBadRequest(message)
except ObjectDoesNotExist:
release_file = models.ReleaseFile(
release=release, filename=filename)
form_file = forms.ReleaseFileForm(
post_data, files, instance=release_file)
if not form_file.is_valid():
return HttpResponseBadRequest('ERRORS %s' % form_file.errors)
release_file = form_file.save(commit=False)
release_file.save()
return HttpResponse() | [
"def",
"handle_register_or_upload",
"(",
"post_data",
",",
"files",
",",
"user",
",",
"repository",
")",
":",
"name",
"=",
"post_data",
".",
"get",
"(",
"'name'",
")",
"version",
"=",
"post_data",
".",
"get",
"(",
"'version'",
")",
"if",
"settings",
".",
... | Process a `register` or `upload` comment issued via distutils.
This method is called with the authenticated user. | [
"Process",
"a",
"register",
"or",
"upload",
"comment",
"issued",
"via",
"distutils",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/views.py#L155-L237 | train | 203,224 |
mvantellingen/localshop | src/localshop/apps/packages/models.py | ReleaseFile.download | def download(self):
"""Start a celery task to download the release file from pypi.
If `settings.LOCALSHOP_ISOLATED` is True then download the file
in-process.
"""
from .tasks import download_file
if not settings.LOCALSHOP_ISOLATED:
download_file.delay(pk=self.pk)
else:
download_file(pk=self.pk) | python | def download(self):
"""Start a celery task to download the release file from pypi.
If `settings.LOCALSHOP_ISOLATED` is True then download the file
in-process.
"""
from .tasks import download_file
if not settings.LOCALSHOP_ISOLATED:
download_file.delay(pk=self.pk)
else:
download_file(pk=self.pk) | [
"def",
"download",
"(",
"self",
")",
":",
"from",
".",
"tasks",
"import",
"download_file",
"if",
"not",
"settings",
".",
"LOCALSHOP_ISOLATED",
":",
"download_file",
".",
"delay",
"(",
"pk",
"=",
"self",
".",
"pk",
")",
"else",
":",
"download_file",
"(",
... | Start a celery task to download the release file from pypi.
If `settings.LOCALSHOP_ISOLATED` is True then download the file
in-process. | [
"Start",
"a",
"celery",
"task",
"to",
"download",
"the",
"release",
"file",
"from",
"pypi",
"."
] | 32310dc454720aefdea5bf4cea7f78a38c183954 | https://github.com/mvantellingen/localshop/blob/32310dc454720aefdea5bf4cea7f78a38c183954/src/localshop/apps/packages/models.py#L252-L263 | train | 203,225 |
syrusakbary/promise | promise/dataloader.py | dispatch_queue | def dispatch_queue(loader):
# type: (DataLoader) -> None
"""
Given the current state of a Loader instance, perform a batch load
from its current queue.
"""
# Take the current loader queue, replacing it with an empty queue.
queue = loader._queue
loader._queue = []
# If a maxBatchSize was provided and the queue is longer, then segment the
# queue into multiple batches, otherwise treat the queue as a single batch.
max_batch_size = loader.max_batch_size
if max_batch_size and max_batch_size < len(queue):
chunks = get_chunks(queue, max_batch_size)
for chunk in chunks:
dispatch_queue_batch(loader, chunk)
else:
dispatch_queue_batch(loader, queue) | python | def dispatch_queue(loader):
# type: (DataLoader) -> None
"""
Given the current state of a Loader instance, perform a batch load
from its current queue.
"""
# Take the current loader queue, replacing it with an empty queue.
queue = loader._queue
loader._queue = []
# If a maxBatchSize was provided and the queue is longer, then segment the
# queue into multiple batches, otherwise treat the queue as a single batch.
max_batch_size = loader.max_batch_size
if max_batch_size and max_batch_size < len(queue):
chunks = get_chunks(queue, max_batch_size)
for chunk in chunks:
dispatch_queue_batch(loader, chunk)
else:
dispatch_queue_batch(loader, queue) | [
"def",
"dispatch_queue",
"(",
"loader",
")",
":",
"# type: (DataLoader) -> None",
"# Take the current loader queue, replacing it with an empty queue.",
"queue",
"=",
"loader",
".",
"_queue",
"loader",
".",
"_queue",
"=",
"[",
"]",
"# If a maxBatchSize was provided and the queue... | Given the current state of a Loader instance, perform a batch load
from its current queue. | [
"Given",
"the",
"current",
"state",
"of",
"a",
"Loader",
"instance",
"perform",
"a",
"batch",
"load",
"from",
"its",
"current",
"queue",
"."
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L233-L252 | train | 203,226 |
syrusakbary/promise | promise/dataloader.py | failed_dispatch | def failed_dispatch(loader, queue, error):
# type: (DataLoader, Iterable[Loader], Exception) -> None
"""
Do not cache individual loads if the entire batch dispatch fails,
but still reject each request so they do not hang.
"""
for l in queue:
loader.clear(l.key)
l.reject(error) | python | def failed_dispatch(loader, queue, error):
# type: (DataLoader, Iterable[Loader], Exception) -> None
"""
Do not cache individual loads if the entire batch dispatch fails,
but still reject each request so they do not hang.
"""
for l in queue:
loader.clear(l.key)
l.reject(error) | [
"def",
"failed_dispatch",
"(",
"loader",
",",
"queue",
",",
"error",
")",
":",
"# type: (DataLoader, Iterable[Loader], Exception) -> None",
"for",
"l",
"in",
"queue",
":",
"loader",
".",
"clear",
"(",
"l",
".",
"key",
")",
"l",
".",
"reject",
"(",
"error",
"... | Do not cache individual loads if the entire batch dispatch fails,
but still reject each request so they do not hang. | [
"Do",
"not",
"cache",
"individual",
"loads",
"if",
"the",
"entire",
"batch",
"dispatch",
"fails",
"but",
"still",
"reject",
"each",
"request",
"so",
"they",
"do",
"not",
"hang",
"."
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L319-L327 | train | 203,227 |
syrusakbary/promise | promise/dataloader.py | DataLoader.load | def load(self, key=None):
# type: (Hashable) -> Promise
"""
Loads a key, returning a `Promise` for the value represented by that key.
"""
if key is None:
raise TypeError(
(
"The loader.load() function must be called with a value,"
+ "but got: {}."
).format(key)
)
cache_key = self.get_cache_key(key)
# If caching and there is a cache-hit, return cached Promise.
if self.cache:
cached_promise = self._promise_cache.get(cache_key)
if cached_promise:
return cached_promise
# Otherwise, produce a new Promise for this value.
promise = Promise(partial(self.do_resolve_reject, key)) # type: ignore
# If caching, cache this promise.
if self.cache:
self._promise_cache[cache_key] = promise
return promise | python | def load(self, key=None):
# type: (Hashable) -> Promise
"""
Loads a key, returning a `Promise` for the value represented by that key.
"""
if key is None:
raise TypeError(
(
"The loader.load() function must be called with a value,"
+ "but got: {}."
).format(key)
)
cache_key = self.get_cache_key(key)
# If caching and there is a cache-hit, return cached Promise.
if self.cache:
cached_promise = self._promise_cache.get(cache_key)
if cached_promise:
return cached_promise
# Otherwise, produce a new Promise for this value.
promise = Promise(partial(self.do_resolve_reject, key)) # type: ignore
# If caching, cache this promise.
if self.cache:
self._promise_cache[cache_key] = promise
return promise | [
"def",
"load",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"# type: (Hashable) -> Promise",
"if",
"key",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"(",
"\"The loader.load() function must be called with a value,\"",
"+",
"\"but got: {}.\"",
")",
".",
"format"... | Loads a key, returning a `Promise` for the value represented by that key. | [
"Loads",
"a",
"key",
"returning",
"a",
"Promise",
"for",
"the",
"value",
"represented",
"by",
"that",
"key",
"."
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L80-L109 | train | 203,228 |
syrusakbary/promise | promise/dataloader.py | DataLoader.load_many | def load_many(self, keys):
# type: (Iterable[Hashable]) -> Promise
"""
Loads multiple keys, promising an array of values
>>> a, b = await my_loader.load_many([ 'a', 'b' ])
This is equivalent to the more verbose:
>>> a, b = await Promise.all([
>>> my_loader.load('a'),
>>> my_loader.load('b')
>>> ])
"""
if not isinstance(keys, Iterable):
raise TypeError(
(
"The loader.loadMany() function must be called with Array<key> "
+ "but got: {}."
).format(keys)
)
return Promise.all([self.load(key) for key in keys]) | python | def load_many(self, keys):
# type: (Iterable[Hashable]) -> Promise
"""
Loads multiple keys, promising an array of values
>>> a, b = await my_loader.load_many([ 'a', 'b' ])
This is equivalent to the more verbose:
>>> a, b = await Promise.all([
>>> my_loader.load('a'),
>>> my_loader.load('b')
>>> ])
"""
if not isinstance(keys, Iterable):
raise TypeError(
(
"The loader.loadMany() function must be called with Array<key> "
+ "but got: {}."
).format(keys)
)
return Promise.all([self.load(key) for key in keys]) | [
"def",
"load_many",
"(",
"self",
",",
"keys",
")",
":",
"# type: (Iterable[Hashable]) -> Promise",
"if",
"not",
"isinstance",
"(",
"keys",
",",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"(",
"\"The loader.loadMany() function must be called with Array<key> \"",
"+... | Loads multiple keys, promising an array of values
>>> a, b = await my_loader.load_many([ 'a', 'b' ])
This is equivalent to the more verbose:
>>> a, b = await Promise.all([
>>> my_loader.load('a'),
>>> my_loader.load('b')
>>> ]) | [
"Loads",
"multiple",
"keys",
"promising",
"an",
"array",
"of",
"values"
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L126-L148 | train | 203,229 |
syrusakbary/promise | promise/dataloader.py | DataLoader.clear | def clear(self, key):
# type: (Hashable) -> DataLoader
"""
Clears the value at `key` from the cache, if it exists. Returns itself for
method chaining.
"""
cache_key = self.get_cache_key(key)
self._promise_cache.pop(cache_key, None)
return self | python | def clear(self, key):
# type: (Hashable) -> DataLoader
"""
Clears the value at `key` from the cache, if it exists. Returns itself for
method chaining.
"""
cache_key = self.get_cache_key(key)
self._promise_cache.pop(cache_key, None)
return self | [
"def",
"clear",
"(",
"self",
",",
"key",
")",
":",
"# type: (Hashable) -> DataLoader",
"cache_key",
"=",
"self",
".",
"get_cache_key",
"(",
"key",
")",
"self",
".",
"_promise_cache",
".",
"pop",
"(",
"cache_key",
",",
"None",
")",
"return",
"self"
] | Clears the value at `key` from the cache, if it exists. Returns itself for
method chaining. | [
"Clears",
"the",
"value",
"at",
"key",
"from",
"the",
"cache",
"if",
"it",
"exists",
".",
"Returns",
"itself",
"for",
"method",
"chaining",
"."
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L150-L158 | train | 203,230 |
syrusakbary/promise | promise/dataloader.py | DataLoader.prime | def prime(self, key, value):
# type: (Hashable, Any) -> DataLoader
"""
Adds the provied key and value to the cache. If the key already exists, no
change is made. Returns itself for method chaining.
"""
cache_key = self.get_cache_key(key)
# Only add the key if it does not already exist.
if cache_key not in self._promise_cache:
# Cache a rejected promise if the value is an Error, in order to match
# the behavior of load(key).
if isinstance(value, Exception):
promise = Promise.reject(value)
else:
promise = Promise.resolve(value)
self._promise_cache[cache_key] = promise
return self | python | def prime(self, key, value):
# type: (Hashable, Any) -> DataLoader
"""
Adds the provied key and value to the cache. If the key already exists, no
change is made. Returns itself for method chaining.
"""
cache_key = self.get_cache_key(key)
# Only add the key if it does not already exist.
if cache_key not in self._promise_cache:
# Cache a rejected promise if the value is an Error, in order to match
# the behavior of load(key).
if isinstance(value, Exception):
promise = Promise.reject(value)
else:
promise = Promise.resolve(value)
self._promise_cache[cache_key] = promise
return self | [
"def",
"prime",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"# type: (Hashable, Any) -> DataLoader",
"cache_key",
"=",
"self",
".",
"get_cache_key",
"(",
"key",
")",
"# Only add the key if it does not already exist.",
"if",
"cache_key",
"not",
"in",
"self",
"."... | Adds the provied key and value to the cache. If the key already exists, no
change is made. Returns itself for method chaining. | [
"Adds",
"the",
"provied",
"key",
"and",
"value",
"to",
"the",
"cache",
".",
"If",
"the",
"key",
"already",
"exists",
"no",
"change",
"is",
"made",
".",
"Returns",
"itself",
"for",
"method",
"chaining",
"."
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L170-L189 | train | 203,231 |
syrusakbary/promise | promise/pyutils/version.py | get_complete_version | def get_complete_version(version=None):
"""Returns a tuple of the promise version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from promise import VERSION
return VERSION
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version | python | def get_complete_version(version=None):
"""Returns a tuple of the promise version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from promise import VERSION
return VERSION
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version | [
"def",
"get_complete_version",
"(",
"version",
"=",
"None",
")",
":",
"if",
"version",
"is",
"None",
":",
"from",
"promise",
"import",
"VERSION",
"return",
"VERSION",
"else",
":",
"assert",
"len",
"(",
"version",
")",
"==",
"5",
"assert",
"version",
"[",
... | Returns a tuple of the promise version. If version argument is non-empty,
then checks for correctness of the tuple provided. | [
"Returns",
"a",
"tuple",
"of",
"the",
"promise",
"version",
".",
"If",
"version",
"argument",
"is",
"non",
"-",
"empty",
"then",
"checks",
"for",
"correctness",
"of",
"the",
"tuple",
"provided",
"."
] | d80d791fcc86c89713dac57b55e56c0a9024f153 | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/pyutils/version.py#L40-L52 | train | 203,232 |
eqcorrscan/EQcorrscan | eqcorrscan/core/lag_calc.py | _xcorr_interp | def _xcorr_interp(ccc, dt):
"""
Intrpolate around the maximum correlation value for sub-sample precision.
:param ccc: Cross-correlation array
:type ccc: numpy.ndarray
:param dt: sample interval
:type dt: float
:return: Position of interpolated maximum in seconds from start of ccc
:rtype: float
"""
if ccc.shape[0] == 1:
cc = ccc[0]
else:
cc = ccc
# Code borrowed from obspy.signal.cross_correlation.xcorr_pick_correction
cc_curvature = np.concatenate((np.zeros(1), np.diff(cc, 2), np.zeros(1)))
cc_t = np.arange(0, len(cc) * dt, dt)
peak_index = cc.argmax()
first_sample = peak_index
# XXX this could be improved..
while first_sample > 0 and cc_curvature[first_sample - 1] <= 0:
first_sample -= 1
last_sample = peak_index
while last_sample < len(cc) - 1 and cc_curvature[last_sample + 1] <= 0:
last_sample += 1
num_samples = last_sample - first_sample + 1
if num_samples < 3:
msg = "Less than 3 samples selected for fit to cross " + \
"correlation: %s" % num_samples
raise IndexError(msg)
if num_samples < 5:
msg = "Less than 5 samples selected for fit to cross " + \
"correlation: %s" % num_samples
warnings.warn(msg)
coeffs, residual = scipy.polyfit(
cc_t[first_sample:last_sample + 1],
cc[first_sample:last_sample + 1], deg=2, full=True)[:2]
# check results of fit
if coeffs[0] >= 0:
msg = "Fitted parabola opens upwards!"
warnings.warn(msg)
if residual > 0.1:
msg = "Residual in quadratic fit to cross correlation maximum " + \
"larger than 0.1: %s" % residual
warnings.warn(msg)
# X coordinate of vertex of parabola gives time shift to correct
# differential pick time. Y coordinate gives maximum correlation
# coefficient.
shift = -coeffs[1] / 2.0 / coeffs[0]
coeff = (4 * coeffs[0] * coeffs[2] - coeffs[1] ** 2) / (4 * coeffs[0])
return shift, coeff | python | def _xcorr_interp(ccc, dt):
"""
Intrpolate around the maximum correlation value for sub-sample precision.
:param ccc: Cross-correlation array
:type ccc: numpy.ndarray
:param dt: sample interval
:type dt: float
:return: Position of interpolated maximum in seconds from start of ccc
:rtype: float
"""
if ccc.shape[0] == 1:
cc = ccc[0]
else:
cc = ccc
# Code borrowed from obspy.signal.cross_correlation.xcorr_pick_correction
cc_curvature = np.concatenate((np.zeros(1), np.diff(cc, 2), np.zeros(1)))
cc_t = np.arange(0, len(cc) * dt, dt)
peak_index = cc.argmax()
first_sample = peak_index
# XXX this could be improved..
while first_sample > 0 and cc_curvature[first_sample - 1] <= 0:
first_sample -= 1
last_sample = peak_index
while last_sample < len(cc) - 1 and cc_curvature[last_sample + 1] <= 0:
last_sample += 1
num_samples = last_sample - first_sample + 1
if num_samples < 3:
msg = "Less than 3 samples selected for fit to cross " + \
"correlation: %s" % num_samples
raise IndexError(msg)
if num_samples < 5:
msg = "Less than 5 samples selected for fit to cross " + \
"correlation: %s" % num_samples
warnings.warn(msg)
coeffs, residual = scipy.polyfit(
cc_t[first_sample:last_sample + 1],
cc[first_sample:last_sample + 1], deg=2, full=True)[:2]
# check results of fit
if coeffs[0] >= 0:
msg = "Fitted parabola opens upwards!"
warnings.warn(msg)
if residual > 0.1:
msg = "Residual in quadratic fit to cross correlation maximum " + \
"larger than 0.1: %s" % residual
warnings.warn(msg)
# X coordinate of vertex of parabola gives time shift to correct
# differential pick time. Y coordinate gives maximum correlation
# coefficient.
shift = -coeffs[1] / 2.0 / coeffs[0]
coeff = (4 * coeffs[0] * coeffs[2] - coeffs[1] ** 2) / (4 * coeffs[0])
return shift, coeff | [
"def",
"_xcorr_interp",
"(",
"ccc",
",",
"dt",
")",
":",
"if",
"ccc",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"cc",
"=",
"ccc",
"[",
"0",
"]",
"else",
":",
"cc",
"=",
"ccc",
"# Code borrowed from obspy.signal.cross_correlation.xcorr_pick_correction",
... | Intrpolate around the maximum correlation value for sub-sample precision.
:param ccc: Cross-correlation array
:type ccc: numpy.ndarray
:param dt: sample interval
:type dt: float
:return: Position of interpolated maximum in seconds from start of ccc
:rtype: float | [
"Intrpolate",
"around",
"the",
"maximum",
"correlation",
"value",
"for",
"sub",
"-",
"sample",
"precision",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/lag_calc.py#L46-L98 | train | 203,233 |
eqcorrscan/EQcorrscan | eqcorrscan/core/lag_calc.py | _day_loop | def _day_loop(detection_streams, template, min_cc, detections,
horizontal_chans, vertical_chans, interpolate, cores, parallel,
debug=0):
"""
Function to loop through multiple detections for one template.
Designed to run for the same day of data for I/O simplicity, but as you
are passing stream objects it could run for all the detections ever, as
long as you have the RAM!
:type detection_streams: list
:param detection_streams:
List of all the detections for this template that you want to compute
the optimum pick for. Individual things in list should be of
:class:`obspy.core.stream.Stream` type.
:type template: obspy.core.stream.Stream
:param template: The original template used to detect the detections passed
:type min_cc: float
:param min_cc: Minimum cross-correlation value to be allowed for a pick.
:type detections: list
:param detections:
List of detections to associate events with an input detection.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type debug: int
:param debug: debug output level 0-5.
:returns:
Catalog object containing Event objects for each detection created by
this template.
:rtype: :class:`obspy.core.event.Catalog`
"""
if len(detection_streams) == 0:
return Catalog()
if not cores:
num_cores = cpu_count()
else:
num_cores = cores
if num_cores > len(detection_streams):
num_cores = len(detection_streams)
if parallel:
pool = Pool(processes=num_cores)
debug_print('Made pool of %i workers' % num_cores, 4, debug)
# Parallel generation of events for each detection:
# results will be a list of (i, event class)
results = [pool.apply_async(
_channel_loop, (detection_streams[i], ),
{'template': template, 'min_cc': min_cc,
'detection_id': detections[i].id, 'interpolate': interpolate,
'i': i, 'pre_lag_ccsum': detections[i].detect_val,
'detect_chans': detections[i].no_chans,
'horizontal_chans': horizontal_chans,
'vertical_chans': vertical_chans})
for i in range(len(detection_streams))]
pool.close()
try:
events_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
events_list.sort(key=lambda tup: tup[0]) # Sort based on index.
else:
events_list = []
for i in range(len(detection_streams)):
events_list.append(_channel_loop(
detection=detection_streams[i], template=template,
min_cc=min_cc, detection_id=detections[i].id,
interpolate=interpolate, i=i,
pre_lag_ccsum=detections[i].detect_val,
detect_chans=detections[i].no_chans,
horizontal_chans=horizontal_chans,
vertical_chans=vertical_chans, debug=debug))
temp_catalog = Catalog()
temp_catalog.events = [event_tup[1] for event_tup in events_list]
return temp_catalog | python | def _day_loop(detection_streams, template, min_cc, detections,
horizontal_chans, vertical_chans, interpolate, cores, parallel,
debug=0):
"""
Function to loop through multiple detections for one template.
Designed to run for the same day of data for I/O simplicity, but as you
are passing stream objects it could run for all the detections ever, as
long as you have the RAM!
:type detection_streams: list
:param detection_streams:
List of all the detections for this template that you want to compute
the optimum pick for. Individual things in list should be of
:class:`obspy.core.stream.Stream` type.
:type template: obspy.core.stream.Stream
:param template: The original template used to detect the detections passed
:type min_cc: float
:param min_cc: Minimum cross-correlation value to be allowed for a pick.
:type detections: list
:param detections:
List of detections to associate events with an input detection.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type debug: int
:param debug: debug output level 0-5.
:returns:
Catalog object containing Event objects for each detection created by
this template.
:rtype: :class:`obspy.core.event.Catalog`
"""
if len(detection_streams) == 0:
return Catalog()
if not cores:
num_cores = cpu_count()
else:
num_cores = cores
if num_cores > len(detection_streams):
num_cores = len(detection_streams)
if parallel:
pool = Pool(processes=num_cores)
debug_print('Made pool of %i workers' % num_cores, 4, debug)
# Parallel generation of events for each detection:
# results will be a list of (i, event class)
results = [pool.apply_async(
_channel_loop, (detection_streams[i], ),
{'template': template, 'min_cc': min_cc,
'detection_id': detections[i].id, 'interpolate': interpolate,
'i': i, 'pre_lag_ccsum': detections[i].detect_val,
'detect_chans': detections[i].no_chans,
'horizontal_chans': horizontal_chans,
'vertical_chans': vertical_chans})
for i in range(len(detection_streams))]
pool.close()
try:
events_list = [p.get() for p in results]
except KeyboardInterrupt as e: # pragma: no cover
pool.terminate()
raise e
pool.join()
events_list.sort(key=lambda tup: tup[0]) # Sort based on index.
else:
events_list = []
for i in range(len(detection_streams)):
events_list.append(_channel_loop(
detection=detection_streams[i], template=template,
min_cc=min_cc, detection_id=detections[i].id,
interpolate=interpolate, i=i,
pre_lag_ccsum=detections[i].detect_val,
detect_chans=detections[i].no_chans,
horizontal_chans=horizontal_chans,
vertical_chans=vertical_chans, debug=debug))
temp_catalog = Catalog()
temp_catalog.events = [event_tup[1] for event_tup in events_list]
return temp_catalog | [
"def",
"_day_loop",
"(",
"detection_streams",
",",
"template",
",",
"min_cc",
",",
"detections",
",",
"horizontal_chans",
",",
"vertical_chans",
",",
"interpolate",
",",
"cores",
",",
"parallel",
",",
"debug",
"=",
"0",
")",
":",
"if",
"len",
"(",
"detection... | Function to loop through multiple detections for one template.
Designed to run for the same day of data for I/O simplicity, but as you
are passing stream objects it could run for all the detections ever, as
long as you have the RAM!
:type detection_streams: list
:param detection_streams:
List of all the detections for this template that you want to compute
the optimum pick for. Individual things in list should be of
:class:`obspy.core.stream.Stream` type.
:type template: obspy.core.stream.Stream
:param template: The original template used to detect the detections passed
:type min_cc: float
:param min_cc: Minimum cross-correlation value to be allowed for a pick.
:type detections: list
:param detections:
List of detections to associate events with an input detection.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type debug: int
:param debug: debug output level 0-5.
:returns:
Catalog object containing Event objects for each detection created by
this template.
:rtype: :class:`obspy.core.event.Catalog` | [
"Function",
"to",
"loop",
"through",
"multiple",
"detections",
"for",
"one",
"template",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/lag_calc.py#L257-L341 | train | 203,234 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/trigger.py | read_trigger_parameters | def read_trigger_parameters(filename):
"""Read the trigger parameters into trigger_parameter classes.
:type filename: str
:param filename: Parameter file
:returns: List of :class:`eqcorrscan.utils.trigger.TriggerParameters`
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.trigger import read_trigger_parameters
>>> parameters = read_trigger_parameters('parameters') # doctest: +SKIP
"""
parameters = []
f = open(filename, 'r')
print('Reading parameters with the following header:')
for line in f:
if line[0] == '#':
print(line.rstrip('\n').lstrip('\n'))
else:
parameter_dict = ast.literal_eval(line)
# convert the dictionary to the class
trig_par = TriggerParameters(parameter_dict)
parameters.append(trig_par)
f.close()
return parameters | python | def read_trigger_parameters(filename):
"""Read the trigger parameters into trigger_parameter classes.
:type filename: str
:param filename: Parameter file
:returns: List of :class:`eqcorrscan.utils.trigger.TriggerParameters`
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.trigger import read_trigger_parameters
>>> parameters = read_trigger_parameters('parameters') # doctest: +SKIP
"""
parameters = []
f = open(filename, 'r')
print('Reading parameters with the following header:')
for line in f:
if line[0] == '#':
print(line.rstrip('\n').lstrip('\n'))
else:
parameter_dict = ast.literal_eval(line)
# convert the dictionary to the class
trig_par = TriggerParameters(parameter_dict)
parameters.append(trig_par)
f.close()
return parameters | [
"def",
"read_trigger_parameters",
"(",
"filename",
")",
":",
"parameters",
"=",
"[",
"]",
"f",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
"print",
"(",
"'Reading parameters with the following header:'",
")",
"for",
"line",
"in",
"f",
":",
"if",
"line",
"... | Read the trigger parameters into trigger_parameter classes.
:type filename: str
:param filename: Parameter file
:returns: List of :class:`eqcorrscan.utils.trigger.TriggerParameters`
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.trigger import read_trigger_parameters
>>> parameters = read_trigger_parameters('parameters') # doctest: +SKIP | [
"Read",
"the",
"trigger",
"parameters",
"into",
"trigger_parameter",
"classes",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/trigger.py#L120-L146 | train | 203,235 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/trigger.py | _channel_loop | def _channel_loop(tr, parameters, max_trigger_length=60,
despike=False, debug=0):
"""
Internal loop for parellel processing.
:type tr: obspy.core.trace
:param tr: Trace to look for triggers in.
:type parameters: list
:param parameters: List of TriggerParameter class for trace.
:type max_trigger_length: float
:type despike: bool
:type debug: int
:return: trigger
:rtype: list
"""
for par in parameters:
if par['station'] == tr.stats.station and \
par['channel'] == tr.stats.channel:
parameter = par
break
else:
msg = 'No parameters set for station ' + str(tr.stats.station)
warnings.warn(msg)
return []
triggers = []
if debug > 0:
print(tr)
tr.detrend('simple')
if despike:
median_filter(tr)
if parameter['lowcut'] and parameter['highcut']:
tr.filter('bandpass', freqmin=parameter['lowcut'],
freqmax=parameter['highcut'])
elif parameter['lowcut']:
tr.filter('highpass', freq=parameter['lowcut'])
elif parameter['highcut']:
tr.filter('lowpass', freq=parameter['highcut'])
# find triggers for each channel using recursive_sta_lta
df = tr.stats.sampling_rate
cft = recursive_sta_lta(tr.data, int(parameter['sta_len'] * df),
int(parameter['lta_len'] * df))
if max_trigger_length:
trig_args = {'max_len_delete': True}
trig_args['max_len'] = int(max_trigger_length *
df + 0.5)
if debug > 3:
plot_trigger(tr, cft, parameter['thr_on'], parameter['thr_off'])
tmp_trigs = trigger_onset(cft, float(parameter['thr_on']),
float(parameter['thr_off']),
**trig_args)
for on, off in tmp_trigs:
cft_peak = tr.data[on:off].max()
cft_std = tr.data[on:off].std()
on = tr.stats.starttime + \
float(on) / tr.stats.sampling_rate
off = tr.stats.starttime + \
float(off) / tr.stats.sampling_rate
triggers.append((on.timestamp, off.timestamp,
tr.id, cft_peak,
cft_std))
return triggers | python | def _channel_loop(tr, parameters, max_trigger_length=60,
despike=False, debug=0):
"""
Internal loop for parellel processing.
:type tr: obspy.core.trace
:param tr: Trace to look for triggers in.
:type parameters: list
:param parameters: List of TriggerParameter class for trace.
:type max_trigger_length: float
:type despike: bool
:type debug: int
:return: trigger
:rtype: list
"""
for par in parameters:
if par['station'] == tr.stats.station and \
par['channel'] == tr.stats.channel:
parameter = par
break
else:
msg = 'No parameters set for station ' + str(tr.stats.station)
warnings.warn(msg)
return []
triggers = []
if debug > 0:
print(tr)
tr.detrend('simple')
if despike:
median_filter(tr)
if parameter['lowcut'] and parameter['highcut']:
tr.filter('bandpass', freqmin=parameter['lowcut'],
freqmax=parameter['highcut'])
elif parameter['lowcut']:
tr.filter('highpass', freq=parameter['lowcut'])
elif parameter['highcut']:
tr.filter('lowpass', freq=parameter['highcut'])
# find triggers for each channel using recursive_sta_lta
df = tr.stats.sampling_rate
cft = recursive_sta_lta(tr.data, int(parameter['sta_len'] * df),
int(parameter['lta_len'] * df))
if max_trigger_length:
trig_args = {'max_len_delete': True}
trig_args['max_len'] = int(max_trigger_length *
df + 0.5)
if debug > 3:
plot_trigger(tr, cft, parameter['thr_on'], parameter['thr_off'])
tmp_trigs = trigger_onset(cft, float(parameter['thr_on']),
float(parameter['thr_off']),
**trig_args)
for on, off in tmp_trigs:
cft_peak = tr.data[on:off].max()
cft_std = tr.data[on:off].std()
on = tr.stats.starttime + \
float(on) / tr.stats.sampling_rate
off = tr.stats.starttime + \
float(off) / tr.stats.sampling_rate
triggers.append((on.timestamp, off.timestamp,
tr.id, cft_peak,
cft_std))
return triggers | [
"def",
"_channel_loop",
"(",
"tr",
",",
"parameters",
",",
"max_trigger_length",
"=",
"60",
",",
"despike",
"=",
"False",
",",
"debug",
"=",
"0",
")",
":",
"for",
"par",
"in",
"parameters",
":",
"if",
"par",
"[",
"'station'",
"]",
"==",
"tr",
".",
"s... | Internal loop for parellel processing.
:type tr: obspy.core.trace
:param tr: Trace to look for triggers in.
:type parameters: list
:param parameters: List of TriggerParameter class for trace.
:type max_trigger_length: float
:type despike: bool
:type debug: int
:return: trigger
:rtype: list | [
"Internal",
"loop",
"for",
"parellel",
"processing",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/trigger.py#L149-L211 | train | 203,236 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/trigger.py | TriggerParameters.write | def write(self, filename, append=True):
"""Write the parameters to a file as a human-readable series of dicts.
:type filename: str
:param filename: File to write to
:type append: bool
:param append: Append to already existing file or over-write.
"""
header = ' '.join(['# User:', getpass.getuser(),
'\n# Creation date:', str(UTCDateTime()),
'\n# EQcorrscan version:',
str(eqcorrscan.__version__),
'\n\n\n'])
if append:
f = open(filename, 'a')
else:
f = open(filename, 'w')
f.write(header)
parameters = self.__dict__
f.write(str(parameters))
f.write('\n')
f.close()
return | python | def write(self, filename, append=True):
"""Write the parameters to a file as a human-readable series of dicts.
:type filename: str
:param filename: File to write to
:type append: bool
:param append: Append to already existing file or over-write.
"""
header = ' '.join(['# User:', getpass.getuser(),
'\n# Creation date:', str(UTCDateTime()),
'\n# EQcorrscan version:',
str(eqcorrscan.__version__),
'\n\n\n'])
if append:
f = open(filename, 'a')
else:
f = open(filename, 'w')
f.write(header)
parameters = self.__dict__
f.write(str(parameters))
f.write('\n')
f.close()
return | [
"def",
"write",
"(",
"self",
",",
"filename",
",",
"append",
"=",
"True",
")",
":",
"header",
"=",
"' '",
".",
"join",
"(",
"[",
"'# User:'",
",",
"getpass",
".",
"getuser",
"(",
")",
",",
"'\\n# Creation date:'",
",",
"str",
"(",
"UTCDateTime",
"(",
... | Write the parameters to a file as a human-readable series of dicts.
:type filename: str
:param filename: File to write to
:type append: bool
:param append: Append to already existing file or over-write. | [
"Write",
"the",
"parameters",
"to",
"a",
"file",
"as",
"a",
"human",
"-",
"readable",
"series",
"of",
"dicts",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/trigger.py#L95-L117 | train | 203,237 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/libnames.py | _get_lib_name | def _get_lib_name(lib):
"""
Helper function to get an architecture and Python version specific library
filename.
"""
# append any extension suffix defined by Python for current platform
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
# in principle "EXT_SUFFIX" is what we want.
# "SO" seems to be deprecated on newer python
# but: older python seems to have empty "EXT_SUFFIX", so we fall back
if not ext_suffix:
try:
ext_suffix = sysconfig.get_config_var("SO")
except Exception as e:
msg = ("Empty 'EXT_SUFFIX' encountered while building CDLL "
"filename and fallback to 'SO' variable failed "
"(%s)." % str(e))
warnings.warn(msg)
pass
if ext_suffix:
libname = lib + ext_suffix
return libname | python | def _get_lib_name(lib):
"""
Helper function to get an architecture and Python version specific library
filename.
"""
# append any extension suffix defined by Python for current platform
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
# in principle "EXT_SUFFIX" is what we want.
# "SO" seems to be deprecated on newer python
# but: older python seems to have empty "EXT_SUFFIX", so we fall back
if not ext_suffix:
try:
ext_suffix = sysconfig.get_config_var("SO")
except Exception as e:
msg = ("Empty 'EXT_SUFFIX' encountered while building CDLL "
"filename and fallback to 'SO' variable failed "
"(%s)." % str(e))
warnings.warn(msg)
pass
if ext_suffix:
libname = lib + ext_suffix
return libname | [
"def",
"_get_lib_name",
"(",
"lib",
")",
":",
"# append any extension suffix defined by Python for current platform",
"ext_suffix",
"=",
"sysconfig",
".",
"get_config_var",
"(",
"\"EXT_SUFFIX\"",
")",
"# in principle \"EXT_SUFFIX\" is what we want.",
"# \"SO\" seems to be deprecated ... | Helper function to get an architecture and Python version specific library
filename. | [
"Helper",
"function",
"to",
"get",
"an",
"architecture",
"and",
"Python",
"version",
"specific",
"library",
"filename",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/libnames.py#L18-L39 | train | 203,238 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/libnames.py | _load_cdll | def _load_cdll(name):
"""
Helper function to load a shared library built during installation
with ctypes.
:type name: str
:param name: Name of the library to load (e.g. 'mseed').
:rtype: :class:`ctypes.CDLL`
"""
# our custom defined part of the extension file name
libname = _get_lib_name(name)
libdir = os.path.join(os.path.dirname(__file__), 'lib')
libpath = os.path.join(libdir, libname)
static_fftw = os.path.join(libdir, 'libfftw3-3.dll')
static_fftwf = os.path.join(libdir, 'libfftw3f-3.dll')
try:
fftw_lib = ctypes.CDLL(str(static_fftw)) # noqa: F841
fftwf_lib = ctypes.CDLL(str(static_fftwf)) # noqa: F841
except:
pass
try:
cdll = ctypes.CDLL(str(libpath))
except Exception as e:
msg = 'Could not load shared library "%s".\n\n %s' % (libname, str(e))
raise ImportError(msg)
return cdll | python | def _load_cdll(name):
"""
Helper function to load a shared library built during installation
with ctypes.
:type name: str
:param name: Name of the library to load (e.g. 'mseed').
:rtype: :class:`ctypes.CDLL`
"""
# our custom defined part of the extension file name
libname = _get_lib_name(name)
libdir = os.path.join(os.path.dirname(__file__), 'lib')
libpath = os.path.join(libdir, libname)
static_fftw = os.path.join(libdir, 'libfftw3-3.dll')
static_fftwf = os.path.join(libdir, 'libfftw3f-3.dll')
try:
fftw_lib = ctypes.CDLL(str(static_fftw)) # noqa: F841
fftwf_lib = ctypes.CDLL(str(static_fftwf)) # noqa: F841
except:
pass
try:
cdll = ctypes.CDLL(str(libpath))
except Exception as e:
msg = 'Could not load shared library "%s".\n\n %s' % (libname, str(e))
raise ImportError(msg)
return cdll | [
"def",
"_load_cdll",
"(",
"name",
")",
":",
"# our custom defined part of the extension file name",
"libname",
"=",
"_get_lib_name",
"(",
"name",
")",
"libdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
... | Helper function to load a shared library built during installation
with ctypes.
:type name: str
:param name: Name of the library to load (e.g. 'mseed').
:rtype: :class:`ctypes.CDLL` | [
"Helper",
"function",
"to",
"load",
"a",
"shared",
"library",
"built",
"during",
"installation",
"with",
"ctypes",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/libnames.py#L42-L67 | train | 203,239 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/picker.py | cross_net | def cross_net(stream, env=False, debug=0, master=False):
"""
Generate picks using a simple envelope cross-correlation.
Picks are made for each channel based on optimal moveout defined by
maximum cross-correlation with master trace. Master trace will be the
first trace in the stream if not set. Requires good inter-station
coherance.
:type stream: obspy.core.stream.Stream
:param stream: Stream to pick
:type env: bool
:param env: To compute cross-correlations on the envelope or not.
:type debug: int
:param debug: Debug level from 0-5
:type master: obspy.core.trace.Trace
:param master:
Trace to use as master, if False, will use the first trace in stream.
:returns: :class:`obspy.core.event.event.Event`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.picker import cross_net
>>> st = read()
>>> event = cross_net(st, env=True)
>>> print(event.creation_info.author)
EQcorrscan
.. warning::
This routine is not designed for accurate picking, rather it can be
used for a first-pass at picks to obtain simple locations. Based on
the waveform-envelope cross-correlation method.
"""
event = Event()
event.origins.append(Origin())
event.creation_info = CreationInfo(author='EQcorrscan',
creation_time=UTCDateTime())
event.comments.append(Comment(text='cross_net'))
samp_rate = stream[0].stats.sampling_rate
if not env:
if debug > 2:
print('Using the raw data')
st = stream.copy()
st.resample(samp_rate)
else:
st = stream.copy()
if debug > 2:
print('Computing envelope')
for tr in st:
tr.resample(samp_rate)
tr.data = envelope(tr.data)
if not master:
master = st[0]
else:
master = master
master.data = np.nan_to_num(master.data)
for i, tr in enumerate(st):
tr.data = np.nan_to_num(tr.data)
if debug > 2:
msg = ' '.join(['Comparing', tr.stats.station, tr.stats.channel,
'with the master'])
print(msg)
shift_len = int(0.3 * len(tr))
if debug > 2:
print('Shift length is set to ' + str(shift_len) + ' samples')
index, cc = xcorr(master, tr, shift_len)
wav_id = WaveformStreamID(station_code=tr.stats.station,
channel_code=tr.stats.channel,
network_code=tr.stats.network)
event.picks.append(Pick(time=tr.stats.starttime +
(index / tr.stats.sampling_rate),
waveform_id=wav_id,
phase_hint='S',
onset='emergent'))
if debug > 2:
print(event.picks[i])
event.origins[0].time = min([pick.time for pick in event.picks]) - 1
# event.origins[0].latitude = float('nan')
# event.origins[0].longitude = float('nan')
# Set arbitrary origin time
del st
return event | python | def cross_net(stream, env=False, debug=0, master=False):
"""
Generate picks using a simple envelope cross-correlation.
Picks are made for each channel based on optimal moveout defined by
maximum cross-correlation with master trace. Master trace will be the
first trace in the stream if not set. Requires good inter-station
coherance.
:type stream: obspy.core.stream.Stream
:param stream: Stream to pick
:type env: bool
:param env: To compute cross-correlations on the envelope or not.
:type debug: int
:param debug: Debug level from 0-5
:type master: obspy.core.trace.Trace
:param master:
Trace to use as master, if False, will use the first trace in stream.
:returns: :class:`obspy.core.event.event.Event`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.picker import cross_net
>>> st = read()
>>> event = cross_net(st, env=True)
>>> print(event.creation_info.author)
EQcorrscan
.. warning::
This routine is not designed for accurate picking, rather it can be
used for a first-pass at picks to obtain simple locations. Based on
the waveform-envelope cross-correlation method.
"""
event = Event()
event.origins.append(Origin())
event.creation_info = CreationInfo(author='EQcorrscan',
creation_time=UTCDateTime())
event.comments.append(Comment(text='cross_net'))
samp_rate = stream[0].stats.sampling_rate
if not env:
if debug > 2:
print('Using the raw data')
st = stream.copy()
st.resample(samp_rate)
else:
st = stream.copy()
if debug > 2:
print('Computing envelope')
for tr in st:
tr.resample(samp_rate)
tr.data = envelope(tr.data)
if not master:
master = st[0]
else:
master = master
master.data = np.nan_to_num(master.data)
for i, tr in enumerate(st):
tr.data = np.nan_to_num(tr.data)
if debug > 2:
msg = ' '.join(['Comparing', tr.stats.station, tr.stats.channel,
'with the master'])
print(msg)
shift_len = int(0.3 * len(tr))
if debug > 2:
print('Shift length is set to ' + str(shift_len) + ' samples')
index, cc = xcorr(master, tr, shift_len)
wav_id = WaveformStreamID(station_code=tr.stats.station,
channel_code=tr.stats.channel,
network_code=tr.stats.network)
event.picks.append(Pick(time=tr.stats.starttime +
(index / tr.stats.sampling_rate),
waveform_id=wav_id,
phase_hint='S',
onset='emergent'))
if debug > 2:
print(event.picks[i])
event.origins[0].time = min([pick.time for pick in event.picks]) - 1
# event.origins[0].latitude = float('nan')
# event.origins[0].longitude = float('nan')
# Set arbitrary origin time
del st
return event | [
"def",
"cross_net",
"(",
"stream",
",",
"env",
"=",
"False",
",",
"debug",
"=",
"0",
",",
"master",
"=",
"False",
")",
":",
"event",
"=",
"Event",
"(",
")",
"event",
".",
"origins",
".",
"append",
"(",
"Origin",
"(",
")",
")",
"event",
".",
"crea... | Generate picks using a simple envelope cross-correlation.
Picks are made for each channel based on optimal moveout defined by
maximum cross-correlation with master trace. Master trace will be the
first trace in the stream if not set. Requires good inter-station
coherance.
:type stream: obspy.core.stream.Stream
:param stream: Stream to pick
:type env: bool
:param env: To compute cross-correlations on the envelope or not.
:type debug: int
:param debug: Debug level from 0-5
:type master: obspy.core.trace.Trace
:param master:
Trace to use as master, if False, will use the first trace in stream.
:returns: :class:`obspy.core.event.event.Event`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.picker import cross_net
>>> st = read()
>>> event = cross_net(st, env=True)
>>> print(event.creation_info.author)
EQcorrscan
.. warning::
This routine is not designed for accurate picking, rather it can be
used for a first-pass at picks to obtain simple locations. Based on
the waveform-envelope cross-correlation method. | [
"Generate",
"picks",
"using",
"a",
"simple",
"envelope",
"cross",
"-",
"correlation",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/picker.py#L100-L183 | train | 203,240 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | cross_chan_coherence | def cross_chan_coherence(st1, st2, allow_shift=False, shift_len=0.2, i=0,
xcorr_func='time_domain'):
"""
Calculate cross-channel coherency.
Determine the cross-channel coherency between two streams of multichannel
seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type st2: obspy.core.stream.Stream
:param st2: Stream two
:type allow_shift: bool
:param allow_shift:
Whether to allow the optimum alignment to be found for coherence,
defaults to `False` for strict coherence
:type shift_len: float
:param shift_len: Seconds to shift, only used if `allow_shift=True`
:type i: int
:param i: index used for parallel async processing, returned unaltered
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callabe. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:returns:
cross channel coherence, float - normalized by number of channels,
and i, where i is int, as input.
:rtype: tuple
"""
cccoh = 0.0
kchan = 0
array_xcorr = get_array_xcorr(xcorr_func)
for tr in st1:
tr2 = st2.select(station=tr.stats.station,
channel=tr.stats.channel)
if len(tr2) > 0 and tr.stats.sampling_rate != \
tr2[0].stats.sampling_rate:
warnings.warn('Sampling rates do not match, not using: %s.%s'
% (tr.stats.station, tr.stats.channel))
if len(tr2) > 0 and allow_shift:
index, corval = xcorr(tr, tr2[0],
int(shift_len * tr.stats.sampling_rate))
cccoh += corval
kchan += 1
elif len(tr2) > 0:
min_len = min(len(tr.data), len(tr2[0].data))
cccoh += array_xcorr(
np.array([tr.data[0:min_len]]), tr2[0].data[0:min_len],
[0])[0][0][0]
kchan += 1
if kchan:
cccoh /= kchan
return np.round(cccoh, 6), i
else:
warnings.warn('No matching channels')
return 0, i | python | def cross_chan_coherence(st1, st2, allow_shift=False, shift_len=0.2, i=0,
xcorr_func='time_domain'):
"""
Calculate cross-channel coherency.
Determine the cross-channel coherency between two streams of multichannel
seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type st2: obspy.core.stream.Stream
:param st2: Stream two
:type allow_shift: bool
:param allow_shift:
Whether to allow the optimum alignment to be found for coherence,
defaults to `False` for strict coherence
:type shift_len: float
:param shift_len: Seconds to shift, only used if `allow_shift=True`
:type i: int
:param i: index used for parallel async processing, returned unaltered
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callabe. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:returns:
cross channel coherence, float - normalized by number of channels,
and i, where i is int, as input.
:rtype: tuple
"""
cccoh = 0.0
kchan = 0
array_xcorr = get_array_xcorr(xcorr_func)
for tr in st1:
tr2 = st2.select(station=tr.stats.station,
channel=tr.stats.channel)
if len(tr2) > 0 and tr.stats.sampling_rate != \
tr2[0].stats.sampling_rate:
warnings.warn('Sampling rates do not match, not using: %s.%s'
% (tr.stats.station, tr.stats.channel))
if len(tr2) > 0 and allow_shift:
index, corval = xcorr(tr, tr2[0],
int(shift_len * tr.stats.sampling_rate))
cccoh += corval
kchan += 1
elif len(tr2) > 0:
min_len = min(len(tr.data), len(tr2[0].data))
cccoh += array_xcorr(
np.array([tr.data[0:min_len]]), tr2[0].data[0:min_len],
[0])[0][0][0]
kchan += 1
if kchan:
cccoh /= kchan
return np.round(cccoh, 6), i
else:
warnings.warn('No matching channels')
return 0, i | [
"def",
"cross_chan_coherence",
"(",
"st1",
",",
"st2",
",",
"allow_shift",
"=",
"False",
",",
"shift_len",
"=",
"0.2",
",",
"i",
"=",
"0",
",",
"xcorr_func",
"=",
"'time_domain'",
")",
":",
"cccoh",
"=",
"0.0",
"kchan",
"=",
"0",
"array_xcorr",
"=",
"g... | Calculate cross-channel coherency.
Determine the cross-channel coherency between two streams of multichannel
seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type st2: obspy.core.stream.Stream
:param st2: Stream two
:type allow_shift: bool
:param allow_shift:
Whether to allow the optimum alignment to be found for coherence,
defaults to `False` for strict coherence
:type shift_len: float
:param shift_len: Seconds to shift, only used if `allow_shift=True`
:type i: int
:param i: index used for parallel async processing, returned unaltered
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callabe. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:returns:
cross channel coherence, float - normalized by number of channels,
and i, where i is int, as input.
:rtype: tuple | [
"Calculate",
"cross",
"-",
"channel",
"coherency",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L33-L90 | train | 203,241 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | distance_matrix | def distance_matrix(stream_list, allow_shift=False, shift_len=0, cores=1):
"""
Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), e.g. a well correlated pair of templates will
have small distances, and an equally well correlated reverse image will
have the same distance as a positively correlated image - this is an issue.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type allow_shift: bool
:param allow_shift: To allow templates to shift or not?
:type shift_len: float
:param shift_len: How many seconds for templates to shift
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance.
"""
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(stream_list))] *
len(stream_list))
for i, master in enumerate(stream_list):
# Start a parallel processing pool
pool = Pool(processes=cores)
# Parallel processing
results = [pool.apply_async(cross_chan_coherence,
args=(master, stream_list[j], allow_shift,
shift_len, j))
for j in range(len(stream_list))]
pool.close()
# Extract the results when they are done
dist_list = [p.get() for p in results]
# Close and join all the processes back to the master process
pool.join()
# Sort the results by the input j
dist_list.sort(key=lambda tup: tup[1])
# Sort the list into the dist_mat structure
for j in range(i, len(stream_list)):
if i == j:
dist_mat[i, j] = 0.0
else:
dist_mat[i, j] = 1 - dist_list[j][0]
# Reshape the distance matrix
for i in range(1, len(stream_list)):
for j in range(i):
dist_mat[i, j] = dist_mat.T[i, j]
return dist_mat | python | def distance_matrix(stream_list, allow_shift=False, shift_len=0, cores=1):
"""
Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), e.g. a well correlated pair of templates will
have small distances, and an equally well correlated reverse image will
have the same distance as a positively correlated image - this is an issue.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type allow_shift: bool
:param allow_shift: To allow templates to shift or not?
:type shift_len: float
:param shift_len: How many seconds for templates to shift
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance.
"""
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(stream_list))] *
len(stream_list))
for i, master in enumerate(stream_list):
# Start a parallel processing pool
pool = Pool(processes=cores)
# Parallel processing
results = [pool.apply_async(cross_chan_coherence,
args=(master, stream_list[j], allow_shift,
shift_len, j))
for j in range(len(stream_list))]
pool.close()
# Extract the results when they are done
dist_list = [p.get() for p in results]
# Close and join all the processes back to the master process
pool.join()
# Sort the results by the input j
dist_list.sort(key=lambda tup: tup[1])
# Sort the list into the dist_mat structure
for j in range(i, len(stream_list)):
if i == j:
dist_mat[i, j] = 0.0
else:
dist_mat[i, j] = 1 - dist_list[j][0]
# Reshape the distance matrix
for i in range(1, len(stream_list)):
for j in range(i):
dist_mat[i, j] = dist_mat.T[i, j]
return dist_mat | [
"def",
"distance_matrix",
"(",
"stream_list",
",",
"allow_shift",
"=",
"False",
",",
"shift_len",
"=",
"0",
",",
"cores",
"=",
"1",
")",
":",
"# Initialize square matrix",
"dist_mat",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"array",
"(",
"[",
"0.0"... | Compute distance matrix for waveforms based on cross-correlations.
Function to compute the distance matrix for all templates - will give
distance as 1-abs(cccoh), e.g. a well correlated pair of templates will
have small distances, and an equally well correlated reverse image will
have the same distance as a positively correlated image - this is an issue.
:type stream_list: list
:param stream_list:
List of the :class:`obspy.core.stream.Stream` to compute the distance
matrix for
:type allow_shift: bool
:param allow_shift: To allow templates to shift or not?
:type shift_len: float
:param shift_len: How many seconds for templates to shift
:type cores: int
:param cores: Number of cores to parallel process using, defaults to 1.
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
.. warning::
Because distance is given as :math:`1-abs(coherence)`, negatively
correlated and positively correlated objects are given the same
distance. | [
"Compute",
"distance",
"matrix",
"for",
"waveforms",
"based",
"on",
"cross",
"-",
"correlations",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L93-L149 | train | 203,242 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | cluster | def cluster(template_list, show=True, corr_thresh=0.3, allow_shift=False,
shift_len=0, save_corrmat=False, cores='all', debug=1):
"""
Cluster template waveforms based on average correlations.
Function to take a set of templates and cluster them, will return groups
as lists of streams. Clustering is done by computing the cross-channel
correlation sum of each stream in stream_list with every other stream in
the list. :mod:`scipy.cluster.hierarchy` functions are then used to
compute the complete distance matrix, where distance is 1 minus the
normalised cross-correlation sum such that larger distances are less
similar events. Groups are then created by clustering the distance matrix
at distances less than 1 - corr_thresh.
Will compute the distance matrix in parallel, using all available cores
:type template_list: list
:param template_list:
List of tuples of the template (:class:`obspy.core.stream.Stream`)
and the template id to compute clustering for
:type show: bool
:param show: plot linkage on screen if True, defaults to True
:type corr_thresh: float
:param corr_thresh: Cross-channel correlation threshold for grouping
:type allow_shift: bool
:param allow_shift:
Whether to allow the templates to shift when correlating
:type shift_len: float
:param shift_len: How many seconds to allow the templates to shift
:type save_corrmat: bool
:param save_corrmat:
If True will save the distance matrix to dist_mat.npy in the local
directory.
:type cores: int
:param cores:
number of cores to use when computing the distance matrix, defaults to
'all' which will work out how many cpus are available and hog them.
:type debug: int
:param debug:
Level of debugging from 1-5, higher is more output,
currently only level 1 implemented.
:returns:
List of groups. Each group is a list of
:class:`obspy.core.stream.Stream` making up that group.
"""
if cores == 'all':
num_cores = cpu_count()
else:
num_cores = cores
# Extract only the Streams from stream_list
stream_list = [x[0] for x in template_list]
# Compute the distance matrix
if debug >= 1:
print('Computing the distance matrix using %i cores' % num_cores)
dist_mat = distance_matrix(stream_list, allow_shift, shift_len,
cores=num_cores)
if save_corrmat:
np.save('dist_mat.npy', dist_mat)
if debug >= 1:
print('Saved the distance matrix as dist_mat.npy')
dist_vec = squareform(dist_mat)
if debug >= 1:
print('Computing linkage')
Z = linkage(dist_vec)
if show:
if debug >= 1:
print('Plotting the dendrogram')
dendrogram(Z, color_threshold=1 - corr_thresh,
distance_sort='ascending')
plt.show()
# Get the indices of the groups
if debug >= 1:
print('Clustering')
indices = fcluster(Z, t=1 - corr_thresh, criterion='distance')
# Indices start at 1...
group_ids = list(set(indices)) # Unique list of group ids
if debug >= 1:
msg = ' '.join(['Found', str(len(group_ids)), 'groups'])
print(msg)
# Convert to tuple of (group id, stream id)
indices = [(indices[i], i) for i in range(len(indices))]
# Sort by group id
indices.sort(key=lambda tup: tup[0])
groups = []
if debug >= 1:
print('Extracting and grouping')
for group_id in group_ids:
group = []
for ind in indices:
if ind[0] == group_id:
group.append(template_list[ind[1]])
elif ind[0] > group_id:
# Because we have sorted by group id, when the index is greater
# than the group_id we can break the inner loop.
# Patch applied by CJC 05/11/2015
groups.append(group)
break
# Catch the final group
groups.append(group)
return groups | python | def cluster(template_list, show=True, corr_thresh=0.3, allow_shift=False,
shift_len=0, save_corrmat=False, cores='all', debug=1):
"""
Cluster template waveforms based on average correlations.
Function to take a set of templates and cluster them, will return groups
as lists of streams. Clustering is done by computing the cross-channel
correlation sum of each stream in stream_list with every other stream in
the list. :mod:`scipy.cluster.hierarchy` functions are then used to
compute the complete distance matrix, where distance is 1 minus the
normalised cross-correlation sum such that larger distances are less
similar events. Groups are then created by clustering the distance matrix
at distances less than 1 - corr_thresh.
Will compute the distance matrix in parallel, using all available cores
:type template_list: list
:param template_list:
List of tuples of the template (:class:`obspy.core.stream.Stream`)
and the template id to compute clustering for
:type show: bool
:param show: plot linkage on screen if True, defaults to True
:type corr_thresh: float
:param corr_thresh: Cross-channel correlation threshold for grouping
:type allow_shift: bool
:param allow_shift:
Whether to allow the templates to shift when correlating
:type shift_len: float
:param shift_len: How many seconds to allow the templates to shift
:type save_corrmat: bool
:param save_corrmat:
If True will save the distance matrix to dist_mat.npy in the local
directory.
:type cores: int
:param cores:
number of cores to use when computing the distance matrix, defaults to
'all' which will work out how many cpus are available and hog them.
:type debug: int
:param debug:
Level of debugging from 1-5, higher is more output,
currently only level 1 implemented.
:returns:
List of groups. Each group is a list of
:class:`obspy.core.stream.Stream` making up that group.
"""
if cores == 'all':
num_cores = cpu_count()
else:
num_cores = cores
# Extract only the Streams from stream_list
stream_list = [x[0] for x in template_list]
# Compute the distance matrix
if debug >= 1:
print('Computing the distance matrix using %i cores' % num_cores)
dist_mat = distance_matrix(stream_list, allow_shift, shift_len,
cores=num_cores)
if save_corrmat:
np.save('dist_mat.npy', dist_mat)
if debug >= 1:
print('Saved the distance matrix as dist_mat.npy')
dist_vec = squareform(dist_mat)
if debug >= 1:
print('Computing linkage')
Z = linkage(dist_vec)
if show:
if debug >= 1:
print('Plotting the dendrogram')
dendrogram(Z, color_threshold=1 - corr_thresh,
distance_sort='ascending')
plt.show()
# Get the indices of the groups
if debug >= 1:
print('Clustering')
indices = fcluster(Z, t=1 - corr_thresh, criterion='distance')
# Indices start at 1...
group_ids = list(set(indices)) # Unique list of group ids
if debug >= 1:
msg = ' '.join(['Found', str(len(group_ids)), 'groups'])
print(msg)
# Convert to tuple of (group id, stream id)
indices = [(indices[i], i) for i in range(len(indices))]
# Sort by group id
indices.sort(key=lambda tup: tup[0])
groups = []
if debug >= 1:
print('Extracting and grouping')
for group_id in group_ids:
group = []
for ind in indices:
if ind[0] == group_id:
group.append(template_list[ind[1]])
elif ind[0] > group_id:
# Because we have sorted by group id, when the index is greater
# than the group_id we can break the inner loop.
# Patch applied by CJC 05/11/2015
groups.append(group)
break
# Catch the final group
groups.append(group)
return groups | [
"def",
"cluster",
"(",
"template_list",
",",
"show",
"=",
"True",
",",
"corr_thresh",
"=",
"0.3",
",",
"allow_shift",
"=",
"False",
",",
"shift_len",
"=",
"0",
",",
"save_corrmat",
"=",
"False",
",",
"cores",
"=",
"'all'",
",",
"debug",
"=",
"1",
")",
... | Cluster template waveforms based on average correlations.
Function to take a set of templates and cluster them, will return groups
as lists of streams. Clustering is done by computing the cross-channel
correlation sum of each stream in stream_list with every other stream in
the list. :mod:`scipy.cluster.hierarchy` functions are then used to
compute the complete distance matrix, where distance is 1 minus the
normalised cross-correlation sum such that larger distances are less
similar events. Groups are then created by clustering the distance matrix
at distances less than 1 - corr_thresh.
Will compute the distance matrix in parallel, using all available cores
:type template_list: list
:param template_list:
List of tuples of the template (:class:`obspy.core.stream.Stream`)
and the template id to compute clustering for
:type show: bool
:param show: plot linkage on screen if True, defaults to True
:type corr_thresh: float
:param corr_thresh: Cross-channel correlation threshold for grouping
:type allow_shift: bool
:param allow_shift:
Whether to allow the templates to shift when correlating
:type shift_len: float
:param shift_len: How many seconds to allow the templates to shift
:type save_corrmat: bool
:param save_corrmat:
If True will save the distance matrix to dist_mat.npy in the local
directory.
:type cores: int
:param cores:
number of cores to use when computing the distance matrix, defaults to
'all' which will work out how many cpus are available and hog them.
:type debug: int
:param debug:
Level of debugging from 1-5, higher is more output,
currently only level 1 implemented.
:returns:
List of groups. Each group is a list of
:class:`obspy.core.stream.Stream` making up that group. | [
"Cluster",
"template",
"waveforms",
"based",
"on",
"average",
"correlations",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L152-L252 | train | 203,243 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | SVD | def SVD(stream_list, full=False):
"""
Depreciated. Use svd.
"""
warnings.warn('Depreciated, use svd instead.')
return svd(stream_list=stream_list, full=full) | python | def SVD(stream_list, full=False):
"""
Depreciated. Use svd.
"""
warnings.warn('Depreciated, use svd instead.')
return svd(stream_list=stream_list, full=full) | [
"def",
"SVD",
"(",
"stream_list",
",",
"full",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"'Depreciated, use svd instead.'",
")",
"return",
"svd",
"(",
"stream_list",
"=",
"stream_list",
",",
"full",
"=",
"full",
")"
] | Depreciated. Use svd. | [
"Depreciated",
".",
"Use",
"svd",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L333-L338 | train | 203,244 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | svd | def svd(stream_list, full=False):
"""
Compute the SVD of a number of templates.
Returns the singular vectors and singular values of the templates.
:type stream_list: List of :class: obspy.Stream
:param stream_list: List of the templates to be analysed
:type full: bool
:param full: Whether to compute the full input vector matrix or not.
:return: SValues(list) for each channel, SVectors(list of ndarray), \
UVectors(list of ndarray) for each channel, \
stachans, List of String (station.channel)
.. note:: We recommend that you align the data before computing the \
SVD, e.g., the P-arrival on all templates for the same channel \
should appear at the same time in the trace. See the \
stacking.align_traces function for a way to do this.
.. note:: Uses the numpy.linalg.svd function, their U, s and V are mapped \
to UVectors, SValues and SVectors respectively. Their V (and ours) \
corresponds to V.H.
"""
# Convert templates into ndarrays for each channel
# First find all unique channels:
stachans = list(set([(tr.stats.station, tr.stats.channel)
for st in stream_list for tr in st]))
stachans.sort()
# Initialize a list for the output matrices, one matrix per-channel
svalues = []
svectors = []
uvectors = []
for stachan in stachans:
lengths = []
for st in stream_list:
tr = st.select(station=stachan[0],
channel=stachan[1])
if len(tr) > 0:
tr = tr[0]
else:
warnings.warn('Stream does not contain %s'
% '.'.join(list(stachan)))
continue
lengths.append(len(tr.data))
min_length = min(lengths)
for stream in stream_list:
chan = stream.select(station=stachan[0],
channel=stachan[1])
if chan:
if len(chan[0].data) > min_length:
if abs(len(chan[0].data) - min_length) > 0.1 * \
chan[0].stats.sampling_rate:
raise IndexError('More than 0.1 s length '
'difference, align and fix')
warnings.warn('Channels are not equal length, trimming')
chan[0].data = chan[0].data[0:min_length]
if 'chan_mat' not in locals():
chan_mat = chan[0].data
else:
chan_mat = np.vstack((chan_mat, chan[0].data))
if not len(chan_mat.shape) > 1:
warnings.warn('Matrix of traces is less than 2D for %s'
% '.'.join(list(stachan)))
continue
# Be sure to transpose chan_mat as waveforms must define columns
chan_mat = np.asarray(chan_mat)
u, s, v = np.linalg.svd(chan_mat.T, full_matrices=full)
svalues.append(s)
svectors.append(v)
uvectors.append(u)
del (chan_mat)
return uvectors, svalues, svectors, stachans | python | def svd(stream_list, full=False):
"""
Compute the SVD of a number of templates.
Returns the singular vectors and singular values of the templates.
:type stream_list: List of :class: obspy.Stream
:param stream_list: List of the templates to be analysed
:type full: bool
:param full: Whether to compute the full input vector matrix or not.
:return: SValues(list) for each channel, SVectors(list of ndarray), \
UVectors(list of ndarray) for each channel, \
stachans, List of String (station.channel)
.. note:: We recommend that you align the data before computing the \
SVD, e.g., the P-arrival on all templates for the same channel \
should appear at the same time in the trace. See the \
stacking.align_traces function for a way to do this.
.. note:: Uses the numpy.linalg.svd function, their U, s and V are mapped \
to UVectors, SValues and SVectors respectively. Their V (and ours) \
corresponds to V.H.
"""
# Convert templates into ndarrays for each channel
# First find all unique channels:
stachans = list(set([(tr.stats.station, tr.stats.channel)
for st in stream_list for tr in st]))
stachans.sort()
# Initialize a list for the output matrices, one matrix per-channel
svalues = []
svectors = []
uvectors = []
for stachan in stachans:
lengths = []
for st in stream_list:
tr = st.select(station=stachan[0],
channel=stachan[1])
if len(tr) > 0:
tr = tr[0]
else:
warnings.warn('Stream does not contain %s'
% '.'.join(list(stachan)))
continue
lengths.append(len(tr.data))
min_length = min(lengths)
for stream in stream_list:
chan = stream.select(station=stachan[0],
channel=stachan[1])
if chan:
if len(chan[0].data) > min_length:
if abs(len(chan[0].data) - min_length) > 0.1 * \
chan[0].stats.sampling_rate:
raise IndexError('More than 0.1 s length '
'difference, align and fix')
warnings.warn('Channels are not equal length, trimming')
chan[0].data = chan[0].data[0:min_length]
if 'chan_mat' not in locals():
chan_mat = chan[0].data
else:
chan_mat = np.vstack((chan_mat, chan[0].data))
if not len(chan_mat.shape) > 1:
warnings.warn('Matrix of traces is less than 2D for %s'
% '.'.join(list(stachan)))
continue
# Be sure to transpose chan_mat as waveforms must define columns
chan_mat = np.asarray(chan_mat)
u, s, v = np.linalg.svd(chan_mat.T, full_matrices=full)
svalues.append(s)
svectors.append(v)
uvectors.append(u)
del (chan_mat)
return uvectors, svalues, svectors, stachans | [
"def",
"svd",
"(",
"stream_list",
",",
"full",
"=",
"False",
")",
":",
"# Convert templates into ndarrays for each channel",
"# First find all unique channels:",
"stachans",
"=",
"list",
"(",
"set",
"(",
"[",
"(",
"tr",
".",
"stats",
".",
"station",
",",
"tr",
"... | Compute the SVD of a number of templates.
Returns the singular vectors and singular values of the templates.
:type stream_list: List of :class: obspy.Stream
:param stream_list: List of the templates to be analysed
:type full: bool
:param full: Whether to compute the full input vector matrix or not.
:return: SValues(list) for each channel, SVectors(list of ndarray), \
UVectors(list of ndarray) for each channel, \
stachans, List of String (station.channel)
.. note:: We recommend that you align the data before computing the \
SVD, e.g., the P-arrival on all templates for the same channel \
should appear at the same time in the trace. See the \
stacking.align_traces function for a way to do this.
.. note:: Uses the numpy.linalg.svd function, their U, s and V are mapped \
to UVectors, SValues and SVectors respectively. Their V (and ours) \
corresponds to V.H. | [
"Compute",
"the",
"SVD",
"of",
"a",
"number",
"of",
"templates",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L341-L413 | train | 203,245 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | empirical_SVD | def empirical_SVD(stream_list, linear=True):
"""
Depreciated. Use empirical_svd.
"""
warnings.warn('Depreciated, use empirical_svd instead.')
return empirical_svd(stream_list=stream_list, linear=linear) | python | def empirical_SVD(stream_list, linear=True):
"""
Depreciated. Use empirical_svd.
"""
warnings.warn('Depreciated, use empirical_svd instead.')
return empirical_svd(stream_list=stream_list, linear=linear) | [
"def",
"empirical_SVD",
"(",
"stream_list",
",",
"linear",
"=",
"True",
")",
":",
"warnings",
".",
"warn",
"(",
"'Depreciated, use empirical_svd instead.'",
")",
"return",
"empirical_svd",
"(",
"stream_list",
"=",
"stream_list",
",",
"linear",
"=",
"linear",
")"
] | Depreciated. Use empirical_svd. | [
"Depreciated",
".",
"Use",
"empirical_svd",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L416-L421 | train | 203,246 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | empirical_svd | def empirical_svd(stream_list, linear=True):
"""
Empirical subspace detector generation function.
Takes a list of templates and computes the stack as the first order
subspace detector, and the differential of this as the second order
subspace detector following the empirical subspace method of
`Barrett & Beroza, 2014 - SRL
<http://srl.geoscienceworld.org/content/85/3/594.extract>`_.
:type stream_list: list
:param stream_list:
list of streams to compute the subspace detectors from, where streams
are :class:`obspy.core.stream.Stream` objects.
:type linear: bool
:param linear: Set to true by default to compute the linear stack as the \
first subspace vector, False will use the phase-weighted stack as the \
first subspace vector.
:returns: list of two :class:`obspy.core.stream.Stream` s
"""
# Run a check to ensure all traces are the same length
stachans = list(set([(tr.stats.station, tr.stats.channel)
for st in stream_list for tr in st]))
for stachan in stachans:
lengths = []
for st in stream_list:
lengths.append(len(st.select(station=stachan[0],
channel=stachan[1])[0]))
min_length = min(lengths)
for st in stream_list:
tr = st.select(station=stachan[0],
channel=stachan[1])[0]
if len(tr.data) > min_length:
sr = tr.stats.sampling_rate
if abs(len(tr.data) - min_length) > (0.1 * sr):
msg = 'More than 0.1 s length difference, align and fix'
raise IndexError(msg)
msg = ' is not the same length as others, trimming the end'
warnings.warn(str(tr) + msg)
tr.data = tr.data[0:min_length]
if linear:
first_subspace = stacking.linstack(stream_list)
else:
first_subspace = stacking.PWS_stack(streams=stream_list)
second_subspace = first_subspace.copy()
for i in range(len(second_subspace)):
second_subspace[i].data = np.diff(second_subspace[i].data)
delta = second_subspace[i].stats.delta
second_subspace[i].stats.starttime += 0.5 * delta
return [first_subspace, second_subspace] | python | def empirical_svd(stream_list, linear=True):
"""
Empirical subspace detector generation function.
Takes a list of templates and computes the stack as the first order
subspace detector, and the differential of this as the second order
subspace detector following the empirical subspace method of
`Barrett & Beroza, 2014 - SRL
<http://srl.geoscienceworld.org/content/85/3/594.extract>`_.
:type stream_list: list
:param stream_list:
list of streams to compute the subspace detectors from, where streams
are :class:`obspy.core.stream.Stream` objects.
:type linear: bool
:param linear: Set to true by default to compute the linear stack as the \
first subspace vector, False will use the phase-weighted stack as the \
first subspace vector.
:returns: list of two :class:`obspy.core.stream.Stream` s
"""
# Run a check to ensure all traces are the same length
stachans = list(set([(tr.stats.station, tr.stats.channel)
for st in stream_list for tr in st]))
for stachan in stachans:
lengths = []
for st in stream_list:
lengths.append(len(st.select(station=stachan[0],
channel=stachan[1])[0]))
min_length = min(lengths)
for st in stream_list:
tr = st.select(station=stachan[0],
channel=stachan[1])[0]
if len(tr.data) > min_length:
sr = tr.stats.sampling_rate
if abs(len(tr.data) - min_length) > (0.1 * sr):
msg = 'More than 0.1 s length difference, align and fix'
raise IndexError(msg)
msg = ' is not the same length as others, trimming the end'
warnings.warn(str(tr) + msg)
tr.data = tr.data[0:min_length]
if linear:
first_subspace = stacking.linstack(stream_list)
else:
first_subspace = stacking.PWS_stack(streams=stream_list)
second_subspace = first_subspace.copy()
for i in range(len(second_subspace)):
second_subspace[i].data = np.diff(second_subspace[i].data)
delta = second_subspace[i].stats.delta
second_subspace[i].stats.starttime += 0.5 * delta
return [first_subspace, second_subspace] | [
"def",
"empirical_svd",
"(",
"stream_list",
",",
"linear",
"=",
"True",
")",
":",
"# Run a check to ensure all traces are the same length",
"stachans",
"=",
"list",
"(",
"set",
"(",
"[",
"(",
"tr",
".",
"stats",
".",
"station",
",",
"tr",
".",
"stats",
".",
... | Empirical subspace detector generation function.
Takes a list of templates and computes the stack as the first order
subspace detector, and the differential of this as the second order
subspace detector following the empirical subspace method of
`Barrett & Beroza, 2014 - SRL
<http://srl.geoscienceworld.org/content/85/3/594.extract>`_.
:type stream_list: list
:param stream_list:
list of streams to compute the subspace detectors from, where streams
are :class:`obspy.core.stream.Stream` objects.
:type linear: bool
:param linear: Set to true by default to compute the linear stack as the \
first subspace vector, False will use the phase-weighted stack as the \
first subspace vector.
:returns: list of two :class:`obspy.core.stream.Stream` s | [
"Empirical",
"subspace",
"detector",
"generation",
"function",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L424-L475 | train | 203,247 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | SVD_2_stream | def SVD_2_stream(uvectors, stachans, k, sampling_rate):
"""
Depreciated. Use svd_to_stream
"""
warnings.warn('Depreciated, use svd_to_stream instead.')
return svd_to_stream(uvectors=uvectors, stachans=stachans, k=k,
sampling_rate=sampling_rate) | python | def SVD_2_stream(uvectors, stachans, k, sampling_rate):
"""
Depreciated. Use svd_to_stream
"""
warnings.warn('Depreciated, use svd_to_stream instead.')
return svd_to_stream(uvectors=uvectors, stachans=stachans, k=k,
sampling_rate=sampling_rate) | [
"def",
"SVD_2_stream",
"(",
"uvectors",
",",
"stachans",
",",
"k",
",",
"sampling_rate",
")",
":",
"warnings",
".",
"warn",
"(",
"'Depreciated, use svd_to_stream instead.'",
")",
"return",
"svd_to_stream",
"(",
"uvectors",
"=",
"uvectors",
",",
"stachans",
"=",
... | Depreciated. Use svd_to_stream | [
"Depreciated",
".",
"Use",
"svd_to_stream"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L478-L484 | train | 203,248 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | svd_to_stream | def svd_to_stream(uvectors, stachans, k, sampling_rate):
"""
Convert the singular vectors output by SVD to streams.
One stream will be generated for each singular vector level,
for all channels. Useful for plotting, and aiding seismologists thinking
of waveforms!
:type svectors: list
:param svectors: List of :class:`numpy.ndarray` Singular vectors
:type stachans: list
:param stachans: List of station.channel Strings
:type k: int
:param k: Number of streams to return = number of SV's to include
:type sampling_rate: float
:param sampling_rate: Sampling rate in Hz
:returns:
svstreams, List of :class:`obspy.core.stream.Stream`, with
svStreams[0] being composed of the highest rank singular vectors.
"""
svstreams = []
for i in range(k):
svstream = []
for j, stachan in enumerate(stachans):
if len(uvectors[j]) <= k:
warnings.warn('Too few traces at %s for a %02d dimensional '
'subspace. Detector streams will not include '
'this channel.' % ('.'.join(stachan[0],
stachan[1]), k))
else:
svstream.append(Trace(uvectors[j][i],
header={'station': stachan[0],
'channel': stachan[1],
'sampling_rate': sampling_rate}))
svstreams.append(Stream(svstream))
return svstreams | python | def svd_to_stream(uvectors, stachans, k, sampling_rate):
"""
Convert the singular vectors output by SVD to streams.
One stream will be generated for each singular vector level,
for all channels. Useful for plotting, and aiding seismologists thinking
of waveforms!
:type svectors: list
:param svectors: List of :class:`numpy.ndarray` Singular vectors
:type stachans: list
:param stachans: List of station.channel Strings
:type k: int
:param k: Number of streams to return = number of SV's to include
:type sampling_rate: float
:param sampling_rate: Sampling rate in Hz
:returns:
svstreams, List of :class:`obspy.core.stream.Stream`, with
svStreams[0] being composed of the highest rank singular vectors.
"""
svstreams = []
for i in range(k):
svstream = []
for j, stachan in enumerate(stachans):
if len(uvectors[j]) <= k:
warnings.warn('Too few traces at %s for a %02d dimensional '
'subspace. Detector streams will not include '
'this channel.' % ('.'.join(stachan[0],
stachan[1]), k))
else:
svstream.append(Trace(uvectors[j][i],
header={'station': stachan[0],
'channel': stachan[1],
'sampling_rate': sampling_rate}))
svstreams.append(Stream(svstream))
return svstreams | [
"def",
"svd_to_stream",
"(",
"uvectors",
",",
"stachans",
",",
"k",
",",
"sampling_rate",
")",
":",
"svstreams",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"svstream",
"=",
"[",
"]",
"for",
"j",
",",
"stachan",
"in",
"enumerate",
... | Convert the singular vectors output by SVD to streams.
One stream will be generated for each singular vector level,
for all channels. Useful for plotting, and aiding seismologists thinking
of waveforms!
:type svectors: list
:param svectors: List of :class:`numpy.ndarray` Singular vectors
:type stachans: list
:param stachans: List of station.channel Strings
:type k: int
:param k: Number of streams to return = number of SV's to include
:type sampling_rate: float
:param sampling_rate: Sampling rate in Hz
:returns:
svstreams, List of :class:`obspy.core.stream.Stream`, with
svStreams[0] being composed of the highest rank singular vectors. | [
"Convert",
"the",
"singular",
"vectors",
"output",
"by",
"SVD",
"to",
"streams",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L487-L523 | train | 203,249 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | corr_cluster | def corr_cluster(trace_list, thresh=0.9):
"""
Group traces based on correlations above threshold with the stack.
Will run twice, once with a lower threshold to remove large outliers that
would negatively affect the stack, then again with your threshold.
:type trace_list: list
:param trace_list:
List of :class:`obspy.core.stream.Trace` to compute similarity between
:type thresh: float
:param thresh: Correlation threshold between -1-1
:returns:
:class:`numpy.ndarray` of bool of whether that trace correlates well
enough (above your given threshold) with the stack.
.. note::
We recommend that you align the data before computing the clustering,
e.g., the P-arrival on all templates for the same channel should
appear at the same time in the trace. See the
:func:`eqcorrscan.utils.stacking.align_traces` function for a way to do
this.
"""
stack = stacking.linstack([Stream(tr) for tr in trace_list])[0]
output = np.array([False] * len(trace_list))
group1 = []
array_xcorr = get_array_xcorr()
for i, tr in enumerate(trace_list):
if array_xcorr(
np.array([tr.data]), stack.data, [0])[0][0][0] > 0.6:
output[i] = True
group1.append(tr)
if not group1:
warnings.warn('Nothing made it past the first 0.6 threshold')
return output
stack = stacking.linstack([Stream(tr) for tr in group1])[0]
group2 = []
for i, tr in enumerate(trace_list):
if array_xcorr(
np.array([tr.data]), stack.data, [0])[0][0][0] > thresh:
group2.append(tr)
output[i] = True
else:
output[i] = False
return output | python | def corr_cluster(trace_list, thresh=0.9):
"""
Group traces based on correlations above threshold with the stack.
Will run twice, once with a lower threshold to remove large outliers that
would negatively affect the stack, then again with your threshold.
:type trace_list: list
:param trace_list:
List of :class:`obspy.core.stream.Trace` to compute similarity between
:type thresh: float
:param thresh: Correlation threshold between -1-1
:returns:
:class:`numpy.ndarray` of bool of whether that trace correlates well
enough (above your given threshold) with the stack.
.. note::
We recommend that you align the data before computing the clustering,
e.g., the P-arrival on all templates for the same channel should
appear at the same time in the trace. See the
:func:`eqcorrscan.utils.stacking.align_traces` function for a way to do
this.
"""
stack = stacking.linstack([Stream(tr) for tr in trace_list])[0]
output = np.array([False] * len(trace_list))
group1 = []
array_xcorr = get_array_xcorr()
for i, tr in enumerate(trace_list):
if array_xcorr(
np.array([tr.data]), stack.data, [0])[0][0][0] > 0.6:
output[i] = True
group1.append(tr)
if not group1:
warnings.warn('Nothing made it past the first 0.6 threshold')
return output
stack = stacking.linstack([Stream(tr) for tr in group1])[0]
group2 = []
for i, tr in enumerate(trace_list):
if array_xcorr(
np.array([tr.data]), stack.data, [0])[0][0][0] > thresh:
group2.append(tr)
output[i] = True
else:
output[i] = False
return output | [
"def",
"corr_cluster",
"(",
"trace_list",
",",
"thresh",
"=",
"0.9",
")",
":",
"stack",
"=",
"stacking",
".",
"linstack",
"(",
"[",
"Stream",
"(",
"tr",
")",
"for",
"tr",
"in",
"trace_list",
"]",
")",
"[",
"0",
"]",
"output",
"=",
"np",
".",
"array... | Group traces based on correlations above threshold with the stack.
Will run twice, once with a lower threshold to remove large outliers that
would negatively affect the stack, then again with your threshold.
:type trace_list: list
:param trace_list:
List of :class:`obspy.core.stream.Trace` to compute similarity between
:type thresh: float
:param thresh: Correlation threshold between -1-1
:returns:
:class:`numpy.ndarray` of bool of whether that trace correlates well
enough (above your given threshold) with the stack.
.. note::
We recommend that you align the data before computing the clustering,
e.g., the P-arrival on all templates for the same channel should
appear at the same time in the trace. See the
:func:`eqcorrscan.utils.stacking.align_traces` function for a way to do
this. | [
"Group",
"traces",
"based",
"on",
"correlations",
"above",
"threshold",
"with",
"the",
"stack",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L526-L571 | train | 203,250 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | dist_mat_km | def dist_mat_km(catalog):
"""
Compute the distance matrix for all a catalog using epicentral separation.
Will give physical distance in kilometers.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog for which to compute the distance matrix
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
"""
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(catalog))] *
len(catalog))
# Calculate distance vector for each event
for i, master in enumerate(catalog):
mast_list = []
if master.preferred_origin():
master_ori = master.preferred_origin()
else:
master_ori = master.origins[-1]
master_tup = (master_ori.latitude,
master_ori.longitude,
master_ori.depth // 1000)
for slave in catalog:
if slave.preferred_origin():
slave_ori = slave.preferred_origin()
else:
slave_ori = slave.origins[-1]
slave_tup = (slave_ori.latitude,
slave_ori.longitude,
slave_ori.depth // 1000)
mast_list.append(dist_calc(master_tup, slave_tup))
# Sort the list into the dist_mat structure
for j in range(i, len(catalog)):
dist_mat[i, j] = mast_list[j]
# Reshape the distance matrix
for i in range(1, len(catalog)):
for j in range(i):
dist_mat[i, j] = dist_mat.T[i, j]
return dist_mat | python | def dist_mat_km(catalog):
"""
Compute the distance matrix for all a catalog using epicentral separation.
Will give physical distance in kilometers.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog for which to compute the distance matrix
:returns: distance matrix
:rtype: :class:`numpy.ndarray`
"""
# Initialize square matrix
dist_mat = np.array([np.array([0.0] * len(catalog))] *
len(catalog))
# Calculate distance vector for each event
for i, master in enumerate(catalog):
mast_list = []
if master.preferred_origin():
master_ori = master.preferred_origin()
else:
master_ori = master.origins[-1]
master_tup = (master_ori.latitude,
master_ori.longitude,
master_ori.depth // 1000)
for slave in catalog:
if slave.preferred_origin():
slave_ori = slave.preferred_origin()
else:
slave_ori = slave.origins[-1]
slave_tup = (slave_ori.latitude,
slave_ori.longitude,
slave_ori.depth // 1000)
mast_list.append(dist_calc(master_tup, slave_tup))
# Sort the list into the dist_mat structure
for j in range(i, len(catalog)):
dist_mat[i, j] = mast_list[j]
# Reshape the distance matrix
for i in range(1, len(catalog)):
for j in range(i):
dist_mat[i, j] = dist_mat.T[i, j]
return dist_mat | [
"def",
"dist_mat_km",
"(",
"catalog",
")",
":",
"# Initialize square matrix",
"dist_mat",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"array",
"(",
"[",
"0.0",
"]",
"*",
"len",
"(",
"catalog",
")",
")",
"]",
"*",
"len",
"(",
"catalog",
")",
")",
... | Compute the distance matrix for all a catalog using epicentral separation.
Will give physical distance in kilometers.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog for which to compute the distance matrix
:returns: distance matrix
:rtype: :class:`numpy.ndarray` | [
"Compute",
"the",
"distance",
"matrix",
"for",
"all",
"a",
"catalog",
"using",
"epicentral",
"separation",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L796-L837 | train | 203,251 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | space_cluster | def space_cluster(catalog, d_thresh, show=True):
"""
Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
"""
# Compute the distance matrix and linkage
dist_mat = dist_mat_km(catalog)
dist_vec = squareform(dist_mat)
Z = linkage(dist_vec, method='average')
# Cluster the linkage using the given threshold as the cutoff
indices = fcluster(Z, t=d_thresh, criterion='distance')
group_ids = list(set(indices))
indices = [(indices[i], i) for i in range(len(indices))]
if show:
# Plot the dendrogram...if it's not way too huge
dendrogram(Z, color_threshold=d_thresh,
distance_sort='ascending')
plt.show()
# Sort by group id
indices.sort(key=lambda tup: tup[0])
groups = []
for group_id in group_ids:
group = Catalog()
for ind in indices:
if ind[0] == group_id:
group.append(catalog[ind[1]])
elif ind[0] > group_id:
# Because we have sorted by group id, when the index is greater
# than the group_id we can break the inner loop.
# Patch applied by CJC 05/11/2015
groups.append(group)
break
groups.append(group)
return groups | python | def space_cluster(catalog, d_thresh, show=True):
"""
Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False)
"""
# Compute the distance matrix and linkage
dist_mat = dist_mat_km(catalog)
dist_vec = squareform(dist_mat)
Z = linkage(dist_vec, method='average')
# Cluster the linkage using the given threshold as the cutoff
indices = fcluster(Z, t=d_thresh, criterion='distance')
group_ids = list(set(indices))
indices = [(indices[i], i) for i in range(len(indices))]
if show:
# Plot the dendrogram...if it's not way too huge
dendrogram(Z, color_threshold=d_thresh,
distance_sort='ascending')
plt.show()
# Sort by group id
indices.sort(key=lambda tup: tup[0])
groups = []
for group_id in group_ids:
group = Catalog()
for ind in indices:
if ind[0] == group_id:
group.append(catalog[ind[1]])
elif ind[0] > group_id:
# Because we have sorted by group id, when the index is greater
# than the group_id we can break the inner loop.
# Patch applied by CJC 05/11/2015
groups.append(group)
break
groups.append(group)
return groups | [
"def",
"space_cluster",
"(",
"catalog",
",",
"d_thresh",
",",
"show",
"=",
"True",
")",
":",
"# Compute the distance matrix and linkage",
"dist_mat",
"=",
"dist_mat_km",
"(",
"catalog",
")",
"dist_vec",
"=",
"squareform",
"(",
"dist_mat",
")",
"Z",
"=",
"linkage... | Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False) | [
"Cluster",
"a",
"catalog",
"by",
"distance",
"only",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L840-L906 | train | 203,252 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | space_time_cluster | def space_time_cluster(catalog, t_thresh, d_thresh):
"""
Cluster detections in space and time.
Use to separate repeaters from other events. Clusters by distance
first, then removes events in those groups that are at different times.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type t_thresh: float
:param t_thresh: Maximum inter-event time threshold in seconds
:type d_thresh: float
:param d_thresh: Maximum inter-event distance in km
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_time_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_time_cluster(catalog=cat, t_thresh=86400, d_thresh=1000)
"""
initial_spatial_groups = space_cluster(catalog=catalog, d_thresh=d_thresh,
show=False)
# Need initial_spatial_groups to be lists at the moment
initial_spatial_lists = []
for group in initial_spatial_groups:
initial_spatial_lists.append(list(group))
# Check within these groups and throw them out if they are not close in
# time.
groups = []
for group in initial_spatial_lists:
for master in group:
for event in group:
if abs(event.preferred_origin().time -
master.preferred_origin().time) > t_thresh:
# If greater then just put event in on it's own
groups.append([event])
group.remove(event)
groups.append(group)
return [Catalog(group) for group in groups] | python | def space_time_cluster(catalog, t_thresh, d_thresh):
"""
Cluster detections in space and time.
Use to separate repeaters from other events. Clusters by distance
first, then removes events in those groups that are at different times.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type t_thresh: float
:param t_thresh: Maximum inter-event time threshold in seconds
:type d_thresh: float
:param d_thresh: Maximum inter-event distance in km
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_time_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_time_cluster(catalog=cat, t_thresh=86400, d_thresh=1000)
"""
initial_spatial_groups = space_cluster(catalog=catalog, d_thresh=d_thresh,
show=False)
# Need initial_spatial_groups to be lists at the moment
initial_spatial_lists = []
for group in initial_spatial_groups:
initial_spatial_lists.append(list(group))
# Check within these groups and throw them out if they are not close in
# time.
groups = []
for group in initial_spatial_lists:
for master in group:
for event in group:
if abs(event.preferred_origin().time -
master.preferred_origin().time) > t_thresh:
# If greater then just put event in on it's own
groups.append([event])
group.remove(event)
groups.append(group)
return [Catalog(group) for group in groups] | [
"def",
"space_time_cluster",
"(",
"catalog",
",",
"t_thresh",
",",
"d_thresh",
")",
":",
"initial_spatial_groups",
"=",
"space_cluster",
"(",
"catalog",
"=",
"catalog",
",",
"d_thresh",
"=",
"d_thresh",
",",
"show",
"=",
"False",
")",
"# Need initial_spatial_group... | Cluster detections in space and time.
Use to separate repeaters from other events. Clusters by distance
first, then removes events in those groups that are at different times.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type t_thresh: float
:param t_thresh: Maximum inter-event time threshold in seconds
:type d_thresh: float
:param d_thresh: Maximum inter-event distance in km
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_time_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_time_cluster(catalog=cat, t_thresh=86400, d_thresh=1000) | [
"Cluster",
"detections",
"in",
"space",
"and",
"time",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L909-L954 | train | 203,253 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | re_thresh_csv | def re_thresh_csv(path, old_thresh, new_thresh, chan_thresh):
"""
Remove detections by changing the threshold.
Can only be done to remove detection by increasing threshold,
threshold lowering will have no effect.
:type path: str
:param path: Path to the .csv detection file
:type old_thresh: float
:param old_thresh: Old threshold MAD multiplier
:type new_thresh: float
:param new_thresh: New threshold MAD multiplier
:type chan_thresh: int
:param chan_thresh: Minimum number of channels for a detection
:returns: List of detections
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.clustering import re_thresh_csv
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> det_file = os.path.join(TEST_PATH, 'expected_tutorial_detections.txt')
>>> detections = re_thresh_csv(path=det_file, old_thresh=8, new_thresh=10,
... chan_thresh=3)
Read in 22 detections
Left with 17 detections
.. Note::
This is a legacy function, and will read detections from all versions.
.. Warning:: Only works if thresholding was done by MAD.
"""
from eqcorrscan.core.match_filter import read_detections
warnings.warn('Legacy function, please use '
'eqcorrscan.core.match_filter.Party.rethreshold.')
old_detections = read_detections(path)
old_thresh = float(old_thresh)
new_thresh = float(new_thresh)
# Be nice, ensure that the thresholds are float
detections = []
detections_in = 0
detections_out = 0
for detection in old_detections:
detections_in += 1
con1 = (new_thresh / old_thresh) * detection.threshold
con2 = detection.no_chans >= chan_thresh
requirted_thresh = (new_thresh / old_thresh) * detection.threshold
con3 = abs(detection.detect_val) >= requirted_thresh
if all([con1, con2, con3]):
detections_out += 1
detections.append(detection)
print('Read in %i detections' % detections_in)
print('Left with %i detections' % detections_out)
return detections | python | def re_thresh_csv(path, old_thresh, new_thresh, chan_thresh):
"""
Remove detections by changing the threshold.
Can only be done to remove detection by increasing threshold,
threshold lowering will have no effect.
:type path: str
:param path: Path to the .csv detection file
:type old_thresh: float
:param old_thresh: Old threshold MAD multiplier
:type new_thresh: float
:param new_thresh: New threshold MAD multiplier
:type chan_thresh: int
:param chan_thresh: Minimum number of channels for a detection
:returns: List of detections
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.clustering import re_thresh_csv
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> det_file = os.path.join(TEST_PATH, 'expected_tutorial_detections.txt')
>>> detections = re_thresh_csv(path=det_file, old_thresh=8, new_thresh=10,
... chan_thresh=3)
Read in 22 detections
Left with 17 detections
.. Note::
This is a legacy function, and will read detections from all versions.
.. Warning:: Only works if thresholding was done by MAD.
"""
from eqcorrscan.core.match_filter import read_detections
warnings.warn('Legacy function, please use '
'eqcorrscan.core.match_filter.Party.rethreshold.')
old_detections = read_detections(path)
old_thresh = float(old_thresh)
new_thresh = float(new_thresh)
# Be nice, ensure that the thresholds are float
detections = []
detections_in = 0
detections_out = 0
for detection in old_detections:
detections_in += 1
con1 = (new_thresh / old_thresh) * detection.threshold
con2 = detection.no_chans >= chan_thresh
requirted_thresh = (new_thresh / old_thresh) * detection.threshold
con3 = abs(detection.detect_val) >= requirted_thresh
if all([con1, con2, con3]):
detections_out += 1
detections.append(detection)
print('Read in %i detections' % detections_in)
print('Left with %i detections' % detections_out)
return detections | [
"def",
"re_thresh_csv",
"(",
"path",
",",
"old_thresh",
",",
"new_thresh",
",",
"chan_thresh",
")",
":",
"from",
"eqcorrscan",
".",
"core",
".",
"match_filter",
"import",
"read_detections",
"warnings",
".",
"warn",
"(",
"'Legacy function, please use '",
"'eqcorrscan... | Remove detections by changing the threshold.
Can only be done to remove detection by increasing threshold,
threshold lowering will have no effect.
:type path: str
:param path: Path to the .csv detection file
:type old_thresh: float
:param old_thresh: Old threshold MAD multiplier
:type new_thresh: float
:param new_thresh: New threshold MAD multiplier
:type chan_thresh: int
:param chan_thresh: Minimum number of channels for a detection
:returns: List of detections
:rtype: list
.. rubric:: Example
>>> from eqcorrscan.utils.clustering import re_thresh_csv
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> det_file = os.path.join(TEST_PATH, 'expected_tutorial_detections.txt')
>>> detections = re_thresh_csv(path=det_file, old_thresh=8, new_thresh=10,
... chan_thresh=3)
Read in 22 detections
Left with 17 detections
.. Note::
This is a legacy function, and will read detections from all versions.
.. Warning:: Only works if thresholding was done by MAD. | [
"Remove",
"detections",
"by",
"changing",
"the",
"threshold",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L957-L1015 | train | 203,254 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | pool_boy | def pool_boy(Pool, traces, **kwargs):
"""
A context manager for handling the setup and cleanup of a pool object.
:param Pool: any Class (not instance) that implements the multiprocessing
Pool interface
:param traces: The number of traces to process
:type traces: int
"""
# All parallel processing happens on a per-trace basis, we shouldn't create
# more workers than there are traces
n_cores = kwargs.get('cores', cpu_count())
if n_cores is None:
n_cores = cpu_count()
if n_cores > traces:
n_cores = traces
pool = Pool(n_cores)
yield pool
pool.close()
pool.join() | python | def pool_boy(Pool, traces, **kwargs):
"""
A context manager for handling the setup and cleanup of a pool object.
:param Pool: any Class (not instance) that implements the multiprocessing
Pool interface
:param traces: The number of traces to process
:type traces: int
"""
# All parallel processing happens on a per-trace basis, we shouldn't create
# more workers than there are traces
n_cores = kwargs.get('cores', cpu_count())
if n_cores is None:
n_cores = cpu_count()
if n_cores > traces:
n_cores = traces
pool = Pool(n_cores)
yield pool
pool.close()
pool.join() | [
"def",
"pool_boy",
"(",
"Pool",
",",
"traces",
",",
"*",
"*",
"kwargs",
")",
":",
"# All parallel processing happens on a per-trace basis, we shouldn't create",
"# more workers than there are traces",
"n_cores",
"=",
"kwargs",
".",
"get",
"(",
"'cores'",
",",
"cpu_count",... | A context manager for handling the setup and cleanup of a pool object.
:param Pool: any Class (not instance) that implements the multiprocessing
Pool interface
:param traces: The number of traces to process
:type traces: int | [
"A",
"context",
"manager",
"for",
"handling",
"the",
"setup",
"and",
"cleanup",
"of",
"a",
"pool",
"object",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L154-L173 | train | 203,255 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | _general_multithread | def _general_multithread(func):
""" return the general multithreading function using func """
def multithread(templates, stream, *args, **kwargs):
with pool_boy(ThreadPool, len(stream), **kwargs) as pool:
return _pool_normxcorr(templates, stream, pool=pool, func=func)
return multithread | python | def _general_multithread(func):
""" return the general multithreading function using func """
def multithread(templates, stream, *args, **kwargs):
with pool_boy(ThreadPool, len(stream), **kwargs) as pool:
return _pool_normxcorr(templates, stream, pool=pool, func=func)
return multithread | [
"def",
"_general_multithread",
"(",
"func",
")",
":",
"def",
"multithread",
"(",
"templates",
",",
"stream",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"pool_boy",
"(",
"ThreadPool",
",",
"len",
"(",
"stream",
")",
",",
"*",
"*",
"k... | return the general multithreading function using func | [
"return",
"the",
"general",
"multithreading",
"function",
"using",
"func"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L200-L207 | train | 203,256 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | register_array_xcorr | def register_array_xcorr(name, func=None, is_default=False):
"""
Decorator for registering correlation functions.
Each function must have the same interface as numpy_normxcorr, which is
*f(templates, stream, pads, *args, **kwargs)* any number of specific kwargs
can be used.
Register_normxcorr can be used as a decorator (with or without arguments)
or as a callable.
:param name: The name of the function for quick access, or the callable
that will be wrapped when used as a decorator.
:type name: str, callable
:param func: The function to register
:type func: callable, optional
:param is_default: True if this function should be marked as default
normxcorr
:type is_default: bool
:return: callable
"""
valid_methods = set(list(XCOR_ARRAY_METHODS) + list(XCORR_STREAM_METHODS))
cache = {}
def register(register_str):
"""
Register a function as an implementation.
:param register_str: The registration designation
:type register_str: str
"""
if register_str not in valid_methods:
msg = 'register_name must be in %s' % valid_methods
raise ValueError(msg)
def _register(func):
cache[register_str] = func
setattr(cache['func'], register_str, func)
return func
return _register
def wrapper(func, func_name=None):
# register the functions in the XCOR
fname = func_name or name.__name__ if callable(name) else str(name)
XCOR_FUNCS[fname] = func
# if is_default: # set function as default
# XCOR_FUNCS['default'] = func
# attach some attrs, this is a bit of a hack to avoid pickle problems
func.register = register
cache['func'] = func
func.multithread = _general_multithread(func)
func.multiprocess = _general_multiprocess(func)
func.concurrent = _general_multithread(func)
func.stream_xcorr = _general_serial(func)
func.array_xcorr = func
func.registered = True
if is_default: # set function as default
XCOR_FUNCS['default'] = copy.deepcopy(func)
return func
# used as a decorator
if callable(name):
return wrapper(name)
# used as a normal function (called and passed a function)
if callable(func):
return wrapper(func, func_name=name)
# called, then used as a decorator
return wrapper | python | def register_array_xcorr(name, func=None, is_default=False):
"""
Decorator for registering correlation functions.
Each function must have the same interface as numpy_normxcorr, which is
*f(templates, stream, pads, *args, **kwargs)* any number of specific kwargs
can be used.
Register_normxcorr can be used as a decorator (with or without arguments)
or as a callable.
:param name: The name of the function for quick access, or the callable
that will be wrapped when used as a decorator.
:type name: str, callable
:param func: The function to register
:type func: callable, optional
:param is_default: True if this function should be marked as default
normxcorr
:type is_default: bool
:return: callable
"""
valid_methods = set(list(XCOR_ARRAY_METHODS) + list(XCORR_STREAM_METHODS))
cache = {}
def register(register_str):
"""
Register a function as an implementation.
:param register_str: The registration designation
:type register_str: str
"""
if register_str not in valid_methods:
msg = 'register_name must be in %s' % valid_methods
raise ValueError(msg)
def _register(func):
cache[register_str] = func
setattr(cache['func'], register_str, func)
return func
return _register
def wrapper(func, func_name=None):
# register the functions in the XCOR
fname = func_name or name.__name__ if callable(name) else str(name)
XCOR_FUNCS[fname] = func
# if is_default: # set function as default
# XCOR_FUNCS['default'] = func
# attach some attrs, this is a bit of a hack to avoid pickle problems
func.register = register
cache['func'] = func
func.multithread = _general_multithread(func)
func.multiprocess = _general_multiprocess(func)
func.concurrent = _general_multithread(func)
func.stream_xcorr = _general_serial(func)
func.array_xcorr = func
func.registered = True
if is_default: # set function as default
XCOR_FUNCS['default'] = copy.deepcopy(func)
return func
# used as a decorator
if callable(name):
return wrapper(name)
# used as a normal function (called and passed a function)
if callable(func):
return wrapper(func, func_name=name)
# called, then used as a decorator
return wrapper | [
"def",
"register_array_xcorr",
"(",
"name",
",",
"func",
"=",
"None",
",",
"is_default",
"=",
"False",
")",
":",
"valid_methods",
"=",
"set",
"(",
"list",
"(",
"XCOR_ARRAY_METHODS",
")",
"+",
"list",
"(",
"XCORR_STREAM_METHODS",
")",
")",
"cache",
"=",
"{"... | Decorator for registering correlation functions.
Each function must have the same interface as numpy_normxcorr, which is
*f(templates, stream, pads, *args, **kwargs)* any number of specific kwargs
can be used.
Register_normxcorr can be used as a decorator (with or without arguments)
or as a callable.
:param name: The name of the function for quick access, or the callable
that will be wrapped when used as a decorator.
:type name: str, callable
:param func: The function to register
:type func: callable, optional
:param is_default: True if this function should be marked as default
normxcorr
:type is_default: bool
:return: callable | [
"Decorator",
"for",
"registering",
"correlation",
"functions",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L241-L312 | train | 203,257 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | _get_registerd_func | def _get_registerd_func(name_or_func):
""" get a xcorr function from a str or callable. """
# get the function or register callable
if callable(name_or_func):
func = register_array_xcorr(name_or_func)
else:
func = XCOR_FUNCS[name_or_func or 'default']
assert callable(func), 'func is not callable'
# ensure func has the added methods
if not hasattr(func, 'registered'):
func = register_array_xcorr(func)
return func | python | def _get_registerd_func(name_or_func):
""" get a xcorr function from a str or callable. """
# get the function or register callable
if callable(name_or_func):
func = register_array_xcorr(name_or_func)
else:
func = XCOR_FUNCS[name_or_func or 'default']
assert callable(func), 'func is not callable'
# ensure func has the added methods
if not hasattr(func, 'registered'):
func = register_array_xcorr(func)
return func | [
"def",
"_get_registerd_func",
"(",
"name_or_func",
")",
":",
"# get the function or register callable",
"if",
"callable",
"(",
"name_or_func",
")",
":",
"func",
"=",
"register_array_xcorr",
"(",
"name_or_func",
")",
"else",
":",
"func",
"=",
"XCOR_FUNCS",
"[",
"name... | get a xcorr function from a str or callable. | [
"get",
"a",
"xcorr",
"function",
"from",
"a",
"str",
"or",
"callable",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L318-L329 | train | 203,258 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | numpy_normxcorr | def numpy_normxcorr(templates, stream, pads, *args, **kwargs):
"""
Compute the normalized cross-correlation using numpy and bottleneck.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
import bottleneck
from scipy.signal.signaltools import _centered
# Generate a template mask
used_chans = ~np.isnan(templates).any(axis=1)
# Currently have to use float64 as bottleneck runs into issues with other
# types: https://github.com/kwgoodman/bottleneck/issues/164
stream = stream.astype(np.float64)
templates = templates.astype(np.float64)
template_length = templates.shape[1]
stream_length = len(stream)
fftshape = next_fast_len(template_length + stream_length - 1)
# Set up normalizers
stream_mean_array = bottleneck.move_mean(
stream, template_length)[template_length - 1:]
stream_std_array = bottleneck.move_std(
stream, template_length)[template_length - 1:]
# because stream_std_array is in denominator or res, nan all 0s
stream_std_array[stream_std_array == 0] = np.nan
# Normalize and flip the templates
norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
templates.std(axis=-1, keepdims=True) * template_length))
norm_sum = norm.sum(axis=-1, keepdims=True)
stream_fft = np.fft.rfft(stream, fftshape)
template_fft = np.fft.rfft(np.flip(norm, axis=-1), fftshape, axis=-1)
res = np.fft.irfft(template_fft * stream_fft,
fftshape)[:, 0:template_length + stream_length - 1]
res = ((_centered(res, stream_length - template_length + 1)) -
norm_sum * stream_mean_array) / stream_std_array
res[np.isnan(res)] = 0.0
# res[np.isinf(res)] = 0.0
for i, pad in enumerate(pads): # range(len(pads)):
res[i] = np.append(res[i], np.zeros(pad))[pad:]
return res.astype(np.float32), used_chans | python | def numpy_normxcorr(templates, stream, pads, *args, **kwargs):
"""
Compute the normalized cross-correlation using numpy and bottleneck.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
import bottleneck
from scipy.signal.signaltools import _centered
# Generate a template mask
used_chans = ~np.isnan(templates).any(axis=1)
# Currently have to use float64 as bottleneck runs into issues with other
# types: https://github.com/kwgoodman/bottleneck/issues/164
stream = stream.astype(np.float64)
templates = templates.astype(np.float64)
template_length = templates.shape[1]
stream_length = len(stream)
fftshape = next_fast_len(template_length + stream_length - 1)
# Set up normalizers
stream_mean_array = bottleneck.move_mean(
stream, template_length)[template_length - 1:]
stream_std_array = bottleneck.move_std(
stream, template_length)[template_length - 1:]
# because stream_std_array is in denominator or res, nan all 0s
stream_std_array[stream_std_array == 0] = np.nan
# Normalize and flip the templates
norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
templates.std(axis=-1, keepdims=True) * template_length))
norm_sum = norm.sum(axis=-1, keepdims=True)
stream_fft = np.fft.rfft(stream, fftshape)
template_fft = np.fft.rfft(np.flip(norm, axis=-1), fftshape, axis=-1)
res = np.fft.irfft(template_fft * stream_fft,
fftshape)[:, 0:template_length + stream_length - 1]
res = ((_centered(res, stream_length - template_length + 1)) -
norm_sum * stream_mean_array) / stream_std_array
res[np.isnan(res)] = 0.0
# res[np.isinf(res)] = 0.0
for i, pad in enumerate(pads): # range(len(pads)):
res[i] = np.append(res[i], np.zeros(pad))[pad:]
return res.astype(np.float32), used_chans | [
"def",
"numpy_normxcorr",
"(",
"templates",
",",
"stream",
",",
"pads",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"bottleneck",
"from",
"scipy",
".",
"signal",
".",
"signaltools",
"import",
"_centered",
"# Generate a template mask",
"used_... | Compute the normalized cross-correlation using numpy and bottleneck.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used | [
"Compute",
"the",
"normalized",
"cross",
"-",
"correlation",
"using",
"numpy",
"and",
"bottleneck",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L355-L402 | train | 203,259 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | time_multi_normxcorr | def time_multi_normxcorr(templates, stream, pads, threaded=False, *args,
**kwargs):
"""
Compute cross-correlations in the time-domain using C routine.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:param threaded: Whether to use the threaded routine or not
:type threaded: bool
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
used_chans = ~np.isnan(templates).any(axis=1)
utilslib = _load_cdll('libutils')
argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int, ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS'))]
restype = ctypes.c_int
if threaded:
func = utilslib.multi_normxcorr_time_threaded
argtypes.append(ctypes.c_int)
else:
func = utilslib.multi_normxcorr_time
func.argtypes = argtypes
func.restype = restype
# Need to de-mean everything
templates_means = templates.mean(axis=1).astype(np.float32)[:, np.newaxis]
stream_mean = stream.mean().astype(np.float32)
templates = templates.astype(np.float32) - templates_means
stream = stream.astype(np.float32) - stream_mean
template_len = templates.shape[1]
n_templates = templates.shape[0]
image_len = stream.shape[0]
ccc = np.ascontiguousarray(
np.empty((image_len - template_len + 1) * n_templates), np.float32)
t_array = np.ascontiguousarray(templates.flatten(), np.float32)
time_args = [t_array, template_len, n_templates,
np.ascontiguousarray(stream, np.float32), image_len, ccc]
if threaded:
time_args.append(kwargs.get('cores', cpu_count()))
func(*time_args)
ccc[np.isnan(ccc)] = 0.0
ccc = ccc.reshape((n_templates, image_len - template_len + 1))
for i in range(len(pads)):
ccc[i] = np.append(ccc[i], np.zeros(pads[i]))[pads[i]:]
templates += templates_means
stream += stream_mean
return ccc, used_chans | python | def time_multi_normxcorr(templates, stream, pads, threaded=False, *args,
**kwargs):
"""
Compute cross-correlations in the time-domain using C routine.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:param threaded: Whether to use the threaded routine or not
:type threaded: bool
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
used_chans = ~np.isnan(templates).any(axis=1)
utilslib = _load_cdll('libutils')
argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int, ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS'))]
restype = ctypes.c_int
if threaded:
func = utilslib.multi_normxcorr_time_threaded
argtypes.append(ctypes.c_int)
else:
func = utilslib.multi_normxcorr_time
func.argtypes = argtypes
func.restype = restype
# Need to de-mean everything
templates_means = templates.mean(axis=1).astype(np.float32)[:, np.newaxis]
stream_mean = stream.mean().astype(np.float32)
templates = templates.astype(np.float32) - templates_means
stream = stream.astype(np.float32) - stream_mean
template_len = templates.shape[1]
n_templates = templates.shape[0]
image_len = stream.shape[0]
ccc = np.ascontiguousarray(
np.empty((image_len - template_len + 1) * n_templates), np.float32)
t_array = np.ascontiguousarray(templates.flatten(), np.float32)
time_args = [t_array, template_len, n_templates,
np.ascontiguousarray(stream, np.float32), image_len, ccc]
if threaded:
time_args.append(kwargs.get('cores', cpu_count()))
func(*time_args)
ccc[np.isnan(ccc)] = 0.0
ccc = ccc.reshape((n_templates, image_len - template_len + 1))
for i in range(len(pads)):
ccc[i] = np.append(ccc[i], np.zeros(pads[i]))[pads[i]:]
templates += templates_means
stream += stream_mean
return ccc, used_chans | [
"def",
"time_multi_normxcorr",
"(",
"templates",
",",
"stream",
",",
"pads",
",",
"threaded",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"used_chans",
"=",
"~",
"np",
".",
"isnan",
"(",
"templates",
")",
".",
"any",
"(",
"axis... | Compute cross-correlations in the time-domain using C routine.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:param threaded: Whether to use the threaded routine or not
:type threaded: bool
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used | [
"Compute",
"cross",
"-",
"correlations",
"in",
"the",
"time",
"-",
"domain",
"using",
"C",
"routine",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L406-L466 | train | 203,260 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | _time_threaded_normxcorr | def _time_threaded_normxcorr(templates, stream, *args, **kwargs):
"""
Use the threaded time-domain routine for concurrency
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list
"""
no_chans = np.zeros(len(templates))
chans = [[] for _ in range(len(templates))]
array_dict_tuple = _get_array_dicts(templates, stream)
stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
cccsums = np.zeros([len(templates),
len(stream[0]) - len(templates[0][0]) + 1])
for seed_id in seed_ids:
tr_cc, tr_chans = time_multi_normxcorr(
template_dict[seed_id], stream_dict[seed_id], pad_dict[seed_id],
True)
cccsums = np.sum([cccsums, tr_cc], axis=0)
no_chans += tr_chans.astype(np.int)
for chan, state in zip(chans, tr_chans):
if state:
chan.append((seed_id.split('.')[1],
seed_id.split('.')[-1].split('_')[0]))
return cccsums, no_chans, chans | python | def _time_threaded_normxcorr(templates, stream, *args, **kwargs):
"""
Use the threaded time-domain routine for concurrency
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list
"""
no_chans = np.zeros(len(templates))
chans = [[] for _ in range(len(templates))]
array_dict_tuple = _get_array_dicts(templates, stream)
stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
cccsums = np.zeros([len(templates),
len(stream[0]) - len(templates[0][0]) + 1])
for seed_id in seed_ids:
tr_cc, tr_chans = time_multi_normxcorr(
template_dict[seed_id], stream_dict[seed_id], pad_dict[seed_id],
True)
cccsums = np.sum([cccsums, tr_cc], axis=0)
no_chans += tr_chans.astype(np.int)
for chan, state in zip(chans, tr_chans):
if state:
chan.append((seed_id.split('.')[1],
seed_id.split('.')[-1].split('_')[0]))
return cccsums, no_chans, chans | [
"def",
"_time_threaded_normxcorr",
"(",
"templates",
",",
"stream",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"no_chans",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"templates",
")",
")",
"chans",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"ra... | Use the threaded time-domain routine for concurrency
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list | [
"Use",
"the",
"threaded",
"time",
"-",
"domain",
"routine",
"for",
"concurrency"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L578-L618 | train | 203,261 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | _fftw_stream_xcorr | def _fftw_stream_xcorr(templates, stream, *args, **kwargs):
"""
Apply fftw normxcorr routine concurrently.
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list
"""
# number of threads:
# default to using inner threads
# if `cores` or `cores_outer` passed in then use that
# else if OMP_NUM_THREADS set use that
# otherwise use all available
num_cores_inner = kwargs.get('cores')
num_cores_outer = kwargs.get('cores_outer')
if num_cores_inner is None and num_cores_outer is None:
num_cores_inner = int(os.getenv("OMP_NUM_THREADS", cpu_count()))
num_cores_outer = 1
elif num_cores_inner is not None and num_cores_outer is None:
num_cores_outer = 1
elif num_cores_outer is not None and num_cores_inner is None:
num_cores_inner = 1
chans = [[] for _i in range(len(templates))]
array_dict_tuple = _get_array_dicts(templates, stream)
stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
assert set(seed_ids)
cccsums, tr_chans = fftw_multi_normxcorr(
template_array=template_dict, stream_array=stream_dict,
pad_array=pad_dict, seed_ids=seed_ids, cores_inner=num_cores_inner,
cores_outer=num_cores_outer)
no_chans = np.sum(np.array(tr_chans).astype(np.int), axis=0)
for seed_id, tr_chan in zip(seed_ids, tr_chans):
for chan, state in zip(chans, tr_chan):
if state:
chan.append((seed_id.split('.')[1],
seed_id.split('.')[-1].split('_')[0]))
return cccsums, no_chans, chans | python | def _fftw_stream_xcorr(templates, stream, *args, **kwargs):
"""
Apply fftw normxcorr routine concurrently.
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list
"""
# number of threads:
# default to using inner threads
# if `cores` or `cores_outer` passed in then use that
# else if OMP_NUM_THREADS set use that
# otherwise use all available
num_cores_inner = kwargs.get('cores')
num_cores_outer = kwargs.get('cores_outer')
if num_cores_inner is None and num_cores_outer is None:
num_cores_inner = int(os.getenv("OMP_NUM_THREADS", cpu_count()))
num_cores_outer = 1
elif num_cores_inner is not None and num_cores_outer is None:
num_cores_outer = 1
elif num_cores_outer is not None and num_cores_inner is None:
num_cores_inner = 1
chans = [[] for _i in range(len(templates))]
array_dict_tuple = _get_array_dicts(templates, stream)
stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
assert set(seed_ids)
cccsums, tr_chans = fftw_multi_normxcorr(
template_array=template_dict, stream_array=stream_dict,
pad_array=pad_dict, seed_ids=seed_ids, cores_inner=num_cores_inner,
cores_outer=num_cores_outer)
no_chans = np.sum(np.array(tr_chans).astype(np.int), axis=0)
for seed_id, tr_chan in zip(seed_ids, tr_chans):
for chan, state in zip(chans, tr_chan):
if state:
chan.append((seed_id.split('.')[1],
seed_id.split('.')[-1].split('_')[0]))
return cccsums, no_chans, chans | [
"def",
"_fftw_stream_xcorr",
"(",
"templates",
",",
"stream",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# number of threads:",
"# default to using inner threads",
"# if `cores` or `cores_outer` passed in then use that",
"# else if OMP_NUM_THREADS set use that",
... | Apply fftw normxcorr routine concurrently.
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list | [
"Apply",
"fftw",
"normxcorr",
"routine",
"concurrently",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L624-L677 | train | 203,262 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | get_stream_xcorr | def get_stream_xcorr(name_or_func=None, concurrency=None):
"""
Return a function for performing normalized cross correlation on lists of
streams.
:param name_or_func:
Either a name of a registered function or a callable that implements
the standard array_normxcorr signature.
:param concurrency:
Optional concurrency strategy, options are below.
:return: A callable with the interface of stream_normxcorr
:Concurrency options:
- multithread - use a threadpool for concurrency;
- multiprocess - use a process pool for concurrency;
- concurrent - use a customized concurrency strategy for the function,
if not defined threading will be used.
"""
func = _get_registerd_func(name_or_func)
concur = concurrency or 'stream_xcorr'
if not hasattr(func, concur):
msg = '%s does not support concurrency %s' % (func.__name__, concur)
raise ValueError(msg)
return getattr(func, concur) | python | def get_stream_xcorr(name_or_func=None, concurrency=None):
"""
Return a function for performing normalized cross correlation on lists of
streams.
:param name_or_func:
Either a name of a registered function or a callable that implements
the standard array_normxcorr signature.
:param concurrency:
Optional concurrency strategy, options are below.
:return: A callable with the interface of stream_normxcorr
:Concurrency options:
- multithread - use a threadpool for concurrency;
- multiprocess - use a process pool for concurrency;
- concurrent - use a customized concurrency strategy for the function,
if not defined threading will be used.
"""
func = _get_registerd_func(name_or_func)
concur = concurrency or 'stream_xcorr'
if not hasattr(func, concur):
msg = '%s does not support concurrency %s' % (func.__name__, concur)
raise ValueError(msg)
return getattr(func, concur) | [
"def",
"get_stream_xcorr",
"(",
"name_or_func",
"=",
"None",
",",
"concurrency",
"=",
"None",
")",
":",
"func",
"=",
"_get_registerd_func",
"(",
"name_or_func",
")",
"concur",
"=",
"concurrency",
"or",
"'stream_xcorr'",
"if",
"not",
"hasattr",
"(",
"func",
","... | Return a function for performing normalized cross correlation on lists of
streams.
:param name_or_func:
Either a name of a registered function or a callable that implements
the standard array_normxcorr signature.
:param concurrency:
Optional concurrency strategy, options are below.
:return: A callable with the interface of stream_normxcorr
:Concurrency options:
- multithread - use a threadpool for concurrency;
- multiprocess - use a process pool for concurrency;
- concurrent - use a customized concurrency strategy for the function,
if not defined threading will be used. | [
"Return",
"a",
"function",
"for",
"performing",
"normalized",
"cross",
"correlation",
"on",
"lists",
"of",
"streams",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L795-L820 | train | 203,263 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/correlate.py | _get_array_dicts | def _get_array_dicts(templates, stream, copy_streams=True):
""" prepare templates and stream, return dicts """
# Do some reshaping
# init empty structures for data storage
template_dict = {}
stream_dict = {}
pad_dict = {}
t_starts = []
stream.sort(['network', 'station', 'location', 'channel'])
for template in templates:
template.sort(['network', 'station', 'location', 'channel'])
t_starts.append(min([tr.stats.starttime for tr in template]))
# get seed ids, make sure these are collected on sorted streams
seed_ids = [tr.id + '_' + str(i) for i, tr in enumerate(templates[0])]
# pull common channels out of streams and templates and put in dicts
for i, seed_id in enumerate(seed_ids):
temps_with_seed = [template[i].data for template in templates]
t_ar = np.array(temps_with_seed).astype(np.float32)
template_dict.update({seed_id: t_ar})
stream_dict.update(
{seed_id: stream.select(
id=seed_id.split('_')[0])[0].data.astype(np.float32)})
pad_list = [
int(round(template[i].stats.sampling_rate *
(template[i].stats.starttime - t_starts[j])))
for j, template in zip(range(len(templates)), templates)]
pad_dict.update({seed_id: pad_list})
return stream_dict, template_dict, pad_dict, seed_ids | python | def _get_array_dicts(templates, stream, copy_streams=True):
""" prepare templates and stream, return dicts """
# Do some reshaping
# init empty structures for data storage
template_dict = {}
stream_dict = {}
pad_dict = {}
t_starts = []
stream.sort(['network', 'station', 'location', 'channel'])
for template in templates:
template.sort(['network', 'station', 'location', 'channel'])
t_starts.append(min([tr.stats.starttime for tr in template]))
# get seed ids, make sure these are collected on sorted streams
seed_ids = [tr.id + '_' + str(i) for i, tr in enumerate(templates[0])]
# pull common channels out of streams and templates and put in dicts
for i, seed_id in enumerate(seed_ids):
temps_with_seed = [template[i].data for template in templates]
t_ar = np.array(temps_with_seed).astype(np.float32)
template_dict.update({seed_id: t_ar})
stream_dict.update(
{seed_id: stream.select(
id=seed_id.split('_')[0])[0].data.astype(np.float32)})
pad_list = [
int(round(template[i].stats.sampling_rate *
(template[i].stats.starttime - t_starts[j])))
for j, template in zip(range(len(templates)), templates)]
pad_dict.update({seed_id: pad_list})
return stream_dict, template_dict, pad_dict, seed_ids | [
"def",
"_get_array_dicts",
"(",
"templates",
",",
"stream",
",",
"copy_streams",
"=",
"True",
")",
":",
"# Do some reshaping",
"# init empty structures for data storage",
"template_dict",
"=",
"{",
"}",
"stream_dict",
"=",
"{",
"}",
"pad_dict",
"=",
"{",
"}",
"t_s... | prepare templates and stream, return dicts | [
"prepare",
"templates",
"and",
"stream",
"return",
"dicts"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L826-L855 | train | 203,264 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/despike.py | median_filter | def median_filter(tr, multiplier=10, windowlength=0.5,
interp_len=0.05, debug=0):
"""
Filter out spikes in data above a multiple of MAD of the data.
Currently only has the ability to replaces spikes with linear
interpolation. In the future we would aim to fill the gap with something
more appropriate. Works in-place on data.
:type tr: obspy.core.trace.Trace
:param tr: trace to despike
:type multiplier: float
:param multiplier:
median absolute deviation multiplier to find spikes above.
:type windowlength: float
:param windowlength: Length of window to look for spikes in in seconds.
:type interp_len: float
:param interp_len: Length in seconds to interpolate around spikes.
:type debug: int
:param debug: Debug output level between 0 and 5, higher is more output.
:returns: :class:`obspy.core.trace.Trace`
.. warning::
Not particularly effective, and may remove earthquake signals, use with
caution.
"""
num_cores = cpu_count()
if debug >= 1:
data_in = tr.copy()
# Note - might be worth finding spikes in filtered data
filt = tr.copy()
filt.detrend('linear')
try:
filt.filter('bandpass', freqmin=10.0,
freqmax=(tr.stats.sampling_rate / 2) - 1)
except Exception as e:
print("Could not filter due to error: {0}".format(e))
data = filt.data
del filt
# Loop through windows
_windowlength = int(windowlength * tr.stats.sampling_rate)
_interp_len = int(interp_len * tr.stats.sampling_rate)
peaks = []
with Timer() as t:
pool = Pool(processes=num_cores)
results = [pool.apply_async(_median_window,
args=(data[chunk * _windowlength:
(chunk + 1) * _windowlength],
chunk * _windowlength, multiplier,
tr.stats.starttime + windowlength,
tr.stats.sampling_rate,
debug))
for chunk in range(int(len(data) / _windowlength))]
pool.close()
for p in results:
peaks += p.get()
pool.join()
for peak in peaks:
tr.data = _interp_gap(tr.data, peak[1], _interp_len)
print("Despiking took: %s s" % t.secs)
if debug >= 1:
plt.plot(data_in.data, 'r', label='raw')
plt.plot(tr.data, 'k', label='despiked')
plt.legend()
plt.show()
return tr | python | def median_filter(tr, multiplier=10, windowlength=0.5,
interp_len=0.05, debug=0):
"""
Filter out spikes in data above a multiple of MAD of the data.
Currently only has the ability to replaces spikes with linear
interpolation. In the future we would aim to fill the gap with something
more appropriate. Works in-place on data.
:type tr: obspy.core.trace.Trace
:param tr: trace to despike
:type multiplier: float
:param multiplier:
median absolute deviation multiplier to find spikes above.
:type windowlength: float
:param windowlength: Length of window to look for spikes in in seconds.
:type interp_len: float
:param interp_len: Length in seconds to interpolate around spikes.
:type debug: int
:param debug: Debug output level between 0 and 5, higher is more output.
:returns: :class:`obspy.core.trace.Trace`
.. warning::
Not particularly effective, and may remove earthquake signals, use with
caution.
"""
num_cores = cpu_count()
if debug >= 1:
data_in = tr.copy()
# Note - might be worth finding spikes in filtered data
filt = tr.copy()
filt.detrend('linear')
try:
filt.filter('bandpass', freqmin=10.0,
freqmax=(tr.stats.sampling_rate / 2) - 1)
except Exception as e:
print("Could not filter due to error: {0}".format(e))
data = filt.data
del filt
# Loop through windows
_windowlength = int(windowlength * tr.stats.sampling_rate)
_interp_len = int(interp_len * tr.stats.sampling_rate)
peaks = []
with Timer() as t:
pool = Pool(processes=num_cores)
results = [pool.apply_async(_median_window,
args=(data[chunk * _windowlength:
(chunk + 1) * _windowlength],
chunk * _windowlength, multiplier,
tr.stats.starttime + windowlength,
tr.stats.sampling_rate,
debug))
for chunk in range(int(len(data) / _windowlength))]
pool.close()
for p in results:
peaks += p.get()
pool.join()
for peak in peaks:
tr.data = _interp_gap(tr.data, peak[1], _interp_len)
print("Despiking took: %s s" % t.secs)
if debug >= 1:
plt.plot(data_in.data, 'r', label='raw')
plt.plot(tr.data, 'k', label='despiked')
plt.legend()
plt.show()
return tr | [
"def",
"median_filter",
"(",
"tr",
",",
"multiplier",
"=",
"10",
",",
"windowlength",
"=",
"0.5",
",",
"interp_len",
"=",
"0.05",
",",
"debug",
"=",
"0",
")",
":",
"num_cores",
"=",
"cpu_count",
"(",
")",
"if",
"debug",
">=",
"1",
":",
"data_in",
"="... | Filter out spikes in data above a multiple of MAD of the data.
Currently only has the ability to replaces spikes with linear
interpolation. In the future we would aim to fill the gap with something
more appropriate. Works in-place on data.
:type tr: obspy.core.trace.Trace
:param tr: trace to despike
:type multiplier: float
:param multiplier:
median absolute deviation multiplier to find spikes above.
:type windowlength: float
:param windowlength: Length of window to look for spikes in in seconds.
:type interp_len: float
:param interp_len: Length in seconds to interpolate around spikes.
:type debug: int
:param debug: Debug output level between 0 and 5, higher is more output.
:returns: :class:`obspy.core.trace.Trace`
.. warning::
Not particularly effective, and may remove earthquake signals, use with
caution. | [
"Filter",
"out",
"spikes",
"in",
"data",
"above",
"a",
"multiple",
"of",
"MAD",
"of",
"the",
"data",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/despike.py#L33-L99 | train | 203,265 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/despike.py | _median_window | def _median_window(window, window_start, multiplier, starttime, sampling_rate,
debug=0):
"""
Internal function to aid parallel processing
:type window: numpy.ndarry
:param window: Data to look for peaks in.
:type window_start: int
:param window_start: Index of window start point in larger array, used \
for peak indexing.
:type multiplier: float
:param multiplier: Multiple of MAD to use as threshold
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime of window, used in debug plotting.
:type sampling_rate: float
:param sampling_rate in Hz, used for debug plotting
:type debug: int
:param debug: debug level, if want plots, >= 4.
:returns: peaks
:rtype: list
"""
MAD = np.median(np.abs(window))
thresh = multiplier * MAD
if debug >= 2:
print('Threshold for window is: ' + str(thresh) +
'\nMedian is: ' + str(MAD) +
'\nMax is: ' + str(np.max(window)))
peaks = find_peaks2_short(arr=window,
thresh=thresh, trig_int=5, debug=0)
if debug >= 4 and peaks:
peaks_plot(window, starttime, sampling_rate,
save=False, peaks=peaks)
if peaks:
peaks = [(peak[0], peak[1] + window_start) for peak in peaks]
else:
peaks = []
return peaks | python | def _median_window(window, window_start, multiplier, starttime, sampling_rate,
debug=0):
"""
Internal function to aid parallel processing
:type window: numpy.ndarry
:param window: Data to look for peaks in.
:type window_start: int
:param window_start: Index of window start point in larger array, used \
for peak indexing.
:type multiplier: float
:param multiplier: Multiple of MAD to use as threshold
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime of window, used in debug plotting.
:type sampling_rate: float
:param sampling_rate in Hz, used for debug plotting
:type debug: int
:param debug: debug level, if want plots, >= 4.
:returns: peaks
:rtype: list
"""
MAD = np.median(np.abs(window))
thresh = multiplier * MAD
if debug >= 2:
print('Threshold for window is: ' + str(thresh) +
'\nMedian is: ' + str(MAD) +
'\nMax is: ' + str(np.max(window)))
peaks = find_peaks2_short(arr=window,
thresh=thresh, trig_int=5, debug=0)
if debug >= 4 and peaks:
peaks_plot(window, starttime, sampling_rate,
save=False, peaks=peaks)
if peaks:
peaks = [(peak[0], peak[1] + window_start) for peak in peaks]
else:
peaks = []
return peaks | [
"def",
"_median_window",
"(",
"window",
",",
"window_start",
",",
"multiplier",
",",
"starttime",
",",
"sampling_rate",
",",
"debug",
"=",
"0",
")",
":",
"MAD",
"=",
"np",
".",
"median",
"(",
"np",
".",
"abs",
"(",
"window",
")",
")",
"thresh",
"=",
... | Internal function to aid parallel processing
:type window: numpy.ndarry
:param window: Data to look for peaks in.
:type window_start: int
:param window_start: Index of window start point in larger array, used \
for peak indexing.
:type multiplier: float
:param multiplier: Multiple of MAD to use as threshold
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime: Starttime of window, used in debug plotting.
:type sampling_rate: float
:param sampling_rate in Hz, used for debug plotting
:type debug: int
:param debug: debug level, if want plots, >= 4.
:returns: peaks
:rtype: list | [
"Internal",
"function",
"to",
"aid",
"parallel",
"processing"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/despike.py#L102-L139 | train | 203,266 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/despike.py | _interp_gap | def _interp_gap(data, peak_loc, interp_len):
"""
Internal function for filling gap with linear interpolation
:type data: numpy.ndarray
:param data: data to remove peak in
:type peak_loc: int
:param peak_loc: peak location position
:type interp_len: int
:param interp_len: window to interpolate
:returns: Trace works in-place
:rtype: :class:`obspy.core.trace.Trace`
"""
start_loc = peak_loc - int(0.5 * interp_len)
end_loc = peak_loc + int(0.5 * interp_len)
if start_loc < 0:
start_loc = 0
if end_loc > len(data) - 1:
end_loc = len(data) - 1
fill = np.linspace(data[start_loc], data[end_loc], end_loc - start_loc)
data[start_loc:end_loc] = fill
return data | python | def _interp_gap(data, peak_loc, interp_len):
"""
Internal function for filling gap with linear interpolation
:type data: numpy.ndarray
:param data: data to remove peak in
:type peak_loc: int
:param peak_loc: peak location position
:type interp_len: int
:param interp_len: window to interpolate
:returns: Trace works in-place
:rtype: :class:`obspy.core.trace.Trace`
"""
start_loc = peak_loc - int(0.5 * interp_len)
end_loc = peak_loc + int(0.5 * interp_len)
if start_loc < 0:
start_loc = 0
if end_loc > len(data) - 1:
end_loc = len(data) - 1
fill = np.linspace(data[start_loc], data[end_loc], end_loc - start_loc)
data[start_loc:end_loc] = fill
return data | [
"def",
"_interp_gap",
"(",
"data",
",",
"peak_loc",
",",
"interp_len",
")",
":",
"start_loc",
"=",
"peak_loc",
"-",
"int",
"(",
"0.5",
"*",
"interp_len",
")",
"end_loc",
"=",
"peak_loc",
"+",
"int",
"(",
"0.5",
"*",
"interp_len",
")",
"if",
"start_loc",
... | Internal function for filling gap with linear interpolation
:type data: numpy.ndarray
:param data: data to remove peak in
:type peak_loc: int
:param peak_loc: peak location position
:type interp_len: int
:param interp_len: window to interpolate
:returns: Trace works in-place
:rtype: :class:`obspy.core.trace.Trace` | [
"Internal",
"function",
"for",
"filling",
"gap",
"with",
"linear",
"interpolation"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/despike.py#L142-L164 | train | 203,267 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/despike.py | template_remove | def template_remove(tr, template, cc_thresh, windowlength,
interp_len, debug=0):
"""
Looks for instances of template in the trace and removes the matches.
:type tr: obspy.core.trace.Trace
:param tr: Trace to remove spikes from.
:type template: osbpy.core.trace.Trace
:param template: Spike template to look for in data.
:type cc_thresh: float
:param cc_thresh: Cross-correlation threshold (-1 - 1).
:type windowlength: float
:param windowlength: Length of window to look for spikes in in seconds.
:type interp_len: float
:param interp_len: Window length to remove and fill in seconds.
:type debug: int
:param debug: Debug level.
:returns: tr, works in place.
:rtype: :class:`obspy.core.trace.Trace`
"""
data_in = tr.copy()
_interp_len = int(tr.stats.sampling_rate * interp_len)
if _interp_len < len(template.data):
warnings.warn('Interp_len is less than the length of the template,'
'will used the length of the template!')
_interp_len = len(template.data)
if isinstance(template, Trace):
template = template.data
with Timer() as t:
cc = normxcorr2(image=tr.data.astype(np.float32),
template=template.astype(np.float32))
if debug > 3:
plt.plot(cc.flatten(), 'k', label='cross-correlation')
plt.legend()
plt.show()
peaks = find_peaks2_short(arr=cc.flatten(), thresh=cc_thresh,
trig_int=windowlength * tr.stats.
sampling_rate)
for peak in peaks:
tr.data = _interp_gap(data=tr.data,
peak_loc=peak[1] + int(0.5 * _interp_len),
interp_len=_interp_len)
print("Despiking took: %s s" % t.secs)
if debug > 2:
plt.plot(data_in.data, 'r', label='raw')
plt.plot(tr.data, 'k', label='despiked')
plt.legend()
plt.show()
return tr | python | def template_remove(tr, template, cc_thresh, windowlength,
interp_len, debug=0):
"""
Looks for instances of template in the trace and removes the matches.
:type tr: obspy.core.trace.Trace
:param tr: Trace to remove spikes from.
:type template: osbpy.core.trace.Trace
:param template: Spike template to look for in data.
:type cc_thresh: float
:param cc_thresh: Cross-correlation threshold (-1 - 1).
:type windowlength: float
:param windowlength: Length of window to look for spikes in in seconds.
:type interp_len: float
:param interp_len: Window length to remove and fill in seconds.
:type debug: int
:param debug: Debug level.
:returns: tr, works in place.
:rtype: :class:`obspy.core.trace.Trace`
"""
data_in = tr.copy()
_interp_len = int(tr.stats.sampling_rate * interp_len)
if _interp_len < len(template.data):
warnings.warn('Interp_len is less than the length of the template,'
'will used the length of the template!')
_interp_len = len(template.data)
if isinstance(template, Trace):
template = template.data
with Timer() as t:
cc = normxcorr2(image=tr.data.astype(np.float32),
template=template.astype(np.float32))
if debug > 3:
plt.plot(cc.flatten(), 'k', label='cross-correlation')
plt.legend()
plt.show()
peaks = find_peaks2_short(arr=cc.flatten(), thresh=cc_thresh,
trig_int=windowlength * tr.stats.
sampling_rate)
for peak in peaks:
tr.data = _interp_gap(data=tr.data,
peak_loc=peak[1] + int(0.5 * _interp_len),
interp_len=_interp_len)
print("Despiking took: %s s" % t.secs)
if debug > 2:
plt.plot(data_in.data, 'r', label='raw')
plt.plot(tr.data, 'k', label='despiked')
plt.legend()
plt.show()
return tr | [
"def",
"template_remove",
"(",
"tr",
",",
"template",
",",
"cc_thresh",
",",
"windowlength",
",",
"interp_len",
",",
"debug",
"=",
"0",
")",
":",
"data_in",
"=",
"tr",
".",
"copy",
"(",
")",
"_interp_len",
"=",
"int",
"(",
"tr",
".",
"stats",
".",
"s... | Looks for instances of template in the trace and removes the matches.
:type tr: obspy.core.trace.Trace
:param tr: Trace to remove spikes from.
:type template: osbpy.core.trace.Trace
:param template: Spike template to look for in data.
:type cc_thresh: float
:param cc_thresh: Cross-correlation threshold (-1 - 1).
:type windowlength: float
:param windowlength: Length of window to look for spikes in in seconds.
:type interp_len: float
:param interp_len: Window length to remove and fill in seconds.
:type debug: int
:param debug: Debug level.
:returns: tr, works in place.
:rtype: :class:`obspy.core.trace.Trace` | [
"Looks",
"for",
"instances",
"of",
"template",
"in",
"the",
"trace",
"and",
"removes",
"the",
"matches",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/despike.py#L167-L216 | train | 203,268 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/archive_read.py | read_data | def read_data(archive, arc_type, day, stachans, length=86400):
"""
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these files directories should be stored as day-long, \
single-channel files. This is not implemented in the fasted way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
"""
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
# Cope with two char channel naming in seisan
station_map = (station[0], station[1][0] + '*' + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = ' '.join([station[0], station_map[1], 'is not available for',
day.strftime('%Y/%m/%d')])
warnings.warn(msg)
continue
if arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn('No data on server despite station being ' +
'available...')
continue
elif arc_type.lower() == 'day_vols':
wavfiles = _get_station_file(os.path.join(
archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st | python | def read_data(archive, arc_type, day, stachans, length=86400):
"""
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these files directories should be stored as day-long, \
single-channel files. This is not implemented in the fasted way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
"""
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
# Cope with two char channel naming in seisan
station_map = (station[0], station[1][0] + '*' + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = ' '.join([station[0], station_map[1], 'is not available for',
day.strftime('%Y/%m/%d')])
warnings.warn(msg)
continue
if arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn('No data on server despite station being ' +
'available...')
continue
elif arc_type.lower() == 'day_vols':
wavfiles = _get_station_file(os.path.join(
archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st | [
"def",
"read_data",
"(",
"archive",
",",
"arc_type",
",",
"day",
",",
"stachans",
",",
"length",
"=",
"86400",
")",
":",
"st",
"=",
"[",
"]",
"available_stations",
"=",
"_check_available_data",
"(",
"archive",
",",
"arc_type",
",",
"day",
")",
"for",
"st... | Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these files directories should be stored as day-long, \
single-channel files. This is not implemented in the fasted way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples | [
"Function",
"to",
"read",
"the",
"appropriate",
"data",
"from",
"an",
"archive",
"for",
"a",
"day",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L31-L140 | train | 203,269 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/archive_read.py | _get_station_file | def _get_station_file(path_name, station, channel, debug=0):
"""
Helper function to find the correct file.
:type path_name: str
:param path_name: Path to files to check.
:type station: str
:type channel: str
:returns: list of filenames, str
"""
wavfiles = glob.glob(path_name + os.sep + '*')
out_files = [_check_data(wavfile, station, channel, debug=debug)
for wavfile in wavfiles]
out_files = list(set(out_files))
return out_files | python | def _get_station_file(path_name, station, channel, debug=0):
"""
Helper function to find the correct file.
:type path_name: str
:param path_name: Path to files to check.
:type station: str
:type channel: str
:returns: list of filenames, str
"""
wavfiles = glob.glob(path_name + os.sep + '*')
out_files = [_check_data(wavfile, station, channel, debug=debug)
for wavfile in wavfiles]
out_files = list(set(out_files))
return out_files | [
"def",
"_get_station_file",
"(",
"path_name",
",",
"station",
",",
"channel",
",",
"debug",
"=",
"0",
")",
":",
"wavfiles",
"=",
"glob",
".",
"glob",
"(",
"path_name",
"+",
"os",
".",
"sep",
"+",
"'*'",
")",
"out_files",
"=",
"[",
"_check_data",
"(",
... | Helper function to find the correct file.
:type path_name: str
:param path_name: Path to files to check.
:type station: str
:type channel: str
:returns: list of filenames, str | [
"Helper",
"function",
"to",
"find",
"the",
"correct",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L143-L159 | train | 203,270 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/archive_read.py | _check_data | def _check_data(wavfile, station, channel, debug=0):
"""
Inner loop for parallel checks.
:type wavfile: str
:param wavfile: Wavefile path name to look in.
:type station: str
:param station: Channel name to check for
:type channel: str
:param channel: Channel name to check for
:type debug: int
:param debug: Debug level, if > 1, will output what it it working on.
"""
if debug > 1:
print('Checking ' + wavfile)
st = read(wavfile, headonly=True)
for tr in st:
if tr.stats.station == station and tr.stats.channel == channel:
return wavfile | python | def _check_data(wavfile, station, channel, debug=0):
"""
Inner loop for parallel checks.
:type wavfile: str
:param wavfile: Wavefile path name to look in.
:type station: str
:param station: Channel name to check for
:type channel: str
:param channel: Channel name to check for
:type debug: int
:param debug: Debug level, if > 1, will output what it it working on.
"""
if debug > 1:
print('Checking ' + wavfile)
st = read(wavfile, headonly=True)
for tr in st:
if tr.stats.station == station and tr.stats.channel == channel:
return wavfile | [
"def",
"_check_data",
"(",
"wavfile",
",",
"station",
",",
"channel",
",",
"debug",
"=",
"0",
")",
":",
"if",
"debug",
">",
"1",
":",
"print",
"(",
"'Checking '",
"+",
"wavfile",
")",
"st",
"=",
"read",
"(",
"wavfile",
",",
"headonly",
"=",
"True",
... | Inner loop for parallel checks.
:type wavfile: str
:param wavfile: Wavefile path name to look in.
:type station: str
:param station: Channel name to check for
:type channel: str
:param channel: Channel name to check for
:type debug: int
:param debug: Debug level, if > 1, will output what it it working on. | [
"Inner",
"loop",
"for",
"parallel",
"checks",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L162-L180 | train | 203,271 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/archive_read.py | _check_available_data | def _check_available_data(archive, arc_type, day):
"""
Function to check what stations are available in the archive for a given \
day.
:type archive: str
:param archive: The archive source
:type arc_type: str
:param arc_type: The type of archive, can be:
:type day: datetime.date
:param day: Date to retrieve data for
:returns: list of tuples of (station, channel) as available.
.. note:: Currently the seishub options are untested.
"""
available_stations = []
if arc_type.lower() == 'day_vols':
wavefiles = glob.glob(os.path.join(archive, day.strftime('Y%Y'),
day.strftime('R%j.01'), '*'))
for wavefile in wavefiles:
header = read(wavefile, headonly=True)
available_stations.append((header[0].stats.station,
header[0].stats.channel))
elif arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st = client.get_previews(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400)
for tr in st:
available_stations.append((tr.stats.station, tr.stats.channel))
elif arc_type.lower() == 'fdsn':
client = FDSNClient(archive)
inventory = client.get_stations(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400,
level='channel')
for network in inventory:
for station in network:
for channel in station:
available_stations.append((station.code,
channel.code))
return available_stations | python | def _check_available_data(archive, arc_type, day):
"""
Function to check what stations are available in the archive for a given \
day.
:type archive: str
:param archive: The archive source
:type arc_type: str
:param arc_type: The type of archive, can be:
:type day: datetime.date
:param day: Date to retrieve data for
:returns: list of tuples of (station, channel) as available.
.. note:: Currently the seishub options are untested.
"""
available_stations = []
if arc_type.lower() == 'day_vols':
wavefiles = glob.glob(os.path.join(archive, day.strftime('Y%Y'),
day.strftime('R%j.01'), '*'))
for wavefile in wavefiles:
header = read(wavefile, headonly=True)
available_stations.append((header[0].stats.station,
header[0].stats.channel))
elif arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st = client.get_previews(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400)
for tr in st:
available_stations.append((tr.stats.station, tr.stats.channel))
elif arc_type.lower() == 'fdsn':
client = FDSNClient(archive)
inventory = client.get_stations(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400,
level='channel')
for network in inventory:
for station in network:
for channel in station:
available_stations.append((station.code,
channel.code))
return available_stations | [
"def",
"_check_available_data",
"(",
"archive",
",",
"arc_type",
",",
"day",
")",
":",
"available_stations",
"=",
"[",
"]",
"if",
"arc_type",
".",
"lower",
"(",
")",
"==",
"'day_vols'",
":",
"wavefiles",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
... | Function to check what stations are available in the archive for a given \
day.
:type archive: str
:param archive: The archive source
:type arc_type: str
:param arc_type: The type of archive, can be:
:type day: datetime.date
:param day: Date to retrieve data for
:returns: list of tuples of (station, channel) as available.
.. note:: Currently the seishub options are untested. | [
"Function",
"to",
"check",
"what",
"stations",
"are",
"available",
"in",
"the",
"archive",
"for",
"a",
"given",
"\\",
"day",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L183-L224 | train | 203,272 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/seismo_logs.py | rt_time_log | def rt_time_log(logfile, startdate):
"""
Open and read reftek raw log-file.
Function to open and read a log-file as written by a RefTek RT130
datalogger. The information within is then scanned for timing errors
above the threshold.
:type logfile: str
:param logfile: The logfile to look in
:type startdate: datetime.date
:param startdate: The start of the file as a date - files contain timing \
and the julian day, but not the year.
:returns: List of tuple of (:class:`datetime.datetime`, float) as time \
stamps and phase error.
"""
if os.name == 'nt':
f = io.open(logfile, 'rb')
else:
f = io.open(logfile, 'rb')
phase_err = []
lock = []
# Extract all the phase errors
for line_binary in f:
try:
line = line_binary.decode("utf8", "ignore")
except UnicodeDecodeError:
warnings.warn('Cannot decode line, skipping')
continue
if re.search("INTERNAL CLOCK PHASE ERROR", line):
match = re.search("INTERNAL CLOCK PHASE ERROR", line)
d_start = match.start() - 13
phase_err.append((dt.datetime.strptime(str(startdate.year) +
':' +
line[d_start:d_start + 12],
'%Y:%j:%H:%M:%S'),
float(line.rstrip().split()[-2]) *
0.000001))
elif re.search("EXTERNAL CLOCK POWER IS TURNED OFF", line):
match = re.search("EXTERNAL CLOCK POWER IS TURNED OFF", line)
d_start = match.start() - 13
lock.append((dt.datetime.strptime(str(startdate.year) +
':' + line[d_start:d_start + 12],
'%Y:%j:%H:%M:%S'),
999))
if len(phase_err) == 0 and len(lock) > 0:
phase_err = lock
f.close()
return phase_err | python | def rt_time_log(logfile, startdate):
"""
Open and read reftek raw log-file.
Function to open and read a log-file as written by a RefTek RT130
datalogger. The information within is then scanned for timing errors
above the threshold.
:type logfile: str
:param logfile: The logfile to look in
:type startdate: datetime.date
:param startdate: The start of the file as a date - files contain timing \
and the julian day, but not the year.
:returns: List of tuple of (:class:`datetime.datetime`, float) as time \
stamps and phase error.
"""
if os.name == 'nt':
f = io.open(logfile, 'rb')
else:
f = io.open(logfile, 'rb')
phase_err = []
lock = []
# Extract all the phase errors
for line_binary in f:
try:
line = line_binary.decode("utf8", "ignore")
except UnicodeDecodeError:
warnings.warn('Cannot decode line, skipping')
continue
if re.search("INTERNAL CLOCK PHASE ERROR", line):
match = re.search("INTERNAL CLOCK PHASE ERROR", line)
d_start = match.start() - 13
phase_err.append((dt.datetime.strptime(str(startdate.year) +
':' +
line[d_start:d_start + 12],
'%Y:%j:%H:%M:%S'),
float(line.rstrip().split()[-2]) *
0.000001))
elif re.search("EXTERNAL CLOCK POWER IS TURNED OFF", line):
match = re.search("EXTERNAL CLOCK POWER IS TURNED OFF", line)
d_start = match.start() - 13
lock.append((dt.datetime.strptime(str(startdate.year) +
':' + line[d_start:d_start + 12],
'%Y:%j:%H:%M:%S'),
999))
if len(phase_err) == 0 and len(lock) > 0:
phase_err = lock
f.close()
return phase_err | [
"def",
"rt_time_log",
"(",
"logfile",
",",
"startdate",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"f",
"=",
"io",
".",
"open",
"(",
"logfile",
",",
"'rb'",
")",
"else",
":",
"f",
"=",
"io",
".",
"open",
"(",
"logfile",
",",
"'rb'",
... | Open and read reftek raw log-file.
Function to open and read a log-file as written by a RefTek RT130
datalogger. The information within is then scanned for timing errors
above the threshold.
:type logfile: str
:param logfile: The logfile to look in
:type startdate: datetime.date
:param startdate: The start of the file as a date - files contain timing \
and the julian day, but not the year.
:returns: List of tuple of (:class:`datetime.datetime`, float) as time \
stamps and phase error. | [
"Open",
"and",
"read",
"reftek",
"raw",
"log",
"-",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/seismo_logs.py#L32-L81 | train | 203,273 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/seismo_logs.py | rt_location_log | def rt_location_log(logfile):
"""
Extract location information from a RefTek raw log-file.
Function to read a specific RefTek RT130 log-file and find all location
information.
:type logfile: str
:param logfile: The logfile to look in
:returns: list of tuples of lat, lon, elevation in decimal degrees and km.
:rtype: list
"""
if os.name == 'nt':
f = open(logfile, 'rb')
else:
f = open(logfile, 'rb')
locations = []
for line_binary in f:
try:
line = line_binary.decode("utf8", "ignore")
except UnicodeDecodeError:
warnings.warn('Cannot decode line, skipping')
print(line_binary)
continue
match = re.search("GPS: POSITION:", line)
if match:
# Line is of form:
# jjj:hh:mm:ss GPS: POSITION: xDD:MM:SS.SS xDDD:MM:SS.SS xMMMMMMM
loc = line[match.end() + 1:].rstrip().split(' ')
lat_sign = loc[0][0]
lat = loc[0][1:].split(':')
lat = int(lat[0]) + (int(lat[1]) / 60.0) + (float(lat[2]) / 3600.0)
if lat_sign == 'S':
lat *= -1
lon_sign = loc[1][0]
lon = loc[1][1:].split(':')
lon = int(lon[0]) + (int(lon[1]) / 60.0) + (float(lon[2]) / 3600.0)
if lon_sign == 'W':
lon *= -1
elev_sign = loc[2][0]
elev_unit = loc[2][-1]
if not elev_unit == 'M':
raise NotImplementedError('Elevation is not in M: unit=' +
elev_unit)
elev = int(loc[2][1:-1])
if elev_sign == '-':
elev *= -1
# Convert to km
elev /= 1000
locations.append((lat, lon, elev))
f.close()
return locations | python | def rt_location_log(logfile):
"""
Extract location information from a RefTek raw log-file.
Function to read a specific RefTek RT130 log-file and find all location
information.
:type logfile: str
:param logfile: The logfile to look in
:returns: list of tuples of lat, lon, elevation in decimal degrees and km.
:rtype: list
"""
if os.name == 'nt':
f = open(logfile, 'rb')
else:
f = open(logfile, 'rb')
locations = []
for line_binary in f:
try:
line = line_binary.decode("utf8", "ignore")
except UnicodeDecodeError:
warnings.warn('Cannot decode line, skipping')
print(line_binary)
continue
match = re.search("GPS: POSITION:", line)
if match:
# Line is of form:
# jjj:hh:mm:ss GPS: POSITION: xDD:MM:SS.SS xDDD:MM:SS.SS xMMMMMMM
loc = line[match.end() + 1:].rstrip().split(' ')
lat_sign = loc[0][0]
lat = loc[0][1:].split(':')
lat = int(lat[0]) + (int(lat[1]) / 60.0) + (float(lat[2]) / 3600.0)
if lat_sign == 'S':
lat *= -1
lon_sign = loc[1][0]
lon = loc[1][1:].split(':')
lon = int(lon[0]) + (int(lon[1]) / 60.0) + (float(lon[2]) / 3600.0)
if lon_sign == 'W':
lon *= -1
elev_sign = loc[2][0]
elev_unit = loc[2][-1]
if not elev_unit == 'M':
raise NotImplementedError('Elevation is not in M: unit=' +
elev_unit)
elev = int(loc[2][1:-1])
if elev_sign == '-':
elev *= -1
# Convert to km
elev /= 1000
locations.append((lat, lon, elev))
f.close()
return locations | [
"def",
"rt_location_log",
"(",
"logfile",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"f",
"=",
"open",
"(",
"logfile",
",",
"'rb'",
")",
"else",
":",
"f",
"=",
"open",
"(",
"logfile",
",",
"'rb'",
")",
"locations",
"=",
"[",
"]",
"for"... | Extract location information from a RefTek raw log-file.
Function to read a specific RefTek RT130 log-file and find all location
information.
:type logfile: str
:param logfile: The logfile to look in
:returns: list of tuples of lat, lon, elevation in decimal degrees and km.
:rtype: list | [
"Extract",
"location",
"information",
"from",
"a",
"RefTek",
"raw",
"log",
"-",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/seismo_logs.py#L84-L136 | train | 203,274 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/seismo_logs.py | flag_time_err | def flag_time_err(phase_err, time_thresh=0.02):
"""
Find large time errors in list.
Scan through a list of tuples of time stamps and phase errors
and return a list of time stamps with timing errors above a threshold.
.. note::
This becomes important for networks cross-correlations, where
if timing information is uncertain at one site, the relative arrival
time (lag) will be incorrect, which will degrade the cross-correlation
sum.
:type phase_err: list
:param phase_err: List of Tuple of float, datetime.datetime
:type time_thresh: float
:param time_thresh: Threshold to declare a timing error for
:returns: List of :class:`datetime.datetime` when timing is questionable.
"""
time_err = []
for stamp in phase_err:
if abs(stamp[1]) > time_thresh:
time_err.append(stamp[0])
return time_err | python | def flag_time_err(phase_err, time_thresh=0.02):
"""
Find large time errors in list.
Scan through a list of tuples of time stamps and phase errors
and return a list of time stamps with timing errors above a threshold.
.. note::
This becomes important for networks cross-correlations, where
if timing information is uncertain at one site, the relative arrival
time (lag) will be incorrect, which will degrade the cross-correlation
sum.
:type phase_err: list
:param phase_err: List of Tuple of float, datetime.datetime
:type time_thresh: float
:param time_thresh: Threshold to declare a timing error for
:returns: List of :class:`datetime.datetime` when timing is questionable.
"""
time_err = []
for stamp in phase_err:
if abs(stamp[1]) > time_thresh:
time_err.append(stamp[0])
return time_err | [
"def",
"flag_time_err",
"(",
"phase_err",
",",
"time_thresh",
"=",
"0.02",
")",
":",
"time_err",
"=",
"[",
"]",
"for",
"stamp",
"in",
"phase_err",
":",
"if",
"abs",
"(",
"stamp",
"[",
"1",
"]",
")",
">",
"time_thresh",
":",
"time_err",
".",
"append",
... | Find large time errors in list.
Scan through a list of tuples of time stamps and phase errors
and return a list of time stamps with timing errors above a threshold.
.. note::
This becomes important for networks cross-correlations, where
if timing information is uncertain at one site, the relative arrival
time (lag) will be incorrect, which will degrade the cross-correlation
sum.
:type phase_err: list
:param phase_err: List of Tuple of float, datetime.datetime
:type time_thresh: float
:param time_thresh: Threshold to declare a timing error for
:returns: List of :class:`datetime.datetime` when timing is questionable. | [
"Find",
"large",
"time",
"errors",
"in",
"list",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/seismo_logs.py#L139-L163 | train | 203,275 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/seismo_logs.py | check_all_logs | def check_all_logs(directory, time_thresh):
"""
Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list
"""
log_files = glob.glob(directory + '/*/0/000000000_00000000')
print('I have ' + str(len(log_files)) + ' log files to scan')
total_phase_errs = []
for i, log_file in enumerate(log_files):
startdate = dt.datetime.strptime(log_file.split('/')[-4][0:7],
'%Y%j').date()
total_phase_errs += rt_time_log(log_file, startdate)
sys.stdout.write("\r" + str(float(i) / len(log_files) * 100) +
"% \r")
sys.stdout.flush()
time_errs = flag_time_err(total_phase_errs, time_thresh)
time_errs.sort()
return time_errs, total_phase_errs | python | def check_all_logs(directory, time_thresh):
"""
Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list
"""
log_files = glob.glob(directory + '/*/0/000000000_00000000')
print('I have ' + str(len(log_files)) + ' log files to scan')
total_phase_errs = []
for i, log_file in enumerate(log_files):
startdate = dt.datetime.strptime(log_file.split('/')[-4][0:7],
'%Y%j').date()
total_phase_errs += rt_time_log(log_file, startdate)
sys.stdout.write("\r" + str(float(i) / len(log_files) * 100) +
"% \r")
sys.stdout.flush()
time_errs = flag_time_err(total_phase_errs, time_thresh)
time_errs.sort()
return time_errs, total_phase_errs | [
"def",
"check_all_logs",
"(",
"directory",
",",
"time_thresh",
")",
":",
"log_files",
"=",
"glob",
".",
"glob",
"(",
"directory",
"+",
"'/*/0/000000000_00000000'",
")",
"print",
"(",
"'I have '",
"+",
"str",
"(",
"len",
"(",
"log_files",
")",
")",
"+",
"' ... | Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list | [
"Check",
"all",
"the",
"log",
"-",
"files",
"in",
"a",
"directory",
"tree",
"for",
"timing",
"errors",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/seismo_logs.py#L166-L191 | train | 203,276 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_to_dd.py | _cc_round | def _cc_round(num, dp):
"""
Convenience function to take a float and round it to dp padding with zeros
to return a string
:type num: float
:param num: Number to round
:type dp: int
:param dp: Number of decimal places to round to.
:returns: str
>>> print(_cc_round(0.25364, 2))
0.25
"""
num = round(num, dp)
num = '{0:.{1}f}'.format(num, dp)
return num | python | def _cc_round(num, dp):
"""
Convenience function to take a float and round it to dp padding with zeros
to return a string
:type num: float
:param num: Number to round
:type dp: int
:param dp: Number of decimal places to round to.
:returns: str
>>> print(_cc_round(0.25364, 2))
0.25
"""
num = round(num, dp)
num = '{0:.{1}f}'.format(num, dp)
return num | [
"def",
"_cc_round",
"(",
"num",
",",
"dp",
")",
":",
"num",
"=",
"round",
"(",
"num",
",",
"dp",
")",
"num",
"=",
"'{0:.{1}f}'",
".",
"format",
"(",
"num",
",",
"dp",
")",
"return",
"num"
] | Convenience function to take a float and round it to dp padding with zeros
to return a string
:type num: float
:param num: Number to round
:type dp: int
:param dp: Number of decimal places to round to.
:returns: str
>>> print(_cc_round(0.25364, 2))
0.25 | [
"Convenience",
"function",
"to",
"take",
"a",
"float",
"and",
"round",
"it",
"to",
"dp",
"padding",
"with",
"zeros",
"to",
"return",
"a",
"string"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L57-L74 | train | 203,277 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_to_dd.py | readSTATION0 | def readSTATION0(path, stations):
"""
Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
"""
stalist = []
f = open(path + '/STATION0.HYP', 'r')
for line in f:
if line[1:6].strip() in stations:
station = line[1:6].strip()
lat = line[6:14] # Format is either ddmm.mmS/N or ddmm(.)mmmS/N
if lat[-1] == 'S':
NS = -1
else:
NS = 1
if lat[4] == '.':
lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
else:
lat = (int(lat[0:2]) + float(lat[2:4] + '.' + lat[4:-1]) /
60) * NS
lon = line[14:23]
if lon[-1] == 'W':
EW = -1
else:
EW = 1
if lon[5] == '.':
lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
else:
lon = (int(lon[0:3]) + float(lon[3:5] + '.' + lon[5:-1]) /
60) * EW
elev = float(line[23:-1].strip())
# Note, negative altitude can be indicated in 1st column
if line[0] == '-':
elev *= -1
stalist.append((station, lat, lon, elev))
f.close()
f = open('station.dat', 'w')
for sta in stalist:
line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
_cc_round(sta[2], 4).ljust(10),
_cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
f.write(line)
f.close()
return stalist | python | def readSTATION0(path, stations):
"""
Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
"""
stalist = []
f = open(path + '/STATION0.HYP', 'r')
for line in f:
if line[1:6].strip() in stations:
station = line[1:6].strip()
lat = line[6:14] # Format is either ddmm.mmS/N or ddmm(.)mmmS/N
if lat[-1] == 'S':
NS = -1
else:
NS = 1
if lat[4] == '.':
lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
else:
lat = (int(lat[0:2]) + float(lat[2:4] + '.' + lat[4:-1]) /
60) * NS
lon = line[14:23]
if lon[-1] == 'W':
EW = -1
else:
EW = 1
if lon[5] == '.':
lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
else:
lon = (int(lon[0:3]) + float(lon[3:5] + '.' + lon[5:-1]) /
60) * EW
elev = float(line[23:-1].strip())
# Note, negative altitude can be indicated in 1st column
if line[0] == '-':
elev *= -1
stalist.append((station, lat, lon, elev))
f.close()
f = open('station.dat', 'w')
for sta in stalist:
line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
_cc_round(sta[2], 4).ljust(10),
_cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
f.write(line)
f.close()
return stalist | [
"def",
"readSTATION0",
"(",
"path",
",",
"stations",
")",
":",
"stalist",
"=",
"[",
"]",
"f",
"=",
"open",
"(",
"path",
"+",
"'/STATION0.HYP'",
",",
"'r'",
")",
"for",
"line",
"in",
"f",
":",
"if",
"line",
"[",
"1",
":",
"6",
"]",
".",
"strip",
... | Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)] | [
"Read",
"a",
"Seisan",
"STATION0",
".",
"HYP",
"file",
"on",
"the",
"path",
"given",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L130-L191 | train | 203,278 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_to_dd.py | sfiles_to_event | def sfiles_to_event(sfile_list):
"""
Write an event.dat file from a list of Seisan events
:type sfile_list: list
:param sfile_list: List of s-files to sort and put into the database
:returns: List of tuples of event ID (int) and Sfile name
"""
event_list = []
sort_list = [(readheader(sfile).origins[0].time, sfile)
for sfile in sfile_list]
sort_list.sort(key=lambda tup: tup[0])
sfile_list = [sfile[1] for sfile in sort_list]
catalog = Catalog()
for i, sfile in enumerate(sfile_list):
event_list.append((i, sfile))
catalog.append(readheader(sfile))
# Hand off to sister function
write_event(catalog)
return event_list | python | def sfiles_to_event(sfile_list):
"""
Write an event.dat file from a list of Seisan events
:type sfile_list: list
:param sfile_list: List of s-files to sort and put into the database
:returns: List of tuples of event ID (int) and Sfile name
"""
event_list = []
sort_list = [(readheader(sfile).origins[0].time, sfile)
for sfile in sfile_list]
sort_list.sort(key=lambda tup: tup[0])
sfile_list = [sfile[1] for sfile in sort_list]
catalog = Catalog()
for i, sfile in enumerate(sfile_list):
event_list.append((i, sfile))
catalog.append(readheader(sfile))
# Hand off to sister function
write_event(catalog)
return event_list | [
"def",
"sfiles_to_event",
"(",
"sfile_list",
")",
":",
"event_list",
"=",
"[",
"]",
"sort_list",
"=",
"[",
"(",
"readheader",
"(",
"sfile",
")",
".",
"origins",
"[",
"0",
"]",
".",
"time",
",",
"sfile",
")",
"for",
"sfile",
"in",
"sfile_list",
"]",
"... | Write an event.dat file from a list of Seisan events
:type sfile_list: list
:param sfile_list: List of s-files to sort and put into the database
:returns: List of tuples of event ID (int) and Sfile name | [
"Write",
"an",
"event",
".",
"dat",
"file",
"from",
"a",
"list",
"of",
"Seisan",
"events"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L194-L214 | train | 203,279 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_to_dd.py | write_event | def write_event(catalog):
"""
Write obspy.core.event.Catalog to a hypoDD format event.dat file.
:type catalog: obspy.core.event.Catalog
:param catalog: A catalog of obspy events.
"""
f = open('event.dat', 'w')
for i, event in enumerate(catalog):
try:
evinfo = event.origins[0]
except IndexError:
raise IOError('No origin')
try:
Mag_1 = event.magnitudes[0].mag
except IndexError:
Mag_1 = 0.0
try:
t_RMS = event.origins[0].quality['standard_error']
except AttributeError:
print('No time residual in header')
t_RMS = 0.0
f.write(str(evinfo.time.year) + str(evinfo.time.month).zfill(2) +
str(evinfo.time.day).zfill(2) + ' ' +
str(evinfo.time.hour).rjust(2) +
str(evinfo.time.minute).zfill(2) +
str(evinfo.time.second).zfill(2) +
str(evinfo.time.microsecond)[0:2].zfill(2) + ' ' +
str(evinfo.latitude).ljust(8, str('0')) + ' ' +
str(evinfo.longitude).ljust(8, str('0')) + ' ' +
str(evinfo.depth / 1000).rjust(7).ljust(9, str('0')) + ' ' +
str(Mag_1) + ' 0.00 0.00 ' +
str(t_RMS).ljust(4, str('0')) +
str(i).rjust(11) + '\n')
f.close()
return | python | def write_event(catalog):
"""
Write obspy.core.event.Catalog to a hypoDD format event.dat file.
:type catalog: obspy.core.event.Catalog
:param catalog: A catalog of obspy events.
"""
f = open('event.dat', 'w')
for i, event in enumerate(catalog):
try:
evinfo = event.origins[0]
except IndexError:
raise IOError('No origin')
try:
Mag_1 = event.magnitudes[0].mag
except IndexError:
Mag_1 = 0.0
try:
t_RMS = event.origins[0].quality['standard_error']
except AttributeError:
print('No time residual in header')
t_RMS = 0.0
f.write(str(evinfo.time.year) + str(evinfo.time.month).zfill(2) +
str(evinfo.time.day).zfill(2) + ' ' +
str(evinfo.time.hour).rjust(2) +
str(evinfo.time.minute).zfill(2) +
str(evinfo.time.second).zfill(2) +
str(evinfo.time.microsecond)[0:2].zfill(2) + ' ' +
str(evinfo.latitude).ljust(8, str('0')) + ' ' +
str(evinfo.longitude).ljust(8, str('0')) + ' ' +
str(evinfo.depth / 1000).rjust(7).ljust(9, str('0')) + ' ' +
str(Mag_1) + ' 0.00 0.00 ' +
str(t_RMS).ljust(4, str('0')) +
str(i).rjust(11) + '\n')
f.close()
return | [
"def",
"write_event",
"(",
"catalog",
")",
":",
"f",
"=",
"open",
"(",
"'event.dat'",
",",
"'w'",
")",
"for",
"i",
",",
"event",
"in",
"enumerate",
"(",
"catalog",
")",
":",
"try",
":",
"evinfo",
"=",
"event",
".",
"origins",
"[",
"0",
"]",
"except... | Write obspy.core.event.Catalog to a hypoDD format event.dat file.
:type catalog: obspy.core.event.Catalog
:param catalog: A catalog of obspy events. | [
"Write",
"obspy",
".",
"core",
".",
"event",
".",
"Catalog",
"to",
"a",
"hypoDD",
"format",
"event",
".",
"dat",
"file",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L217-L253 | train | 203,280 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_to_dd.py | read_phase | def read_phase(ph_file):
"""
Read hypoDD phase files into Obspy catalog class.
:type ph_file: str
:param ph_file: Phase file to read event info from.
:returns: Catalog of events from file.
:rtype: :class:`obspy.core.event.Catalog`
>>> from obspy.core.event.catalog import Catalog
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> catalog = read_phase(TEST_PATH + '/tunnel.phase')
>>> isinstance(catalog, Catalog)
True
"""
ph_catalog = Catalog()
f = open(ph_file, 'r')
# Topline of each event is marked by # in position 0
for line in f:
if line[0] == '#':
if 'event_text' not in locals():
event_text = {'header': line.rstrip(),
'picks': []}
else:
ph_catalog.append(_phase_to_event(event_text))
event_text = {'header': line.rstrip(),
'picks': []}
else:
event_text['picks'].append(line.rstrip())
ph_catalog.append(_phase_to_event(event_text))
return ph_catalog | python | def read_phase(ph_file):
"""
Read hypoDD phase files into Obspy catalog class.
:type ph_file: str
:param ph_file: Phase file to read event info from.
:returns: Catalog of events from file.
:rtype: :class:`obspy.core.event.Catalog`
>>> from obspy.core.event.catalog import Catalog
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> catalog = read_phase(TEST_PATH + '/tunnel.phase')
>>> isinstance(catalog, Catalog)
True
"""
ph_catalog = Catalog()
f = open(ph_file, 'r')
# Topline of each event is marked by # in position 0
for line in f:
if line[0] == '#':
if 'event_text' not in locals():
event_text = {'header': line.rstrip(),
'picks': []}
else:
ph_catalog.append(_phase_to_event(event_text))
event_text = {'header': line.rstrip(),
'picks': []}
else:
event_text['picks'].append(line.rstrip())
ph_catalog.append(_phase_to_event(event_text))
return ph_catalog | [
"def",
"read_phase",
"(",
"ph_file",
")",
":",
"ph_catalog",
"=",
"Catalog",
"(",
")",
"f",
"=",
"open",
"(",
"ph_file",
",",
"'r'",
")",
"# Topline of each event is marked by # in position 0",
"for",
"line",
"in",
"f",
":",
"if",
"line",
"[",
"0",
"]",
"=... | Read hypoDD phase files into Obspy catalog class.
:type ph_file: str
:param ph_file: Phase file to read event info from.
:returns: Catalog of events from file.
:rtype: :class:`obspy.core.event.Catalog`
>>> from obspy.core.event.catalog import Catalog
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> catalog = read_phase(TEST_PATH + '/tunnel.phase')
>>> isinstance(catalog, Catalog)
True | [
"Read",
"hypoDD",
"phase",
"files",
"into",
"Obspy",
"catalog",
"class",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L633-L667 | train | 203,281 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/catalog_to_dd.py | _phase_to_event | def _phase_to_event(event_text):
"""
Function to convert the text for one event in hypoDD phase format to \
event object.
:type event_text: dict
:param event_text: dict of two elements, header and picks, header is a \
str, picks is a list of str.
:returns: obspy.core.event.Event
"""
ph_event = Event()
# Extract info from header line
# YR, MO, DY, HR, MN, SC, LAT, LON, DEP, MAG, EH, EZ, RMS, ID
header = event_text['header'].split()
ph_event.origins.append(Origin())
ph_event.origins[0].time =\
UTCDateTime(year=int(header[1]), month=int(header[2]),
day=int(header[3]), hour=int(header[4]),
minute=int(header[5]), second=int(header[6].split('.')[0]),
microsecond=int(float(('0.' + header[6].split('.')[1])) *
1000000))
ph_event.origins[0].latitude = float(header[7])
ph_event.origins[0].longitude = float(header[8])
ph_event.origins[0].depth = float(header[9]) * 1000
ph_event.origins[0].quality = OriginQuality(
standard_error=float(header[13]))
ph_event.magnitudes.append(Magnitude())
ph_event.magnitudes[0].mag = float(header[10])
ph_event.magnitudes[0].magnitude_type = 'M'
# Extract arrival info from picks!
for i, pick_line in enumerate(event_text['picks']):
pick = pick_line.split()
_waveform_id = WaveformStreamID(station_code=pick[0])
pick_time = ph_event.origins[0].time + float(pick[1])
ph_event.picks.append(Pick(waveform_id=_waveform_id,
phase_hint=pick[3],
time=pick_time))
ph_event.origins[0].arrivals.append(Arrival(phase=ph_event.picks[i],
pick_id=ph_event.picks[i].
resource_id))
ph_event.origins[0].arrivals[i].time_weight = float(pick[2])
return ph_event | python | def _phase_to_event(event_text):
"""
Function to convert the text for one event in hypoDD phase format to \
event object.
:type event_text: dict
:param event_text: dict of two elements, header and picks, header is a \
str, picks is a list of str.
:returns: obspy.core.event.Event
"""
ph_event = Event()
# Extract info from header line
# YR, MO, DY, HR, MN, SC, LAT, LON, DEP, MAG, EH, EZ, RMS, ID
header = event_text['header'].split()
ph_event.origins.append(Origin())
ph_event.origins[0].time =\
UTCDateTime(year=int(header[1]), month=int(header[2]),
day=int(header[3]), hour=int(header[4]),
minute=int(header[5]), second=int(header[6].split('.')[0]),
microsecond=int(float(('0.' + header[6].split('.')[1])) *
1000000))
ph_event.origins[0].latitude = float(header[7])
ph_event.origins[0].longitude = float(header[8])
ph_event.origins[0].depth = float(header[9]) * 1000
ph_event.origins[0].quality = OriginQuality(
standard_error=float(header[13]))
ph_event.magnitudes.append(Magnitude())
ph_event.magnitudes[0].mag = float(header[10])
ph_event.magnitudes[0].magnitude_type = 'M'
# Extract arrival info from picks!
for i, pick_line in enumerate(event_text['picks']):
pick = pick_line.split()
_waveform_id = WaveformStreamID(station_code=pick[0])
pick_time = ph_event.origins[0].time + float(pick[1])
ph_event.picks.append(Pick(waveform_id=_waveform_id,
phase_hint=pick[3],
time=pick_time))
ph_event.origins[0].arrivals.append(Arrival(phase=ph_event.picks[i],
pick_id=ph_event.picks[i].
resource_id))
ph_event.origins[0].arrivals[i].time_weight = float(pick[2])
return ph_event | [
"def",
"_phase_to_event",
"(",
"event_text",
")",
":",
"ph_event",
"=",
"Event",
"(",
")",
"# Extract info from header line",
"# YR, MO, DY, HR, MN, SC, LAT, LON, DEP, MAG, EH, EZ, RMS, ID",
"header",
"=",
"event_text",
"[",
"'header'",
"]",
".",
"split",
"(",
")",
"ph_... | Function to convert the text for one event in hypoDD phase format to \
event object.
:type event_text: dict
:param event_text: dict of two elements, header and picks, header is a \
str, picks is a list of str.
:returns: obspy.core.event.Event | [
"Function",
"to",
"convert",
"the",
"text",
"for",
"one",
"event",
"in",
"hypoDD",
"phase",
"format",
"to",
"\\",
"event",
"object",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L670-L712 | train | 203,282 |
eqcorrscan/EQcorrscan | eqcorrscan/core/template_gen.py | extract_from_stack | def extract_from_stack(stack, template, length, pre_pick, pre_pad,
Z_include=False, pre_processed=True, samp_rate=None,
lowcut=None, highcut=None, filt_order=3):
"""
Extract a multiplexed template from a stack of detections.
Function to extract a new template from a stack of previous detections.
Requires the stack, the template used to make the detections for the \
stack, and we need to know if the stack has been pre-processed.
:type stack: obspy.core.stream.Stream
:param stack: Waveform stack from detections. Can be of any length and \
can have delays already included, or not.
:type template: obspy.core.stream.Stream
:param template: Template used to make the detections in the stack. Will \
use the delays of this for the new template.
:type length: float
:param length: Length of new template in seconds
:type pre_pick: float
:param pre_pick: Extract additional data before the detection, seconds
:type pre_pad: float
:param pre_pad: Pad used in seconds when extracting the data, e.g. the \
time before the detection extracted. If using \
clustering.extract_detections this half the length of the extracted \
waveform.
:type Z_include: bool
:param Z_include: If True will include any Z-channels even if there is \
no template for this channel, as long as there is a template for this \
station at a different channel. If this is False and Z channels are \
included in the template Z channels will be included in the \
new_template anyway.
:type pre_processed: bool
:param pre_processed: Have the data been pre-processed, if True (default) \
then we will only cut the data here.
:type samp_rate: float
:param samp_rate: If pre_processed=False then this is required, desired \
sampling rate in Hz, defaults to False.
:type lowcut: float
:param lowcut: If pre_processed=False then this is required, lowcut in \
Hz, defaults to False.
:type highcut: float
:param highcut: If pre_processed=False then this is required, highcut in \
Hz, defaults to False
:type filt_order: int
:param filt_order: If pre_processed=False then this is required, filter \
order, defaults to False
:returns: Newly cut template.
:rtype: :class:`obspy.core.stream.Stream`
"""
new_template = stack.copy()
# Copy the data before we trim it to keep the stack safe
# Get the earliest time in the template as this is when the detection is
# taken.
mintime = min([tr.stats.starttime for tr in template])
# Generate a list of tuples of (station, channel, delay) with delay in
# seconds
delays = [(tr.stats.station, tr.stats.channel[-1],
tr.stats.starttime - mintime) for tr in template]
# Process the data if necessary
if not pre_processed:
new_template = pre_processing.shortproc(
st=new_template, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, debug=0)
# Loop through the stack and trim!
out = Stream()
for tr in new_template:
# Find the matching delay
delay = [d[2] for d in delays if d[0] == tr.stats.station and
d[1] == tr.stats.channel[-1]]
if Z_include and len(delay) == 0:
delay = [d[2] for d in delays if d[0] == tr.stats.station]
if len(delay) == 0:
debug_print("No matching template channel found for stack channel"
" {0}.{1}".format(tr.stats.station, tr.stats.channel),
2, 3)
new_template.remove(tr)
else:
for d in delay:
out += tr.copy().trim(
starttime=tr.stats.starttime + d + pre_pad - pre_pick,
endtime=tr.stats.starttime + d + pre_pad + length -
pre_pick)
return out | python | def extract_from_stack(stack, template, length, pre_pick, pre_pad,
Z_include=False, pre_processed=True, samp_rate=None,
lowcut=None, highcut=None, filt_order=3):
"""
Extract a multiplexed template from a stack of detections.
Function to extract a new template from a stack of previous detections.
Requires the stack, the template used to make the detections for the \
stack, and we need to know if the stack has been pre-processed.
:type stack: obspy.core.stream.Stream
:param stack: Waveform stack from detections. Can be of any length and \
can have delays already included, or not.
:type template: obspy.core.stream.Stream
:param template: Template used to make the detections in the stack. Will \
use the delays of this for the new template.
:type length: float
:param length: Length of new template in seconds
:type pre_pick: float
:param pre_pick: Extract additional data before the detection, seconds
:type pre_pad: float
:param pre_pad: Pad used in seconds when extracting the data, e.g. the \
time before the detection extracted. If using \
clustering.extract_detections this half the length of the extracted \
waveform.
:type Z_include: bool
:param Z_include: If True will include any Z-channels even if there is \
no template for this channel, as long as there is a template for this \
station at a different channel. If this is False and Z channels are \
included in the template Z channels will be included in the \
new_template anyway.
:type pre_processed: bool
:param pre_processed: Have the data been pre-processed, if True (default) \
then we will only cut the data here.
:type samp_rate: float
:param samp_rate: If pre_processed=False then this is required, desired \
sampling rate in Hz, defaults to False.
:type lowcut: float
:param lowcut: If pre_processed=False then this is required, lowcut in \
Hz, defaults to False.
:type highcut: float
:param highcut: If pre_processed=False then this is required, highcut in \
Hz, defaults to False
:type filt_order: int
:param filt_order: If pre_processed=False then this is required, filter \
order, defaults to False
:returns: Newly cut template.
:rtype: :class:`obspy.core.stream.Stream`
"""
new_template = stack.copy()
# Copy the data before we trim it to keep the stack safe
# Get the earliest time in the template as this is when the detection is
# taken.
mintime = min([tr.stats.starttime for tr in template])
# Generate a list of tuples of (station, channel, delay) with delay in
# seconds
delays = [(tr.stats.station, tr.stats.channel[-1],
tr.stats.starttime - mintime) for tr in template]
# Process the data if necessary
if not pre_processed:
new_template = pre_processing.shortproc(
st=new_template, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, debug=0)
# Loop through the stack and trim!
out = Stream()
for tr in new_template:
# Find the matching delay
delay = [d[2] for d in delays if d[0] == tr.stats.station and
d[1] == tr.stats.channel[-1]]
if Z_include and len(delay) == 0:
delay = [d[2] for d in delays if d[0] == tr.stats.station]
if len(delay) == 0:
debug_print("No matching template channel found for stack channel"
" {0}.{1}".format(tr.stats.station, tr.stats.channel),
2, 3)
new_template.remove(tr)
else:
for d in delay:
out += tr.copy().trim(
starttime=tr.stats.starttime + d + pre_pad - pre_pick,
endtime=tr.stats.starttime + d + pre_pad + length -
pre_pick)
return out | [
"def",
"extract_from_stack",
"(",
"stack",
",",
"template",
",",
"length",
",",
"pre_pick",
",",
"pre_pad",
",",
"Z_include",
"=",
"False",
",",
"pre_processed",
"=",
"True",
",",
"samp_rate",
"=",
"None",
",",
"lowcut",
"=",
"None",
",",
"highcut",
"=",
... | Extract a multiplexed template from a stack of detections.
Function to extract a new template from a stack of previous detections.
Requires the stack, the template used to make the detections for the \
stack, and we need to know if the stack has been pre-processed.
:type stack: obspy.core.stream.Stream
:param stack: Waveform stack from detections. Can be of any length and \
can have delays already included, or not.
:type template: obspy.core.stream.Stream
:param template: Template used to make the detections in the stack. Will \
use the delays of this for the new template.
:type length: float
:param length: Length of new template in seconds
:type pre_pick: float
:param pre_pick: Extract additional data before the detection, seconds
:type pre_pad: float
:param pre_pad: Pad used in seconds when extracting the data, e.g. the \
time before the detection extracted. If using \
clustering.extract_detections this half the length of the extracted \
waveform.
:type Z_include: bool
:param Z_include: If True will include any Z-channels even if there is \
no template for this channel, as long as there is a template for this \
station at a different channel. If this is False and Z channels are \
included in the template Z channels will be included in the \
new_template anyway.
:type pre_processed: bool
:param pre_processed: Have the data been pre-processed, if True (default) \
then we will only cut the data here.
:type samp_rate: float
:param samp_rate: If pre_processed=False then this is required, desired \
sampling rate in Hz, defaults to False.
:type lowcut: float
:param lowcut: If pre_processed=False then this is required, lowcut in \
Hz, defaults to False.
:type highcut: float
:param highcut: If pre_processed=False then this is required, highcut in \
Hz, defaults to False
:type filt_order: int
:param filt_order: If pre_processed=False then this is required, filter \
order, defaults to False
:returns: Newly cut template.
:rtype: :class:`obspy.core.stream.Stream` | [
"Extract",
"a",
"multiplexed",
"template",
"from",
"a",
"stack",
"of",
"detections",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/template_gen.py#L378-L462 | train | 203,283 |
eqcorrscan/EQcorrscan | eqcorrscan/core/template_gen.py | _group_events | def _group_events(catalog, process_len, template_length, data_pad):
"""
Internal function to group events into sub-catalogs based on process_len.
:param catalog: Catalog to groups into sub-catalogs
:type catalog: obspy.core.event.Catalog
:param process_len: Length in seconds that data will be processed in
:type process_len: int
:return: List of catalogs
:rtype: list
"""
# case for catalog only containing one event
if len(catalog) == 1:
return [catalog]
sub_catalogs = []
# Sort catalog by date
catalog.events = sorted(
catalog.events,
key=lambda e: (e.preferred_origin() or e.origins[0]).time)
sub_catalog = Catalog([catalog[0]])
for event in catalog[1:]:
origin_time = (event.preferred_origin() or event.origins[0]).time
last_pick = sorted(event.picks, key=lambda p: p.time)[-1]
max_diff = (
process_len - (last_pick.time - origin_time) - template_length)
max_diff -= 2 * data_pad
if origin_time - sub_catalog[0].origins[0].time < max_diff:
sub_catalog.append(event)
else:
sub_catalogs.append(sub_catalog)
sub_catalog = Catalog([event])
sub_catalogs.append(sub_catalog)
return sub_catalogs | python | def _group_events(catalog, process_len, template_length, data_pad):
"""
Internal function to group events into sub-catalogs based on process_len.
:param catalog: Catalog to groups into sub-catalogs
:type catalog: obspy.core.event.Catalog
:param process_len: Length in seconds that data will be processed in
:type process_len: int
:return: List of catalogs
:rtype: list
"""
# case for catalog only containing one event
if len(catalog) == 1:
return [catalog]
sub_catalogs = []
# Sort catalog by date
catalog.events = sorted(
catalog.events,
key=lambda e: (e.preferred_origin() or e.origins[0]).time)
sub_catalog = Catalog([catalog[0]])
for event in catalog[1:]:
origin_time = (event.preferred_origin() or event.origins[0]).time
last_pick = sorted(event.picks, key=lambda p: p.time)[-1]
max_diff = (
process_len - (last_pick.time - origin_time) - template_length)
max_diff -= 2 * data_pad
if origin_time - sub_catalog[0].origins[0].time < max_diff:
sub_catalog.append(event)
else:
sub_catalogs.append(sub_catalog)
sub_catalog = Catalog([event])
sub_catalogs.append(sub_catalog)
return sub_catalogs | [
"def",
"_group_events",
"(",
"catalog",
",",
"process_len",
",",
"template_length",
",",
"data_pad",
")",
":",
"# case for catalog only containing one event",
"if",
"len",
"(",
"catalog",
")",
"==",
"1",
":",
"return",
"[",
"catalog",
"]",
"sub_catalogs",
"=",
"... | Internal function to group events into sub-catalogs based on process_len.
:param catalog: Catalog to groups into sub-catalogs
:type catalog: obspy.core.event.Catalog
:param process_len: Length in seconds that data will be processed in
:type process_len: int
:return: List of catalogs
:rtype: list | [
"Internal",
"function",
"to",
"group",
"events",
"into",
"sub",
"-",
"catalogs",
"based",
"on",
"process_len",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/template_gen.py#L776-L809 | train | 203,284 |
eqcorrscan/EQcorrscan | eqcorrscan/core/template_gen.py | multi_template_gen | def multi_template_gen(catalog, st, length, swin='all', prepick=0.05,
all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None):
"""
Generate multiple templates from one stream of data.
Thin wrapper around _template_gen to generate multiple templates from
one stream of continuous data. Takes processed (filtered and resampled)
seismic data!
:type catalog: obspy.core.event.Catalog
:param catalog: Events to extract templates for
:type st: obspy.core.stream.Stream
:param st:
Processed stream to extract from, e.g. filtered and re-sampled to what
you want using pre_processing.dayproc.
:type length: float
:param length: Length of template in seconds
:type swin: string
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type prepick: float
:param prepick:
Length in seconds to extract before the pick time default is
0.05 seconds.
:type all_horiz: bool
:param all_horiz:
To use both horizontal channels even if there is only a pick on one of
them. Defaults to False.
:type delayed: bool
:param delayed:
If True, each channel will begin relative to it's own pick-time, if set
to False, each channel will begin at the same time.
:type plot: bool
:param plot: To plot the template or not, default is True
:type debug: int
:param debug: Debug output level from 0-5.
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: List of :class:`obspy.core.stream.Stream` templates.
:rtype: list
.. warning::
Data must be processed before using this function - highcut, lowcut and
filt_order are only used to generate the meta-data for the templates.
.. note:: By convention templates are generated with P-phases on the \
vertical channel and S-phases on the horizontal channels, normal \
seismograph naming conventions are assumed, where Z denotes vertical \
and N, E, R, T, 1 and 2 denote horizontal channels, either oriented \
or not. To this end we will **only** use Z channels if they have a \
P-pick, and will use one or other horizontal channels **only** if \
there is an S-pick on it.
.. warning:: If there is no phase_hint included in picks, and swin=all, \
all channels with picks will be used.
"""
EQcorrscanDeprecationWarning(
"Function is depreciated and will be removed soon. Use "
"template_gen.template_gen instead.")
temp_list = template_gen(
method="from_meta_file", process=False, meta_file=catalog, st=st,
lowcut=None, highcut=None, samp_rate=st[0].stats.sampling_rate,
filt_order=None, length=length, prepick=prepick,
swin=swin, all_horiz=all_horiz, delayed=delayed, plot=plot,
debug=debug, return_event=return_event, min_snr=min_snr,
parallel=False)
return temp_list | python | def multi_template_gen(catalog, st, length, swin='all', prepick=0.05,
all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None):
"""
Generate multiple templates from one stream of data.
Thin wrapper around _template_gen to generate multiple templates from
one stream of continuous data. Takes processed (filtered and resampled)
seismic data!
:type catalog: obspy.core.event.Catalog
:param catalog: Events to extract templates for
:type st: obspy.core.stream.Stream
:param st:
Processed stream to extract from, e.g. filtered and re-sampled to what
you want using pre_processing.dayproc.
:type length: float
:param length: Length of template in seconds
:type swin: string
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type prepick: float
:param prepick:
Length in seconds to extract before the pick time default is
0.05 seconds.
:type all_horiz: bool
:param all_horiz:
To use both horizontal channels even if there is only a pick on one of
them. Defaults to False.
:type delayed: bool
:param delayed:
If True, each channel will begin relative to it's own pick-time, if set
to False, each channel will begin at the same time.
:type plot: bool
:param plot: To plot the template or not, default is True
:type debug: int
:param debug: Debug output level from 0-5.
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: List of :class:`obspy.core.stream.Stream` templates.
:rtype: list
.. warning::
Data must be processed before using this function - highcut, lowcut and
filt_order are only used to generate the meta-data for the templates.
.. note:: By convention templates are generated with P-phases on the \
vertical channel and S-phases on the horizontal channels, normal \
seismograph naming conventions are assumed, where Z denotes vertical \
and N, E, R, T, 1 and 2 denote horizontal channels, either oriented \
or not. To this end we will **only** use Z channels if they have a \
P-pick, and will use one or other horizontal channels **only** if \
there is an S-pick on it.
.. warning:: If there is no phase_hint included in picks, and swin=all, \
all channels with picks will be used.
"""
EQcorrscanDeprecationWarning(
"Function is depreciated and will be removed soon. Use "
"template_gen.template_gen instead.")
temp_list = template_gen(
method="from_meta_file", process=False, meta_file=catalog, st=st,
lowcut=None, highcut=None, samp_rate=st[0].stats.sampling_rate,
filt_order=None, length=length, prepick=prepick,
swin=swin, all_horiz=all_horiz, delayed=delayed, plot=plot,
debug=debug, return_event=return_event, min_snr=min_snr,
parallel=False)
return temp_list | [
"def",
"multi_template_gen",
"(",
"catalog",
",",
"st",
",",
"length",
",",
"swin",
"=",
"'all'",
",",
"prepick",
"=",
"0.05",
",",
"all_horiz",
"=",
"False",
",",
"delayed",
"=",
"True",
",",
"plot",
"=",
"False",
",",
"debug",
"=",
"0",
",",
"retur... | Generate multiple templates from one stream of data.
Thin wrapper around _template_gen to generate multiple templates from
one stream of continuous data. Takes processed (filtered and resampled)
seismic data!
:type catalog: obspy.core.event.Catalog
:param catalog: Events to extract templates for
:type st: obspy.core.stream.Stream
:param st:
Processed stream to extract from, e.g. filtered and re-sampled to what
you want using pre_processing.dayproc.
:type length: float
:param length: Length of template in seconds
:type swin: string
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type prepick: float
:param prepick:
Length in seconds to extract before the pick time default is
0.05 seconds.
:type all_horiz: bool
:param all_horiz:
To use both horizontal channels even if there is only a pick on one of
them. Defaults to False.
:type delayed: bool
:param delayed:
If True, each channel will begin relative to it's own pick-time, if set
to False, each channel will begin at the same time.
:type plot: bool
:param plot: To plot the template or not, default is True
:type debug: int
:param debug: Debug output level from 0-5.
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: List of :class:`obspy.core.stream.Stream` templates.
:rtype: list
.. warning::
Data must be processed before using this function - highcut, lowcut and
filt_order are only used to generate the meta-data for the templates.
.. note:: By convention templates are generated with P-phases on the \
vertical channel and S-phases on the horizontal channels, normal \
seismograph naming conventions are assumed, where Z denotes vertical \
and N, E, R, T, 1 and 2 denote horizontal channels, either oriented \
or not. To this end we will **only** use Z channels if they have a \
P-pick, and will use one or other horizontal channels **only** if \
there is an S-pick on it.
.. warning:: If there is no phase_hint included in picks, and swin=all, \
all channels with picks will be used. | [
"Generate",
"multiple",
"templates",
"from",
"one",
"stream",
"of",
"data",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/template_gen.py#L813-L888 | train | 203,285 |
eqcorrscan/EQcorrscan | eqcorrscan/core/template_gen.py | from_client | def from_client(catalog, client_id, lowcut, highcut, samp_rate, filt_order,
length, prepick, swin, process_len=86400, data_pad=90,
all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None):
"""
Generate multiplexed template from FDSN client.
Function to generate templates from an FDSN client. Must be given \
an obspy.Catalog class and the client_id as input. The function returns \
a list of obspy.Stream classes containing steams for each desired \
template.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog class containing desired template events
:type client_id: str
:param client_id: Name of the client, either url, or Obspy \
mappable (see the :mod:`obspy.clients.fdsn` documentation).
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level (number of corners).
:type length: float
:param length: Extract length in seconds.
:type prepick: float
:param prepick: Pre-pick time in seconds
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type process_len: int
:param process_len: Length of data in seconds to download and process.
:param data_pad: Length of data (in seconds) required before and after \
any event for processing, use to reduce edge-effects of filtering on \
the templates.
:type data_pad: int
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
:param delayed: If True, each channel will begin relative to it's own \
pick-time, if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Plot templates or not.
:type debug: int
:param debug: Level of debugging output, higher=more
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: List of :class:`obspy.core.stream.Stream` Templates
:rtype: list
.. warning::
This function is depreciated and will be removed in a forthcoming
release. Please use `template_gen` instead.
.. note::
process_len should be set to the same length as used when computing
detections using match_filter.match_filter, e.g. if you read
in day-long data for match_filter, process_len should be 86400.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.core.template_gen import from_client
>>> client = Client('NCEDC')
>>> catalog = client.get_events(eventid='72572665', includearrivals=True)
>>> # We are only taking two picks for this example to speed up the
>>> # example, note that you don't have to!
>>> catalog[0].picks = catalog[0].picks[0:2]
>>> templates = from_client(catalog=catalog, client_id='NCEDC',
... lowcut=2.0, highcut=9.0, samp_rate=20.0,
... filt_order=4, length=3.0, prepick=0.15,
... swin='all', process_len=300,
... all_horiz=True)
>>> templates[0].plot(equal_scale=False, size=(800,600)) # doctest: +SKIP
.. figure:: ../../plots/template_gen.from_client.png
"""
EQcorrscanDeprecationWarning(
"Function is depreciated and will be removed soon. Use "
"template_gen.template_gen instead.")
temp_list = template_gen(
method="from_client", catalog=catalog, client_id=client_id,
lowcut=lowcut, highcut=highcut, samp_rate=samp_rate,
filt_order=filt_order, length=length, prepick=prepick,
swin=swin, process_len=process_len, data_pad=data_pad,
all_horiz=all_horiz, delayed=delayed, plot=plot, debug=debug,
return_event=return_event, min_snr=min_snr)
return temp_list | python | def from_client(catalog, client_id, lowcut, highcut, samp_rate, filt_order,
length, prepick, swin, process_len=86400, data_pad=90,
all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None):
"""
Generate multiplexed template from FDSN client.
Function to generate templates from an FDSN client. Must be given \
an obspy.Catalog class and the client_id as input. The function returns \
a list of obspy.Stream classes containing steams for each desired \
template.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog class containing desired template events
:type client_id: str
:param client_id: Name of the client, either url, or Obspy \
mappable (see the :mod:`obspy.clients.fdsn` documentation).
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level (number of corners).
:type length: float
:param length: Extract length in seconds.
:type prepick: float
:param prepick: Pre-pick time in seconds
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type process_len: int
:param process_len: Length of data in seconds to download and process.
:param data_pad: Length of data (in seconds) required before and after \
any event for processing, use to reduce edge-effects of filtering on \
the templates.
:type data_pad: int
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
:param delayed: If True, each channel will begin relative to it's own \
pick-time, if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Plot templates or not.
:type debug: int
:param debug: Level of debugging output, higher=more
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: List of :class:`obspy.core.stream.Stream` Templates
:rtype: list
.. warning::
This function is depreciated and will be removed in a forthcoming
release. Please use `template_gen` instead.
.. note::
process_len should be set to the same length as used when computing
detections using match_filter.match_filter, e.g. if you read
in day-long data for match_filter, process_len should be 86400.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.core.template_gen import from_client
>>> client = Client('NCEDC')
>>> catalog = client.get_events(eventid='72572665', includearrivals=True)
>>> # We are only taking two picks for this example to speed up the
>>> # example, note that you don't have to!
>>> catalog[0].picks = catalog[0].picks[0:2]
>>> templates = from_client(catalog=catalog, client_id='NCEDC',
... lowcut=2.0, highcut=9.0, samp_rate=20.0,
... filt_order=4, length=3.0, prepick=0.15,
... swin='all', process_len=300,
... all_horiz=True)
>>> templates[0].plot(equal_scale=False, size=(800,600)) # doctest: +SKIP
.. figure:: ../../plots/template_gen.from_client.png
"""
EQcorrscanDeprecationWarning(
"Function is depreciated and will be removed soon. Use "
"template_gen.template_gen instead.")
temp_list = template_gen(
method="from_client", catalog=catalog, client_id=client_id,
lowcut=lowcut, highcut=highcut, samp_rate=samp_rate,
filt_order=filt_order, length=length, prepick=prepick,
swin=swin, process_len=process_len, data_pad=data_pad,
all_horiz=all_horiz, delayed=delayed, plot=plot, debug=debug,
return_event=return_event, min_snr=min_snr)
return temp_list | [
"def",
"from_client",
"(",
"catalog",
",",
"client_id",
",",
"lowcut",
",",
"highcut",
",",
"samp_rate",
",",
"filt_order",
",",
"length",
",",
"prepick",
",",
"swin",
",",
"process_len",
"=",
"86400",
",",
"data_pad",
"=",
"90",
",",
"all_horiz",
"=",
"... | Generate multiplexed template from FDSN client.
Function to generate templates from an FDSN client. Must be given \
an obspy.Catalog class and the client_id as input. The function returns \
a list of obspy.Stream classes containing steams for each desired \
template.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog class containing desired template events
:type client_id: str
:param client_id: Name of the client, either url, or Obspy \
mappable (see the :mod:`obspy.clients.fdsn` documentation).
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level (number of corners).
:type length: float
:param length: Extract length in seconds.
:type prepick: float
:param prepick: Pre-pick time in seconds
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type process_len: int
:param process_len: Length of data in seconds to download and process.
:param data_pad: Length of data (in seconds) required before and after \
any event for processing, use to reduce edge-effects of filtering on \
the templates.
:type data_pad: int
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
:param delayed: If True, each channel will begin relative to it's own \
pick-time, if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Plot templates or not.
:type debug: int
:param debug: Level of debugging output, higher=more
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: List of :class:`obspy.core.stream.Stream` Templates
:rtype: list
.. warning::
This function is depreciated and will be removed in a forthcoming
release. Please use `template_gen` instead.
.. note::
process_len should be set to the same length as used when computing
detections using match_filter.match_filter, e.g. if you read
in day-long data for match_filter, process_len should be 86400.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.core.template_gen import from_client
>>> client = Client('NCEDC')
>>> catalog = client.get_events(eventid='72572665', includearrivals=True)
>>> # We are only taking two picks for this example to speed up the
>>> # example, note that you don't have to!
>>> catalog[0].picks = catalog[0].picks[0:2]
>>> templates = from_client(catalog=catalog, client_id='NCEDC',
... lowcut=2.0, highcut=9.0, samp_rate=20.0,
... filt_order=4, length=3.0, prepick=0.15,
... swin='all', process_len=300,
... all_horiz=True)
>>> templates[0].plot(equal_scale=False, size=(800,600)) # doctest: +SKIP
.. figure:: ../../plots/template_gen.from_client.png | [
"Generate",
"multiplexed",
"template",
"from",
"FDSN",
"client",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/template_gen.py#L891-L989 | train | 203,286 |
eqcorrscan/EQcorrscan | eqcorrscan/core/template_gen.py | from_sac | def from_sac(sac_files, lowcut, highcut, samp_rate, filt_order, length, swin,
prepick, all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None):
"""
Generate a multiplexed template from a list of SAC files.
Function to read picks and waveforms from SAC data, and generate a \
template from these. Usually sac_files is a list of all single-channel \
SAC files for a given event, a single, multi-channel template will be \
created from these traces.
**All files listed in sac_files should be associated with a single event.**
:type sac_files: list
:param sac_files: osbpy.core.stream.Stream of sac waveforms, or
list of paths to sac waveforms.
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level.
:type length: float
:param length: Extract length in seconds.
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type prepick: float
:param prepick: Length to extract prior to the pick in seconds.
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
:param delayed: If True, each channel will begin relative to it's own \
pick-time, if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Turns template plotting on or off.
:type debug: int
:param debug: Debug level, higher number=more output.
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: Newly cut template.
:rtype: :class:`obspy.core.stream.Stream`
.. note:: This functionality is not supported for obspy versions below \
1.0.0 as references times are not read in by SACIO, which are needed \
for defining pick times.
.. rubric:: Example
>>> from eqcorrscan.core.template_gen import from_sac
>>> import glob
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> # Get all the SAC-files associated with one event.
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> templates = from_sac(sac_files=sac_files, lowcut=2.0, highcut=10.0,
... samp_rate=25.0, filt_order=4, length=2.0,
... swin='all', prepick=0.1, all_horiz=True)
>>> print(templates[0][0].stats.sampling_rate)
25.0
>>> print(len(templates[0]))
15
"""
EQcorrscanDeprecationWarning(
"Function is depreciated and will be removed soon. Use "
"template_gen.template_gen instead.")
temp_list = template_gen(
method="from_sac", sac_files=sac_files,
lowcut=lowcut, highcut=highcut, samp_rate=samp_rate,
filt_order=filt_order, length=length, prepick=prepick,
swin=swin, all_horiz=all_horiz, delayed=delayed, plot=plot,
debug=debug, return_event=return_event, min_snr=min_snr,
parallel=False)
return temp_list | python | def from_sac(sac_files, lowcut, highcut, samp_rate, filt_order, length, swin,
prepick, all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None):
"""
Generate a multiplexed template from a list of SAC files.
Function to read picks and waveforms from SAC data, and generate a \
template from these. Usually sac_files is a list of all single-channel \
SAC files for a given event, a single, multi-channel template will be \
created from these traces.
**All files listed in sac_files should be associated with a single event.**
:type sac_files: list
:param sac_files: osbpy.core.stream.Stream of sac waveforms, or
list of paths to sac waveforms.
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level.
:type length: float
:param length: Extract length in seconds.
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type prepick: float
:param prepick: Length to extract prior to the pick in seconds.
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
:param delayed: If True, each channel will begin relative to it's own \
pick-time, if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Turns template plotting on or off.
:type debug: int
:param debug: Debug level, higher number=more output.
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: Newly cut template.
:rtype: :class:`obspy.core.stream.Stream`
.. note:: This functionality is not supported for obspy versions below \
1.0.0 as references times are not read in by SACIO, which are needed \
for defining pick times.
.. rubric:: Example
>>> from eqcorrscan.core.template_gen import from_sac
>>> import glob
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> # Get all the SAC-files associated with one event.
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> templates = from_sac(sac_files=sac_files, lowcut=2.0, highcut=10.0,
... samp_rate=25.0, filt_order=4, length=2.0,
... swin='all', prepick=0.1, all_horiz=True)
>>> print(templates[0][0].stats.sampling_rate)
25.0
>>> print(len(templates[0]))
15
"""
EQcorrscanDeprecationWarning(
"Function is depreciated and will be removed soon. Use "
"template_gen.template_gen instead.")
temp_list = template_gen(
method="from_sac", sac_files=sac_files,
lowcut=lowcut, highcut=highcut, samp_rate=samp_rate,
filt_order=filt_order, length=length, prepick=prepick,
swin=swin, all_horiz=all_horiz, delayed=delayed, plot=plot,
debug=debug, return_event=return_event, min_snr=min_snr,
parallel=False)
return temp_list | [
"def",
"from_sac",
"(",
"sac_files",
",",
"lowcut",
",",
"highcut",
",",
"samp_rate",
",",
"filt_order",
",",
"length",
",",
"swin",
",",
"prepick",
",",
"all_horiz",
"=",
"False",
",",
"delayed",
"=",
"True",
",",
"plot",
"=",
"False",
",",
"debug",
"... | Generate a multiplexed template from a list of SAC files.
Function to read picks and waveforms from SAC data, and generate a \
template from these. Usually sac_files is a list of all single-channel \
SAC files for a given event, a single, multi-channel template will be \
created from these traces.
**All files listed in sac_files should be associated with a single event.**
:type sac_files: list
:param sac_files: osbpy.core.stream.Stream of sac waveforms, or
list of paths to sac waveforms.
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level.
:type length: float
:param length: Extract length in seconds.
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type prepick: float
:param prepick: Length to extract prior to the pick in seconds.
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
:param delayed: If True, each channel will begin relative to it's own \
pick-time, if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Turns template plotting on or off.
:type debug: int
:param debug: Debug level, higher number=more output.
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:returns: Newly cut template.
:rtype: :class:`obspy.core.stream.Stream`
.. note:: This functionality is not supported for obspy versions below \
1.0.0 as references times are not read in by SACIO, which are needed \
for defining pick times.
.. rubric:: Example
>>> from eqcorrscan.core.template_gen import from_sac
>>> import glob
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> # Get all the SAC-files associated with one event.
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> templates = from_sac(sac_files=sac_files, lowcut=2.0, highcut=10.0,
... samp_rate=25.0, filt_order=4, length=2.0,
... swin='all', prepick=0.1, all_horiz=True)
>>> print(templates[0][0].stats.sampling_rate)
25.0
>>> print(len(templates[0]))
15 | [
"Generate",
"a",
"multiplexed",
"template",
"from",
"a",
"list",
"of",
"SAC",
"files",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/template_gen.py#L1076-L1162 | train | 203,287 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/timer.py | time_func | def time_func(func, name, *args, **kwargs):
""" call a func with args and kwargs, print name of func and how
long it took. """
tic = time.time()
out = func(*args, **kwargs)
toc = time.time()
print('%s took %0.2f seconds' % (name, toc - tic))
return out | python | def time_func(func, name, *args, **kwargs):
""" call a func with args and kwargs, print name of func and how
long it took. """
tic = time.time()
out = func(*args, **kwargs)
toc = time.time()
print('%s took %0.2f seconds' % (name, toc - tic))
return out | [
"def",
"time_func",
"(",
"func",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"out",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"toc",
"=",
"time",
".",
"time",... | call a func with args and kwargs, print name of func and how
long it took. | [
"call",
"a",
"func",
"with",
"args",
"and",
"kwargs",
"print",
"name",
"of",
"func",
"and",
"how",
"long",
"it",
"took",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/timer.py#L47-L54 | train | 203,288 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/synth_seis.py | seis_sim | def seis_sim(sp, amp_ratio=1.5, flength=False, phaseout='all'):
"""
Generate a simulated seismogram from a given S-P time.
Will generate spikes separated by a given S-P time, which are then
convolved with a decaying sine function. The P-phase is simulated by a
positive spike of value 1, the S-arrival is simulated by a decaying
boxcar of maximum amplitude 1.5. These amplitude ratios can be altered by
changing the amp_ratio, which is the ratio S amplitude:P amplitude.
.. note::
In testing this can achieve 0.3 or greater cross-correlations with
data.
:type sp: int
:param sp: S-P time in samples
:type amp_ratio: float
:param amp_ratio: S:P amplitude ratio
:type flength: int
:param flength: Fixed length in samples, defaults to False
:type phaseout: str
:param phaseout:
Either 'P', 'S' or 'all', controls which phases to cut around, defaults
to 'all'. Can only be used with 'P' or 'S' options if flength
is set.
:returns: Simulated data.
:rtype: :class:`numpy.ndarray`
"""
if flength and 2.5 * sp < flength and 100 < flength:
additional_length = flength
elif 2.5 * sp < 100.0:
additional_length = 100
else:
additional_length = 2.5 * sp
synth = np.zeros(int(sp + 10 + additional_length))
# Make the array begin 10 samples before the P
# and at least 2.5 times the S-P samples after the S arrival
synth[10] = 1.0 # P-spike fixed at 10 samples from start of window
# The length of the decaying S-phase should depend on the SP time,\
# Some basic estimations suggest this should be atleast 10 samples\
# and that the coda should be about 1/10 of the SP time
S_length = 10 + int(sp // 3)
S_spikes = np.arange(amp_ratio, 0, -(amp_ratio / S_length))
# What we actually want, or what appears better is to have a series of\
# individual spikes, of alternating polarity...
for i in range(len(S_spikes)):
if i in np.arange(1, len(S_spikes), 2):
S_spikes[i] = 0
if i in np.arange(2, len(S_spikes), 4):
S_spikes[i] *= -1
# Put these spikes into the synthetic
synth[10 + sp:10 + sp + len(S_spikes)] = S_spikes
# Generate a rough damped sine wave to convolve with the model spikes
sine_x = np.arange(0, 10.0, 0.5)
damped_sine = np.exp(-sine_x) * np.sin(2 * np.pi * sine_x)
# Convolve the spike model with the damped sine!
synth = np.convolve(synth, damped_sine)
# Normalize snyth
synth = synth / np.max(np.abs(synth))
if not flength:
return synth
else:
if phaseout in ['all', 'P']:
synth = synth[0:flength]
elif phaseout == 'S':
synth = synth[sp:]
if len(synth) < flength:
# If this is too short, pad
synth = np.append(synth, np.zeros(flength - len(synth)))
else:
synth = synth[0:flength]
return synth | python | def seis_sim(sp, amp_ratio=1.5, flength=False, phaseout='all'):
"""
Generate a simulated seismogram from a given S-P time.
Will generate spikes separated by a given S-P time, which are then
convolved with a decaying sine function. The P-phase is simulated by a
positive spike of value 1, the S-arrival is simulated by a decaying
boxcar of maximum amplitude 1.5. These amplitude ratios can be altered by
changing the amp_ratio, which is the ratio S amplitude:P amplitude.
.. note::
In testing this can achieve 0.3 or greater cross-correlations with
data.
:type sp: int
:param sp: S-P time in samples
:type amp_ratio: float
:param amp_ratio: S:P amplitude ratio
:type flength: int
:param flength: Fixed length in samples, defaults to False
:type phaseout: str
:param phaseout:
Either 'P', 'S' or 'all', controls which phases to cut around, defaults
to 'all'. Can only be used with 'P' or 'S' options if flength
is set.
:returns: Simulated data.
:rtype: :class:`numpy.ndarray`
"""
if flength and 2.5 * sp < flength and 100 < flength:
additional_length = flength
elif 2.5 * sp < 100.0:
additional_length = 100
else:
additional_length = 2.5 * sp
synth = np.zeros(int(sp + 10 + additional_length))
# Make the array begin 10 samples before the P
# and at least 2.5 times the S-P samples after the S arrival
synth[10] = 1.0 # P-spike fixed at 10 samples from start of window
# The length of the decaying S-phase should depend on the SP time,\
# Some basic estimations suggest this should be atleast 10 samples\
# and that the coda should be about 1/10 of the SP time
S_length = 10 + int(sp // 3)
S_spikes = np.arange(amp_ratio, 0, -(amp_ratio / S_length))
# What we actually want, or what appears better is to have a series of\
# individual spikes, of alternating polarity...
for i in range(len(S_spikes)):
if i in np.arange(1, len(S_spikes), 2):
S_spikes[i] = 0
if i in np.arange(2, len(S_spikes), 4):
S_spikes[i] *= -1
# Put these spikes into the synthetic
synth[10 + sp:10 + sp + len(S_spikes)] = S_spikes
# Generate a rough damped sine wave to convolve with the model spikes
sine_x = np.arange(0, 10.0, 0.5)
damped_sine = np.exp(-sine_x) * np.sin(2 * np.pi * sine_x)
# Convolve the spike model with the damped sine!
synth = np.convolve(synth, damped_sine)
# Normalize snyth
synth = synth / np.max(np.abs(synth))
if not flength:
return synth
else:
if phaseout in ['all', 'P']:
synth = synth[0:flength]
elif phaseout == 'S':
synth = synth[sp:]
if len(synth) < flength:
# If this is too short, pad
synth = np.append(synth, np.zeros(flength - len(synth)))
else:
synth = synth[0:flength]
return synth | [
"def",
"seis_sim",
"(",
"sp",
",",
"amp_ratio",
"=",
"1.5",
",",
"flength",
"=",
"False",
",",
"phaseout",
"=",
"'all'",
")",
":",
"if",
"flength",
"and",
"2.5",
"*",
"sp",
"<",
"flength",
"and",
"100",
"<",
"flength",
":",
"additional_length",
"=",
... | Generate a simulated seismogram from a given S-P time.
Will generate spikes separated by a given S-P time, which are then
convolved with a decaying sine function. The P-phase is simulated by a
positive spike of value 1, the S-arrival is simulated by a decaying
boxcar of maximum amplitude 1.5. These amplitude ratios can be altered by
changing the amp_ratio, which is the ratio S amplitude:P amplitude.
.. note::
In testing this can achieve 0.3 or greater cross-correlations with
data.
:type sp: int
:param sp: S-P time in samples
:type amp_ratio: float
:param amp_ratio: S:P amplitude ratio
:type flength: int
:param flength: Fixed length in samples, defaults to False
:type phaseout: str
:param phaseout:
Either 'P', 'S' or 'all', controls which phases to cut around, defaults
to 'all'. Can only be used with 'P' or 'S' options if flength
is set.
:returns: Simulated data.
:rtype: :class:`numpy.ndarray` | [
"Generate",
"a",
"simulated",
"seismogram",
"from",
"a",
"given",
"S",
"-",
"P",
"time",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/synth_seis.py#L26-L98 | train | 203,289 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/synth_seis.py | SVD_sim | def SVD_sim(sp, lowcut, highcut, samp_rate,
amp_range=np.arange(-10, 10, 0.01)):
"""
Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray`
"""
# Convert SP to samples
sp = int(sp * samp_rate)
# Scan through a range of amplitude ratios
synthetics = [Stream(Trace(seis_sim(sp, a))) for a in amp_range]
for st in synthetics:
for tr in st:
tr.stats.station = 'SYNTH'
tr.stats.channel = 'SH1'
tr.stats.sampling_rate = samp_rate
tr.filter('bandpass', freqmin=lowcut, freqmax=highcut)
# We have a list of obspy Trace objects, we can pass this to EQcorrscan's
# SVD functions
U, s, V, stachans = clustering.svd(synthetics)
return U, s, V, stachans | python | def SVD_sim(sp, lowcut, highcut, samp_rate,
amp_range=np.arange(-10, 10, 0.01)):
"""
Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray`
"""
# Convert SP to samples
sp = int(sp * samp_rate)
# Scan through a range of amplitude ratios
synthetics = [Stream(Trace(seis_sim(sp, a))) for a in amp_range]
for st in synthetics:
for tr in st:
tr.stats.station = 'SYNTH'
tr.stats.channel = 'SH1'
tr.stats.sampling_rate = samp_rate
tr.filter('bandpass', freqmin=lowcut, freqmax=highcut)
# We have a list of obspy Trace objects, we can pass this to EQcorrscan's
# SVD functions
U, s, V, stachans = clustering.svd(synthetics)
return U, s, V, stachans | [
"def",
"SVD_sim",
"(",
"sp",
",",
"lowcut",
",",
"highcut",
",",
"samp_rate",
",",
"amp_range",
"=",
"np",
".",
"arange",
"(",
"-",
"10",
",",
"10",
",",
"0.01",
")",
")",
":",
"# Convert SP to samples",
"sp",
"=",
"int",
"(",
"sp",
"*",
"samp_rate",... | Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray` | [
"Generate",
"basis",
"vectors",
"of",
"a",
"set",
"of",
"simulated",
"seismograms",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/synth_seis.py#L101-L137 | train | 203,290 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/synth_seis.py | template_grid | def template_grid(stations, nodes, travel_times, phase, PS_ratio=1.68,
samp_rate=100, flength=False, phaseout='all'):
"""
Generate a group of synthetic seismograms for a grid of sources.
Used to simulate phase arrivals from a grid of known sources in a
three-dimensional model. Lags must be known and supplied, these can be
generated from the bright_lights function: read_tt, and resampled to fit
the desired grid dimensions and spacing using other functions therein.
These synthetic seismograms are very simple models of seismograms using
the seis_sim function herein. These approximate body-wave P and S first
arrivals as spikes convolved with damped sine waves.
:type stations: list
:param stations: List of the station names
:type nodes: list
:param nodes: List of node locations in (lon,lat,depth)
:type travel_times: numpy.ndarray
:param travel_times: Array of travel times where travel_times[i][:] \
refers to the travel times for station=stations[i], and \
travel_times[i][j] refers to stations[i] for nodes[j]
:type phase: str
:param phase: Can be either 'P' or 'S'
:type PS_ratio: float
:param PS_ratio: P/S velocity ratio, defaults to 1.68
:type samp_rate: float
:param samp_rate: Desired sample rate in Hz, defaults to 100.0
:type flength: int
:param flength: Length of template in samples, defaults to False
:type phaseout: str
:param phaseout: Either 'S', 'P', 'all' or 'both', determines which \
phases to clip around. 'all' Encompasses both phases in one channel, \
but will return nothing if the flength is not long enough, 'both' \
will return two channels for each stations, one SYN_Z with the \
synthetic P-phase, and one SYN_H with the synthetic S-phase.
:returns: List of :class:`obspy.core.stream.Stream`
"""
if phase not in ['S', 'P']:
raise IOError('Phase is neither P nor S')
# Initialize empty list for templates
templates = []
# Loop through the nodes, for every node generate a template!
for i, node in enumerate(nodes):
st = [] # Empty list to be filled with synthetics
# Loop through stations
for j, station in enumerate(stations):
tr = Trace()
tr.stats.sampling_rate = samp_rate
tr.stats.station = station
tr.stats.channel = 'SYN'
tt = travel_times[j][i]
if phase == 'P':
# If the input travel-time is the P-wave travel-time
SP_time = (tt * PS_ratio) - tt
if phaseout == 'S':
tr.stats.starttime += tt + SP_time
else:
tr.stats.starttime += tt
elif phase == 'S':
# If the input travel-time is the S-wave travel-time
SP_time = tt - (tt / PS_ratio)
if phaseout == 'S':
tr.stats.starttime += tt
else:
tr.stats.starttime += tt - SP_time
# Set start-time of trace to be travel-time for P-wave
# Check that the template length is long enough to include the SP
if flength and SP_time * samp_rate < flength - 11 \
and phaseout == 'all':
tr.data = seis_sim(sp=int(SP_time * samp_rate), amp_ratio=1.5,
flength=flength, phaseout=phaseout)
st.append(tr)
elif flength and phaseout == 'all':
warnings.warn('Cannot make a bulk synthetic with this fixed ' +
'length for station ' + station)
elif phaseout == 'all':
tr.data = seis_sim(sp=int(SP_time * samp_rate), amp_ratio=1.5,
flength=flength, phaseout=phaseout)
st.append(tr)
elif phaseout in ['P', 'S']:
tr.data = seis_sim(sp=int(SP_time * samp_rate), amp_ratio=1.5,
flength=flength, phaseout=phaseout)
st.append(tr)
elif phaseout == 'both':
for _phaseout in ['P', 'S']:
_tr = tr.copy()
_tr.data = seis_sim(sp=int(SP_time * samp_rate),
amp_ratio=1.5, flength=flength,
phaseout=_phaseout)
if _phaseout == 'P':
_tr.stats.channel = 'SYN_Z'
# starttime defaults to S-time
_tr.stats.starttime = _tr.stats.starttime - SP_time
elif _phaseout == 'S':
_tr.stats.channel = 'SYN_H'
st.append(_tr)
templates.append(Stream(st))
# Stream(st).plot(size=(800,600))
return templates | python | def template_grid(stations, nodes, travel_times, phase, PS_ratio=1.68,
samp_rate=100, flength=False, phaseout='all'):
"""
Generate a group of synthetic seismograms for a grid of sources.
Used to simulate phase arrivals from a grid of known sources in a
three-dimensional model. Lags must be known and supplied, these can be
generated from the bright_lights function: read_tt, and resampled to fit
the desired grid dimensions and spacing using other functions therein.
These synthetic seismograms are very simple models of seismograms using
the seis_sim function herein. These approximate body-wave P and S first
arrivals as spikes convolved with damped sine waves.
:type stations: list
:param stations: List of the station names
:type nodes: list
:param nodes: List of node locations in (lon,lat,depth)
:type travel_times: numpy.ndarray
:param travel_times: Array of travel times where travel_times[i][:] \
refers to the travel times for station=stations[i], and \
travel_times[i][j] refers to stations[i] for nodes[j]
:type phase: str
:param phase: Can be either 'P' or 'S'
:type PS_ratio: float
:param PS_ratio: P/S velocity ratio, defaults to 1.68
:type samp_rate: float
:param samp_rate: Desired sample rate in Hz, defaults to 100.0
:type flength: int
:param flength: Length of template in samples, defaults to False
:type phaseout: str
:param phaseout: Either 'S', 'P', 'all' or 'both', determines which \
phases to clip around. 'all' Encompasses both phases in one channel, \
but will return nothing if the flength is not long enough, 'both' \
will return two channels for each stations, one SYN_Z with the \
synthetic P-phase, and one SYN_H with the synthetic S-phase.
:returns: List of :class:`obspy.core.stream.Stream`
"""
if phase not in ['S', 'P']:
raise IOError('Phase is neither P nor S')
# Initialize empty list for templates
templates = []
# Loop through the nodes, for every node generate a template!
for i, node in enumerate(nodes):
st = [] # Empty list to be filled with synthetics
# Loop through stations
for j, station in enumerate(stations):
tr = Trace()
tr.stats.sampling_rate = samp_rate
tr.stats.station = station
tr.stats.channel = 'SYN'
tt = travel_times[j][i]
if phase == 'P':
# If the input travel-time is the P-wave travel-time
SP_time = (tt * PS_ratio) - tt
if phaseout == 'S':
tr.stats.starttime += tt + SP_time
else:
tr.stats.starttime += tt
elif phase == 'S':
# If the input travel-time is the S-wave travel-time
SP_time = tt - (tt / PS_ratio)
if phaseout == 'S':
tr.stats.starttime += tt
else:
tr.stats.starttime += tt - SP_time
# Set start-time of trace to be travel-time for P-wave
# Check that the template length is long enough to include the SP
if flength and SP_time * samp_rate < flength - 11 \
and phaseout == 'all':
tr.data = seis_sim(sp=int(SP_time * samp_rate), amp_ratio=1.5,
flength=flength, phaseout=phaseout)
st.append(tr)
elif flength and phaseout == 'all':
warnings.warn('Cannot make a bulk synthetic with this fixed ' +
'length for station ' + station)
elif phaseout == 'all':
tr.data = seis_sim(sp=int(SP_time * samp_rate), amp_ratio=1.5,
flength=flength, phaseout=phaseout)
st.append(tr)
elif phaseout in ['P', 'S']:
tr.data = seis_sim(sp=int(SP_time * samp_rate), amp_ratio=1.5,
flength=flength, phaseout=phaseout)
st.append(tr)
elif phaseout == 'both':
for _phaseout in ['P', 'S']:
_tr = tr.copy()
_tr.data = seis_sim(sp=int(SP_time * samp_rate),
amp_ratio=1.5, flength=flength,
phaseout=_phaseout)
if _phaseout == 'P':
_tr.stats.channel = 'SYN_Z'
# starttime defaults to S-time
_tr.stats.starttime = _tr.stats.starttime - SP_time
elif _phaseout == 'S':
_tr.stats.channel = 'SYN_H'
st.append(_tr)
templates.append(Stream(st))
# Stream(st).plot(size=(800,600))
return templates | [
"def",
"template_grid",
"(",
"stations",
",",
"nodes",
",",
"travel_times",
",",
"phase",
",",
"PS_ratio",
"=",
"1.68",
",",
"samp_rate",
"=",
"100",
",",
"flength",
"=",
"False",
",",
"phaseout",
"=",
"'all'",
")",
":",
"if",
"phase",
"not",
"in",
"["... | Generate a group of synthetic seismograms for a grid of sources.
Used to simulate phase arrivals from a grid of known sources in a
three-dimensional model. Lags must be known and supplied, these can be
generated from the bright_lights function: read_tt, and resampled to fit
the desired grid dimensions and spacing using other functions therein.
These synthetic seismograms are very simple models of seismograms using
the seis_sim function herein. These approximate body-wave P and S first
arrivals as spikes convolved with damped sine waves.
:type stations: list
:param stations: List of the station names
:type nodes: list
:param nodes: List of node locations in (lon,lat,depth)
:type travel_times: numpy.ndarray
:param travel_times: Array of travel times where travel_times[i][:] \
refers to the travel times for station=stations[i], and \
travel_times[i][j] refers to stations[i] for nodes[j]
:type phase: str
:param phase: Can be either 'P' or 'S'
:type PS_ratio: float
:param PS_ratio: P/S velocity ratio, defaults to 1.68
:type samp_rate: float
:param samp_rate: Desired sample rate in Hz, defaults to 100.0
:type flength: int
:param flength: Length of template in samples, defaults to False
:type phaseout: str
:param phaseout: Either 'S', 'P', 'all' or 'both', determines which \
phases to clip around. 'all' Encompasses both phases in one channel, \
but will return nothing if the flength is not long enough, 'both' \
will return two channels for each stations, one SYN_Z with the \
synthetic P-phase, and one SYN_H with the synthetic S-phase.
:returns: List of :class:`obspy.core.stream.Stream` | [
"Generate",
"a",
"group",
"of",
"synthetic",
"seismograms",
"for",
"a",
"grid",
"of",
"sources",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/synth_seis.py#L140-L239 | train | 203,291 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/synth_seis.py | generate_synth_data | def generate_synth_data(nsta, ntemplates, nseeds, samp_rate, t_length,
max_amp, max_lag, debug=0):
"""
Generate a synthetic dataset to be used for testing.
This will generate both templates and data to scan through.
Templates will be generated using the utils.synth_seis functions.
The day of data will be random noise, with random signal-to-noise
ratio copies of the templates randomly seeded throughout the day.
It also returns the seed times and signal-to-noise ratios used.
:type nsta: int
:param nsta: Number of stations to generate data for < 15.
:type ntemplates: int
:param ntemplates: Number of templates to generate, will be generated \
with random arrival times.
:type nseeds: int
:param nseeds: Number of copies of the template to seed within the \
day of noisy data for each template.
:type samp_rate: float
:param samp_rate: Sampling rate to use in Hz
:type t_length: float
:param t_length: Length of templates in seconds.
:type max_amp: float
:param max_amp: Maximum signal-to-noise ratio of seeds.
:param max_lag: Maximum lag time in seconds (randomised).
:type max_lag: float
:type debug: int
:param debug: Debug level, bigger the number, the more plotting/output.
:returns: Templates: List of :class:`obspy.core.stream.Stream`
:rtype: list
:returns: Data: :class:`obspy.core.stream.Stream` of seeded noisy data
:rtype: :class:`obspy.core.stream.Stream`
:returns: Seeds: dictionary of seed SNR and time with time in samples.
:rtype: dict
"""
# Generate random arrival times
t_times = np.abs(np.random.random([nsta, ntemplates])) * max_lag
# Generate random node locations - these do not matter as they are only
# used for naming
lats = np.random.random(ntemplates) * 90.0
lons = np.random.random(ntemplates) * 90.0
depths = np.abs(np.random.random(ntemplates) * 40.0)
nodes = zip(lats, lons, depths)
# Generating a 5x3 array to make 3 templates
stations = ['ALPH', 'BETA', 'GAMM', 'KAPP', 'ZETA', 'BOB', 'MAGG',
'ALF', 'WALR', 'ALBA', 'PENG', 'BANA', 'WIGG', 'SAUS',
'MALC']
if debug > 1:
print(nodes)
print(t_times)
print(stations[0:nsta])
templates = template_grid(stations=stations[0:nsta], nodes=nodes,
travel_times=t_times, phase='S',
samp_rate=samp_rate,
flength=int(t_length * samp_rate))
if debug > 2:
for template in templates:
print(template)
# Now we want to create a day of synthetic data
seeds = []
data = templates[0].copy() # Copy a template to get the correct length
# and stats for data, we will overwrite the data on this copy
for tr in data:
tr.data = np.zeros(86400 * int(samp_rate))
# Set all the traces to have a day of zeros
tr.stats.starttime = UTCDateTime(0)
for i, template in enumerate(templates):
impulses = np.zeros(86400 * int(samp_rate))
# Generate a series of impulses for seeding
# Need three seperate impulse traces for each of the three templates,
# all will be convolved within the data though.
impulse_times = np.random.randint(86400 * int(samp_rate),
size=nseeds)
impulse_amplitudes = np.random.randn(nseeds) * max_amp
# Generate amplitudes up to maximum amplitude in a normal distribution
seeds.append({'SNR': impulse_amplitudes,
'time': impulse_times})
for j in range(nseeds):
impulses[impulse_times[j]] = impulse_amplitudes[j]
# We now have one vector of impulses, we need nsta numbers of them,
# shifted with the appropriate lags
mintime = min([template_tr.stats.starttime
for template_tr in template])
for j, template_tr in enumerate(template):
offset = int((template_tr.stats.starttime - mintime) * samp_rate)
pad = np.zeros(offset)
tr_impulses = np.append(pad, impulses)[0:len(impulses)]
# Convolve this with the template trace to give the daylong seeds
data[j].data += np.convolve(tr_impulses,
template_tr.data)[0:len(impulses)]
# Add the noise
for tr in data:
noise = np.random.randn(86400 * int(samp_rate))
tr.data += noise / max(noise)
return templates, data, seeds | python | def generate_synth_data(nsta, ntemplates, nseeds, samp_rate, t_length,
max_amp, max_lag, debug=0):
"""
Generate a synthetic dataset to be used for testing.
This will generate both templates and data to scan through.
Templates will be generated using the utils.synth_seis functions.
The day of data will be random noise, with random signal-to-noise
ratio copies of the templates randomly seeded throughout the day.
It also returns the seed times and signal-to-noise ratios used.
:type nsta: int
:param nsta: Number of stations to generate data for < 15.
:type ntemplates: int
:param ntemplates: Number of templates to generate, will be generated \
with random arrival times.
:type nseeds: int
:param nseeds: Number of copies of the template to seed within the \
day of noisy data for each template.
:type samp_rate: float
:param samp_rate: Sampling rate to use in Hz
:type t_length: float
:param t_length: Length of templates in seconds.
:type max_amp: float
:param max_amp: Maximum signal-to-noise ratio of seeds.
:param max_lag: Maximum lag time in seconds (randomised).
:type max_lag: float
:type debug: int
:param debug: Debug level, bigger the number, the more plotting/output.
:returns: Templates: List of :class:`obspy.core.stream.Stream`
:rtype: list
:returns: Data: :class:`obspy.core.stream.Stream` of seeded noisy data
:rtype: :class:`obspy.core.stream.Stream`
:returns: Seeds: dictionary of seed SNR and time with time in samples.
:rtype: dict
"""
# Generate random arrival times
t_times = np.abs(np.random.random([nsta, ntemplates])) * max_lag
# Generate random node locations - these do not matter as they are only
# used for naming
lats = np.random.random(ntemplates) * 90.0
lons = np.random.random(ntemplates) * 90.0
depths = np.abs(np.random.random(ntemplates) * 40.0)
nodes = zip(lats, lons, depths)
# Generating a 5x3 array to make 3 templates
stations = ['ALPH', 'BETA', 'GAMM', 'KAPP', 'ZETA', 'BOB', 'MAGG',
'ALF', 'WALR', 'ALBA', 'PENG', 'BANA', 'WIGG', 'SAUS',
'MALC']
if debug > 1:
print(nodes)
print(t_times)
print(stations[0:nsta])
templates = template_grid(stations=stations[0:nsta], nodes=nodes,
travel_times=t_times, phase='S',
samp_rate=samp_rate,
flength=int(t_length * samp_rate))
if debug > 2:
for template in templates:
print(template)
# Now we want to create a day of synthetic data
seeds = []
data = templates[0].copy() # Copy a template to get the correct length
# and stats for data, we will overwrite the data on this copy
for tr in data:
tr.data = np.zeros(86400 * int(samp_rate))
# Set all the traces to have a day of zeros
tr.stats.starttime = UTCDateTime(0)
for i, template in enumerate(templates):
impulses = np.zeros(86400 * int(samp_rate))
# Generate a series of impulses for seeding
# Need three seperate impulse traces for each of the three templates,
# all will be convolved within the data though.
impulse_times = np.random.randint(86400 * int(samp_rate),
size=nseeds)
impulse_amplitudes = np.random.randn(nseeds) * max_amp
# Generate amplitudes up to maximum amplitude in a normal distribution
seeds.append({'SNR': impulse_amplitudes,
'time': impulse_times})
for j in range(nseeds):
impulses[impulse_times[j]] = impulse_amplitudes[j]
# We now have one vector of impulses, we need nsta numbers of them,
# shifted with the appropriate lags
mintime = min([template_tr.stats.starttime
for template_tr in template])
for j, template_tr in enumerate(template):
offset = int((template_tr.stats.starttime - mintime) * samp_rate)
pad = np.zeros(offset)
tr_impulses = np.append(pad, impulses)[0:len(impulses)]
# Convolve this with the template trace to give the daylong seeds
data[j].data += np.convolve(tr_impulses,
template_tr.data)[0:len(impulses)]
# Add the noise
for tr in data:
noise = np.random.randn(86400 * int(samp_rate))
tr.data += noise / max(noise)
return templates, data, seeds | [
"def",
"generate_synth_data",
"(",
"nsta",
",",
"ntemplates",
",",
"nseeds",
",",
"samp_rate",
",",
"t_length",
",",
"max_amp",
",",
"max_lag",
",",
"debug",
"=",
"0",
")",
":",
"# Generate random arrival times",
"t_times",
"=",
"np",
".",
"abs",
"(",
"np",
... | Generate a synthetic dataset to be used for testing.
This will generate both templates and data to scan through.
Templates will be generated using the utils.synth_seis functions.
The day of data will be random noise, with random signal-to-noise
ratio copies of the templates randomly seeded throughout the day.
It also returns the seed times and signal-to-noise ratios used.
:type nsta: int
:param nsta: Number of stations to generate data for < 15.
:type ntemplates: int
:param ntemplates: Number of templates to generate, will be generated \
with random arrival times.
:type nseeds: int
:param nseeds: Number of copies of the template to seed within the \
day of noisy data for each template.
:type samp_rate: float
:param samp_rate: Sampling rate to use in Hz
:type t_length: float
:param t_length: Length of templates in seconds.
:type max_amp: float
:param max_amp: Maximum signal-to-noise ratio of seeds.
:param max_lag: Maximum lag time in seconds (randomised).
:type max_lag: float
:type debug: int
:param debug: Debug level, bigger the number, the more plotting/output.
:returns: Templates: List of :class:`obspy.core.stream.Stream`
:rtype: list
:returns: Data: :class:`obspy.core.stream.Stream` of seeded noisy data
:rtype: :class:`obspy.core.stream.Stream`
:returns: Seeds: dictionary of seed SNR and time with time in samples.
:rtype: dict | [
"Generate",
"a",
"synthetic",
"dataset",
"to",
"be",
"used",
"for",
"testing",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/synth_seis.py#L242-L338 | train | 203,292 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/stacking.py | linstack | def linstack(streams, normalize=True):
"""
Compute the linear stack of a series of seismic streams of \
multiplexed data.
:type streams: list
:param streams: List of streams to stack
:type normalize: bool
:param normalize: Normalize traces before stacking, normalizes by the RMS \
amplitude.
:returns: stacked data
:rtype: :class:`obspy.core.stream.Stream`
"""
stack = streams[np.argmax([len(stream) for stream in streams])].copy()
if normalize:
for tr in stack:
tr.data = tr.data / np.sqrt(np.mean(np.square(tr.data)))
tr.data = np.nan_to_num(tr.data)
for i in range(1, len(streams)):
for tr in stack:
matchtr = streams[i].select(station=tr.stats.station,
channel=tr.stats.channel)
if matchtr:
# Normalize the data before stacking
if normalize:
norm = matchtr[0].data /\
np.sqrt(np.mean(np.square(matchtr[0].data)))
norm = np.nan_to_num(norm)
else:
norm = matchtr[0].data
tr.data = np.sum((norm, tr.data), axis=0)
return stack | python | def linstack(streams, normalize=True):
"""
Compute the linear stack of a series of seismic streams of \
multiplexed data.
:type streams: list
:param streams: List of streams to stack
:type normalize: bool
:param normalize: Normalize traces before stacking, normalizes by the RMS \
amplitude.
:returns: stacked data
:rtype: :class:`obspy.core.stream.Stream`
"""
stack = streams[np.argmax([len(stream) for stream in streams])].copy()
if normalize:
for tr in stack:
tr.data = tr.data / np.sqrt(np.mean(np.square(tr.data)))
tr.data = np.nan_to_num(tr.data)
for i in range(1, len(streams)):
for tr in stack:
matchtr = streams[i].select(station=tr.stats.station,
channel=tr.stats.channel)
if matchtr:
# Normalize the data before stacking
if normalize:
norm = matchtr[0].data /\
np.sqrt(np.mean(np.square(matchtr[0].data)))
norm = np.nan_to_num(norm)
else:
norm = matchtr[0].data
tr.data = np.sum((norm, tr.data), axis=0)
return stack | [
"def",
"linstack",
"(",
"streams",
",",
"normalize",
"=",
"True",
")",
":",
"stack",
"=",
"streams",
"[",
"np",
".",
"argmax",
"(",
"[",
"len",
"(",
"stream",
")",
"for",
"stream",
"in",
"streams",
"]",
")",
"]",
".",
"copy",
"(",
")",
"if",
"nor... | Compute the linear stack of a series of seismic streams of \
multiplexed data.
:type streams: list
:param streams: List of streams to stack
:type normalize: bool
:param normalize: Normalize traces before stacking, normalizes by the RMS \
amplitude.
:returns: stacked data
:rtype: :class:`obspy.core.stream.Stream` | [
"Compute",
"the",
"linear",
"stack",
"of",
"a",
"series",
"of",
"seismic",
"streams",
"of",
"\\",
"multiplexed",
"data",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/stacking.py#L23-L55 | train | 203,293 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/stacking.py | PWS_stack | def PWS_stack(streams, weight=2, normalize=True):
"""
Compute the phase weighted stack of a series of streams.
.. note:: It is recommended to align the traces before stacking.
:type streams: list
:param streams: List of :class:`obspy.core.stream.Stream` to stack.
:type weight: float
:param weight: Exponent to the phase stack used for weighting.
:type normalize: bool
:param normalize: Normalize traces before stacking.
:return: Stacked stream.
:rtype: :class:`obspy.core.stream.Stream`
"""
# First get the linear stack which we will weight by the phase stack
Linstack = linstack(streams)
# Compute the instantaneous phase
instaphases = []
print("Computing instantaneous phase")
for stream in streams:
instaphase = stream.copy()
for tr in instaphase:
analytic = hilbert(tr.data)
envelope = np.sqrt(np.sum((np.square(analytic),
np.square(tr.data)), axis=0))
tr.data = analytic / envelope
instaphases.append(instaphase)
# Compute the phase stack
print("Computing the phase stack")
Phasestack = linstack(instaphases, normalize=normalize)
# Compute the phase-weighted stack
for tr in Phasestack:
tr.data = Linstack.select(station=tr.stats.station)[0].data *\
np.abs(tr.data ** weight)
return Phasestack | python | def PWS_stack(streams, weight=2, normalize=True):
"""
Compute the phase weighted stack of a series of streams.
.. note:: It is recommended to align the traces before stacking.
:type streams: list
:param streams: List of :class:`obspy.core.stream.Stream` to stack.
:type weight: float
:param weight: Exponent to the phase stack used for weighting.
:type normalize: bool
:param normalize: Normalize traces before stacking.
:return: Stacked stream.
:rtype: :class:`obspy.core.stream.Stream`
"""
# First get the linear stack which we will weight by the phase stack
Linstack = linstack(streams)
# Compute the instantaneous phase
instaphases = []
print("Computing instantaneous phase")
for stream in streams:
instaphase = stream.copy()
for tr in instaphase:
analytic = hilbert(tr.data)
envelope = np.sqrt(np.sum((np.square(analytic),
np.square(tr.data)), axis=0))
tr.data = analytic / envelope
instaphases.append(instaphase)
# Compute the phase stack
print("Computing the phase stack")
Phasestack = linstack(instaphases, normalize=normalize)
# Compute the phase-weighted stack
for tr in Phasestack:
tr.data = Linstack.select(station=tr.stats.station)[0].data *\
np.abs(tr.data ** weight)
return Phasestack | [
"def",
"PWS_stack",
"(",
"streams",
",",
"weight",
"=",
"2",
",",
"normalize",
"=",
"True",
")",
":",
"# First get the linear stack which we will weight by the phase stack",
"Linstack",
"=",
"linstack",
"(",
"streams",
")",
"# Compute the instantaneous phase",
"instaphase... | Compute the phase weighted stack of a series of streams.
.. note:: It is recommended to align the traces before stacking.
:type streams: list
:param streams: List of :class:`obspy.core.stream.Stream` to stack.
:type weight: float
:param weight: Exponent to the phase stack used for weighting.
:type normalize: bool
:param normalize: Normalize traces before stacking.
:return: Stacked stream.
:rtype: :class:`obspy.core.stream.Stream` | [
"Compute",
"the",
"phase",
"weighted",
"stack",
"of",
"a",
"series",
"of",
"streams",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/stacking.py#L58-L94 | train | 203,294 |
eqcorrscan/EQcorrscan | eqcorrscan/utils/stacking.py | align_traces | def align_traces(trace_list, shift_len, master=False, positive=False,
plot=False):
"""
Align traces relative to each other based on their cross-correlation value.
Uses the :func:`eqcorrscan.core.match_filter.normxcorr2` function to find
the optimum shift to align traces relative to a master event. Either uses
a given master to align traces, or uses the trace with the highest MAD
amplitude.
:type trace_list: list
:param trace_list: List of traces to align
:type shift_len: int
:param shift_len: Length to allow shifting within in samples
:type master: obspy.core.trace.Trace
:param master: Master trace to align to, if set to False will align to \
the largest amplitude trace (default)
:type positive: bool
:param positive: Return the maximum positive cross-correlation, or the \
absolute maximum, defaults to False (absolute maximum).
:type plot: bool
:param plot: If true, will plot each trace aligned with the master.
:returns: list of shifts and correlations for best alignment in seconds.
:rtype: list
"""
from eqcorrscan.core.match_filter import normxcorr2
from eqcorrscan.utils.plotting import xcorr_plot
traces = deepcopy(trace_list)
if not master:
# Use trace with largest MAD amplitude as master
master = traces[0]
MAD_master = np.median(np.abs(master.data))
for i in range(1, len(traces)):
if np.median(np.abs(traces[i].data)) > MAD_master:
master = traces[i]
MAD_master = np.median(np.abs(master.data))
else:
print('Using master given by user')
shifts = []
ccs = []
for i in range(len(traces)):
if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
raise ValueError('Sampling rates not the same')
cc_vec = normxcorr2(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32))
cc_vec = cc_vec[0]
shift = np.abs(cc_vec).argmax()
cc = cc_vec[shift]
if plot:
xcorr_plot(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32), shift=shift,
cc=cc)
shift -= shift_len
if cc < 0 and positive:
cc = cc_vec.max()
shift = cc_vec.argmax() - shift_len
shifts.append(shift / master.stats.sampling_rate)
ccs.append(cc)
return shifts, ccs | python | def align_traces(trace_list, shift_len, master=False, positive=False,
plot=False):
"""
Align traces relative to each other based on their cross-correlation value.
Uses the :func:`eqcorrscan.core.match_filter.normxcorr2` function to find
the optimum shift to align traces relative to a master event. Either uses
a given master to align traces, or uses the trace with the highest MAD
amplitude.
:type trace_list: list
:param trace_list: List of traces to align
:type shift_len: int
:param shift_len: Length to allow shifting within in samples
:type master: obspy.core.trace.Trace
:param master: Master trace to align to, if set to False will align to \
the largest amplitude trace (default)
:type positive: bool
:param positive: Return the maximum positive cross-correlation, or the \
absolute maximum, defaults to False (absolute maximum).
:type plot: bool
:param plot: If true, will plot each trace aligned with the master.
:returns: list of shifts and correlations for best alignment in seconds.
:rtype: list
"""
from eqcorrscan.core.match_filter import normxcorr2
from eqcorrscan.utils.plotting import xcorr_plot
traces = deepcopy(trace_list)
if not master:
# Use trace with largest MAD amplitude as master
master = traces[0]
MAD_master = np.median(np.abs(master.data))
for i in range(1, len(traces)):
if np.median(np.abs(traces[i].data)) > MAD_master:
master = traces[i]
MAD_master = np.median(np.abs(master.data))
else:
print('Using master given by user')
shifts = []
ccs = []
for i in range(len(traces)):
if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
raise ValueError('Sampling rates not the same')
cc_vec = normxcorr2(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32))
cc_vec = cc_vec[0]
shift = np.abs(cc_vec).argmax()
cc = cc_vec[shift]
if plot:
xcorr_plot(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32), shift=shift,
cc=cc)
shift -= shift_len
if cc < 0 and positive:
cc = cc_vec.max()
shift = cc_vec.argmax() - shift_len
shifts.append(shift / master.stats.sampling_rate)
ccs.append(cc)
return shifts, ccs | [
"def",
"align_traces",
"(",
"trace_list",
",",
"shift_len",
",",
"master",
"=",
"False",
",",
"positive",
"=",
"False",
",",
"plot",
"=",
"False",
")",
":",
"from",
"eqcorrscan",
".",
"core",
".",
"match_filter",
"import",
"normxcorr2",
"from",
"eqcorrscan",... | Align traces relative to each other based on their cross-correlation value.
Uses the :func:`eqcorrscan.core.match_filter.normxcorr2` function to find
the optimum shift to align traces relative to a master event. Either uses
a given master to align traces, or uses the trace with the highest MAD
amplitude.
:type trace_list: list
:param trace_list: List of traces to align
:type shift_len: int
:param shift_len: Length to allow shifting within in samples
:type master: obspy.core.trace.Trace
:param master: Master trace to align to, if set to False will align to \
the largest amplitude trace (default)
:type positive: bool
:param positive: Return the maximum positive cross-correlation, or the \
absolute maximum, defaults to False (absolute maximum).
:type plot: bool
:param plot: If true, will plot each trace aligned with the master.
:returns: list of shifts and correlations for best alignment in seconds.
:rtype: list | [
"Align",
"traces",
"relative",
"to",
"each",
"other",
"based",
"on",
"their",
"cross",
"-",
"correlation",
"value",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/stacking.py#L97-L158 | train | 203,295 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | temporary_directory | def temporary_directory():
""" make a temporary directory, yeild its name, cleanup on exit """
dir_name = tempfile.mkdtemp()
yield dir_name
if os.path.exists(dir_name):
shutil.rmtree(dir_name) | python | def temporary_directory():
""" make a temporary directory, yeild its name, cleanup on exit """
dir_name = tempfile.mkdtemp()
yield dir_name
if os.path.exists(dir_name):
shutil.rmtree(dir_name) | [
"def",
"temporary_directory",
"(",
")",
":",
"dir_name",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"yield",
"dir_name",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
":",
"shutil",
".",
"rmtree",
"(",
"dir_name",
")"
] | make a temporary directory, yeild its name, cleanup on exit | [
"make",
"a",
"temporary",
"directory",
"yeild",
"its",
"name",
"cleanup",
"on",
"exit"
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L55-L60 | train | 203,296 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _total_microsec | def _total_microsec(t1, t2):
"""
Calculate difference between two datetime stamps in microseconds.
:type t1: :class: `datetime.datetime`
:type t2: :class: `datetime.datetime`
:return: int
.. rubric:: Example
>>> print(_total_microsec(UTCDateTime(2013, 1, 1).datetime,
... UTCDateTime(2014, 1, 1).datetime))
-31536000000000
"""
td = t1 - t2
return (td.seconds + td.days * 24 * 3600) * 10 ** 6 + td.microseconds | python | def _total_microsec(t1, t2):
"""
Calculate difference between two datetime stamps in microseconds.
:type t1: :class: `datetime.datetime`
:type t2: :class: `datetime.datetime`
:return: int
.. rubric:: Example
>>> print(_total_microsec(UTCDateTime(2013, 1, 1).datetime,
... UTCDateTime(2014, 1, 1).datetime))
-31536000000000
"""
td = t1 - t2
return (td.seconds + td.days * 24 * 3600) * 10 ** 6 + td.microseconds | [
"def",
"_total_microsec",
"(",
"t1",
",",
"t2",
")",
":",
"td",
"=",
"t1",
"-",
"t2",
"return",
"(",
"td",
".",
"seconds",
"+",
"td",
".",
"days",
"*",
"24",
"*",
"3600",
")",
"*",
"10",
"**",
"6",
"+",
"td",
".",
"microseconds"
] | Calculate difference between two datetime stamps in microseconds.
:type t1: :class: `datetime.datetime`
:type t2: :class: `datetime.datetime`
:return: int
.. rubric:: Example
>>> print(_total_microsec(UTCDateTime(2013, 1, 1).datetime,
... UTCDateTime(2014, 1, 1).datetime))
-31536000000000 | [
"Calculate",
"difference",
"between",
"two",
"datetime",
"stamps",
"in",
"microseconds",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3227-L3242 | train | 203,297 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _templates_match | def _templates_match(t, family_file):
"""
Return True if a tribe matches a family file path.
:type t: Tribe
:type family_file: str
:return: bool
"""
return t.name == family_file.split(os.sep)[-1].split('_detections.csv')[0] | python | def _templates_match(t, family_file):
"""
Return True if a tribe matches a family file path.
:type t: Tribe
:type family_file: str
:return: bool
"""
return t.name == family_file.split(os.sep)[-1].split('_detections.csv')[0] | [
"def",
"_templates_match",
"(",
"t",
",",
"family_file",
")",
":",
"return",
"t",
".",
"name",
"==",
"family_file",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'_detections.csv'",
")",
"[",
"0",
"]"
] | Return True if a tribe matches a family file path.
:type t: Tribe
:type family_file: str
:return: bool | [
"Return",
"True",
"if",
"a",
"tribe",
"matches",
"a",
"family",
"file",
"path",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3245-L3253 | train | 203,298 |
eqcorrscan/EQcorrscan | eqcorrscan/core/match_filter.py | _group_process | def _group_process(template_group, parallel, debug, cores, stream, daylong,
ignore_length, overlap):
"""
Process data into chunks based on template processing length.
Templates in template_group must all have the same processing parameters.
:type template_group: list
:param template_group: List of Templates.
:type parallel: bool
:param parallel: Whether to use parallel processing or not
:type debug: int
:param debug: Debug level from 0-5
:type cores: int
:param cores: Number of cores to use, can be False to use all available.
:type stream: :class:`obspy.core.stream.Stream`
:param stream: Stream to process, will be left intact.
:type daylong: bool
:param daylong: Whether to enforce day-length files or not.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type overlap: float
:param overlap: Number of seconds to overlap chunks by.
:return: list of processed streams.
"""
master = template_group[0]
processed_streams = []
kwargs = {
'filt_order': master.filt_order,
'highcut': master.highcut, 'lowcut': master.lowcut,
'samp_rate': master.samp_rate, 'debug': debug,
'parallel': parallel, 'num_cores': cores}
# Processing always needs to be run to account for gaps - pre-process will
# check whether filtering and resampling needs to be done.
if daylong:
if not master.process_length == 86400:
warnings.warn(
'Processing day-long data, but template was cut from %i s long'
' data, will reduce correlations' % master.process_length)
func = dayproc
kwargs.update({'ignore_length': ignore_length})
# Check that data all start on the same day, otherwise strange
# things will happen...
starttimes = [tr.stats.starttime.date for tr in stream]
if not len(list(set(starttimes))) == 1:
warnings.warn('Data start on different days, setting to last day')
starttime = UTCDateTime(
stream.sort(['starttime'])[-1].stats.starttime.date)
else:
starttime = stream.sort(['starttime'])[0].stats.starttime
else:
# We want to use shortproc to allow overlaps
func = shortproc
starttime = stream.sort(['starttime'])[0].stats.starttime
endtime = stream.sort(['endtime'])[-1].stats.endtime
data_len_samps = round((endtime - starttime) * master.samp_rate) + 1
chunk_len_samps = (master.process_length - overlap) * master.samp_rate
n_chunks = int(data_len_samps / chunk_len_samps)
if n_chunks == 0:
print('Data must be process_length or longer, not computing')
for i in range(n_chunks):
kwargs.update(
{'starttime': starttime + (i * (master.process_length - overlap))})
if not daylong:
kwargs.update(
{'endtime': kwargs['starttime'] + master.process_length})
chunk_stream = stream.slice(starttime=kwargs['starttime'],
endtime=kwargs['endtime']).copy()
else:
chunk_stream = stream.copy()
for tr in chunk_stream:
tr.data = tr.data[0:int(
master.process_length * tr.stats.sampling_rate)]
processed_streams.append(func(st=chunk_stream, **kwargs))
return processed_streams | python | def _group_process(template_group, parallel, debug, cores, stream, daylong,
ignore_length, overlap):
"""
Process data into chunks based on template processing length.
Templates in template_group must all have the same processing parameters.
:type template_group: list
:param template_group: List of Templates.
:type parallel: bool
:param parallel: Whether to use parallel processing or not
:type debug: int
:param debug: Debug level from 0-5
:type cores: int
:param cores: Number of cores to use, can be False to use all available.
:type stream: :class:`obspy.core.stream.Stream`
:param stream: Stream to process, will be left intact.
:type daylong: bool
:param daylong: Whether to enforce day-length files or not.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type overlap: float
:param overlap: Number of seconds to overlap chunks by.
:return: list of processed streams.
"""
master = template_group[0]
processed_streams = []
kwargs = {
'filt_order': master.filt_order,
'highcut': master.highcut, 'lowcut': master.lowcut,
'samp_rate': master.samp_rate, 'debug': debug,
'parallel': parallel, 'num_cores': cores}
# Processing always needs to be run to account for gaps - pre-process will
# check whether filtering and resampling needs to be done.
if daylong:
if not master.process_length == 86400:
warnings.warn(
'Processing day-long data, but template was cut from %i s long'
' data, will reduce correlations' % master.process_length)
func = dayproc
kwargs.update({'ignore_length': ignore_length})
# Check that data all start on the same day, otherwise strange
# things will happen...
starttimes = [tr.stats.starttime.date for tr in stream]
if not len(list(set(starttimes))) == 1:
warnings.warn('Data start on different days, setting to last day')
starttime = UTCDateTime(
stream.sort(['starttime'])[-1].stats.starttime.date)
else:
starttime = stream.sort(['starttime'])[0].stats.starttime
else:
# We want to use shortproc to allow overlaps
func = shortproc
starttime = stream.sort(['starttime'])[0].stats.starttime
endtime = stream.sort(['endtime'])[-1].stats.endtime
data_len_samps = round((endtime - starttime) * master.samp_rate) + 1
chunk_len_samps = (master.process_length - overlap) * master.samp_rate
n_chunks = int(data_len_samps / chunk_len_samps)
if n_chunks == 0:
print('Data must be process_length or longer, not computing')
for i in range(n_chunks):
kwargs.update(
{'starttime': starttime + (i * (master.process_length - overlap))})
if not daylong:
kwargs.update(
{'endtime': kwargs['starttime'] + master.process_length})
chunk_stream = stream.slice(starttime=kwargs['starttime'],
endtime=kwargs['endtime']).copy()
else:
chunk_stream = stream.copy()
for tr in chunk_stream:
tr.data = tr.data[0:int(
master.process_length * tr.stats.sampling_rate)]
processed_streams.append(func(st=chunk_stream, **kwargs))
return processed_streams | [
"def",
"_group_process",
"(",
"template_group",
",",
"parallel",
",",
"debug",
",",
"cores",
",",
"stream",
",",
"daylong",
",",
"ignore_length",
",",
"overlap",
")",
":",
"master",
"=",
"template_group",
"[",
"0",
"]",
"processed_streams",
"=",
"[",
"]",
... | Process data into chunks based on template processing length.
Templates in template_group must all have the same processing parameters.
:type template_group: list
:param template_group: List of Templates.
:type parallel: bool
:param parallel: Whether to use parallel processing or not
:type debug: int
:param debug: Debug level from 0-5
:type cores: int
:param cores: Number of cores to use, can be False to use all available.
:type stream: :class:`obspy.core.stream.Stream`
:param stream: Stream to process, will be left intact.
:type daylong: bool
:param daylong: Whether to enforce day-length files or not.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type overlap: float
:param overlap: Number of seconds to overlap chunks by.
:return: list of processed streams. | [
"Process",
"data",
"into",
"chunks",
"based",
"on",
"template",
"processing",
"length",
"."
] | 3121b4aca801ee5d38f56ca297ce1c0f9515d9ff | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3557-L3636 | train | 203,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.