docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Creates wf instances.
Args:
roles (list): role list
Returns:
(list): wf instances | def create_wf_instances(self, roles=None):
# if roles specified then create an instance for each role
# else create only one instance
if roles:
wf_instances = [
WFInstance(
wf=self.wf,
current_actor=role,
... | 803,395 |
write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache
Args:
wf_state dict: wf state | def save(self, wf_state):
self.wf_state = wf_state
self.wf_state['role_id'] = self.current.role_id
self.set(self.wf_state)
if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS:
self.publish(job='_zops_sync_wf_cache',
token=self.db_key... | 803,413 |
Send messages through RabbitMQ's default exchange,
which will be delivered through routing_key (sess_id).
This method only used for un-authenticated users, i.e. login process.
Args:
sess_id string: Session id
message dict: Message object. | def send_to_default_exchange(self, sess_id, message=None):
msg = json.dumps(message, cls=ZEngineJSONEncoder)
log.debug("Sending following message to %s queue through default exchange:\n%s" % (
sess_id, msg))
self.get_channel().publish(exchange='', routing_key=sess_id, body=m... | 803,417 |
Send messages through logged in users private exchange.
Args:
user_id string: User key
message dict: Message object | def send_to_prv_exchange(self, user_id, message=None):
exchange = 'prv_%s' % user_id.lower()
msg = json.dumps(message, cls=ZEngineJSONEncoder)
log.debug("Sending following users \"%s\" exchange:\n%s " % (exchange, msg))
self.get_channel().publish(exchange=exchange, routing_key='... | 803,418 |
Initialize Scene object.
Parameters:
* pyvlx: PyVLX object
* scene_id: internal id for addressing scenes.
Provided by KLF 200 device
* name: scene name | def __init__(self, pyvlx, scene_id, name):
self.pyvlx = pyvlx
self.scene_id = scene_id
self.name = name | 803,806 |
Run scene.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position. | async def run(self, wait_for_completion=True):
activate_scene = ActivateScene(
pyvlx=self.pyvlx,
wait_for_completion=wait_for_completion,
scene_id=self.scene_id)
await activate_scene.do_api_call()
if not activate_scene.success:
raise PyVLX... | 803,807 |
Initialize opening device.
Parameters:
* pyvlx: PyVLX object
* node_id: internal id for addressing nodes.
Provided by KLF 200 device
* name: node name | def __init__(self, pyvlx, node_id, name):
super().__init__(pyvlx=pyvlx, node_id=node_id, name=name)
self.position = Position() | 803,921 |
Set window to desired position.
Parameters:
* position: Position object containing the target position.
* wait_for_completion: If set, function will return
after device has reached target position. | async def set_position(self, position, wait_for_completion=True):
command_send = CommandSend(
pyvlx=self.pyvlx,
wait_for_completion=wait_for_completion,
node_id=self.node_id,
parameter=position)
await command_send.do_api_call()
if not comm... | 803,922 |
Open window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position. | async def open(self, wait_for_completion=True):
await self.set_position(
position=Position(position_percent=0),
wait_for_completion=wait_for_completion) | 803,923 |
Close window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position. | async def close(self, wait_for_completion=True):
await self.set_position(
position=Position(position_percent=100),
wait_for_completion=wait_for_completion) | 803,924 |
Stop window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position. | async def stop(self, wait_for_completion=True):
await self.set_position(
position=CurrentPosition(),
wait_for_completion=wait_for_completion) | 803,925 |
Initialize Window class.
Parameters:
* pyvlx: PyVLX object
* node_id: internal id for addressing nodes.
Provided by KLF 200 device
* name: node name
* rain_sensor: set if device is equipped with a
rain sensor. | def __init__(self, pyvlx, node_id, name, rain_sensor=False):
super().__init__(pyvlx=pyvlx, node_id=node_id, name=name)
self.rain_sensor = rain_sensor | 803,926 |
Create DynamoDB table for run manifests
Arguments:
dynamodb_client - boto3 DynamoDB client (not service)
table_name - string representing existing table name | def create_manifest_table(dynamodb_client, table_name):
try:
dynamodb_client.create_table(
AttributeDefinitions=[
{
'AttributeName': DYNAMODB_RUNID_ATTRIBUTE,
'AttributeType': 'S'
},
],
TableName... | 804,033 |
Return list of all run ids inside S3 folder. It does not respect
S3 pagination (`MaxKeys`) and returns **all** keys from bucket
and won't list any prefixes with object archived to AWS Glacier
Arguments:
s3_client - boto3 S3 client (not service)
full_path - full valid S3 path to events (such as enri... | def list_runids(s3_client, full_path):
listing_finished = False # last response was not truncated
run_ids_buffer = []
last_continuation_token = None
(bucket, prefix) = split_full_path(full_path)
while not listing_finished:
options = clean_dict({
'Bucket': b... | 804,034 |
Return pair of bucket without protocol and path
Arguments:
path - valid S3 path, such as s3://somebucket/events
>>> split_full_path('s3://mybucket/path-to-events')
('mybucket', 'path-to-events/')
>>> split_full_path('s3://mybucket')
('mybucket', None)
>>> split_full_path('s3n://snowplow-bu... | def split_full_path(path):
if path.startswith('s3://'):
path = path[5:]
elif path.startswith('s3n://'):
path = path[6:]
elif path.startswith('s3a://'):
path = path[6:]
else:
raise ValueError("S3 path should start with s3://, s3n:// or "
"s3a:... | 804,035 |
Check if prefix is archived in Glacier, by checking storage class of
first object inside that prefix
Arguments:
s3_client - boto3 S3 client (not service)
bucket - valid extracted bucket (without protocol and prefix)
example: sowplow-events-data
prefix - valid S3 prefix (usually, run_id... | def is_glacier(s3_client, bucket, prefix):
response = s3_client.list_objects_v2(Bucket=bucket,
Prefix=prefix,
MaxKeys=3) # 3 to not fetch _SUCCESS
for key in response['Contents']:
if key.get('StorageClass', 'STANDAR... | 804,036 |
Extract date part from run id
Arguments:
key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/
(trailing slash is required)
>>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/')
'shredded-archive/run=2012-12-11-01-11-33/'
>>> extract_run_id('shredded-archive/ru... | def extract_run_id(key):
filename = key.split('/')[-2] # -1 element is empty string
run_id = filename.lstrip('run=')
try:
datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S')
return key
except ValueError:
return None | 804,037 |
Add run_id into DynamoDB manifest table
Arguments:
dynamodb_client - boto3 DynamoDB client (not service)
table_name - string representing existing table name
run_id - string representing run_id to store | def add_to_manifest(dynamodb_client, table_name, run_id):
dynamodb_client.put_item(
TableName=table_name,
Item={
DYNAMODB_RUNID_ATTRIBUTE: {
'S': run_id
}
}
) | 804,039 |
Check if run_id is stored in DynamoDB table.
Return True if run_id is stored or False otherwise.
Arguments:
dynamodb_client - boto3 DynamoDB client (not service)
table_name - string representing existing table name
run_id - string representing run_id to store | def is_in_manifest(dynamodb_client, table_name, run_id):
response = dynamodb_client.get_item(
TableName=table_name,
Key={
DYNAMODB_RUNID_ATTRIBUTE: {
'S': run_id
}
}
)
return response.get('Item') is not None | 804,040 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.send = channel.stream_stream(
'/predix.eventhub.Publisher/send',
request_serializer=EventHub__pb2.PublishRequest.SerializeToString,
response_deserializer=EventHub__pb2.PublishResponse.FromString,
) | 804,228 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.receive = channel.unary_stream(
'/predix.eventhub.Subscriber/receive',
request_serializer=EventHub__pb2.SubscriptionRequest.SerializeToString,
response_deserializer=EventHub__pb2.Message.FromString,
)
self.receiveWithAcks = channel.stream_st... | 804,229 |
Parse ping command output.
Args:
ping_message (str or :py:class:`~pingparsing.PingResult`):
``ping`` command output.
Returns:
:py:class:`~pingparsing.PingStats`: Parsed result. | def parse(self, ping_message):
try:
# accept PingResult instance as an input
if typepy.is_not_null_string(ping_message.stdout):
ping_message = ping_message.stdout
except AttributeError:
pass
logger.debug("parsing ping result: {}".for... | 804,294 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.Check = channel.unary_unary(
'/grpc.health.v1.Health/Check',
request_serializer=Health__pb2.HealthCheckRequest.SerializeToString,
response_deserializer=Health__pb2.HealthCheckResponse.FromString,
) | 804,324 |
Label input grid with hysteresis method.
Args:
input_grid: 2D array of values.
Returns:
Labeled output grid. | def label(self, input_grid):
unset = 0
high_labels, num_labels = label(input_grid > self.high_thresh)
region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1]
output_grid = np.zeros(input_grid.shape, dtype=int)
stack = []
... | 804,327 |
Remove labeled objects that do not meet size threshold criteria.
Args:
labeled_grid: 2D output from label method.
min_size: minimum size of object in pixels.
Returns:
labeled grid with smaller objects removed. | def size_filter(labeled_grid, min_size):
out_grid = np.zeros(labeled_grid.shape, dtype=int)
slices = find_objects(labeled_grid)
j = 1
for i, s in enumerate(slices):
box = labeled_grid[s]
size = np.count_nonzero(box.ravel() == (i + 1))
if size ... | 804,328 |
Searches var list for variable name, checks other variable name format options.
Args:
variable (str): Variable being loaded
var_list (list): List of variables in file.
Returns:
Name of variable in file containing relevant data, and index of variable z-level if multi... | def format_var_name(variable, var_list):
z_index = None
if variable in var_list:
var_name = variable
elif variable.ljust(6, "_") in var_list:
var_name = variable.ljust(6, "_")
elif any([variable in v_sub.split("_") for v_sub in var_list]):
var... | 804,568 |
Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
... | def load_data(self, mode="train", format="csv"):
if mode in self.data.keys():
run_dates = pd.DatetimeIndex(start=self.start_dates[mode],
end=self.end_dates[mode],freq="1D")
run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]... | 804,571 |
Calculate a copula multivariate normal distribution from the training data for each group of ensemble members.
Distributions are written to a pickle file for later use.
Args:
output_file: Pickle file
model_names: Names of the tracking models
label_columns: Names of th... | def calc_copulas(self,
output_file,
model_names=("start-time", "translation-x", "translation-y"),
label_columns=("Start_Time_Error", "Translation_Error_X", "Translation_Error_Y")):
if len(self.data['train']) == 0:
self.load_data... | 804,572 |
Fits multitask machine learning models to predict the parameters of a size distribution
Args:
model_names: List of machine learning model names
model_objs: scikit-learn style machine learning model objects
input_columns: Training data columns used as input for ML model
... | def fit_size_distribution_models(self, model_names, model_objs, input_columns,
output_columns=None, calibrate=False):
if output_columns is None:
output_columns = ["Shape", "Location", "Scale"]
groups = np.unique(self.data["train"]["member"][self.... | 804,576 |
This calculates 2 principal components for the hail size distribution between the shape and scale parameters.
Separate machine learning models are fit to predict each component.
Args:
model_names: List of machine learning model names
model_objs: List of machine learning model ob... | def fit_size_distribution_component_models(self, model_names, model_objs, input_columns, output_columns):
groups = np.unique(self.data["train"]["member"][self.group_col])
weights=None
for group in groups:
print(group)
group_data = self.data["train"]["co... | 804,577 |
Fit size models to produce discrete pdfs of forecast hail sizes.
Args:
model_names: List of model names
model_objs: List of model objects
input_columns: List of input variables
output_column: Output variable name
output_start: Hail size bin start
... | def fit_size_models(self, model_names,
model_objs,
input_columns,
output_column="Hail_Size",
output_start=5,
output_step=5,
output_stop=100):
print("Fitting si... | 804,580 |
Apply size models to forecast data.
Args:
model_names:
input_columns:
metadata_cols:
data_mode: | def predict_size_models(self, model_names,
input_columns,
metadata_cols,
data_mode="forecast"):
groups = self.size_models.keys()
predictions = {}
for group in groups:
group_data = self.data[d... | 804,581 |
Output hail forecast values to csv files by run date and ensemble member.
Args:
forecasts:
mode:
csv_path:
Returns: | def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
merged_forecasts = pd.merge(forecasts["condition"],
forecasts["dist"],
on=["Step_ID","Track_ID","Ensemble_Member","Forecast_Hour"])
a... | 804,586 |
Calculates the cumulative ranked probability score (CRPS) on the forecast data.
Args:
model_type: model type being evaluated.
model_name: machine learning model being evaluated.
condition_model_name: Name of the hail/no-hail model being evaluated
condition_thresh... | def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None):
def gamma_cdf(x, a, loc, b):
if a == 0 or b == 0:
cdf = np.ones(x.shape)
else:
cdf = gamma.cdf(x, a, loc, b)
return cdf
crps_obj =... | 804,612 |
Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.
Args:
map_file: Filename for the file containing coordinate information.
Returns:
Latitude and longitude grids as numpy arrays. | def load_map_coordinates(map_file):
if map_file[-4:] == ".pkl":
map_data = pickle.load(open(map_file))
lon = map_data['lon']
lat = map_data['lat']
else:
map_data = Dataset(map_file)
if "lon" in map_data.variables.keys():
lon = map_data.variables['lon'][:]... | 804,658 |
Finds the largest value within a given radius of a point on the interpolated grid.
Args:
in_lon: 2D array of longitude values
in_lat: 2D array of latitude values
radius: radius of influence for largest neighbor search in degrees
Returns:
Array of interpo... | def max_neighbor(self, in_lon, in_lat, radius=0.05):
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)
out_indices = np.indices(out_data.shape[1:])
out_rows = out_indices[0].ravel()
... | 804,663 |
Calculate the neighborhood probability over the full period of the forecast
Args:
radius: circular radius from each point in km
smoothing: width of Gaussian smoother in km
threshold: intensity of exceedance
stride: number of grid points to skip for reduced neighb... | def period_neighborhood_probability(self, radius, smoothing, threshold, stride,start_time,end_time):
neighbor_x = self.x[::stride, ::stride]
neighbor_y = self.y[::stride, ::stride]
neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
neighbor_prob = ... | 804,865 |
Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection.
Args:
map_file: File specifying the projection information. | def load_map_info(self, map_file):
if self.ensemble_name.upper() == "SSEF":
proj_dict, grid_dict = read_arps_map_file(map_file)
self.dx = int(grid_dict["dx"])
mapping_data = make_proj_grids(proj_dict, grid_dict)
for m, v in mapping_data.items():
... | 804,884 |
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file.
Args:
filename: Name of the geojson file
Returns:
an STObject | def read_geojson(filename):
json_file = open(filename)
data = json.load(json_file)
json_file.close()
times = data["properties"]["times"]
main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])
attribute_data = dict()
for feature in data["features"]:
for main_name in mai... | 804,919 |
Calculate the center of mass at a given timestep.
Args:
time: Time at which the center of mass calculation is performed
Returns:
The x- and y-coordinates of the center of mass. | def center_of_mass(self, time):
if self.start_time <= time <= self.end_time:
diff = time - self.start_time
valid = np.flatnonzero(self.masks[diff] != 0)
if valid.size > 0:
com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps... | 804,922 |
Gets the corner array indices of the STObject at a given time that corresponds
to the upper left corner of the bounding box for the STObject.
Args:
time: time at which the corner is being extracted.
Returns:
corner index. | def get_corner(self, time):
if self.start_time <= time <= self.end_time:
diff = time - self.start_time
return self.i[diff][0, 0], self.j[diff][0, 0]
else:
return -1, -1 | 804,925 |
Gets the size of the object at a given time.
Args:
time: Time value being queried.
Returns:
size of the object in pixels | def size(self, time):
if self.start_time <= time <= self.end_time:
return self.masks[time - self.start_time].sum()
else:
return 0 | 804,926 |
Adds the data from another STObject to this object.
Args:
step: another STObject being added after the current one in time. | def extend(self, step):
self.timesteps.extend(step.timesteps)
self.masks.extend(step.masks)
self.x.extend(step.x)
self.y.extend(step.y)
self.i.extend(step.i)
self.j.extend(step.j)
self.end_time = step.end_time
self.times = np.arange(self.start_tim... | 804,929 |
Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.
Args:
time: time being evaluated.
intensity_grid: 2D array of intensities used in cross correlation.
max_u: Maximum x-component of motion. Used to limit search area.... | def estimate_motion(self, time, intensity_grid, max_u, max_v):
ti = np.where(time == self.times)[0][0]
mask_vals = np.where(self.masks[ti].ravel() == 1)
i_vals = self.i[ti].ravel()[mask_vals]
j_vals = self.j[ti].ravel()[mask_vals]
obj_vals = self.timesteps[ti].ravel()[ma... | 804,931 |
Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject.
Args:
model_grid: A ModelGrid or ModelOutput Object
potential: Extracts from the time before instead of the same time as the object | def extract_attribute_grid(self, model_grid, potential=False, future=False):
if potential:
var_name = model_grid.variable + "-potential"
timesteps = np.arange(self.start_time - 1, self.end_time)
elif future:
var_name = model_grid.variable + "-future"
... | 804,933 |
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object.
Args:
data_array: 2D numpy array | def extract_attribute_array(self, data_array, var_name):
if var_name not in self.attributes.keys():
self.attributes[var_name] = []
for t in range(self.times.size):
self.attributes[var_name].append(data_array[self.i[t], self.j[t]]) | 804,934 |
Extracts the difference in model outputs
Args:
model_grid: ModelOutput or ModelGrid object. | def extract_tendency_grid(self, model_grid):
var_name = model_grid.variable + "-tendency"
self.attributes[var_name] = []
timesteps = np.arange(self.start_time, self.end_time + 1)
for ti, t in enumerate(timesteps):
t_index = t - model_grid.start_hour
self.... | 804,935 |
Calculates summary statistics over the domains of each attribute.
Args:
statistic_name (string): numpy statistic, such as mean, std, max, min
Returns:
dict of statistics from each attribute grid. | def calc_attribute_statistics(self, statistic_name):
stats = {}
for var, grids in self.attributes.items():
if len(grids) > 1:
stats[var] = getattr(np.array([getattr(np.ma.array(x, mask=self.masks[t] == 0), statistic_name)()
... | 804,936 |
Calculate statistics based on the values of an attribute. The following statistics are supported:
mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value).
Args:
attribute: Attribute extracted from model grid
statistic: Name of statistic ... | def calc_attribute_statistic(self, attribute, statistic, time):
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)(... | 804,937 |
Calculate statistics from the primary attribute of the StObject.
Args:
statistic: statistic being calculated
time: Timestep being investigated
Returns:
Value of the statistic | def calc_timestep_statistic(self, statistic, time):
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
elif statistic... | 804,938 |
Calculate shape statistics using regionprops applied to the object mask.
Args:
stat_names: List of statistics to be extracted from those calculated by regionprops.
Returns:
Dictionary of shape statistics | def calc_shape_statistics(self, stat_names):
stats = {}
try:
all_props = [regionprops(m) for m in self.masks]
except TypeError:
print(self.masks)
exit()
for stat in stat_names:
stats[stat] = np.mean([p[0][stat] for p in all_props])... | 804,939 |
Calculate shape statistics for a single time step
Args:
stat_names: List of shape statistics calculated from region props
time: Time being investigated
Returns:
List of shape statistics | def calc_shape_step(self, stat_names, time):
ti = np.where(self.times == time)[0][0]
props = regionprops(self.masks[ti], self.timesteps[ti])[0]
shape_stats = []
for stat_name in stat_names:
if "moments_hu" in stat_name:
hu_index = int(stat_name.split(... | 804,940 |
Output the data in the STObject to a geoJSON file.
Args:
filename: Name of the file
proj: PyProj object for converting the x and y coordinates back to latitude and longitue values.
metadata: Metadata describing the object to be included in the top-level properties. | def to_geojson(self, filename, proj, metadata=None):
if metadata is None:
metadata = {}
json_obj = {"type": "FeatureCollection", "features": [], "properties": {}}
json_obj['properties']['times'] = self.times.tolist()
json_obj['properties']['dx'] = self.dx
jso... | 804,941 |
Rescale your input data so that is ranges over integer values, which will perform better in the watershed.
Args:
data: 2D or 3D ndarray being rescaled
data_min: minimum value of input data for scaling purposes
data_max: maximum value of input data for scaling purposes
out_min: minim... | def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):
return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min | 805,036 |
Labels input grid using enhanced watershed algorithm.
Args:
input_grid (numpy.ndarray): Grid to be labeled.
Returns:
Array of labeled pixels | def label(self, input_grid):
marked = self.find_local_maxima(input_grid)
marked = np.where(marked >= 0, 1, 0)
# splabel returns two things in a tuple: an array and an integer
# assign the first thing (array) to markers
markers = splabel(marked)[0]
return markers | 805,038 |
Finds the local maxima in the inputGrid and perform region growing to identify objects.
Args:
input_grid: Raw input data.
Returns:
array with labeled objects. | def find_local_maxima(self, input_grid):
pixels, q_data = self.quantize(input_grid)
centers = OrderedDict()
for p in pixels.keys():
centers[p] = []
marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED
MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_si... | 805,039 |
Quantize a grid into discrete steps based on input parameters.
Args:
input_grid: 2-d array of values
Returns:
Dictionary of value pointing to pixel locations, and quantized 2-d array of data | def quantize(self, input_grid):
pixels = {}
for i in range(self.max_bin+1):
pixels[i] = []
data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment
data[data < 0] = -1
data[data > self.max_bin] = self.max_bin
good_points = np.... | 805,043 |
Loads observations and masking grid (if needed).
Args:
mask_threshold: Values greater than the threshold are kept, others are masked. | def load_obs(self, mask_threshold=0.5):
print("Loading obs ", self.run_date, self.model_name, self.forecast_variable)
start_date = self.run_date + timedelta(hours=self.start_hour)
end_date = self.run_date + timedelta(hours=self.end_hour)
mrms_grid = MRMSGrid(start_date, end_dat... | 805,055 |
Reads forecasts from json files and merges them with the input data from the step csv files.
Args:
input_csv_file: Name of the input data csv file being processed
forecast_json_path: Path to the forecast json files toplevel directory
condition_models: List of models used to forecast hail or... | def merge_input_csv_forecast_json(input_csv_file, forecast_json_path, condition_models, dist_models):
try:
run_date = input_csv_file[:-4].split("_")[-1]
print(run_date)
ens_member = "_".join(input_csv_file.split("/")[-1][:-4].split("_")[3:-1])
ens_name = input_csv_file.split("/"... | 805,077 |
Add two contingency tables together and return a combined one.
Args:
other: Another contingency table
Returns:
Sum of contingency tables | def __add__(self, other):
sum_ct = ContingencyTable(*(self.table + other.table).tolist())
return sum_ct | 805,518 |
Segment forecast tracks to only output data contined within a
region in the CONUS, as defined by the mapfile.
Args:
csv_path(str): Path to the full CONUS csv file.
file_dict_key(str): Dictionary key for the csv files,
currently either 'track_step' or 'track_tot... | def output_sector_csv(self,csv_path,file_dict_key,out_path):
csv_file = csv_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
... | 805,606 |
Calculate a probability based on the number of grid points in an area that exceed a threshold.
Args:
threshold:
radius:
Returns: | def neighborhood_probability(self, threshold, radius):
weights = disk(radius, dtype=np.uint8)
thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)
for t in np.arange(self.data.shape[0]):
thresh_data[s... | 805,735 |
Calculate grid-point statistics across ensemble members.
Args:
consensus_type: mean, std, median, max, or percentile_nn
Returns:
EnsembleConsensus containing point statistic | def point_consensus(self, consensus_type):
if "mean" in consensus_type:
consensus_data = np.mean(self.data, axis=0)
elif "std" in consensus_type:
consensus_data = np.std(self.data, axis=0)
elif "median" in consensus_type:
consensus_data = np.median(se... | 805,743 |
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at
that point.
Args:
threshold: If >= threshold assigns a 1 to member, otherwise 0.
Returns:
EnsembleConsensus | def point_probability(self, threshold):
point_prob = np.zeros(self.data.shape[1:])
for t in range(self.data.shape[1]):
point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)
return EnsembleConsensus(point_prob, "point_probability", self.ensemble_name,
... | 805,744 |
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.
Args:
threshold (float): splitting threshold for probability calculatations
radius (int): distance from point in number of grid points to include in neighborhood calculation.
... | def period_max_neighborhood_probability(self, threshold, radius, sigmas=None):
if sigmas is None:
sigmas = [0]
weights = disk(radius)
neighborhood_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
... | 805,746 |
Writes data to grib2 file. Currently, grib codes are set by hand to hail.
Args:
path: Path to directory containing grib2 files.
Returns: | def write_grib2(self, path):
if self.percentile is None:
var_type = "mean"
else:
var_type = "p{0:02d}".format(self.percentile)
lscale = 1e6
grib_id_start = [7, 0, 14, 14, 2]
gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype... | 805,749 |
Initializes netCDF file for writing
Args:
filename: Name of the netCDF file
time_units: Units for the time variable in format "<time> since <date string>"
Returns:
Dataset object | def init_file(self, filename, time_units="seconds since 1970-01-01T00:00"):
if os.access(filename, os.R_OK):
out_data = Dataset(filename, "r+")
else:
out_data = Dataset(filename, "w")
if len(self.data.shape) == 2:
for d, dim in enumerate(["y",... | 805,751 |
Outputs data to a netCDF file. If the file does not exist, it will be created. Otherwise, additional variables
are appended to the current file
Args:
out_data: Full-path and name of output netCDF file | def write_to_file(self, out_data):
full_var_name = self.consensus_type + "_" + self.variable
if "-hour" in self.consensus_type:
if full_var_name not in out_data.variables.keys():
var = out_data.createVariable(full_var_name, "f4", ("y", "x"), zlib=True,
... | 805,752 |
Given a set of DistributedROC or DistributedReliability objects, this function performs a
bootstrap resampling of the objects and returns n_boot aggregations of them.
Args:
score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
def bootstrap(score_objs, n_boot=1000):
    """Generate bootstrap aggregations of distributed verification objects.

    Args:
        score_objs: list of DistributedROC or DistributedReliability objects;
            each must support __add__ so resampled rows can be summed.
        n_boot (int): number of bootstrap resamples to draw.

    Returns:
        numpy.ndarray of n_boot aggregated objects, each the sum of one
        resampled set the same length as score_objs.
    """
    sample_shape = (n_boot, len(score_objs))
    resampled = np.random.choice(score_objs, size=sample_shape, replace=True)
    return resampled.sum(axis=1)
Initializes the DistributedROC object. If input_str is not None, then the DistributedROC object is
initialized with the contents of input_str. Otherwise an empty contingency table is created.
Args:
thresholds (numpy.array): Array of thresholds in increasing order.
obs_threshold... | def __init__(self, thresholds=np.arange(0, 1.1, 0.1), obs_threshold=1.0, input_str=None):
self.thresholds = thresholds
self.obs_threshold = obs_threshold
self.contingency_tables = pd.DataFrame(np.zeros((thresholds.size, 4), dtype=int),
colu... | 805,802 |
Update the ROC curve with a set of forecasts and observations
Args:
forecasts: 1D array of forecast values
observations: 1D array of observation values. | def update(self, forecasts, observations):
for t, threshold in enumerate(self.thresholds):
tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))
fp = np.count_nonzero((forecasts >= threshold) &
(observations < se... | 805,803 |
Add two DistributedROC objects together and combine their contingency table values.
Args:
def __add__(self, other):
    """Combine two DistributedROC objects into a new one.

    Args:
        other: Another DistributedROC object with matching thresholds.

    Returns:
        A new DistributedROC whose contingency tables are the element-wise
        sum of both operands' tables.
    """
    combined = DistributedROC(self.thresholds, self.obs_threshold)
    combined.contingency_tables = self.contingency_tables + other.contingency_tables
    return combined
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
def merge(self, other_roc):
    """Accumulate another DistributedROC's contingency tables into this one in place.

    Args:
        other_roc: another DistributedROC object; its threshold array must
            match this object's thresholds exactly, otherwise the merge is
            skipped and a warning is printed.
    """
    same_size = other_roc.thresholds.size == self.thresholds.size
    if same_size and np.all(other_roc.thresholds == self.thresholds):
        self.contingency_tables += other_roc.contingency_tables
    else:
        print("Input table thresholds do not match.")
Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method | def from_str(self, in_str):
parts = in_str.split(";")
for part in parts:
var_name, value = part.split(":")
if var_name == "Obs_Threshold":
self.obs_threshold = float(value)
elif var_name == "Thresholds":
self.thresholds = np.ar... | 805,813 |
Update the statistics with a set of forecasts and observations.
Args:
forecasts (numpy.ndarray): Array of forecast probability values
observations (numpy.ndarray): Array of observation values | def update(self, forecasts, observations):
for t, threshold in enumerate(self.thresholds[:-1]):
self.frequencies.loc[t, "Positive_Freq"] += np.count_nonzero((threshold <= forecasts) &
(forecasts < self.thresholds[t+1])... | 805,819 |
Add two DistributedReliability objects together and combine their values.
Args:
other: a DistributedReliability object
Returns:
def __add__(self, other):
    """Combine two DistributedReliability objects into a new one.

    Args:
        other: a DistributedReliability object with matching thresholds.

    Returns:
        A new DistributedReliability whose frequency table is the
        element-wise sum of both operands' tables.
    """
    total = DistributedReliability(self.thresholds, self.obs_threshold)
    total.frequencies = self.frequencies + other.frequencies
    return total
Ingest another DistributedReliability and add its contents to the current object.
Args:
def merge(self, other_rel):
    """Accumulate another DistributedReliability's frequencies into this one in place.

    Args:
        other_rel: a DistributedReliability object; its threshold array must
            match this object's thresholds exactly, otherwise the merge is
            skipped and a warning is printed.
    """
    same_size = other_rel.thresholds.size == self.thresholds.size
    if same_size and np.all(other_rel.thresholds == self.thresholds):
        self.frequencies += other_rel.frequencies
    else:
        print("Input table thresholds do not match.")
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete Cumulative Distribution Functions of
observations: | def update(self, forecasts, observations):
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs... | 805,827 |
Match forecast and observed tracks.
Args:
model_tracks:
obs_tracks:
unique_matches:
closest_matches:
Returns: | def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
if unique_matches:
pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
else:
pairings = self.track_matcher.neighbor_matches(mod... | 805,941 |
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm
track timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the
intermediate timesteps.
Args:
model_tracks: List of model trac... | def match_hail_sizes(model_tracks, obs_tracks, track_pairings):
unpaired = list(range(len(model_tracks)))
for p, pair in enumerate(track_pairings):
model_track = model_tracks[pair[0]]
unpaired.remove(pair[0])
obs_track = obs_tracks[pair[1]]
obs_ha... | 805,943 |
Given a matching set of observed tracks for each model track,
Args:
model_tracks:
obs_tracks:
track_pairings:
Returns: | def match_hail_size_step_distributions(self, model_tracks, obs_tracks, track_pairings):
label_columns = ["Matched", "Max_Hail_Size", "Num_Matches", "Shape", "Location", "Scale"]
s = 0
for m, model_track in enumerate(model_tracks):
model_track.observations = pd.DataFrame(inde... | 805,945 |
Calculates spatial and temporal translation errors between matched
forecast and observed tracks.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed track STObjects
track_pairings: List of tuples pairing forecast and observed tracks.
... | def calc_track_errors(model_tracks, obs_tracks, track_pairings):
columns = ['obs_track_id',
'translation_error_x',
'translation_error_y',
'start_time_difference',
'end_time_difference',
]
track_errors... | 805,946 |
Euclidean distance between the centroids of item_a and item_b.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """Normalized Euclidean distance between the centroids of two objects.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer at which item_a is evaluated.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer at which item_b is evaluated.
        max_value: Distance cap, also used as the scaling value.

    Returns:
        Distance value between 0 and 1.
    """
    x_a, y_a = item_a.center_of_mass(time_a)
    x_b, y_b = item_b.center_of_mass(time_b)
    separation = np.sqrt((x_a - x_b) ** 2 + (y_a - y_b) ** 2)
    return np.minimum(separation, max_value) / float(max_value)
Centroid distance with motion corrections.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as sca... | def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
if time_a < time_b:
bx = bx - item_b.u
by = by - item_b.v
else:
ax = ax - item_a.u
ay = ay - item_a.v
return np... | 806,002 |
Euclidean distance between the pixels in item_a and item_b closest to each other.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
def closest_distance(item_a, time_a, item_b, time_b, max_value):
    """Normalized distance between the closest pixels of two objects.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer at which item_a is evaluated.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer at which item_b is evaluated.
        max_value: Distance cap, also used as the scaling value.

    Returns:
        Distance value between 0 and 1.
    """
    raw = item_a.closest_distance(time_a, item_b, time_b)
    return np.minimum(raw, max_value) / float(max_value)
Calculate differences in the properties of ellipses fitted to each object.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Ma... | def ellipse_distance(item_a, time_a, item_b, time_b, max_value):
ts = np.array([0, np.pi])
ell_a = item_a.get_ellipse_model(time_a)
ell_b = item_b.get_ellipse_model(time_b)
ends_a = ell_a.predict_xy(ts)
ends_b = ell_b.predict_xy(ts)
distances = np.sqrt((ends_a[:, 0:1] - ends_b[:, 0:1].T) **... | 806,004 |
Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
    """Fraction of item_a's pixels that do not overlap item_b, normalized.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer at which item_a is evaluated.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer at which item_b is evaluated.
        max_value: Cap applied before scaling.

    Returns:
        Distance value between 0 and 1.
    """
    overlap_frac = item_a.count_overlap(time_a, item_b, time_b)
    return np.minimum(1 - overlap_frac, max_value) / float(max_value)
RMS difference in maximum intensity
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
def max_intensity(item_a, time_a, item_b, time_b, max_value):
    """Normalized absolute difference in the maximum intensities of two objects.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer at which item_a is evaluated.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer at which item_b is evaluated.
        max_value: Cap applied before scaling.

    Returns:
        Distance value between 0 and 1.
    """
    peak_a = item_a.max_intensity(time_a)
    peak_b = item_b.max_intensity(time_b)
    # sqrt of the squared difference == absolute difference
    abs_diff = np.sqrt((peak_a - peak_b) ** 2)
    return np.minimum(abs_diff, max_value) / float(max_value)
RMS Difference in object areas.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
def area_difference(item_a, time_a, item_b, time_b, max_value):
    """Normalized absolute difference in the areas of two objects.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer at which item_a is evaluated.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer at which item_b is evaluated.
        max_value: Cap applied before scaling.

    Returns:
        Distance value between 0 and 1.
    """
    area_a = item_a.size(time_a)
    area_b = item_b.size(time_b)
    # sqrt of the squared difference == absolute difference
    abs_diff = np.sqrt((area_a - area_b) ** 2)
    return np.minimum(abs_diff, max_value) / float(max_value)
RMS difference in the minimum distances from the centroids of one track to the centroids of another track
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constrai... | def mean_minimum_centroid_distance(item_a, item_b, max_value):
centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
distance_matrix = (centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2 + (centroids_a[:, 1:] - cent... | 806,008 |
Calculate the mean time difference among the time steps in each object.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance... | def mean_min_time_distance(item_a, item_b, max_value):
times_a = item_a.times.reshape((item_a.times.size, 1))
times_b = item_b.times.reshape((1, item_b.times.size))
distance_matrix = (times_a - times_b) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=... | 806,009 |
Distance between the centroids of the first step in each object.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value ... | def start_centroid_distance(item_a, item_b, max_value):
start_a = item_a.center_of_mass(item_a.times[0])
start_b = item_b.center_of_mass(item_b.times[0])
start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2)
return np.minimum(start_distance, max_value) / float(ma... | 806,010 |
Absolute difference between the starting times of each item.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
def start_time_distance(item_a, item_b, max_value):
    """Normalized absolute difference between the start times of two tracks.

    Args:
        item_a: STObject from the first set in TrackMatcher.
        item_b: STObject from the second set in TrackMatcher.
        max_value: Cap applied before scaling.

    Returns:
        Distance value between 0 and 1.
    """
    delta_t = np.abs(item_a.times[0] - item_b.times[0])
    return np.minimum(delta_t, max_value) / float(max_value)
Absolute difference in the duration of two items
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
def duration_distance(item_a, item_b, max_value):
    """Normalized absolute difference in the durations (step counts) of two tracks.

    Args:
        item_a: STObject from the first set in TrackMatcher.
        item_b: STObject from the second set in TrackMatcher.
        max_value: Cap applied before scaling.

    Returns:
        Distance value between 0 and 1.
    """
    len_a = item_a.times.size
    len_b = item_b.times.size
    return np.minimum(np.abs(len_a - len_b), max_value) / float(max_value)
Absolute difference in the means of the areas of each track over time.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
def mean_area_distance(item_a, item_b, max_value):
    """Normalized absolute difference in the mean areas of two tracks.

    Fix: unlike every sibling distance function, the original did not clamp
    the difference with np.minimum, so the returned "distance" could exceed 1
    and break the normalized-cost contract used by the matchers. The clamp is
    added here for consistency.

    Args:
        item_a: STObject from the first set in TrackMatcher.
        item_b: STObject from the second set in TrackMatcher.
        max_value: Cap applied before scaling.

    Returns:
        Distance value between 0 and 1.
    """
    mean_area_a = np.mean([item_a.size(t) for t in item_a.times])
    mean_area_b = np.mean([item_b.size(t) for t in item_b.times])
    return np.minimum(np.abs(mean_area_a - mean_area_b), max_value) / float(max_value)
Match two sets of objects at particular times.
Args:
set_a: list of STObjects
set_b: list of STObjects
time_a: time at which set_a is being evaluated for matching
time_b: time at which set_b is being evaluated for matching
Returns:
List of tu... | def match_objects(self, set_a, set_b, time_a, time_b):
costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where(min_row_costs < 100)[0]
good_cols = np.where(min_col_costs < 100)[... | 806,015 |
Calculates the costs (distances) between the items in set a and set b at the specified times.
Args:
set_a: List of STObjects
set_b: List of STObjects
time_a: time at which objects in set_a are evaluated
time_b: time at which objects in set_b are evaluated
def cost_matrix(self, set_a, set_b, time_a, time_b):
    """Build the pairwise cost (distance) matrix between two object sets.

    Args:
        set_a: List of STObjects.
        set_b: List of STObjects.
        time_a: time at which objects in set_a are evaluated.
        time_b: time at which objects in set_b are evaluated.

    Returns:
        numpy array of shape (len(set_a), len(set_b)) holding the total
        weighted cost for every (a, b) pairing.
    """
    costs = np.zeros((len(set_a), len(set_b)))
    for row, obj_a in enumerate(set_a):
        for col, obj_b in enumerate(set_b):
            costs[row, col] = self.total_cost_function(obj_a, obj_b, time_a, time_b)
    return costs
Calculate total cost function between two items.
Args:
item_a: STObject
item_b: STObject
time_a: Timestep in item_a at which cost function is evaluated
time_b: Timestep in item_b at which cost function is evaluated
Returns:
The total weighted... | def total_cost_function(self, item_a, item_b, time_a, time_b):
distances = np.zeros(len(self.weights))
for c, component in enumerate(self.cost_function_components):
distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
total_distance = np.sum(self.weig... | 806,017 |
Find the optimal set of matching assignments between set a and set b. This function supports optimal 1:1
matching using the Munkres method and matching from every object in set a to the closest object in set b.
In this situation set b accepts multiple matches from set a.
Args:
set_a... | def match_tracks(self, set_a, set_b, closest_matches=False):
costs = self.track_cost_matrix(set_a, set_b) * 100
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where(min_row_costs < 100)[0]
good_cols = np.where(min_col_costs < 100)[0]
... | 806,018 |
For each step in each track from set_a, identify all steps in all tracks from set_b that meet all
cost function criteria
Args:
set_a: List of STObjects
set_b: List of STObjects
Returns:
track_pairings: pandas.DataFrame | def match(self, set_a, set_b):
track_step_matches = [[] * len(set_a)]
costs = self.cost_matrix(set_a, set_b)
valid_costs = np.all(costs < 1, axis=2)
set_a_matches, set_b_matches = np.where(valid_costs)
s = 0
track_pairings = pd.DataFrame(index=np.arange(costs.sh... | 806,024 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.