Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Creates wf instances. Args: roles (list): role list Returns: (list): wf instances
def create_wf_instances(self, roles=None): # if roles specified then create an instance for each role # else create only one instance if roles: wf_instances = [ WFInstance( wf=self.wf, current_actor=role, task=self, name=self.wf.name ) for role in roles ] else: wf_instances = [ WFInstance( wf=self.wf, task=self, name=self.wf.name ) ] # if task type is not related with objects save instances immediately. if self.task_type in ["C", "D"]: return [wfi.save() for wfi in wf_instances] # if task type is related with its objects, save populate instances per object else: wf_obj_instances = [] for wfi in wf_instances: role = wfi.current_actor if self.task_type == "A" else None keys = self.get_object_keys(role) wf_obj_instances.extend( [WFInstance( wf=self.wf, current_actor=role, task=self, name=self.wf.name, wf_object=key, wf_object_type=self.object_type ).save() for key in keys] ) return wf_obj_instances
803,395
Write wf state to the DB through MQ >> Worker >> _zops_sync_wf_cache. Args: wf_state (dict): wf state
def save(self, wf_state):
    self.wf_state = wf_state
    self.wf_state['role_id'] = self.current.role_id
    self.set(self.wf_state)
    if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS:
        self.publish(job='_zops_sync_wf_cache', token=self.db_key)
803,413
Send messages through RabbitMQ's default exchange; they will be delivered according to the routing_key (sess_id). This method is only used for unauthenticated users, i.e. during the login process. Args: sess_id (string): Session id. message (dict): Message object.
def send_to_default_exchange(self, sess_id, message=None):
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to %s queue through default exchange:\n%s" % (
        sess_id, msg))
    self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
803,417
Send messages through the logged-in user's private exchange. Args: user_id (string): User key. message (dict): Message object.
def send_to_prv_exchange(self, user_id, message=None):
    exchange = 'prv_%s' % user_id.lower()
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following users \"%s\" exchange:\n%s " % (exchange, msg))
    self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
803,418
Initialize Scene object. Parameters: * pyvlx: PyVLX object * scene_id: internal id for addressing scenes. Provided by KLF 200 device * name: scene name
def __init__(self, pyvlx, scene_id, name):
    self.pyvlx = pyvlx
    self.scene_id = scene_id
    self.name = name
803,806
Run scene. Parameters: * wait_for_completion: If set, function will return after device has reached target position.
async def run(self, wait_for_completion=True):
    activate_scene = ActivateScene(
        pyvlx=self.pyvlx,
        wait_for_completion=wait_for_completion,
        scene_id=self.scene_id)
    await activate_scene.do_api_call()
    if not activate_scene.success:
        raise PyVLXException("Unable to activate scene")
803,807
Initialize opening device. Parameters: * pyvlx: PyVLX object * node_id: internal id for addressing nodes. Provided by KLF 200 device * name: node name
def __init__(self, pyvlx, node_id, name):
    super().__init__(pyvlx=pyvlx, node_id=node_id, name=name)
    self.position = Position()
803,921
Set window to desired position. Parameters: * position: Position object containing the target position. * wait_for_completion: If set, function will return after device has reached target position.
async def set_position(self, position, wait_for_completion=True):
    command_send = CommandSend(
        pyvlx=self.pyvlx,
        wait_for_completion=wait_for_completion,
        node_id=self.node_id,
        parameter=position)
    await command_send.do_api_call()
    if not command_send.success:
        raise PyVLXException("Unable to send command")
    await self.after_update()
803,922
Open window. Parameters: * wait_for_completion: If set, function will return after device has reached target position.
async def open(self, wait_for_completion=True):
    await self.set_position(
        position=Position(position_percent=0),
        wait_for_completion=wait_for_completion)
803,923
Close window. Parameters: * wait_for_completion: If set, function will return after device has reached target position.
async def close(self, wait_for_completion=True):
    await self.set_position(
        position=Position(position_percent=100),
        wait_for_completion=wait_for_completion)
803,924
Stop window. Parameters: * wait_for_completion: If set, function will return after device has reached target position.
async def stop(self, wait_for_completion=True):
    await self.set_position(
        position=CurrentPosition(),
        wait_for_completion=wait_for_completion)
803,925
Initialize Window class. Parameters: * pyvlx: PyVLX object * node_id: internal id for addressing nodes. Provided by KLF 200 device * name: node name * rain_sensor: set if device is equipped with a rain sensor.
def __init__(self, pyvlx, node_id, name, rain_sensor=False):
    super().__init__(pyvlx=pyvlx, node_id=node_id, name=name)
    self.rain_sensor = rain_sensor
803,926
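As a quick illustration of how the Window methods above compose, here is a minimal usage sketch. It assumes `window` is a Window instance already registered with a connected PyVLX gateway; the gateway setup itself is not shown.

async def cycle_window(window):
    # Drive the window through a full open/close cycle using the methods defined above.
    await window.open()    # target position_percent=0
    await window.close()   # target position_percent=100
    await window.stop()    # hold the current position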
Create DynamoDB table for run manifests. Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing the name of the table to create (a no-op if it already exists)
def create_manifest_table(dynamodb_client, table_name):
    try:
        dynamodb_client.create_table(
            AttributeDefinitions=[
                {
                    'AttributeName': DYNAMODB_RUNID_ATTRIBUTE,
                    'AttributeType': 'S'
                },
            ],
            TableName=table_name,
            KeySchema=[
                {
                    'AttributeName': DYNAMODB_RUNID_ATTRIBUTE,
                    'KeyType': 'HASH'
                },
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 5,
                'WriteCapacityUnits': 5
            }
        )
        dynamodb_client.get_waiter('table_exists').wait(TableName=table_name)
    except ClientError as e:
        # Table already exists
        if e.response['Error']['Code'] == 'ResourceInUseException':
            pass
        else:
            raise e
804,033
Return a list of all run ids inside an S3 folder. It is not limited by S3 pagination (`MaxKeys`): it pages through and returns **all** keys from the bucket, and it skips any prefixes whose objects are archived to AWS Glacier. Arguments: s3_client - boto3 S3 client (not service) full_path - full valid S3 path to events (such as enriched-archive) example: s3://acme-events-bucket/main-pipeline/enriched-archive
def list_runids(s3_client, full_path):
    listing_finished = False   # last response was not truncated
    run_ids_buffer = []
    last_continuation_token = None
    (bucket, prefix) = split_full_path(full_path)
    while not listing_finished:
        options = clean_dict({
            'Bucket': bucket,
            'Prefix': prefix,
            'Delimiter': '/',
            'ContinuationToken': last_continuation_token
        })
        response = s3_client.list_objects_v2(**options)
        keys = [extract_run_id(key['Prefix']) for key in response.get('CommonPrefixes', [])]
        run_ids_buffer.extend([key for key in keys if key is not None])
        last_continuation_token = response.get('NextContinuationToken', None)
        if not response['IsTruncated']:
            listing_finished = True
    non_archived_run_ids = [run_id for run_id in run_ids_buffer
                            if not is_glacier(s3_client, bucket, run_id)]
    return non_archived_run_ids
804,034
Return a (bucket, path) pair, with the protocol stripped from the bucket. Arguments: path - valid S3 path, such as s3://somebucket/events >>> split_full_path('s3://mybucket/path-to-events') ('mybucket', 'path-to-events/') >>> split_full_path('s3://mybucket') ('mybucket', None) >>> split_full_path('s3n://snowplow-bucket/some/prefix/') ('snowplow-bucket', 'some/prefix/')
def split_full_path(path):
    if path.startswith('s3://'):
        path = path[5:]
    elif path.startswith('s3n://'):
        path = path[6:]
    elif path.startswith('s3a://'):
        path = path[6:]
    else:
        raise ValueError("S3 path should start with s3://, s3n:// or "
                         "s3a:// prefix")
    parts = path.split('/')
    bucket = parts[0]
    path = '/'.join(parts[1:])
    return bucket, normalize_prefix(path)
804,035
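A short, self-contained check of the documented behavior (assuming `normalize_prefix` appends a trailing slash to non-empty prefixes, as the doctests above imply):

bucket, prefix = split_full_path('s3://acme-events-bucket/main-pipeline/enriched-archive')
# bucket == 'acme-events-bucket'
# prefix == 'main-pipeline/enriched-archive/'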
Check if prefix is archived in Glacier, by checking the storage class of the first few objects inside that prefix. Arguments: s3_client - boto3 S3 client (not service) bucket - valid extracted bucket (without protocol and prefix) example: snowplow-events-data prefix - valid S3 prefix (usually, run_id) example: snowplow-archive/enriched/archive/
def is_glacier(s3_client, bucket, prefix):
    response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=3)  # 3 to not fetch _SUCCESS
    for key in response['Contents']:
        if key.get('StorageClass', 'STANDARD') == 'GLACIER':
            return True
    return False
804,036
Validate the date part of a run id and return the full key if it parses, otherwise None. Arguments: key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/ (trailing slash is required) >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/') 'shredded-archive/run=2012-12-11-01-11-33/' >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33') >>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/')
def extract_run_id(key):
    filename = key.split('/')[-2]  # -1 element is empty string
    # Note: lstrip removes a *set* of characters rather than the literal prefix "run=";
    # this is safe here because run ids start with a digit.
    run_id = filename.lstrip('run=')
    try:
        datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S')
        return key
    except ValueError:
        return None
804,037
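The doctests above can be exercised directly; only keys whose second-to-last path component parses as a run=YYYY-MM-DD-HH-MM-SS timestamp are returned unchanged:

assert extract_run_id('shredded-archive/run=2012-12-11-01-11-33/') == 'shredded-archive/run=2012-12-11-01-11-33/'
assert extract_run_id('shredded-archive/run=2012-13-11-01-11-33/') is None  # month 13 fails to parse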
Add run_id into DynamoDB manifest table Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing existing table name run_id - string representing run_id to store
def add_to_manifest(dynamodb_client, table_name, run_id):
    dynamodb_client.put_item(
        TableName=table_name,
        Item={
            DYNAMODB_RUNID_ATTRIBUTE: {
                'S': run_id
            }
        }
    )
804,039
Check if run_id is stored in DynamoDB table. Return True if run_id is stored or False otherwise. Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing existing table name run_id - string representing run_id to look up
def is_in_manifest(dynamodb_client, table_name, run_id):
    response = dynamodb_client.get_item(
        TableName=table_name,
        Key={
            DYNAMODB_RUNID_ATTRIBUTE: {
                'S': run_id
            }
        }
    )
    return response.get('Item') is not None
804,040
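Taken together, the DynamoDB and S3 helpers above form a simple run-manifest workflow. A minimal sketch, assuming `DYNAMODB_RUNID_ATTRIBUTE` is the module-level key-name constant used above, standard AWS credentials are configured, and the table and bucket names are hypothetical:

import boto3

dynamodb_client = boto3.client('dynamodb', region_name='us-east-1')
s3_client = boto3.client('s3', region_name='us-east-1')
table_name = 'run-manifest'  # hypothetical table name

create_manifest_table(dynamodb_client, table_name)  # no-op if the table already exists
for run_id in list_runids(s3_client, 's3://acme-events-bucket/main-pipeline/enriched-archive'):
    if not is_in_manifest(dynamodb_client, table_name, run_id):
        # ... process the newly discovered run here ...
        add_to_manifest(dynamodb_client, table_name, run_id)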
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel):
    self.send = channel.stream_stream(
        '/predix.eventhub.Publisher/send',
        request_serializer=EventHub__pb2.PublishRequest.SerializeToString,
        response_deserializer=EventHub__pb2.PublishResponse.FromString,
    )
804,228
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.receive = channel.unary_stream( '/predix.eventhub.Subscriber/receive', request_serializer=EventHub__pb2.SubscriptionRequest.SerializeToString, response_deserializer=EventHub__pb2.Message.FromString, ) self.receiveWithAcks = channel.stream_stream( '/predix.eventhub.Subscriber/receiveWithAcks', request_serializer=EventHub__pb2.SubscriptionResponse.SerializeToString, response_deserializer=EventHub__pb2.Message.FromString, ) self.subscribe = channel.stream_stream( '/predix.eventhub.Subscriber/subscribe', request_serializer=EventHub__pb2.SubscriptionAcks.SerializeToString, response_deserializer=EventHub__pb2.SubscriptionMessage.FromString, )
804,229
Parse ping command output. Args: ping_message (str or :py:class:`~pingparsing.PingResult`): ``ping`` command output. Returns: :py:class:`~pingparsing.PingStats`: Parsed result.
def parse(self, ping_message): try: # accept PingResult instance as an input if typepy.is_not_null_string(ping_message.stdout): ping_message = ping_message.stdout except AttributeError: pass logger.debug("parsing ping result: {}".format(ping_message)) self.__parser = NullPingParser() if typepy.is_null_string(ping_message): logger.debug("ping_message is empty") self.__stats = PingStats() return self.__stats ping_lines = _to_unicode(ping_message).splitlines() parser_class_list = ( LinuxPingParser, WindowsPingParser, MacOsPingParser, AlpineLinuxPingParser, ) for parser_class in parser_class_list: self.__parser = parser_class() try: self.__stats = self.__parser.parse(ping_lines) return self.__stats except ParseError as e: if e.reason != ParseErrorReason.HEADER_NOT_FOUND: raise e except pp.ParseException: pass self.__parser = NullPingParser() return self.__stats
804,294
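A typical call, assuming the method above belongs to the pingparsing PingParsing class and the `ping` output has already been captured as text:

import pingparsing

parser = pingparsing.PingParsing()
stats = parser.parse(ping_output_text)  # ping_output_text: captured `ping` stdout
print(stats.as_dict())                  # packet loss, rtt min/avg/max, etc.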
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel):
    self.Check = channel.unary_unary(
        '/grpc.health.v1.Health/Check',
        request_serializer=Health__pb2.HealthCheckRequest.SerializeToString,
        response_deserializer=Health__pb2.HealthCheckResponse.FromString,
    )
804,324
Label input grid with hysteresis method. Args: input_grid: 2D array of values. Returns: Labeled output grid.
def label(self, input_grid): unset = 0 high_labels, num_labels = label(input_grid > self.high_thresh) region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1] output_grid = np.zeros(input_grid.shape, dtype=int) stack = [] for rank in region_ranking: label_num = rank + 1 label_i, label_j = np.where(high_labels == label_num) for i in range(label_i.size): if output_grid[label_i[i], label_j[i]] == unset: stack.append((label_i[i], label_j[i])) while len(stack) > 0: index = stack.pop() output_grid[index] = label_num for i in range(index[0] - 1, index[0] + 2): for j in range(index[1] - 1, index[1] + 2): if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]: if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset): stack.append((i, j)) return output_grid
804,327
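A brief usage sketch of the hysteresis labeler. The enclosing class name and constructor are not shown in this excerpt, so `Hysteresis(low_thresh, high_thresh)` below is a hypothetical name for whatever class provides the `label` method and the two thresholds it references:

import numpy as np

labeler = Hysteresis(low_thresh=5, high_thresh=10)        # hypothetical class/constructor
field = np.random.gamma(shape=2.0, scale=3.0, size=(50, 50))
labels = labeler.label(field)                             # regions seeded above high_thresh, grown down to low_thresh
labels = size_filter(labels, min_size=4)                  # drop objects smaller than 4 pixels (defined below)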
Remove labeled objects that do not meet size threshold criteria. Args: labeled_grid: 2D output from label method. min_size: minimum size of object in pixels. Returns: labeled grid with smaller objects removed.
def size_filter(labeled_grid, min_size):
    out_grid = np.zeros(labeled_grid.shape, dtype=int)
    slices = find_objects(labeled_grid)
    j = 1
    for i, s in enumerate(slices):
        box = labeled_grid[s]
        size = np.count_nonzero(box.ravel() == (i + 1))
        if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:
            out_grid[np.where(labeled_grid == i + 1)] = j
            j += 1
    return out_grid
804,328
Searches var list for variable name, checks other variable name format options. Args: variable (str): Variable being loaded var_list (list): List of variables in file. Returns: Name of variable in file containing relevant data, and index of variable z-level if multiple variables contained in same array in file.
def format_var_name(variable, var_list):
    z_index = None
    if variable in var_list:
        var_name = variable
    elif variable.ljust(6, "_") in var_list:
        var_name = variable.ljust(6, "_")
    elif any([variable in v_sub.split("_") for v_sub in var_list]):
        var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)]
        z_index = var_name.split("_").index(variable)
    else:
        raise KeyError("{0} not found in {1}".format(variable, var_list))
    return var_name, z_index
804,568
Load data from flat data files containing total track information and information about each timestep. The two sets are combined using merge operations on the Track IDs. Additional member information is gathered from the appropriate member file. Args: mode: "train" or "forecast" format: file format being used. Default is "csv"
def load_data(self, mode="train", format="csv"): if mode in self.data.keys(): run_dates = pd.DatetimeIndex(start=self.start_dates[mode], end=self.end_dates[mode],freq="1D") run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date] print(run_date_str) all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*total_" + self.ensemble_name + "*." + format)) all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*step_" + self.ensemble_name + "*." + format)) total_track_files = [] for track_file in all_total_track_files: file_date = track_file.split("_")[-1][:-4] if file_date in run_date_str: total_track_files.append(track_file) step_track_files = [] for step_file in all_step_track_files: file_date = step_file.split("_")[-1][:-4] if file_date in run_date_str: step_track_files.append(step_file) self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files), ignore_index=True) self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0) self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0) self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files), ignore_index=True) self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0) self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0) if mode == "forecast": self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID") self.data[mode]["member"] = pd.read_csv(self.member_files[mode]) self.data[mode]["combo"] = pd.merge(self.data[mode]["step"], self.data[mode]["total"], on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"]) self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"], self.data[mode]["member"], on="Ensemble_Member") self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"], self.data[mode]["member"], on="Ensemble_Member")
804,571
Calculate a copula multivariate normal distribution from the training data for each group of ensemble members. Distributions are written to a pickle file for later use. Args: output_file: Pickle file model_names: Names of the tracking models label_columns: Names of the data columns used for labeling Returns:
def calc_copulas(self, output_file, model_names=("start-time", "translation-x", "translation-y"),
                 label_columns=("Start_Time_Error", "Translation_Error_X", "Translation_Error_Y")):
    if len(self.data['train']) == 0:
        self.load_data()
    groups = self.data["train"]["member"][self.group_col].unique()
    copulas = {}
    label_columns = list(label_columns)
    for group in groups:
        print(group)
        group_data = self.data["train"]["total_group"].loc[
            self.data["train"]["total_group"][self.group_col] == group]
        group_data = group_data.dropna()
        group_data.reset_index(drop=True, inplace=True)
        copulas[group] = {}
        copulas[group]["mean"] = group_data[label_columns].mean(axis=0).values
        copulas[group]["cov"] = np.cov(group_data[label_columns].values.T)
        copulas[group]["model_names"] = list(model_names)
        del group_data
    # Pickle files written with a binary protocol must be opened in binary mode.
    pickle.dump(copulas, open(output_file, "wb"), pickle.HIGHEST_PROTOCOL)
804,572
Fits multitask machine learning models to predict the parameters of a size distribution Args: model_names: List of machine learning model names model_objs: scikit-learn style machine learning model objects input_columns: Training data columns used as input for ML model output_columns: Training data columns used for prediction calibrate: Whether or not to fit a log-linear regression to predictions from ML model
def fit_size_distribution_models(self, model_names, model_objs, input_columns, output_columns=None, calibrate=False): if output_columns is None: output_columns = ["Shape", "Location", "Scale"] groups = np.unique(self.data["train"]["member"][self.group_col]) weights=None for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data[group_data[output_columns[-1]] > 0] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) self.size_distribution_models[group] = {"multi": {}, "lognorm": {}} if calibrate: self.size_distribution_models[group]["calshape"] = {} self.size_distribution_models[group]["calscale"] = {} log_labels = np.log(group_data[output_columns].values) log_means = log_labels.mean(axis=0) log_sds = log_labels.std(axis=0) self.size_distribution_models[group]['lognorm']['mean'] = log_means self.size_distribution_models[group]['lognorm']['sd'] = log_sds for m, model_name in enumerate(model_names): print(group, model_name) self.size_distribution_models[group]["multi"][model_name] = deepcopy(model_objs[m]) try: self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns], (log_labels - log_means) / log_sds, sample_weight=weights) except: self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns], (log_labels - log_means) / log_sds) if calibrate: training_predictions = self.size_distribution_models[ group]["multi"][model_name].predict(group_data[input_columns]) self.size_distribution_models[group]["calshape"][model_name] = LinearRegression() self.size_distribution_models[group]["calshape"][model_name].fit(training_predictions[:, 0:1], (log_labels[:, 0] - log_means[0]) / log_sds[ 0], sample_weight=weights) self.size_distribution_models[group]["calscale"][model_name] = LinearRegression() self.size_distribution_models[group]["calscale"][model_name].fit(training_predictions[:, 1:], (log_labels[:, 1] - log_means[1]) / log_sds[ 1], sample_weight=weights)
804,576
Calculates principal components of the hail size distribution's shape and scale parameters. Separate machine learning models are fit to predict each component. Args: model_names: List of machine learning model names model_objs: List of machine learning model objects. input_columns: List of input variables output_columns: Output columns, should contain Shape and Scale. Returns:
def fit_size_distribution_component_models(self, model_names, model_objs, input_columns, output_columns): groups = np.unique(self.data["train"]["member"][self.group_col]) weights=None for group in groups: print(group) group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data.loc[group_data[output_columns[-1]] > 0] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) self.size_distribution_models[group] = {"lognorm": {}} self.size_distribution_models[group]["lognorm"]["pca"] = PCA(n_components=len(output_columns)) log_labels = np.log(group_data[output_columns].values) log_labels[:, np.where(output_columns == "Shape")[0]] *= -1 log_means = log_labels.mean(axis=0) log_sds = log_labels.std(axis=0) log_norm_labels = (log_labels - log_means) / log_sds out_pc_labels = self.size_distribution_models[group]["lognorm"]["pca"].fit_transform(log_norm_labels) self.size_distribution_models[group]['lognorm']['mean'] = log_means self.size_distribution_models[group]['lognorm']['sd'] = log_sds for comp in range(len(output_columns)): self.size_distribution_models[group]["pc_{0:d}".format(comp)] = dict() for m, model_name in enumerate(model_names): print(model_name, comp) self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name] = deepcopy(model_objs[m]) try: self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns], out_pc_labels[:, comp], sample_weight=weights) except: self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns], out_pc_labels[:, comp]) return
804,577
Fit size models to produce discrete pdfs of forecast hail sizes. Args: model_names: List of model names model_objs: List of model objects input_columns: List of input variables output_column: Output variable name output_start: Hail size bin start output_step: hail size bin step output_stop: hail size bin stop
def fit_size_models(self, model_names, model_objs, input_columns, output_column="Hail_Size", output_start=5, output_step=5, output_stop=100): print("Fitting size models") groups = self.data["train"]["member"][self.group_col].unique() output_start = int(output_start) output_step = int(output_step) output_stop = int(output_stop) for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data.dropna(inplace=True) group_data = group_data[group_data[output_column] >= output_start] output_data = group_data[output_column].values.astype(int) output_data[output_data > output_stop] = output_stop discrete_data = ((output_data - output_start) // output_step) * output_step + output_start self.size_models[group] = {} self.size_models[group]["outputvalues"] = np.arange(output_start, output_stop + output_step, output_step, dtype=int) for m, model_name in enumerate(model_names): print("{0} {1}".format(group, model_name)) self.size_models[group][model_name] = deepcopy(model_objs[m]) self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)
804,580
Apply size models to forecast data. Args: model_names: list of machine learning model names. input_columns: list of input variable columns. metadata_cols: metadata columns carried into the prediction output. data_mode: which dataset to predict on (default "forecast").
def predict_size_models(self, model_names, input_columns, metadata_cols, data_mode="forecast"): groups = self.size_models.keys() predictions = {} for group in groups: group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group] if group_data.shape[0] > 0: predictions[group] = {} output_values = self.size_models[group]["outputvalues"].astype(int) for m, model_name in enumerate(model_names): print("{0} {1}".format(group, model_name)) pred_col_names = [model_name.replace(" ", "-") + "_{0:02d}".format(p) for p in output_values] predictions[group][model_name] = group_data[metadata_cols] pred_vals = self.size_models[group][model_name].predict_proba(group_data[input_columns]) pred_classes = self.size_models[group][model_name].classes_ pred_pdf = np.zeros((pred_vals.shape[0], output_values.size)) for pcv, pc in enumerate(pred_classes): idx = np.where(output_values == pc)[0][0] pred_pdf[:, idx] = pred_vals[:, pcv] for pcn, pred_col_name in enumerate(pred_col_names): predictions[group][model_name].loc[:, pred_col_name] = pred_pdf[:, pcn] return predictions
804,581
Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: dict of forecast DataFrames ("condition" and "dist") to be merged and written. mode: data mode being output. csv_path: path to the output csv directory. Returns:
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"): merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"], on=["Step_ID","Track_ID","Ensemble_Member","Forecast_Hour"]) all_members = self.data[mode]["combo"]["Ensemble_Member"] members = np.unique(all_members) all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"]) run_dates = pd.DatetimeIndex(np.unique(all_run_dates)) print(run_dates) for member in members: for run_date in run_dates: mem_run_index = (all_run_dates == run_date) & (all_members == member) member_forecast = merged_forecasts.loc[mem_run_index] member_forecast.to_csv(join(csv_path, "hail_forecasts_{0}_{1}_{2}.csv".format(self.ensemble_name, member, run_date.strftime (run_date_format)))) return
804,586
Calculates the continuous ranked probability score (CRPS) on the forecast data. Args: model_type: model type being evaluated. model_name: machine learning model being evaluated. condition_model_name: Name of the hail/no-hail model being evaluated condition_threshold: Threshold for using hail size CDF query: pandas query string to filter the forecasts based on the metadata Returns: a DistributedCRPS object
def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None): def gamma_cdf(x, a, loc, b): if a == 0 or b == 0: cdf = np.ones(x.shape) else: cdf = gamma.cdf(x, a, loc, b) return cdf crps_obj = DistributedCRPS(self.dist_thresholds) if query is not None: sub_forecasts = self.matched_forecasts[model_type][model_name].query(query) sub_forecasts = sub_forecasts.reset_index(drop=True) condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query) condition_forecasts = condition_forecasts.reset_index(drop=True) else: sub_forecasts = self.matched_forecasts[model_type][model_name] condition_forecasts = self.matched_forecasts["condition"][condition_model_name] if sub_forecasts.shape[0] > 0: if model_type == "dist": forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.dist_thresholds.size)) for f in range(sub_forecasts.shape[0]): condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]] if condition_prob >= condition_threshold: f_params = [0, 0, 0] else: f_params = sub_forecasts[self.forecast_bins[model_type]].values[f] forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params[0], f_params[1], f_params[2]) obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params) for params in sub_forecasts[self.type_cols[model_type]].values]) crps_obj.update(forecast_cdfs, obs_cdfs) else: crps_obj.update(sub_forecasts[self.forecast_bins[model_type].astype(str)].values, sub_forecasts[self.type_cols[model_type]].values) return crps_obj
804,612
Loads map coordinates from netCDF or pickle file created by util.makeMapGrids. Args: map_file: Filename for the file containing coordinate information. Returns: Latitude and longitude grids as numpy arrays.
def load_map_coordinates(map_file):
    if map_file[-4:] == ".pkl":
        # Pickle files need to be opened in binary mode under Python 3.
        map_data = pickle.load(open(map_file, "rb"))
        lon = map_data['lon']
        lat = map_data['lat']
    else:
        map_data = Dataset(map_file)
        if "lon" in map_data.variables.keys():
            lon = map_data.variables['lon'][:]
            lat = map_data.variables['lat'][:]
        else:
            lon = map_data.variables["XLONG"][0]
            lat = map_data.variables["XLAT"][0]
    return lon, lat
804,658
Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data
def max_neighbor(self, in_lon, in_lat, radius=0.05): out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T) out_indices = np.indices(out_data.shape[1:]) out_rows = out_indices[0].ravel() out_cols = out_indices[1].ravel() for d in range(self.data.shape[0]): nz_points = np.where(self.data[d] > 0) if len(nz_points[0]) > 0: nz_vals = self.data[d][nz_points] nz_rank = np.argsort(nz_vals) original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T) all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0) for n, neighbors in enumerate(all_neighbors): if len(neighbors) > 0: out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n] return out_data
804,663
Calculate the neighborhood probability over the full period of the forecast Args: radius: circular radius from each point in km smoothing: width of Gaussian smoother in km threshold: intensity of exceedance stride: number of grid points to skip for reduced neighborhood grid start_time, end_time: first and last forecast-hour indices of the period evaluated Returns: (neighborhood probabilities)
def period_neighborhood_probability(self, radius, smoothing, threshold, stride,start_time,end_time): neighbor_x = self.x[::stride, ::stride] neighbor_y = self.y[::stride, ::stride] neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T) neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1])) print('Forecast Hours: {0}-{1}'.format(start_time, end_time)) for m in range(len(self.members)): period_max = self.data[m,start_time:end_time,:,:].max(axis=0) valid_i, valid_j = np.where(period_max >= threshold) print(self.members[m], len(valid_i)) if len(valid_i) > 0: var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T) exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int) exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape) neighbor_prob[m][exceed_i, exceed_j] = 1 if smoothing > 0: neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing,mode='constant') return neighbor_prob
804,865
Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection. Args: map_file: File specifying the projection information.
def load_map_info(self, map_file): if self.ensemble_name.upper() == "SSEF": proj_dict, grid_dict = read_arps_map_file(map_file) self.dx = int(grid_dict["dx"]) mapping_data = make_proj_grids(proj_dict, grid_dict) for m, v in mapping_data.items(): setattr(self, m, v) self.i, self.j = np.indices(self.lon.shape) self.proj = get_proj_obj(proj_dict) elif self.ensemble_name.upper() in ["NCAR", "NCARSTORM", "HRRR", "VSE", "HREFV2"]: proj_dict, grid_dict = read_ncar_map_file(map_file) if self.member_name[0:7] == "1km_pbl": # Don't just look at the first 3 characters. You have to differentiate '1km_pbl1' and '1km_on_3km_pbl1' grid_dict["dx"] = 1000 grid_dict["dy"] = 1000 grid_dict["sw_lon"] = 258.697 grid_dict["sw_lat"] = 23.999 grid_dict["ne_lon"] = 282.868269206236 grid_dict["ne_lat"] = 36.4822338520542 self.dx = int(grid_dict["dx"]) mapping_data = make_proj_grids(proj_dict, grid_dict) for m, v in mapping_data.items(): setattr(self, m, v) self.i, self.j = np.indices(self.lon.shape) self.proj = get_proj_obj(proj_dict)
804,884
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file. Args: filename: Name of the geojson file Returns: an STObject
def read_geojson(filename): json_file = open(filename) data = json.load(json_file) json_file.close() times = data["properties"]["times"] main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[]) attribute_data = dict() for feature in data["features"]: for main_name in main_data.keys(): main_data[main_name].append(np.array(feature["properties"][main_name])) for k, v in feature["properties"]["attributes"].items(): if k not in attribute_data.keys(): attribute_data[k] = [np.array(v)] else: attribute_data[k].append(np.array(v)) kwargs = {} for kw in ["dx", "step", "u", "v"]: if kw in data["properties"].keys(): kwargs[kw] = data["properties"][kw] sto = STObject(main_data["timesteps"], main_data["masks"], main_data["x"], main_data["y"], main_data["i"], main_data["j"], times[0], times[-1], **kwargs) for k, v in attribute_data.items(): sto.attributes[k] = v return sto
804,919
Calculate the center of mass at a given timestep. Args: time: Time at which the center of mass calculation is performed Returns: The x- and y-coordinates of the center of mass.
def center_of_mass(self, time):
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        valid = np.flatnonzero(self.masks[diff] != 0)
        if valid.size > 0:
            com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.x[diff].ravel()[valid])
            com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.y[diff].ravel()[valid])
        else:
            com_x = np.mean(self.x[diff])
            com_y = np.mean(self.y[diff])
    else:
        com_x = None
        com_y = None
    return com_x, com_y
804,922
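For reference, the quantity computed over the masked pixels is the intensity-weighted centroid:

\[ x_{\mathrm{com}} = \frac{\sum_k w_k\, x_k}{\sum_k w_k}, \qquad y_{\mathrm{com}} = \frac{\sum_k w_k\, y_k}{\sum_k w_k}, \]

where \(w_k\) are the timestep intensity values at the masked (nonzero) pixels and \((x_k, y_k)\) are their coordinates; if no pixels are masked, the plain mean of the coordinate grids is used instead.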
Gets the corner array indices of the STObject at a given time, i.e. the indices that correspond to the upper-left corner of the object's bounding box. Args: time: time at which the corner is being extracted. Returns: corner index.
def get_corner(self, time):
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        return self.i[diff][0, 0], self.j[diff][0, 0]
    else:
        return -1, -1
804,925
Gets the size of the object at a given time. Args: time: Time value being queried. Returns: size of the object in pixels
def size(self, time):
    if self.start_time <= time <= self.end_time:
        return self.masks[time - self.start_time].sum()
    else:
        return 0
804,926
Adds the data from another STObject to this object. Args: step: another STObject being added after the current one in time.
def extend(self, step):
    self.timesteps.extend(step.timesteps)
    self.masks.extend(step.masks)
    self.x.extend(step.x)
    self.y.extend(step.y)
    self.i.extend(step.i)
    self.j.extend(step.j)
    self.end_time = step.end_time
    self.times = np.arange(self.start_time, self.end_time + self.step, self.step)
    self.u = np.concatenate((self.u, step.u))
    self.v = np.concatenate((self.v, step.v))
    for attr in self.attributes.keys():
        if attr in step.attributes.keys():
            self.attributes[attr].extend(step.attributes[attr])
804,929
Estimate the motion of the object by matching its intensity values against the intensity grid from the previous time step; candidate shifts are scored by mean absolute error. Args: time: time being evaluated. intensity_grid: 2D array of intensities used in the matching. max_u: Maximum x-component of motion. Used to limit search area. max_v: Maximum y-component of motion. Used to limit search area. Returns: u, v, and the minimum error.
def estimate_motion(self, time, intensity_grid, max_u, max_v): ti = np.where(time == self.times)[0][0] mask_vals = np.where(self.masks[ti].ravel() == 1) i_vals = self.i[ti].ravel()[mask_vals] j_vals = self.j[ti].ravel()[mask_vals] obj_vals = self.timesteps[ti].ravel()[mask_vals] u_shifts = np.arange(-max_u, max_u + 1) v_shifts = np.arange(-max_v, max_v + 1) min_error = 99999999999.0 best_u = 0 best_v = 0 for u in u_shifts: j_shift = j_vals - u for v in v_shifts: i_shift = i_vals - v if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) & (0 <= j_shift) & (j_shift < intensity_grid.shape[1])): shift_vals = intensity_grid[i_shift, j_shift] else: shift_vals = np.zeros(i_shift.shape) # This isn't correlation; it is mean absolute error. error = np.abs(shift_vals - obj_vals).mean() if error < min_error: min_error = error best_u = u * self.dx best_v = v * self.dx # 60 seems arbitrarily high #if min_error > 60: # best_u = 0 # best_v = 0 self.u[ti] = best_u self.v[ti] = best_v return best_u, best_v, min_error
804,931
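The search evaluates every integer shift within the allowed window and keeps the one that minimizes the mean absolute error between the shifted grid values and the object's own intensities:

\[ (u^{*}, v^{*}) = \arg\min_{u,\,v} \; \frac{1}{N}\sum_{k=1}^{N} \left| I\!\left(i_k - v,\; j_k - u\right) - O\!\left(i_k, j_k\right) \right|, \]

where \(O\) holds the object's intensity values at its \(N\) masked pixels and \(I\) is the intensity grid from the previous time step; the winning shift is converted to physical units by multiplying by the grid spacing `dx`.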
Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject. Args: model_grid: A ModelGrid or ModelOutput Object potential: Extracts from the time before instead of the same time as the object
def extract_attribute_grid(self, model_grid, potential=False, future=False): if potential: var_name = model_grid.variable + "-potential" timesteps = np.arange(self.start_time - 1, self.end_time) elif future: var_name = model_grid.variable + "-future" timesteps = np.arange(self.start_time + 1, self.end_time + 2) else: var_name = model_grid.variable timesteps = np.arange(self.start_time, self.end_time + 1) self.attributes[var_name] = [] for ti, t in enumerate(timesteps): self.attributes[var_name].append( model_grid.data[t - model_grid.start_hour, self.i[ti], self.j[ti]])
804,933
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array
def extract_attribute_array(self, data_array, var_name):
    if var_name not in self.attributes.keys():
        self.attributes[var_name] = []
    for t in range(self.times.size):
        self.attributes[var_name].append(data_array[self.i[t], self.j[t]])
804,934
Extracts the difference in model outputs Args: model_grid: ModelOutput or ModelGrid object.
def extract_tendency_grid(self, model_grid):
    var_name = model_grid.variable + "-tendency"
    self.attributes[var_name] = []
    timesteps = np.arange(self.start_time, self.end_time + 1)
    for ti, t in enumerate(timesteps):
        t_index = t - model_grid.start_hour
        self.attributes[var_name].append(
            model_grid.data[t_index, self.i[ti], self.j[ti]]
            - model_grid.data[t_index - 1, self.i[ti], self.j[ti]]
        )
804,935
Calculates summary statistics over the domains of each attribute. Args: statistic_name (string): numpy statistic, such as mean, std, max, min Returns: dict of statistics from each attribute grid.
def calc_attribute_statistics(self, statistic_name):
    stats = {}
    for var, grids in self.attributes.items():
        if len(grids) > 1:
            stats[var] = getattr(
                np.array([getattr(np.ma.array(x, mask=self.masks[t] == 0), statistic_name)()
                          for t, x in enumerate(grids)]),
                statistic_name)()
        else:
            stats[var] = getattr(np.ma.array(grids[0], mask=self.masks[0] == 0), statistic_name)()
    return stats
804,936
Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic
def calc_attribute_statistic(self, attribute, statistic, time): ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.attributes[attribute][ti].ravel()[ma]) elif statistic == "skew": stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \ np.median(self.attributes[attribute][ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \ - self.calc_attribute_statistic(attribute, stat_name, time - 1) else: stat_val = np.nan return stat_val
804,937
Calculate statistics from the primary attribute of the StObject. Args: statistic: statistic being calculated time: Timestep being investigated Returns: Value of the statistic
def calc_timestep_statistic(self, statistic, time): ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.timesteps[ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_timestep_statistic(stat_name, time) -\ self.calc_timestep_statistic(stat_name, time - 1) else: stat_val = np.nan return stat_val
804,938
Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics
def calc_shape_statistics(self, stat_names):
    stats = {}
    try:
        all_props = [regionprops(m) for m in self.masks]
    except TypeError:
        print(self.masks)
        exit()
    for stat in stat_names:
        stats[stat] = np.mean([p[0][stat] for p in all_props])
    return stats
804,939
Calculate shape statistics for a single time step Args: stat_names: List of shape statistics calculated from region props time: Time being investigated Returns: List of shape statistics
def calc_shape_step(self, stat_names, time): ti = np.where(self.times == time)[0][0] props = regionprops(self.masks[ti], self.timesteps[ti])[0] shape_stats = [] for stat_name in stat_names: if "moments_hu" in stat_name: hu_index = int(stat_name.split("_")[-1]) hu_name = "_".join(stat_name.split("_")[:-1]) hu_val = np.log(props[hu_name][hu_index]) if np.isnan(hu_val): shape_stats.append(0) else: shape_stats.append(hu_val) else: shape_stats.append(props[stat_name]) return shape_stats
804,940
Output the data in the STObject to a geoJSON file. Args: filename: Name of the file proj: PyProj object for converting the x and y coordinates back to latitude and longitude values. metadata: Metadata describing the object to be included in the top-level properties.
def to_geojson(self, filename, proj, metadata=None): if metadata is None: metadata = {} json_obj = {"type": "FeatureCollection", "features": [], "properties": {}} json_obj['properties']['times'] = self.times.tolist() json_obj['properties']['dx'] = self.dx json_obj['properties']['step'] = self.step json_obj['properties']['u'] = self.u.tolist() json_obj['properties']['v'] = self.v.tolist() for k, v in metadata.items(): json_obj['properties'][k] = v for t, time in enumerate(self.times): feature = {"type": "Feature", "geometry": {"type": "Polygon"}, "properties": {}} boundary_coords = self.boundary_polygon(time) lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True)) lonlat_list = lonlat.T.tolist() if len(lonlat_list) > 0: lonlat_list.append(lonlat_list[0]) feature["geometry"]["coordinates"] = [lonlat_list] for attr in ["timesteps", "masks", "x", "y", "i", "j"]: feature["properties"][attr] = getattr(self, attr)[t].tolist() feature["properties"]["attributes"] = {} for attr_name, steps in self.attributes.items(): feature["properties"]["attributes"][attr_name] = steps[t].tolist() json_obj['features'].append(feature) file_obj = open(filename, "w") json.dump(json_obj, file_obj, indent=1, sort_keys=True) file_obj.close() return
804,941
Rescale the input data so that it ranges over integer values, which perform better in the watershed. Args: data: 2D or 3D ndarray being rescaled data_min: minimum value of input data for scaling purposes data_max: maximum value of input data for scaling purposes out_min: minimum value of scaled data out_max: maximum value of scaled data Returns: Linearly scaled ndarray
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):
    return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min
805,036
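A quick worked example of the linear map, assuming numpy is imported as np as in the rest of this module:

scaled = rescale_data(np.array([0.0, 20.0, 50.0]), data_min=0.0, data_max=50.0)
# With the default 0-100 output range this is 2 * x, giving array([0., 40., 100.]).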
Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray): Grid to be labeled. Returns: Array of labeled pixels
def label(self, input_grid):
    marked = self.find_local_maxima(input_grid)
    marked = np.where(marked >= 0, 1, 0)
    # splabel returns two things in a tuple: an array and an integer;
    # assign the first thing (the array) to markers
    markers = splabel(marked)[0]
    return markers
805,038
Finds the local maxima in the input grid and performs region growing to identify objects. Args: input_grid: Raw input data. Returns: array with labeled objects.
def find_local_maxima(self, input_grid): pixels, q_data = self.quantize(input_grid) centers = OrderedDict() for p in pixels.keys(): centers[p] = [] marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size))) MAX_INFL = 2 * MIN_INFL marked_so_far = [] # Find the maxima. These are high-values with enough clearance # around them. # Work from high to low bins. The pixels in the highest bin mark their # neighborhoods first. If you did it from low to high the lowest maxima # would mark their neighborhoods first and interfere with the identification of higher maxima. for b in sorted(pixels.keys(),reverse=True): # Square starts large with high intensity bins and gets smaller with low intensity bins. infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL))) for p in pixels[b]: if marked[p] == self.UNMARKED: ok = False del marked_so_far[:] # Temporarily mark unmarked points in square around point (keep track of them in list marked_so_far). # If none of the points in square were marked already from a higher intensity center, # this counts as a new center and ok=True and points will remain marked. # Otherwise ok=False and marked points that were previously unmarked will be unmarked. for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1, p[1] - infl_dist:p[1]+ infl_dist + 1]): if v == self.UNMARKED: ok = True marked[i - infl_dist + p[0],j - infl_dist + p[1]] = b marked_so_far.append((i - infl_dist + p[0],j - infl_dist + p[1])) else: # neighborhood already taken ok = False break # ok if point and surrounding square were not marked already. if ok: # highest point in its neighborhood centers[b].append(p) else: for m in marked_so_far: marked[m] = self.UNMARKED # Erase marks and start over. You have a list of centers now. marked[:, :] = self.UNMARKED deferred_from_last = [] deferred_to_next = [] # delta (int): maximum number of increments the cluster is allowed to range over. Larger d results in clusters over larger scales. for delta in range(0, self.delta + 1): # Work from high to low bins. for b in sorted(centers.keys(), reverse=True): bin_lower = b - delta deferred_from_last[:] = deferred_to_next[:] del deferred_to_next[:] foothills = [] n_centers = len(centers[b]) tot_centers = n_centers + len(deferred_from_last) for i in range(tot_centers): # done this way to minimize memory overhead of maintaining two lists if i < n_centers: center = centers[b][i] else: center = deferred_from_last[i - n_centers] if bin_lower < 0: bin_lower = 0 if marked[center] == self.UNMARKED: captured = self.set_maximum(q_data, marked, center, bin_lower, foothills) if not captured: # decrement to lower value to see if it'll get big enough deferred_to_next.append(center) else: pass # this is the last one for this bin self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills) del deferred_from_last[:] del deferred_to_next[:] return marked
805,039
Quantize a grid into discrete steps based on input parameters. Args: input_grid: 2-d array of values Returns: Dictionary of value pointing to pixel locations, and quantized 2-d array of data
def quantize(self, input_grid):
    pixels = {}
    for i in range(self.max_bin + 1):
        pixels[i] = []
    data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment
    data[data < 0] = -1
    data[data > self.max_bin] = self.max_bin
    good_points = np.where(data >= 0)
    for g in np.arange(good_points[0].shape[0]):
        pixels[data[(good_points[0][g], good_points[1][g])]].append(
            (good_points[0][g], good_points[1][g]))
    return pixels, data
805,043
Loads observations and masking grid (if needed). Args: mask_threshold: Values greater than the threshold are kept, others are masked.
def load_obs(self, mask_threshold=0.5): print("Loading obs ", self.run_date, self.model_name, self.forecast_variable) start_date = self.run_date + timedelta(hours=self.start_hour) end_date = self.run_date + timedelta(hours=self.end_hour) mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path) mrms_grid.load_data() if len(mrms_grid.data) > 0: self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data) self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0) if self.obs_mask: mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path) mask_grid.load_data() self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0) self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)
805,055
Reads forecasts from json files and merges them with the input data from the step csv files. Args: input_csv_file: Name of the input data csv file being processed forecast_json_path: Path to the forecast json files toplevel directory condition_models: List of models used to forecast hail or no hail dist_models: List of models used to forecast the hail size distribution Returns:
def merge_input_csv_forecast_json(input_csv_file, forecast_json_path, condition_models, dist_models): try: run_date = input_csv_file[:-4].split("_")[-1] print(run_date) ens_member = "_".join(input_csv_file.split("/")[-1][:-4].split("_")[3:-1]) ens_name = input_csv_file.split("/")[-1].split("_")[2] input_data = pd.read_csv(input_csv_file, index_col="Step_ID") full_json_path = forecast_json_path + "{0}/{1}/".format(run_date, ens_member) track_ids = sorted(input_data["Track_ID"].unique()) model_pred_cols = [] condition_models_ns = [] dist_models_ns = [] gamma_params = ["Shape", "Location", "Scale"] for condition_model in condition_models: model_pred_cols.append(condition_model.replace(" ", "-") + "_Condition") condition_models_ns.append(condition_model.replace(" ", "-")) for dist_model in dist_models: dist_models_ns.append(dist_model.replace(" ", "-")) for param in gamma_params: model_pred_cols.append(dist_model.replace(" ", "-") + "_" + param) pred_data = pd.DataFrame(index=input_data.index, columns=model_pred_cols, dtype=float) for track_id in track_ids: track_id_num = track_id.split("_")[-1] json_filename = full_json_path + "{0}_{1}_{2}_model_track_{3}.json".format(ens_name, run_date, ens_member, track_id_num) json_file = open(json_filename) json_data = json.load(json_file) json_file.close() for s, step in enumerate(json_data["features"]): step_id = track_id + "_{0:02d}".format(s) for cond_model in condition_models_ns: pred_data.loc[step_id, cond_model + "_Condition"] = step["properties"]["condition_" + cond_model] for dist_model in dist_models_ns: pred_data.loc[step_id, [dist_model + "_" + p for p in gamma_params]] = step["properties"]["dist_" + dist_model] out_data = input_data.merge(pred_data, left_index=True, right_index=True) return out_data, ens_name, ens_member except Exception as e: print(traceback.format_exc()) raise e
805,077
Add two contingency tables together and return a combined one. Args: other: Another contingency table Returns: Sum of contingency tables
def __add__(self, other):
    sum_ct = ContingencyTable(*(self.table + other.table).tolist())
    return sum_ct
805,518
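A brief usage note: because addition sums the corresponding table cells, skill scores computed from the combined table (POD, FAR, CSI, and so on) are the properly pooled scores for the whole period, which is generally not the same as averaging per-period scores. Assuming `day1` and `day2` are ContingencyTable objects already built for two verification days:

combined = day1 + day2   # cell counts are summed element-wise via the __add__ above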
Segment forecast tracks to only output data contained within a region in the CONUS, as defined by the mapfile. Args: csv_path(str): Path to the full CONUS csv file. file_dict_key(str): Dictionary key for the csv files, currently either 'track_step' or 'track_total' out_path (str): Path to output new segmented csv files. Returns: Segmented forecast tracks in a csv file.
def output_sector_csv(self,csv_path,file_dict_key,out_path): csv_file = csv_path + "{0}_{1}_{2}_{3}.csv".format( file_dict_key, self.ensemble_name, self.member, self.run_date.strftime(self.date_format)) if exists(csv_file): csv_data = pd.read_csv(csv_file) if self.inds is None: lon_obj = csv_data.loc[:,"Centroid_Lon"] lat_obj = csv_data.loc[:,"Centroid_Lat"] self.inds = np.where((self.ne_lat>=lat_obj)&(self.sw_lat<=lat_obj)\ &(self.ne_lon>=lon_obj)&(self.sw_lon<=lon_obj))[0] if np.shape(self.inds)[0] > 0: csv_data = csv_data.reindex(np.array(self.inds)) sector_csv_filename = out_path + "{0}_{1}_{2}_{3}.csv".format( file_dict_key, self.ensemble_name, self.member, self.run_date.strftime(self.date_format)) print("Output sector csv file " + sector_csv_filename) csv_data.to_csv(sector_csv_filename, na_rep="nan", float_format="%0.5f", index=False) os.chmod(sector_csv_filename, 0o666) else: print('No {0} {1} sector data found'.format(self.member, self.run_date.strftime("%Y%m%d"))) else: print('No {0} {1} csv file found'.format(self.member, self.run_date.strftime("%Y%m%d"))) return
805,606
Calculate a probability based on the number of grid points in an area that exceed a threshold. Args: threshold: intensity threshold for exceedance. radius: radius of the circular neighborhood in grid points. Returns: Gridded neighborhood probabilities.
def neighborhood_probability(self, threshold, radius): weights = disk(radius, dtype=np.uint8) thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8) neighbor_prob = np.zeros(self.data.shape, dtype=np.float32) for t in np.arange(self.data.shape[0]): thresh_data[self.data[t] >= threshold] = 1 maximized = fftconvolve(thresh_data, weights, mode="same") maximized[maximized > 1] = 1 maximized[maximized < 1] = 0 neighbor_prob[t] = fftconvolve(maximized, weights, mode="same") thresh_data[:] = 0 neighbor_prob[neighbor_prob < 1] = 0 neighbor_prob /= weights.sum() return neighbor_prob
805,735
Calculate grid-point statistics across ensemble members. Args: consensus_type: mean, std, median, max, or percentile_nn Returns: EnsembleConsensus containing point statistic
def point_consensus(self, consensus_type):
    if "mean" in consensus_type:
        consensus_data = np.mean(self.data, axis=0)
    elif "std" in consensus_type:
        consensus_data = np.std(self.data, axis=0)
    elif "median" in consensus_type:
        consensus_data = np.median(self.data, axis=0)
    elif "max" in consensus_type:
        consensus_data = np.max(self.data, axis=0)
    elif "percentile" in consensus_type:
        percentile = int(consensus_type.split("_")[1])
        consensus_data = np.percentile(self.data, percentile, axis=0)
    else:
        consensus_data = np.zeros(self.data.shape[1:])
    consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name,
                                  self.run_date, self.variable,
                                  self.start_date, self.end_date, self.units)
    return consensus
805,743
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at that point. Args: threshold: If >= threshold assigns a 1 to member, otherwise 0. Returns: EnsembleConsensus
def point_probability(self, threshold):
    point_prob = np.zeros(self.data.shape[1:])
    for t in range(self.data.shape[1]):
        point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)
    return EnsembleConsensus(point_prob, "point_probability", self.ensemble_name,
                             self.run_date,
                             self.variable + "_{0:0.2f}_{1}".format(threshold,
                                                                    self.units.replace(" ", "_")),
                             self.start_date, self.end_date, "")
805,744
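In symbols, the per-grid-point probability computed above is the ensemble relative frequency of exceedance:

\[ P\!\left(f \ge T \mid i, j, t\right) = \frac{1}{M} \sum_{m=1}^{M} \mathbf{1}\!\left[ f_{m}(i, j, t) \ge T \right], \]

with \(M\) ensemble members \(f_m\) and threshold \(T\); no neighborhood aggregation or smoothing is applied, in contrast to the neighborhood-probability methods elsewhere in this class.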
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded. Args: threshold (float): splitting threshold for probability calculations radius (int): distance from point in number of grid points to include in neighborhood calculation. sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities. Returns: list of EnsembleConsensus objects
def period_max_neighborhood_probability(self, threshold, radius, sigmas=None):
    if sigmas is None:
        sigmas = [0]
    weights = disk(radius)
    neighborhood_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
    thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
    for m in range(self.data.shape[0]):
        thresh_data[self.data[m].max(axis=0) >= threshold] = 1
        maximized = fftconvolve(thresh_data, weights, mode="same")
        maximized[maximized > 1] = 1
        neighborhood_prob += fftconvolve(maximized, weights, mode="same")
    neighborhood_prob[neighborhood_prob < 1] = 0
    neighborhood_prob /= (self.data.shape[0] * float(weights.sum()))
    consensus_probs = []
    for sigma in sigmas:
        if sigma > 0:
            filtered_prob = gaussian_filter(neighborhood_prob, sigma=sigma)
        else:
            filtered_prob = neighborhood_prob
        ec = EnsembleConsensus(filtered_prob,
                               "neighbor_prob_{0:02d}-hour_r_{1:d}_s_{2:d}".format(self.data.shape[1],
                                                                                   radius, sigma),
                               self.ensemble_name, self.run_date,
                               self.variable + "_{0:0.2f}".format(float(threshold)),
                               self.start_date, self.end_date, "")
        consensus_probs.append(ec)
    return consensus_probs
805,746
Writes data to grib2 files. Currently, grib codes are set by hand for hail. Args: path: Path to the directory where grib2 files are written. Returns: None
def write_grib2(self, path):
    if self.percentile is None:
        var_type = "mean"
    else:
        var_type = "p{0:02d}".format(self.percentile)
    lscale = 1e6
    grib_id_start = [7, 0, 14, 14, 2]
    gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype=np.int32)
    lon_0 = self.proj_dict["lon_0"]
    sw_lon = self.grid_dict["sw_lon"]
    if lon_0 < 0:
        lon_0 += 360
    if sw_lon < 0:
        sw_lon += 360
    gdtmp1 = np.array([7, 1, self.proj_dict['a'], 1, self.proj_dict['a'], 1, self.proj_dict['b'],
                       self.data.shape[-2], self.data.shape[-1],
                       self.grid_dict["sw_lat"] * lscale, sw_lon * lscale, 0,
                       self.proj_dict["lat_0"] * lscale, lon_0 * lscale,
                       self.grid_dict["dx"] * 1e3, self.grid_dict["dy"] * 1e3, 0,
                       self.proj_dict["lat_1"] * lscale, self.proj_dict["lat_2"] * lscale,
                       0, 0], dtype=np.int32)
    pdtmp1 = np.array([1, 31, 2, 0, 116, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 192, 0,
                       self.data.shape[0]], dtype=np.int32)
    for m, member in enumerate(self.members):
        pdtmp1[-2] = m
        for t, time in enumerate(self.times):
            time_list = list(time.utctimetuple()[0:6])
            grbe = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))
            grbe.addgrid(gdsinfo, gdtmp1)
            pdtmp1[8] = (time.to_pydatetime() - self.run_date).total_seconds() / 3600.0
            drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)
            data = self.data[m, t].astype(np.float32) / 1000.0
            masked_data = np.ma.array(data, mask=data <= 0)
            grbe.addfield(1, pdtmp1, 0, drtmp1, masked_data)
            grbe.end()
            filename = path + "{0}_{1}_mlhail_{2}_{3}.grib2".format(self.ensemble_name.replace(" ", "-"),
                                                                    member,
                                                                    var_type,
                                                                    time.to_datetime().strftime("%Y%m%d%H%M"))
            print("Writing to " + filename)
            grib_file = open(filename, "wb")
            grib_file.write(grbe.msg)
            grib_file.close()
    return
805,749
Initializes netCDF file for writing Args: filename: Name of the netCDF file time_units: Units for the time variable in format "<time> since <date string>" Returns: Dataset object
def init_file(self, filename, time_units="seconds since 1970-01-01T00:00"):
    if os.access(filename, os.R_OK):
        out_data = Dataset(filename, "r+")
    else:
        out_data = Dataset(filename, "w")
        if len(self.data.shape) == 2:
            for d, dim in enumerate(["y", "x"]):
                out_data.createDimension(dim, self.data.shape[d])
        else:
            for d, dim in enumerate(["y", "x"]):
                out_data.createDimension(dim, self.data.shape[d + 1])
        out_data.createDimension("time", len(self.times))
        time_var = out_data.createVariable("time", "i8", ("time",))
        time_var[:] = date2num(self.times.to_pydatetime(), time_units)
        time_var.units = time_units
        out_data.Conventions = "CF-1.6"
    return out_data
805,751
Outputs data to a netCDF file. If the output variable does not exist, it is created; otherwise the existing variable in the current file is overwritten. Args: out_data: an open netCDF4 Dataset object (e.g., returned by init_file) to which the variable is written
def write_to_file(self, out_data):
    full_var_name = self.consensus_type + "_" + self.variable
    if "-hour" in self.consensus_type:
        if full_var_name not in out_data.variables.keys():
            var = out_data.createVariable(full_var_name, "f4", ("y", "x"), zlib=True,
                                          least_significant_digit=3, shuffle=True)
        else:
            var = out_data.variables[full_var_name]
        var.coordinates = "y x"
    else:
        if full_var_name not in out_data.variables.keys():
            var = out_data.createVariable(full_var_name, "f4", ("time", "y", "x"), zlib=True,
                                          least_significant_digit=3, shuffle=True)
        else:
            var = out_data.variables[full_var_name]
        var.coordinates = "time y x"
    var[:] = self.data
    var.units = self.units
    var.long_name = self.consensus_type + "_" + self.variable
    return
805,752
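A hedged usage sketch for the two methods above, assuming consensus is an EnsembleConsensus instance (for example, the object returned by point_consensus) and that the output path is writable; the filename is illustrative only.

out_data = consensus.init_file("/tmp/ensemble_consensus.nc")   # illustrative path
consensus.write_to_file(out_data)                               # appends the consensus variable
out_data.close()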
Given a set of DistributedROC or DistributedReliability objects, this function performs a bootstrap resampling of the objects and returns n_boot aggregations of them. Args: score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method n_boot (int): Number of bootstrap samples Returns: An array of DistributedROC or DistributedReliability
def bootstrap(score_objs, n_boot=1000):
    all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)
    return all_samples.sum(axis=1)
805,799
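A minimal sketch of how bootstrap might be used with the DistributedROC class defined just below; the synthetic forecasts and observations and the per-day loop are illustrative assumptions, and both names are assumed to be available in the current namespace.

import numpy as np

rng = np.random.default_rng(0)
roc_objs = []
for day in range(30):                                   # 30 synthetic forecast days
    forecasts = rng.random(500)                         # made-up probability forecasts
    observations = rng.random(500)                      # made-up observed values
    roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.1), obs_threshold=0.9)
    roc.update(forecasts, observations)
    roc_objs.append(roc)
# Each row of the bootstrap sample is summed via DistributedROC.__add__,
# yielding 200 aggregated DistributedROC objects.
boot_rocs = bootstrap(np.array(roc_objs, dtype=object), n_boot=200)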
Initializes the DistributedROC object. If input_str is not None, then the DistributedROC object is initialized with the contents of input_str. Otherwise an empty contingency table is created. Args: thresholds (numpy.array): Array of thresholds in increasing order. obs_threshold (float): Split threshold (>= is positive event) (< is negative event) input_str (None or str): String containing information for DistributedROC
def __init__(self, thresholds=np.arange(0, 1.1, 0.1), obs_threshold=1.0, input_str=None):
    self.thresholds = thresholds
    self.obs_threshold = obs_threshold
    self.contingency_tables = pd.DataFrame(np.zeros((thresholds.size, 4), dtype=int),
                                           columns=["TP", "FP", "FN", "TN"])
    if input_str is not None:
        self.from_str(input_str)
805,802
Update the ROC curve with a set of forecasts and observations Args: forecasts: 1D array of forecast values observations: 1D array of observation values.
def update(self, forecasts, observations):
    for t, threshold in enumerate(self.thresholds):
        tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))
        fp = np.count_nonzero((forecasts >= threshold) & (observations < self.obs_threshold))
        fn = np.count_nonzero((forecasts < threshold) & (observations >= self.obs_threshold))
        tn = np.count_nonzero((forecasts < threshold) & (observations < self.obs_threshold))
        self.contingency_tables.iloc[t] += [tp, fp, fn, tn]
805,803
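A small sketch of deriving ROC coordinates from the accumulated contingency tables, assuming roc is a DistributedROC that has been updated as above; the class's own curve methods are not shown in this section, so these lines compute the coordinates directly.

ct = roc.contingency_tables
pod = ct["TP"] / (ct["TP"] + ct["FN"])    # probability of detection per threshold (ROC y-axis)
pofd = ct["FP"] / (ct["FP"] + ct["TN"])   # probability of false detection per threshold (ROC x-axis)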
Add two DistributedROC objects together and combine their contingency table values. Args: other: Another DistributedROC object.
def __add__(self, other):
    sum_roc = DistributedROC(self.thresholds, self.obs_threshold)
    sum_roc.contingency_tables = self.contingency_tables + other.contingency_tables
    return sum_roc
805,804
Ingest the values of another DistributedROC object into this one and update the statistics in place. Args: other_roc: another DistributedROC object.
def merge(self, other_roc):
    if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):
        self.contingency_tables += other_roc.contingency_tables
    else:
        print("Input table thresholds do not match.")
805,805
Read the DistributedROC string and parse the contingency table values from it. Args: in_str (str): The string output from the __str__ method
def from_str(self, in_str):
    parts = in_str.split(";")
    for part in parts:
        var_name, value = part.split(":")
        if var_name == "Obs_Threshold":
            self.obs_threshold = float(value)
        elif var_name == "Thresholds":
            self.thresholds = np.array(value.split(), dtype=float)
            self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,
                                                   data=np.zeros((self.thresholds.size,
                                                                  self.contingency_tables.columns.size)))
        elif var_name in self.contingency_tables.columns:
            self.contingency_tables[var_name] = np.array(value.split(), dtype=int)
805,813
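An example of the serialized format that from_str expects, reconstructed only from the parsing logic above; the matching __str__ method is not shown in this section, so treat the exact layout as an assumption.

roc_str = ("Obs_Threshold:25.0;Thresholds:0.0 0.5 1.0;"
           "TP:10 5 1;FP:4 2 0;FN:1 6 10;TN:85 87 89")
roc = DistributedROC(input_str=roc_str)   # thresholds and tables are rebuilt from the string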
Update the statistics with a set of forecasts and observations. Args: forecasts (numpy.ndarray): Array of forecast probability values observations (numpy.ndarray): Array of observation values
def update(self, forecasts, observations):
    for t, threshold in enumerate(self.thresholds[:-1]):
        self.frequencies.loc[t, "Positive_Freq"] += np.count_nonzero((threshold <= forecasts) &
                                                                     (forecasts < self.thresholds[t + 1]) &
                                                                     (observations >= self.obs_threshold))
        self.frequencies.loc[t, "Total_Freq"] += np.count_nonzero((threshold <= forecasts) &
                                                                  (forecasts < self.thresholds[t + 1]))
805,819
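A brief sketch of turning the accumulated frequencies into a reliability curve, assuming rel is a DistributedReliability that has been updated as above; bins with zero Total_Freq produce NaN and are typically masked before plotting.

bin_centers = 0.5 * (rel.thresholds[:-1] + rel.thresholds[1:])        # forecast probability bins
observed_rel_freq = rel.frequencies["Positive_Freq"] / rel.frequencies["Total_Freq"]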
Add two DistributedReliability objects together and combine their values. Args: other: a DistributedReliability object Returns: A DistributedReliability Object
def __add__(self, other):
    sum_rel = DistributedReliability(self.thresholds, self.obs_threshold)
    sum_rel.frequencies = self.frequencies + other.frequencies
    return sum_rel
805,820
Ingest another DistributedReliability and add its contents to the current object. Args: other_rel: a Distributed reliability object.
def merge(self, other_rel):
    if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):
        self.frequencies += other_rel.frequencies
    else:
        print("Input table thresholds do not match.")
805,821
Update the statistics with forecasts and observations. Args: forecasts: 2D array of forecast CDF values, with one row per forecast and one column per threshold. observations: 1D array of observed values, or a 2D array of pre-computed observed CDFs with the same shape as forecasts.
def update(self, forecasts, observations):
    if len(observations.shape) == 1:
        obs_cdfs = np.zeros((observations.size, self.thresholds.size))
        for o, observation in enumerate(observations):
            obs_cdfs[o, self.thresholds >= observation] = 1
    else:
        obs_cdfs = observations
    self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
    self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
    self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
    self.errors["O"] += np.sum(obs_cdfs, axis=0)
    self.num_forecasts += forecasts.shape[0]
805,827
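A sketch of how the stored sums could be combined into a CRPS value, assuming the standard threshold decomposition CRPS = mean over thresholds and forecasts of (F - O)^2, so that sum((F - O)^2) = F_2 - 2*F_O + O_2 per threshold; the class's own crps method is not shown in this section, so this is an inferred calculation with dcrps standing in for the updated object.

crps = float(np.sum(dcrps.errors["F_2"] - 2.0 * dcrps.errors["F_O"] + dcrps.errors["O_2"])
             / (dcrps.thresholds.size * dcrps.num_forecasts))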
Match forecast and observed tracks. Args: model_tracks: list of model track STObjects obs_tracks: list of observed track STObjects unique_matches: if True, find one-to-one matches with the track matcher; otherwise use neighbor matching closest_matches: passed to the track matcher when unique_matches is True Returns: list of track pairings
def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
    if unique_matches:
        pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
    else:
        pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
    return pairings
805,941
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm track timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the intermediate timesteps. Args: model_tracks: List of model track STObjects obs_tracks: List of observed STObjects track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
def match_hail_sizes(model_tracks, obs_tracks, track_pairings):
    unpaired = list(range(len(model_tracks)))
    for p, pair in enumerate(track_pairings):
        model_track = model_tracks[pair[0]]
        unpaired.remove(pair[0])
        obs_track = obs_tracks[pair[1]]
        obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max()
                                   for t, step in enumerate(obs_track.timesteps)])
        if obs_track.times.size > 1 and model_track.times.size > 1:
            normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min()) \
                * (obs_track.times - obs_track.times.min())
            normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min()) \
                * (model_track.times - model_track.times.min())
            hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind="nearest",
                                   bounds_error=False, fill_value=0)
            model_track.observations = hail_interp(normalized_model_times)
        elif obs_track.times.size == 1:
            model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0]
        elif model_track.times.size == 1:
            model_track.observations = np.array([obs_hail_sizes.max()])
        print(pair[0], "obs", obs_hail_sizes)
        print(pair[0], "model", model_track.observations)
    for u in unpaired:
        model_tracks[u].observations = np.zeros(model_tracks[u].times.shape)
805,943
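A small standalone illustration of the nearest-neighbor time interpolation used above, with made-up observed hail sizes and normalized track times; each model time simply receives the observed value at the nearest normalized observed time.

import numpy as np
from scipy.interpolate import interp1d

obs_times = np.array([0.0, 0.5, 1.0])            # normalized observed-track times
obs_sizes = np.array([25.0, 40.0, 32.0])         # max hail size (mm) at each observed step
model_times = np.array([0.0, 0.3, 0.5, 0.8, 1.0])
hail_interp = interp1d(obs_times, obs_sizes, kind="nearest", bounds_error=False, fill_value=0)
print(hail_interp(model_times))                  # nearest observed size at each model time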
Given a matching set of observed tracks for each model track, fits a gamma distribution to the observed hail sizes associated with each matched forecast track timestep. Args: model_tracks: list of model track STObjects obs_tracks: list of observed track STObjects track_pairings: DataFrame of per-timestep pairings produced by the track step matcher Returns: None; the fitted parameters are stored in each model track's observations attribute
def match_hail_size_step_distributions(self, model_tracks, obs_tracks, track_pairings):
    label_columns = ["Matched", "Max_Hail_Size", "Num_Matches", "Shape", "Location", "Scale"]
    s = 0
    for m, model_track in enumerate(model_tracks):
        model_track.observations = pd.DataFrame(index=model_track.times,
                                                columns=label_columns, dtype=np.float64)
        model_track.observations.loc[:, :] = 0
        model_track.observations["Matched"] = model_track.observations["Matched"].astype(np.int32)
        for t, time in enumerate(model_track.times):
            model_track.observations.loc[time, "Matched"] = track_pairings.loc[s, "Matched"]
            if model_track.observations.loc[time, "Matched"] > 0:
                all_hail_sizes = []
                step_pairs = track_pairings.loc[s, "Pairings"]
                for step_pair in step_pairs:
                    obs_step = obs_tracks[step_pair[0]].timesteps[step_pair[1]].ravel()
                    obs_mask = obs_tracks[step_pair[0]].masks[step_pair[1]].ravel()
                    all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_thresh)])
                combined_hail_sizes = np.concatenate(all_hail_sizes)
                min_hail = combined_hail_sizes.min() - 0.1
                model_track.observations.loc[time, "Max_Hail_Size"] = combined_hail_sizes.max()
                model_track.observations.loc[time, "Num_Matches"] = step_pairs.shape[0]
                model_track.observations.loc[time, ["Shape", "Location", "Scale"]] = gamma.fit(combined_hail_sizes,
                                                                                               floc=min_hail)
            s += 1
805,945
Calculates spatial and temporal translation errors between matched forecast and observed tracks. Args: model_tracks: List of model track STObjects obs_tracks: List of observed track STObjects track_pairings: List of tuples pairing forecast and observed tracks. Returns: pandas DataFrame containing different track errors
def calc_track_errors(model_tracks, obs_tracks, track_pairings):
    columns = ['obs_track_id',
               'translation_error_x',
               'translation_error_y',
               'start_time_difference',
               'end_time_difference',
               ]
    track_errors = pd.DataFrame(index=list(range(len(model_tracks))), columns=columns)
    for p, pair in enumerate(track_pairings):
        model_track = model_tracks[pair[0]]
        if type(pair[1]) in [int, np.int64]:
            obs_track = obs_tracks[pair[1]]
        else:
            obs_track = obs_tracks[pair[1][0]]
        model_com = model_track.center_of_mass(model_track.start_time)
        obs_com = obs_track.center_of_mass(obs_track.start_time)
        track_errors.loc[pair[0], 'obs_track_id'] = pair[1] if type(pair[1]) in [int, np.int64] else pair[1][0]
        track_errors.loc[pair[0], 'translation_error_x'] = model_com[0] - obs_com[0]
        track_errors.loc[pair[0], 'translation_error_y'] = model_com[1] - obs_com[1]
        track_errors.loc[pair[0], 'start_time_difference'] = model_track.start_time - obs_track.start_time
        track_errors.loc[pair[0], 'end_time_difference'] = model_track.end_time - obs_track.end_time
    return track_errors
805,946
Euclidean distance between the centroids of item_a and item_b. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
    ax, ay = item_a.center_of_mass(time_a)
    bx, by = item_b.center_of_mass(time_b)
    return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
806,000
Centroid distance with motion corrections. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
    ax, ay = item_a.center_of_mass(time_a)
    bx, by = item_b.center_of_mass(time_b)
    if time_a < time_b:
        bx = bx - item_b.u
        by = by - item_b.v
    else:
        ax = ax - item_a.u
        ay = ay - item_a.v
    return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
806,002
Euclidean distance between the pixels in item_a and item_b closest to each other. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def closest_distance(item_a, time_a, item_b, time_b, max_value):
    return np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value)
806,003
Calculate differences in the properties of ellipses fitted to each object. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def ellipse_distance(item_a, time_a, item_b, time_b, max_value):
    ts = np.array([0, np.pi])
    ell_a = item_a.get_ellipse_model(time_a)
    ell_b = item_b.get_ellipse_model(time_b)
    ends_a = ell_a.predict_xy(ts)
    ends_b = ell_b.predict_xy(ts)
    distances = np.sqrt((ends_a[:, 0:1] - ends_b[:, 0:1].T) ** 2 +
                        (ends_a[:, 1:] - ends_b[:, 1:].T) ** 2)
    return np.minimum(distances[0, 1], max_value) / float(max_value)
806,004
Percentage of pixels in each object that do not overlap with the other object Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
    return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value)
806,005
RMS difference in maximum intensity Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def max_intensity(item_a, time_a, item_b, time_b, max_value):
    intensity_a = item_a.max_intensity(time_a)
    intensity_b = item_b.max_intensity(time_b)
    diff = np.sqrt((intensity_a - intensity_b) ** 2)
    return np.minimum(diff, max_value) / float(max_value)
806,006
RMS Difference in object areas. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def area_difference(item_a, time_a, item_b, time_b, max_value):
    size_a = item_a.size(time_a)
    size_b = item_b.size(time_b)
    diff = np.sqrt((size_a - size_b) ** 2)
    return np.minimum(diff, max_value) / float(max_value)
806,007
RMS difference in the minimum distances from the centroids of one track to the centroids of another track Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def mean_minimum_centroid_distance(item_a, item_b, max_value):
    centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
    centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
    distance_matrix = (centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2 + \
                      (centroids_a[:, 1:] - centroids_b.T[1:]) ** 2
    mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
    return np.minimum(mean_min_distances, max_value) / float(max_value)
806,008
Calculate the mean of the minimum time differences between the time steps of the two objects. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def mean_min_time_distance(item_a, item_b, max_value):
    times_a = item_a.times.reshape((item_a.times.size, 1))
    times_b = item_b.times.reshape((1, item_b.times.size))
    distance_matrix = (times_a - times_b) ** 2
    mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
    return np.minimum(mean_min_distances, max_value) / float(max_value)
806,009
Distance between the centroids of the first step in each object. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def start_centroid_distance(item_a, item_b, max_value):
    start_a = item_a.center_of_mass(item_a.times[0])
    start_b = item_b.center_of_mass(item_b.times[0])
    start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2)
    return np.minimum(start_distance, max_value) / float(max_value)
806,010
Absolute difference between the starting times of each item. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def start_time_distance(item_a, item_b, max_value):
    start_time_diff = np.abs(item_a.times[0] - item_b.times[0])
    return np.minimum(start_time_diff, max_value) / float(max_value)
806,011
Absolute difference in the duration of two items Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def duration_distance(item_a, item_b, max_value):
    duration_a = item_a.times.size
    duration_b = item_b.times.size
    return np.minimum(np.abs(duration_a - duration_b), max_value) / float(max_value)
806,012
Absolute difference in the means of the areas of each track over time. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
def mean_area_distance(item_a, item_b, max_value):
    mean_area_a = np.mean([item_a.size(t) for t in item_a.times])
    mean_area_b = np.mean([item_b.size(t) for t in item_b.times])
    return np.abs(mean_area_a - mean_area_b) / float(max_value)
806,013
Match two sets of objects at particular times. Args: set_a: list of STObjects set_b: list of STObjects time_a: time at which set_a is being evaluated for matching time_b: time at which set_b is being evaluated for matching Returns: List of tuples containing (set_a index, set_b index) for each match
def match_objects(self, set_a, set_b, time_a, time_b):
    costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100
    min_row_costs = costs.min(axis=1)
    min_col_costs = costs.min(axis=0)
    good_rows = np.where(min_row_costs < 100)[0]
    good_cols = np.where(min_col_costs < 100)[0]
    assignments = []
    if len(good_rows) > 0 and len(good_cols) > 0:
        munk = Munkres()
        initial_assignments = munk.compute(costs[tuple(np.meshgrid(good_rows, good_cols,
                                                                   indexing='ij'))].tolist())
        initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
        for a in initial_assignments:
            if costs[a[0], a[1]] < 100:
                assignments.append(a)
    return assignments
806,015
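A tiny standalone example of the Hungarian-style assignment used above, assuming the munkres package is installed; the cost values are made up, and scipy.optimize.linear_sum_assignment would produce an equivalent pairing.

from munkres import Munkres

costs = [[40, 90, 75],
         [35, 85, 55],
         [80, 95, 90]]
assignments = Munkres().compute(costs)   # list of (row, col) pairs minimizing the total cost
print(assignments)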
Calculates the costs (distances) between the items in set a and set b at the specified times. Args: set_a: List of STObjects set_b: List of STObjects time_a: time at which objects in set_a are evaluated time_b: time at which objects in set_b are evaluated Returns: A numpy array with shape [len(set_a), len(set_b)] containing the cost matrix between the items in set a and the items in set b.
def cost_matrix(self, set_a, set_b, time_a, time_b):
    costs = np.zeros((len(set_a), len(set_b)))
    for a, item_a in enumerate(set_a):
        for b, item_b in enumerate(set_b):
            costs[a, b] = self.total_cost_function(item_a, item_b, time_a, time_b)
    return costs
806,016
Calculate total cost function between two items. Args: item_a: STObject item_b: STObject time_a: Timestep in item_a at which cost function is evaluated time_b: Timestep in item_b at which cost function is evaluated Returns: The total weighted distance between item_a and item_b
def total_cost_function(self, item_a, item_b, time_a, time_b):
    distances = np.zeros(len(self.weights))
    for c, component in enumerate(self.cost_function_components):
        distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
    total_distance = np.sum(self.weights * distances)
    return total_distance
806,017
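A worked sketch of the weighted combination above: every component distance is already normalized to [0, 1] by its max_value, so the weights alone control each term's influence; the weights and distances here are illustrative.

import numpy as np

weights = np.array([24.0, 6.0, 2.0])       # e.g. centroid, area, duration weights (illustrative)
distances = np.array([0.10, 0.30, 0.00])   # normalized component distances for one object pair
total_distance = np.sum(weights * distances)
print(total_distance)                       # 24*0.10 + 6*0.30 + 2*0.00 = 4.2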
Find the optimal set of matching assignments between set a and set b. This function supports optimal 1:1 matching using the Munkres method and matching from every object in set a to the closest object in set b. In this situation set b accepts multiple matches from set a. Args: set_a: list of STObject tracks set_b: list of STObject tracks closest_matches: if True, match each track in set a to its closest track in set b instead of using the Munkres method Returns: list of (set_a index, set_b index) assignments
def match_tracks(self, set_a, set_b, closest_matches=False):
    costs = self.track_cost_matrix(set_a, set_b) * 100
    min_row_costs = costs.min(axis=1)
    min_col_costs = costs.min(axis=0)
    good_rows = np.where(min_row_costs < 100)[0]
    good_cols = np.where(min_col_costs < 100)[0]
    assignments = []
    if len(good_rows) > 0 and len(good_cols) > 0:
        if closest_matches:
            b_matches = costs[np.meshgrid(good_rows, good_cols, indexing='ij')].argmin(axis=1)
            a_matches = np.arange(b_matches.size)
            initial_assignments = [(good_rows[a_matches[x]], good_cols[b_matches[x]])
                                   for x in range(b_matches.size)]
        else:
            munk = Munkres()
            initial_assignments = munk.compute(costs[np.meshgrid(good_rows, good_cols,
                                                                 indexing='ij')].tolist())
            initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
        for a in initial_assignments:
            if costs[a[0], a[1]] < 100:
                assignments.append(a)
    return assignments
806,018
For each step in each track from set_a, identify all steps in all tracks from set_b that meet all cost function criteria Args: set_a: List of STObjects set_b: List of STObjects Returns: track_pairings: pandas.DataFrame
def match(self, set_a, set_b):
    track_step_matches = [[] * len(set_a)]
    costs = self.cost_matrix(set_a, set_b)
    valid_costs = np.all(costs < 1, axis=2)
    set_a_matches, set_b_matches = np.where(valid_costs)
    s = 0
    track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]),
                                  columns=["Track", "Step", "Time", "Matched", "Pairings"],
                                  dtype=object)
    set_b_info = []
    for trb, track_b in enumerate(set_b):
        for t, time in enumerate(track_b.times):
            set_b_info.append((trb, t))
    set_b_info_arr = np.array(set_b_info, dtype=int)
    for tr, track_a in enumerate(set_a):
        for t, time in enumerate(track_a.times):
            track_pairings.loc[s, ["Track", "Step", "Time"]] = [tr, t, time]
            track_pairings.loc[s, "Matched"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0
            if track_pairings.loc[s, "Matched"] == 1:
                track_pairings.loc[s, "Pairings"] = set_b_info_arr[set_b_matches[set_a_matches == s]]
            else:
                track_pairings.loc[s, "Pairings"] = np.array([])
            s += 1
    return track_pairings
806,024