Dataset schema (one field per line in each record below):
repo: string, 7 to 55 chars
path: string, 4 to 223 chars
func_name: string, 1 to 134 chars
original_string: string, 75 to 104k chars
language: string, 1 distinct value (python)
code: string, 75 to 104k chars
code_tokens: list, 19 to 28.4k items
docstring: string, 1 to 46.9k chars
docstring_tokens: list, 1 to 1.97k items
sha: string, 40 chars
url: string, 87 to 315 chars
partition: string, 1 distinct value (train)
djgagne/hagelslag
hagelslag/evaluation/MetricPlotter.py
roc_curve
def roc_curve(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Probability of False Detection", ylabel="Probability of Detection", title="ROC Curve", ticks=np.arange(0, 1.1, 0.1), dpi=300, legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14, title_fontsize=16, tick_fontsize=12): """ Plots a set of receiver/relative operating characteristic (ROC) curves from DistributedROC objects. The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection (False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the ROC curves as well as the parameters of the legend and the title. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Labels describing the forecast associated with each DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. legend_params (None, dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model. ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.randint(0, 2, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png") """ if legend_params is None: legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True) plt.figure(figsize=figsize, dpi=dpi) plt.plot(ticks, ticks, "k--") if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in b_set]) pod_range = np.percentile(broc_curves[:, 0], ci, axis=1) pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1) pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1])) pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1])) pod_poly[np.isnan(pod_poly)] = 0 pofd_poly[np.isnan(pofd_poly)] = 0 plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b]) for r, roc_obj in enumerate(roc_objs): roc_data = roc_obj.roc_curve() plt.plot(roc_data["POFD"], roc_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r]) plt.xlabel(xlabel, fontsize=label_fontsize) plt.ylabel(ylabel, fontsize=label_fontsize) plt.xticks(ticks, fontsize=tick_fontsize) plt.yticks(ticks, fontsize=tick_fontsize) plt.title(title, fontsize=title_fontsize) plt.legend(**legend_params) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
python
def roc_curve(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Probability of False Detection", ylabel="Probability of Detection", title="ROC Curve", ticks=np.arange(0, 1.1, 0.1), dpi=300, legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14, title_fontsize=16, tick_fontsize=12): """ Plots a set of receiver/relative operating characteristic (ROC) curves from DistributedROC objects. The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection (False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the ROC curves as well as the parameters of the legend and the title. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Labels describing the forecast associated with each DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. legend_params (None, dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model. ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.randint(0, 2, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png") """ if legend_params is None: legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True) plt.figure(figsize=figsize, dpi=dpi) plt.plot(ticks, ticks, "k--") if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in b_set]) pod_range = np.percentile(broc_curves[:, 0], ci, axis=1) pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1) pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1])) pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1])) pod_poly[np.isnan(pod_poly)] = 0 pofd_poly[np.isnan(pofd_poly)] = 0 plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b]) for r, roc_obj in enumerate(roc_objs): roc_data = roc_obj.roc_curve() plt.plot(roc_data["POFD"], roc_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r]) plt.xlabel(xlabel, fontsize=label_fontsize) plt.ylabel(ylabel, fontsize=label_fontsize) plt.xticks(ticks, fontsize=tick_fontsize) plt.yticks(ticks, fontsize=tick_fontsize) plt.title(title, fontsize=title_fontsize) plt.legend(**legend_params) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
[ "def", "roc_curve", "(", "roc_objs", ",", "obj_labels", ",", "colors", ",", "markers", ",", "filename", ",", "figsize", "=", "(", "8", ",", "8", ")", ",", "xlabel", "=", "\"Probability of False Detection\"", ",", "ylabel", "=", "\"Probability of Detection\"", ",", "title", "=", "\"ROC Curve\"", ",", "ticks", "=", "np", ".", "arange", "(", "0", ",", "1.1", ",", "0.1", ")", ",", "dpi", "=", "300", ",", "legend_params", "=", "None", ",", "bootstrap_sets", "=", "None", ",", "ci", "=", "(", "2.5", ",", "97.5", ")", ",", "label_fontsize", "=", "14", ",", "title_fontsize", "=", "16", ",", "tick_fontsize", "=", "12", ")", ":", "if", "legend_params", "is", "None", ":", "legend_params", "=", "dict", "(", "loc", "=", "4", ",", "fontsize", "=", "12", ",", "framealpha", "=", "1", ",", "frameon", "=", "True", ")", "plt", ".", "figure", "(", "figsize", "=", "figsize", ",", "dpi", "=", "dpi", ")", "plt", ".", "plot", "(", "ticks", ",", "ticks", ",", "\"k--\"", ")", "if", "bootstrap_sets", "is", "not", "None", ":", "for", "b", ",", "b_set", "in", "enumerate", "(", "bootstrap_sets", ")", ":", "broc_curves", "=", "np", ".", "dstack", "(", "[", "b_roc", ".", "roc_curve", "(", ")", ".", "values", "for", "b_roc", "in", "b_set", "]", ")", "pod_range", "=", "np", ".", "percentile", "(", "broc_curves", "[", ":", ",", "0", "]", ",", "ci", ",", "axis", "=", "1", ")", "pofd_range", "=", "np", ".", "percentile", "(", "broc_curves", "[", ":", ",", "1", "]", ",", "ci", ",", "axis", "=", "1", ")", "pod_poly", "=", "np", ".", "concatenate", "(", "(", "pod_range", "[", "1", "]", ",", "pod_range", "[", "0", ",", ":", ":", "-", "1", "]", ")", ")", "pofd_poly", "=", "np", ".", "concatenate", "(", "(", "pofd_range", "[", "0", "]", ",", "pofd_range", "[", "1", ",", ":", ":", "-", "1", "]", ")", ")", "pod_poly", "[", "np", ".", "isnan", "(", "pod_poly", ")", "]", "=", "0", "pofd_poly", "[", "np", ".", "isnan", "(", "pofd_poly", ")", "]", "=", "0", "plt", ".", "fill", "(", "pofd_poly", ",", "pod_poly", ",", "alpha", "=", "0.5", ",", "color", "=", "colors", "[", "b", "]", ")", "for", "r", ",", "roc_obj", "in", "enumerate", "(", "roc_objs", ")", ":", "roc_data", "=", "roc_obj", ".", "roc_curve", "(", ")", "plt", ".", "plot", "(", "roc_data", "[", "\"POFD\"", "]", ",", "roc_data", "[", "\"POD\"", "]", ",", "marker", "=", "markers", "[", "r", "]", ",", "color", "=", "colors", "[", "r", "]", ",", "label", "=", "obj_labels", "[", "r", "]", ")", "plt", ".", "xlabel", "(", "xlabel", ",", "fontsize", "=", "label_fontsize", ")", "plt", ".", "ylabel", "(", "ylabel", ",", "fontsize", "=", "label_fontsize", ")", "plt", ".", "xticks", "(", "ticks", ",", "fontsize", "=", "tick_fontsize", ")", "plt", ".", "yticks", "(", "ticks", ",", "fontsize", "=", "tick_fontsize", ")", "plt", ".", "title", "(", "title", ",", "fontsize", "=", "title_fontsize", ")", "plt", ".", "legend", "(", "*", "*", "legend_params", ")", "plt", ".", "savefig", "(", "filename", ",", "dpi", "=", "dpi", ",", "bbox_inches", "=", "\"tight\"", ")", "plt", ".", "close", "(", ")" ]
Plots a set of receiver/relative operating characteristic (ROC) curves from DistributedROC objects. The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection (False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the ROC curves as well as the parameters of the legend and the title. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Labels describing the forecast associated with each DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. legend_params (None, dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model. ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.randint(0, 2, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png")
[ "Plots", "a", "set", "receiver", "/", "relative", "operating", "characteristic", "(", "ROC", ")", "curves", "from", "DistributedROC", "objects", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/MetricPlotter.py#L6-L73
train
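The roc_curve docstring above shows only a single-curve call; the bootstrap_sets argument is easy to miss. Below is a minimal sketch of one way to build a confidence band: resample the forecast/observation pairs with replacement and accumulate one DistributedROC per resample. It assumes DistributedROC's default constructor and update(forecasts, obs) signature behave as in the docstring example; the resampling loop itself is illustrative, not part of hagelslag.

```python
import numpy as np
from hagelslag.evaluation import DistributedROC
from hagelslag.evaluation.MetricPlotter import roc_curve

rng = np.random.default_rng(42)
forecasts = rng.random(1000)
obs = rng.integers(0, 2, 1000)

roc = DistributedROC()
roc.update(forecasts, obs)

# Illustrative bootstrap: resample forecast/observation pairs with
# replacement and build one DistributedROC per resample.
boot_rocs = []
for _ in range(100):
    idx = rng.integers(0, forecasts.size, forecasts.size)
    b_roc = DistributedROC()
    b_roc.update(forecasts[idx], obs[idx])
    boot_rocs.append(b_roc)

# bootstrap_sets expects one list of resampled objects per plotted model.
roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc_ci.png",
          bootstrap_sets=[boot_rocs])
```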
djgagne/hagelslag
hagelslag/evaluation/MetricPlotter.py
performance_diagram
def performance_diagram(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Success Ratio (1-FAR)", ylabel="Probability of Detection", ticks=np.arange(0, 1.1, 0.1), dpi=300, csi_cmap="Blues", csi_label="Critical Success Index", title="Performance Diagram", legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14, title_fontsize=16, tick_fontsize=12): """ Draws a performance diagram from a set of DistributedROC objects. A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear in the upper right corner. The performance diagram is particularly useful for displaying verification for severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously on the same chart. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Labels describing the forecast associated with each DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. csi_cmap (str): Matplotlib colormap used to fill CSI contours. csi_label (str): Label for CSI colormap. legend_params (None or dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None, confidence regions will be plotted. ci (tuple): tuple of bootstrap confidence interval percentiles. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.randint(0, 2, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png") """ if legend_params is None: legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True) plt.figure(figsize=figsize) grid_ticks = np.arange(0, 1.01, 0.01) sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks) bias = pod_g / sr_g csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0) csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1), extend="max", cmap=csi_cmap) b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors="k", linestyles="dashed") plt.clabel(b_contour, fmt="%1.1f", manual=[(0.2, 0.9), (0.4, 0.9), (0.6, 0.9), (0.7, 0.7)]) if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): perf_curves = np.dstack([b_roc.performance_curve().values for b_roc in b_set]) pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1) sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1) pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1])) sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1])) pod_poly[np.isnan(pod_poly)] = 0 sr_poly[np.isnan(sr_poly)] = 1 plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b]) for r, roc_obj in enumerate(roc_objs): perf_data = roc_obj.performance_curve() plt.plot(1 - perf_data["FAR"], perf_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r]) cbar = plt.colorbar(csi_contour) cbar.set_label(csi_label) plt.xlabel(xlabel, fontsize=label_fontsize) plt.ylabel(ylabel, fontsize=label_fontsize) plt.xticks(ticks, fontsize=tick_fontsize) plt.yticks(ticks, fontsize=tick_fontsize) plt.title(title, fontsize=title_fontsize) plt.legend(**legend_params) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
python
def performance_diagram(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Success Ratio (1-FAR)", ylabel="Probability of Detection", ticks=np.arange(0, 1.1, 0.1), dpi=300, csi_cmap="Blues", csi_label="Critical Success Index", title="Performance Diagram", legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14, title_fontsize=16, tick_fontsize=12): """ Draws a performance diagram from a set of DistributedROC objects. A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear in the upper right corner. The performance diagram is particularly useful for displaying verification for severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously on the same chart. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Labels describing the forecast associated with each DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. csi_cmap (str): Matplotlib colormap used to fill CSI contours. csi_label (str): Label for CSI colormap. legend_params (None or dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None, confidence regions will be plotted. ci (tuple): tuple of bootstrap confidence interval percentiles. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.randint(0, 2, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png") """ if legend_params is None: legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True) plt.figure(figsize=figsize) grid_ticks = np.arange(0, 1.01, 0.01) sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks) bias = pod_g / sr_g csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0) csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1), extend="max", cmap=csi_cmap) b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors="k", linestyles="dashed") plt.clabel(b_contour, fmt="%1.1f", manual=[(0.2, 0.9), (0.4, 0.9), (0.6, 0.9), (0.7, 0.7)]) if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): perf_curves = np.dstack([b_roc.performance_curve().values for b_roc in b_set]) pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1) sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1) pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1])) sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1])) pod_poly[np.isnan(pod_poly)] = 0 sr_poly[np.isnan(sr_poly)] = 1 plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b]) for r, roc_obj in enumerate(roc_objs): perf_data = roc_obj.performance_curve() plt.plot(1 - perf_data["FAR"], perf_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r]) cbar = plt.colorbar(csi_contour) cbar.set_label(csi_label) plt.xlabel(xlabel, fontsize=label_fontsize) plt.ylabel(ylabel, fontsize=label_fontsize) plt.xticks(ticks, fontsize=tick_fontsize) plt.yticks(ticks, fontsize=tick_fontsize) plt.title(title, fontsize=title_fontsize) plt.legend(**legend_params) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
[ "def", "performance_diagram", "(", "roc_objs", ",", "obj_labels", ",", "colors", ",", "markers", ",", "filename", ",", "figsize", "=", "(", "8", ",", "8", ")", ",", "xlabel", "=", "\"Success Ratio (1-FAR)\"", ",", "ylabel", "=", "\"Probability of Detection\"", ",", "ticks", "=", "np", ".", "arange", "(", "0", ",", "1.1", ",", "0.1", ")", ",", "dpi", "=", "300", ",", "csi_cmap", "=", "\"Blues\"", ",", "csi_label", "=", "\"Critical Success Index\"", ",", "title", "=", "\"Performance Diagram\"", ",", "legend_params", "=", "None", ",", "bootstrap_sets", "=", "None", ",", "ci", "=", "(", "2.5", ",", "97.5", ")", ",", "label_fontsize", "=", "14", ",", "title_fontsize", "=", "16", ",", "tick_fontsize", "=", "12", ")", ":", "if", "legend_params", "is", "None", ":", "legend_params", "=", "dict", "(", "loc", "=", "4", ",", "fontsize", "=", "10", ",", "framealpha", "=", "1", ",", "frameon", "=", "True", ")", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "grid_ticks", "=", "np", ".", "arange", "(", "0", ",", "1.01", ",", "0.01", ")", "sr_g", ",", "pod_g", "=", "np", ".", "meshgrid", "(", "grid_ticks", ",", "grid_ticks", ")", "bias", "=", "pod_g", "/", "sr_g", "csi", "=", "1.0", "/", "(", "1.0", "/", "sr_g", "+", "1.0", "/", "pod_g", "-", "1.0", ")", "csi_contour", "=", "plt", ".", "contourf", "(", "sr_g", ",", "pod_g", ",", "csi", ",", "np", ".", "arange", "(", "0.1", ",", "1.1", ",", "0.1", ")", ",", "extend", "=", "\"max\"", ",", "cmap", "=", "csi_cmap", ")", "b_contour", "=", "plt", ".", "contour", "(", "sr_g", ",", "pod_g", ",", "bias", ",", "[", "0.5", ",", "1", ",", "1.5", ",", "2", ",", "4", "]", ",", "colors", "=", "\"k\"", ",", "linestyles", "=", "\"dashed\"", ")", "plt", ".", "clabel", "(", "b_contour", ",", "fmt", "=", "\"%1.1f\"", ",", "manual", "=", "[", "(", "0.2", ",", "0.9", ")", ",", "(", "0.4", ",", "0.9", ")", ",", "(", "0.6", ",", "0.9", ")", ",", "(", "0.7", ",", "0.7", ")", "]", ")", "if", "bootstrap_sets", "is", "not", "None", ":", "for", "b", ",", "b_set", "in", "enumerate", "(", "bootstrap_sets", ")", ":", "perf_curves", "=", "np", ".", "dstack", "(", "[", "b_roc", ".", "performance_curve", "(", ")", ".", "values", "for", "b_roc", "in", "b_set", "]", ")", "pod_range", "=", "np", ".", "nanpercentile", "(", "perf_curves", "[", ":", ",", "0", "]", ",", "ci", ",", "axis", "=", "1", ")", "sr_range", "=", "np", ".", "nanpercentile", "(", "1", "-", "perf_curves", "[", ":", ",", "1", "]", ",", "ci", ",", "axis", "=", "1", ")", "pod_poly", "=", "np", ".", "concatenate", "(", "(", "pod_range", "[", "1", "]", ",", "pod_range", "[", "0", ",", ":", ":", "-", "1", "]", ")", ")", "sr_poly", "=", "np", ".", "concatenate", "(", "(", "sr_range", "[", "1", "]", ",", "sr_range", "[", "0", ",", ":", ":", "-", "1", "]", ")", ")", "pod_poly", "[", "np", ".", "isnan", "(", "pod_poly", ")", "]", "=", "0", "sr_poly", "[", "np", ".", "isnan", "(", "sr_poly", ")", "]", "=", "1", "plt", ".", "fill", "(", "sr_poly", ",", "pod_poly", ",", "alpha", "=", "0.5", ",", "color", "=", "colors", "[", "b", "]", ")", "for", "r", ",", "roc_obj", "in", "enumerate", "(", "roc_objs", ")", ":", "perf_data", "=", "roc_obj", ".", "performance_curve", "(", ")", "plt", ".", "plot", "(", "1", "-", "perf_data", "[", "\"FAR\"", "]", ",", "perf_data", "[", "\"POD\"", "]", ",", "marker", "=", "markers", "[", "r", "]", ",", "color", "=", "colors", "[", "r", "]", ",", "label", "=", "obj_labels", "[", "r", "]", ")", "cbar", "=", "plt", ".", "colorbar", "(", "csi_contour", ")", "cbar", ".", "set_label", "(", 
"csi_label", ")", "plt", ".", "xlabel", "(", "xlabel", ",", "fontsize", "=", "label_fontsize", ")", "plt", ".", "ylabel", "(", "ylabel", ",", "fontsize", "=", "label_fontsize", ")", "plt", ".", "xticks", "(", "ticks", ",", "fontsize", "=", "tick_fontsize", ")", "plt", ".", "yticks", "(", "ticks", ",", "fontsize", "=", "tick_fontsize", ")", "plt", ".", "title", "(", "title", ",", "fontsize", "=", "title_fontsize", ")", "plt", ".", "legend", "(", "*", "*", "legend_params", ")", "plt", ".", "savefig", "(", "filename", ",", "dpi", "=", "dpi", ",", "bbox_inches", "=", "\"tight\"", ")", "plt", ".", "close", "(", ")" ]
Draws a performance diagram from a set of DistributedROC objects. A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear in the upper right corner. The performance diagram is particularly useful for displaying verification for severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously on the same chart. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Labels describing the forecast associated with each DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. csi_cmap (str): Matplotlib colormap used to fill CSI contours. csi_label (str): Label for CSI colormap. legend_params (None or dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None, confidence regions will be plotted. ci (tuple): tuple of bootstrap confidence interval percentiles. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.randint(0, 2, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png")
[ "Draws", "a", "performance", "diagram", "from", "a", "set", "of", "DistributedROC", "objects", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/MetricPlotter.py#L76-L159
train
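The CSI contours that performance_diagram draws come from the identity 1/CSI = 1/SR + 1/POD - 1, visible in the code as csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0). It follows from POD = hits / (hits + misses), SR = hits / (hits + false alarms), and CSI = hits / (hits + misses + false alarms). A quick numerical check, with contingency-table counts chosen arbitrarily for illustration:

```python
import numpy as np

def csi_from_pod_sr(pod, sr):
    # CSI contour formula used in performance_diagram:
    # 1/CSI = 1/SR + 1/POD - 1
    return 1.0 / (1.0 / np.asarray(sr) + 1.0 / np.asarray(pod) - 1.0)

# Illustrative contingency table: 40 hits, 10 misses, 20 false alarms.
hits, misses, false_alarms = 40, 10, 20
pod = hits / (hits + misses)                   # 0.8
sr = hits / (hits + false_alarms)              # ~0.667
print(csi_from_pod_sr(pod, sr))                # ~0.5714
print(hits / (hits + misses + false_alarms))   # same value computed directly
```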
djgagne/hagelslag
hagelslag/evaluation/MetricPlotter.py
reliability_diagram
def reliability_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability", ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300, inset_size=1.5, title="Reliability Diagram", legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5)): """ Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their observed relative frequency. Args: rel_objs (list): List of DistributedReliability objects. obj_labels (list): List of labels describing the forecast model associated with each curve. colors (list): List of colors for each line. markers (list): List of line markers. filename (str): Where to save the figure. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): X-axis label. ylabel (str): Y-axis label. ticks (array): Tick value labels for the x and y axes. dpi (int): Resolution of the saved figure in dots per inch. inset_size (float): Size of inset. title (str): Title of figure. legend_params (dict): Keyword arguments for the plot legend. bootstrap_sets (list): A list of arrays of bootstrapped DistributedReliability objects. If not None, confidence regions will be plotted. ci (tuple): Tuple of bootstrap confidence interval percentiles. """ if legend_params is None: legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True) fig, ax = plt.subplots(figsize=figsize) plt.plot(ticks, ticks, "k--") inset_hist = inset_axes(ax, width=inset_size, height=inset_size, loc=2) if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): brel_curves = np.dstack([b_rel.reliability_curve().values for b_rel in b_set]) bin_range = np.percentile(brel_curves[:, 0], ci, axis=1) rel_range = np.percentile(brel_curves[:, 3], ci, axis=1) bin_poly = np.concatenate((bin_range[1], bin_range[0, ::-1])) rel_poly = np.concatenate((rel_range[1], rel_range[0, ::-1])) bin_poly[np.isnan(bin_poly)] = 0 rel_poly[np.isnan(rel_poly)] = 0 plt.fill(bin_poly, rel_poly, alpha=0.5, color=colors[b]) for r, rel_obj in enumerate(rel_objs): rel_curve = rel_obj.reliability_curve() ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r], marker=markers[r], label=obj_labels[r]) inset_hist.semilogy(rel_curve["Bin_Start"], rel_curve["Total_Relative_Freq"], color=colors[r], marker=markers[r]) inset_hist.set_xlabel("Forecast Probability") inset_hist.set_ylabel("Forecast Relative Frequency") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xticks(ticks) ax.set_yticks(ticks) ax.legend(**legend_params) ax.set_title(title) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
python
def reliability_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability", ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300, inset_size=1.5, title="Reliability Diagram", legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5)): """ Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their observed relative frequency. Args: rel_objs (list): List of DistributedReliability objects. obj_labels (list): List of labels describing the forecast model associated with each curve. colors (list): List of colors for each line. markers (list): List of line markers. filename (str): Where to save the figure. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): X-axis label. ylabel (str): Y-axis label. ticks (array): Tick value labels for the x and y axes. dpi (int): Resolution of the saved figure in dots per inch. inset_size (float): Size of inset. title (str): Title of figure. legend_params (dict): Keyword arguments for the plot legend. bootstrap_sets (list): A list of arrays of bootstrapped DistributedReliability objects. If not None, confidence regions will be plotted. ci (tuple): Tuple of bootstrap confidence interval percentiles. """ if legend_params is None: legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True) fig, ax = plt.subplots(figsize=figsize) plt.plot(ticks, ticks, "k--") inset_hist = inset_axes(ax, width=inset_size, height=inset_size, loc=2) if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): brel_curves = np.dstack([b_rel.reliability_curve().values for b_rel in b_set]) bin_range = np.percentile(brel_curves[:, 0], ci, axis=1) rel_range = np.percentile(brel_curves[:, 3], ci, axis=1) bin_poly = np.concatenate((bin_range[1], bin_range[0, ::-1])) rel_poly = np.concatenate((rel_range[1], rel_range[0, ::-1])) bin_poly[np.isnan(bin_poly)] = 0 rel_poly[np.isnan(rel_poly)] = 0 plt.fill(bin_poly, rel_poly, alpha=0.5, color=colors[b]) for r, rel_obj in enumerate(rel_objs): rel_curve = rel_obj.reliability_curve() ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r], marker=markers[r], label=obj_labels[r]) inset_hist.semilogy(rel_curve["Bin_Start"], rel_curve["Total_Relative_Freq"], color=colors[r], marker=markers[r]) inset_hist.set_xlabel("Forecast Probability") inset_hist.set_ylabel("Forecast Relative Frequency") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xticks(ticks) ax.set_yticks(ticks) ax.legend(**legend_params) ax.set_title(title) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
[ "def", "reliability_diagram", "(", "rel_objs", ",", "obj_labels", ",", "colors", ",", "markers", ",", "filename", ",", "figsize", "=", "(", "8", ",", "8", ")", ",", "xlabel", "=", "\"Forecast Probability\"", ",", "ylabel", "=", "\"Observed Relative Frequency\"", ",", "ticks", "=", "np", ".", "arange", "(", "0", ",", "1.05", ",", "0.05", ")", ",", "dpi", "=", "300", ",", "inset_size", "=", "1.5", ",", "title", "=", "\"Reliability Diagram\"", ",", "legend_params", "=", "None", ",", "bootstrap_sets", "=", "None", ",", "ci", "=", "(", "2.5", ",", "97.5", ")", ")", ":", "if", "legend_params", "is", "None", ":", "legend_params", "=", "dict", "(", "loc", "=", "4", ",", "fontsize", "=", "10", ",", "framealpha", "=", "1", ",", "frameon", "=", "True", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "plt", ".", "plot", "(", "ticks", ",", "ticks", ",", "\"k--\"", ")", "inset_hist", "=", "inset_axes", "(", "ax", ",", "width", "=", "inset_size", ",", "height", "=", "inset_size", ",", "loc", "=", "2", ")", "if", "bootstrap_sets", "is", "not", "None", ":", "for", "b", ",", "b_set", "in", "enumerate", "(", "bootstrap_sets", ")", ":", "brel_curves", "=", "np", ".", "dstack", "(", "[", "b_rel", ".", "reliability_curve", "(", ")", ".", "values", "for", "b_rel", "in", "b_set", "]", ")", "bin_range", "=", "np", ".", "percentile", "(", "brel_curves", "[", ":", ",", "0", "]", ",", "ci", ",", "axis", "=", "1", ")", "rel_range", "=", "np", ".", "percentile", "(", "brel_curves", "[", ":", ",", "3", "]", ",", "ci", ",", "axis", "=", "1", ")", "bin_poly", "=", "np", ".", "concatenate", "(", "(", "bin_range", "[", "1", "]", ",", "bin_range", "[", "0", ",", ":", ":", "-", "1", "]", ")", ")", "rel_poly", "=", "np", ".", "concatenate", "(", "(", "rel_range", "[", "1", "]", ",", "rel_range", "[", "0", ",", ":", ":", "-", "1", "]", ")", ")", "bin_poly", "[", "np", ".", "isnan", "(", "bin_poly", ")", "]", "=", "0", "rel_poly", "[", "np", ".", "isnan", "(", "rel_poly", ")", "]", "=", "0", "plt", ".", "fill", "(", "bin_poly", ",", "rel_poly", ",", "alpha", "=", "0.5", ",", "color", "=", "colors", "[", "b", "]", ")", "for", "r", ",", "rel_obj", "in", "enumerate", "(", "rel_objs", ")", ":", "rel_curve", "=", "rel_obj", ".", "reliability_curve", "(", ")", "ax", ".", "plot", "(", "rel_curve", "[", "\"Bin_Start\"", "]", ",", "rel_curve", "[", "\"Positive_Relative_Freq\"", "]", ",", "color", "=", "colors", "[", "r", "]", ",", "marker", "=", "markers", "[", "r", "]", ",", "label", "=", "obj_labels", "[", "r", "]", ")", "inset_hist", ".", "semilogy", "(", "rel_curve", "[", "\"Bin_Start\"", "]", ",", "rel_curve", "[", "\"Total_Relative_Freq\"", "]", ",", "color", "=", "colors", "[", "r", "]", ",", "marker", "=", "markers", "[", "r", "]", ")", "inset_hist", ".", "set_xlabel", "(", "\"Forecast Probability\"", ")", "inset_hist", ".", "set_ylabel", "(", "\"Forecast Relative Frequency\"", ")", "ax", ".", "set_xlabel", "(", "xlabel", ")", "ax", ".", "set_ylabel", "(", "ylabel", ")", "ax", ".", "set_xticks", "(", "ticks", ")", "ax", ".", "set_yticks", "(", "ticks", ")", "ax", ".", "legend", "(", "*", "*", "legend_params", ")", "ax", ".", "set_title", "(", "title", ")", "plt", ".", "savefig", "(", "filename", ",", "dpi", "=", "dpi", ",", "bbox_inches", "=", "\"tight\"", ")", "plt", ".", "close", "(", ")" ]
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their observed relative frequency. Args: rel_objs (list): List of DistributedReliability objects. obj_labels (list): List of labels describing the forecast model associated with each curve. colors (list): List of colors for each line. markers (list): List of line markers. filename (str): Where to save the figure. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): X-axis label. ylabel (str): Y-axis label. ticks (array): Tick value labels for the x and y axes. dpi (int): Resolution of the saved figure in dots per inch. inset_size (float): Size of inset. title (str): Title of figure. legend_params (dict): Keyword arguments for the plot legend. bootstrap_sets (list): A list of arrays of bootstrapped DistributedReliability objects. If not None, confidence regions will be plotted. ci (tuple): Tuple of bootstrap confidence interval percentiles.
[ "Plot", "reliability", "curves", "against", "a", "1", ":", "1", "diagonal", "to", "determine", "if", "probability", "forecasts", "are", "consistent", "with", "their", "observed", "relative", "frequency", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/MetricPlotter.py#L162-L217
train
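The reliability_diagram docstring has no Examples section; a usage sketch in the same style as the roc_curve example might look like the following. It assumes DistributedReliability exposes the same default constructor and update(forecasts, obs) interface that DistributedROC shows above; check hagelslag.evaluation for the exact signature.

```python
import numpy as np
from hagelslag.evaluation import DistributedReliability
from hagelslag.evaluation.MetricPlotter import reliability_diagram

rng = np.random.default_rng(0)
forecasts = rng.random(1000)
obs = rng.integers(0, 2, 1000)

# One reliability object per forecast model; here a single random model.
rel = DistributedReliability()  # assumes defaults exist, as with DistributedROC()
rel.update(forecasts, obs)
reliability_diagram([rel], ["Random"], ["orange"], ["o"], "random_reliability.png")
```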
djgagne/hagelslag
hagelslag/evaluation/MetricPlotter.py
attributes_diagram
def attributes_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability", ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300, title="Attributes Diagram", legend_params=None, inset_params=None, inset_position=(0.12, 0.72, 0.25, 0.25), bootstrap_sets=None, ci=(2.5, 97.5)): """ Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their observed relative frequency. Also adds gray areas to show where the climatological probabilities lie and what areas result in a positive Brier Skill Score. Args: rel_objs (list): List of DistributedReliability objects. obj_labels (list): List of labels describing the forecast model associated with each curve. colors (list): List of colors for each line. markers (list): List of line markers. filename (str): Where to save the figure. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): X-axis label. ylabel (str): Y-axis label. ticks (array): Tick value labels for the x and y axes. dpi (int): Resolution of the saved figure in dots per inch. title (str): Title of figure. legend_params (dict): Keyword arguments for the plot legend. inset_params (dict): Keyword arguments for the inset axis. inset_position (tuple): Position of the inset axis in normalized axes coordinates (left, bottom, width, height). bootstrap_sets (list): A list of arrays of bootstrapped DistributedReliability objects. If not None, confidence regions will be plotted. ci (tuple): Tuple of bootstrap confidence interval percentiles. """ if legend_params is None: legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True) if inset_params is None: inset_params = dict(width="25%", height="25%", loc=2, axes_kwargs=dict(facecolor='white')) fig, ax = plt.subplots(figsize=figsize) plt.plot(ticks, ticks, "k--") inset_hist = inset_axes(ax, **inset_params) ip = InsetPosition(ax, inset_position) inset_hist.set_axes_locator(ip) climo = rel_objs[0].climatology() no_skill = 0.5 * ticks + 0.5 * climo skill_x = [climo, climo, 1, 1, climo, climo, 0, 0, climo] skill_y = [climo, 1, 1, no_skill[-1], climo, 0, 0, no_skill[0], climo] f = ax.fill(skill_x, skill_y, "0.8") f[0].set_zorder(1) ax.plot(ticks, np.ones(ticks.shape) * climo, "k--") if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): brel_curves = np.vstack([b_rel.reliability_curve()["Positive_Relative_Freq"].values for b_rel in b_set]) rel_range = np.nanpercentile(brel_curves, ci, axis=0) fb = ax.fill_between(b_set[0].thresholds[:-1], rel_range[1], rel_range[0], alpha=0.5, color=colors[b]) fb.set_zorder(2) for r, rel_obj in enumerate(rel_objs): rel_curve = rel_obj.reliability_curve() ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r], marker=markers[r], label=obj_labels[r]) inset_hist.semilogy(rel_curve["Bin_Start"] * 100, rel_obj.frequencies["Total_Freq"][:-1], color=colors[r], marker=markers[r]) inset_hist.set_xlabel("Forecast Probability") inset_hist.set_ylabel("Frequency") ax.annotate("No Skill", (0.6, no_skill[12]), rotation=22.5) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xticks(ticks) ax.set_xticklabels((ticks * 100).astype(int)) ax.set_yticks(ticks) ax.set_yticklabels((ticks * 100).astype(int)) ax.legend(**legend_params) ax.set_title(title) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
python
def attributes_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability", ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300, title="Attributes Diagram", legend_params=None, inset_params=None, inset_position=(0.12, 0.72, 0.25, 0.25), bootstrap_sets=None, ci=(2.5, 97.5)): """ Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their observed relative frequency. Also adds gray areas to show where the climatological probabilities lie and what areas result in a positive Brier Skill Score. Args: rel_objs (list): List of DistributedReliability objects. obj_labels (list): List of labels describing the forecast model associated with each curve. colors (list): List of colors for each line. markers (list): List of line markers. filename (str): Where to save the figure. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): X-axis label. ylabel (str): Y-axis label. ticks (array): Tick value labels for the x and y axes. dpi (int): Resolution of the saved figure in dots per inch. title (str): Title of figure. legend_params (dict): Keyword arguments for the plot legend. inset_params (dict): Keyword arguments for the inset axis. inset_position (tuple): Position of the inset axis in normalized axes coordinates (left, bottom, width, height). bootstrap_sets (list): A list of arrays of bootstrapped DistributedReliability objects. If not None, confidence regions will be plotted. ci (tuple): Tuple of bootstrap confidence interval percentiles. """ if legend_params is None: legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True) if inset_params is None: inset_params = dict(width="25%", height="25%", loc=2, axes_kwargs=dict(facecolor='white')) fig, ax = plt.subplots(figsize=figsize) plt.plot(ticks, ticks, "k--") inset_hist = inset_axes(ax, **inset_params) ip = InsetPosition(ax, inset_position) inset_hist.set_axes_locator(ip) climo = rel_objs[0].climatology() no_skill = 0.5 * ticks + 0.5 * climo skill_x = [climo, climo, 1, 1, climo, climo, 0, 0, climo] skill_y = [climo, 1, 1, no_skill[-1], climo, 0, 0, no_skill[0], climo] f = ax.fill(skill_x, skill_y, "0.8") f[0].set_zorder(1) ax.plot(ticks, np.ones(ticks.shape) * climo, "k--") if bootstrap_sets is not None: for b, b_set in enumerate(bootstrap_sets): brel_curves = np.vstack([b_rel.reliability_curve()["Positive_Relative_Freq"].values for b_rel in b_set]) rel_range = np.nanpercentile(brel_curves, ci, axis=0) fb = ax.fill_between(b_set[0].thresholds[:-1], rel_range[1], rel_range[0], alpha=0.5, color=colors[b]) fb.set_zorder(2) for r, rel_obj in enumerate(rel_objs): rel_curve = rel_obj.reliability_curve() ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r], marker=markers[r], label=obj_labels[r]) inset_hist.semilogy(rel_curve["Bin_Start"] * 100, rel_obj.frequencies["Total_Freq"][:-1], color=colors[r], marker=markers[r]) inset_hist.set_xlabel("Forecast Probability") inset_hist.set_ylabel("Frequency") ax.annotate("No Skill", (0.6, no_skill[12]), rotation=22.5) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xticks(ticks) ax.set_xticklabels((ticks * 100).astype(int)) ax.set_yticks(ticks) ax.set_yticklabels((ticks * 100).astype(int)) ax.legend(**legend_params) ax.set_title(title) plt.savefig(filename, dpi=dpi, bbox_inches="tight") plt.close()
[ "def", "attributes_diagram", "(", "rel_objs", ",", "obj_labels", ",", "colors", ",", "markers", ",", "filename", ",", "figsize", "=", "(", "8", ",", "8", ")", ",", "xlabel", "=", "\"Forecast Probability\"", ",", "ylabel", "=", "\"Observed Relative Frequency\"", ",", "ticks", "=", "np", ".", "arange", "(", "0", ",", "1.05", ",", "0.05", ")", ",", "dpi", "=", "300", ",", "title", "=", "\"Attributes Diagram\"", ",", "legend_params", "=", "None", ",", "inset_params", "=", "None", ",", "inset_position", "=", "(", "0.12", ",", "0.72", ",", "0.25", ",", "0.25", ")", ",", "bootstrap_sets", "=", "None", ",", "ci", "=", "(", "2.5", ",", "97.5", ")", ")", ":", "if", "legend_params", "is", "None", ":", "legend_params", "=", "dict", "(", "loc", "=", "4", ",", "fontsize", "=", "10", ",", "framealpha", "=", "1", ",", "frameon", "=", "True", ")", "if", "inset_params", "is", "None", ":", "inset_params", "=", "dict", "(", "width", "=", "\"25%\"", ",", "height", "=", "\"25%\"", ",", "loc", "=", "2", ",", "axes_kwargs", "=", "dict", "(", "axisbg", "=", "'white'", ")", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "plt", ".", "plot", "(", "ticks", ",", "ticks", ",", "\"k--\"", ")", "inset_hist", "=", "inset_axes", "(", "ax", ",", "*", "*", "inset_params", ")", "ip", "=", "InsetPosition", "(", "ax", ",", "inset_position", ")", "inset_hist", ".", "set_axes_locator", "(", "ip", ")", "climo", "=", "rel_objs", "[", "0", "]", ".", "climatology", "(", ")", "no_skill", "=", "0.5", "*", "ticks", "+", "0.5", "*", "climo", "skill_x", "=", "[", "climo", ",", "climo", ",", "1", ",", "1", ",", "climo", ",", "climo", ",", "0", ",", "0", ",", "climo", "]", "skill_y", "=", "[", "climo", ",", "1", ",", "1", ",", "no_skill", "[", "-", "1", "]", ",", "climo", ",", "0", ",", "0", ",", "no_skill", "[", "0", "]", ",", "climo", "]", "f", "=", "ax", ".", "fill", "(", "skill_x", ",", "skill_y", ",", "\"0.8\"", ")", "f", "[", "0", "]", ".", "set_zorder", "(", "1", ")", "ax", ".", "plot", "(", "ticks", ",", "np", ".", "ones", "(", "ticks", ".", "shape", ")", "*", "climo", ",", "\"k--\"", ")", "if", "bootstrap_sets", "is", "not", "None", ":", "for", "b", ",", "b_set", "in", "enumerate", "(", "bootstrap_sets", ")", ":", "brel_curves", "=", "np", ".", "vstack", "(", "[", "b_rel", ".", "reliability_curve", "(", ")", "[", "\"Positive_Relative_Freq\"", "]", ".", "values", "for", "b_rel", "in", "b_set", "]", ")", "rel_range", "=", "np", ".", "nanpercentile", "(", "brel_curves", ",", "ci", ",", "axis", "=", "0", ")", "fb", "=", "ax", ".", "fill_between", "(", "b_rel", ".", "thresholds", "[", ":", "-", "1", "]", ",", "rel_range", "[", "1", "]", ",", "rel_range", "[", "0", "]", ",", "alpha", "=", "0.5", ",", "color", "=", "colors", "[", "b", "]", ")", "fb", ".", "set_zorder", "(", "2", ")", "for", "r", ",", "rel_obj", "in", "enumerate", "(", "rel_objs", ")", ":", "rel_curve", "=", "rel_obj", ".", "reliability_curve", "(", ")", "ax", ".", "plot", "(", "rel_curve", "[", "\"Bin_Start\"", "]", ",", "rel_curve", "[", "\"Positive_Relative_Freq\"", "]", ",", "color", "=", "colors", "[", "r", "]", ",", "marker", "=", "markers", "[", "r", "]", ",", "label", "=", "obj_labels", "[", "r", "]", ")", "inset_hist", ".", "semilogy", "(", "rel_curve", "[", "\"Bin_Start\"", "]", "*", "100", ",", "rel_obj", ".", "frequencies", "[", "\"Total_Freq\"", "]", "[", ":", "-", "1", "]", ",", "color", "=", "colors", "[", "r", "]", ",", "marker", "=", "markers", "[", "r", "]", ")", "inset_hist", ".", "set_xlabel", "(", "\"Forecast 
Probability\"", ")", "inset_hist", ".", "set_ylabel", "(", "\"Frequency\"", ")", "ax", ".", "annotate", "(", "\"No Skill\"", ",", "(", "0.6", ",", "no_skill", "[", "12", "]", ")", ",", "rotation", "=", "22.5", ")", "ax", ".", "set_xlabel", "(", "xlabel", ")", "ax", ".", "set_ylabel", "(", "ylabel", ")", "ax", ".", "set_xticks", "(", "ticks", ")", "ax", ".", "set_xticklabels", "(", "(", "ticks", "*", "100", ")", ".", "astype", "(", "int", ")", ")", "ax", ".", "set_yticks", "(", "ticks", ")", "ax", ".", "set_yticklabels", "(", "(", "ticks", "*", "100", ")", ".", "astype", "(", "int", ")", ")", "ax", ".", "legend", "(", "*", "*", "legend_params", ")", "ax", ".", "set_title", "(", "title", ")", "plt", ".", "savefig", "(", "filename", ",", "dpi", "=", "dpi", ",", "bbox_inches", "=", "\"tight\"", ")", "plt", ".", "close", "(", ")" ]
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their observed relative frequency. Also adds gray areas to show where the climatological probabilities lie and what areas result in a positive Brier Skill Score. Args: rel_objs (list): List of DistributedReliability objects. obj_labels (list): List of labels describing the forecast model associated with each curve. colors (list): List of colors for each line. markers (list): List of line markers. filename (str): Where to save the figure. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): X-axis label. ylabel (str): Y-axis label. ticks (array): Tick value labels for the x and y axes. dpi (int): Resolution of the saved figure in dots per inch. title (str): Title of figure. legend_params (dict): Keyword arguments for the plot legend. inset_params (dict): Keyword arguments for the inset axis. inset_position (tuple): Position of the inset axis in normalized axes coordinates (left, bottom, width, height). bootstrap_sets (list): A list of arrays of bootstrapped DistributedReliability objects. If not None, confidence regions will be plotted. ci (tuple): Tuple of bootstrap confidence interval percentiles.
[ "Plot", "reliability", "curves", "against", "a", "1", ":", "1", "diagonal", "to", "determine", "if", "probability", "forecasts", "are", "consistent", "with", "their", "observed", "relative", "frequency", ".", "Also", "adds", "gray", "areas", "to", "show", "where", "the", "climatological", "probabilities", "lie", "and", "what", "areas", "result", "in", "a", "positive", "Brier", "Skill", "Score", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/MetricPlotter.py#L220-L288
train
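attributes_diagram shades the region of positive Brier Skill Score using a no-skill line halfway between the climatology line and the 1:1 diagonal (the no_skill = 0.5 * ticks + 0.5 * climo expression in the code). A small sketch of that geometry with an assumed base rate, since the real value comes from rel_objs[0].climatology():

```python
import numpy as np

climo = 0.2  # assumed climatological event frequency, for illustration only
ticks = np.arange(0, 1.05, 0.05)

# No-skill line: halfway between the climatology line (y = climo) and the
# perfect-reliability diagonal (y = x). Points between this line and the
# diagonal, on the climatology side, contribute positive Brier Skill Score.
no_skill = 0.5 * ticks + 0.5 * climo
print(no_skill[0], no_skill[-1])  # 0.1 at x=0, 0.6 at x=1
```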
nion-software/nionswift
nion/swift/ScriptsDialog.py
RunScriptDialog.get_string
def get_string(self, prompt, default_str=None) -> str: """Return a string value that the user enters. Raises exception for cancel.""" accept_event = threading.Event() value_ref = [None] def perform(): def accepted(text): value_ref[0] = text accept_event.set() def rejected(): accept_event.set() self.__message_column.remove_all() pose_get_string_message_box(self.ui, self.__message_column, prompt, str(default_str), accepted, rejected) #self.__message_column.add(self.__make_cancel_row()) with self.__lock: self.__q.append(perform) self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q) accept_event.wait() def update_message_column(): self.__message_column.remove_all() self.__message_column.add(self.__make_cancel_row()) self.document_controller.add_task("ui_" + str(id(self)), update_message_column) if value_ref[0] is None: raise Exception("Cancel") return value_ref[0]
python
def get_string(self, prompt, default_str=None) -> str: """Return a string value that the user enters. Raises exception for cancel.""" accept_event = threading.Event() value_ref = [None] def perform(): def accepted(text): value_ref[0] = text accept_event.set() def rejected(): accept_event.set() self.__message_column.remove_all() pose_get_string_message_box(self.ui, self.__message_column, prompt, str(default_str), accepted, rejected) #self.__message_column.add(self.__make_cancel_row()) with self.__lock: self.__q.append(perform) self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q) accept_event.wait() def update_message_column(): self.__message_column.remove_all() self.__message_column.add(self.__make_cancel_row()) self.document_controller.add_task("ui_" + str(id(self)), update_message_column) if value_ref[0] is None: raise Exception("Cancel") return value_ref[0]
[ "def", "get_string", "(", "self", ",", "prompt", ",", "default_str", "=", "None", ")", "->", "str", ":", "accept_event", "=", "threading", ".", "Event", "(", ")", "value_ref", "=", "[", "None", "]", "def", "perform", "(", ")", ":", "def", "accepted", "(", "text", ")", ":", "value_ref", "[", "0", "]", "=", "text", "accept_event", ".", "set", "(", ")", "def", "rejected", "(", ")", ":", "accept_event", ".", "set", "(", ")", "self", ".", "__message_column", ".", "remove_all", "(", ")", "pose_get_string_message_box", "(", "self", ".", "ui", ",", "self", ".", "__message_column", ",", "prompt", ",", "str", "(", "default_str", ")", ",", "accepted", ",", "rejected", ")", "#self.__message_column.add(self.__make_cancel_row())", "with", "self", ".", "__lock", ":", "self", ".", "__q", ".", "append", "(", "perform", ")", "self", ".", "document_controller", ".", "add_task", "(", "\"ui_\"", "+", "str", "(", "id", "(", "self", ")", ")", ",", "self", ".", "__handle_output_and_q", ")", "accept_event", ".", "wait", "(", ")", "def", "update_message_column", "(", ")", ":", "self", ".", "__message_column", ".", "remove_all", "(", ")", "self", ".", "__message_column", ".", "add", "(", "self", ".", "__make_cancel_row", "(", ")", ")", "self", ".", "document_controller", ".", "add_task", "(", "\"ui_\"", "+", "str", "(", "id", "(", "self", ")", ")", ",", "update_message_column", ")", "if", "value_ref", "[", "0", "]", "is", "None", ":", "raise", "Exception", "(", "\"Cancel\"", ")", "return", "value_ref", "[", "0", "]" ]
Return a string value that the user enters. Raises exception for cancel.
[ "Return", "a", "string", "value", "that", "the", "user", "enters", ".", "Raises", "exception", "for", "cancel", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/ScriptsDialog.py#L388-L415
train
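get_string is interesting mostly for its threading pattern: the script thread queues UI work, blocks on a threading.Event, and the UI thread resolves it through callbacks. Below is a standalone sketch of that handoff, with a plain queue standing in for the dialog's task list and the real pose_get_string_message_box call replaced by a stub; it illustrates the pattern, not the nionswift API.

```python
import threading
import queue

ui_tasks = queue.Queue()  # stands in for the dialog's pending-UI-work list

def get_string_from_ui(prompt, default_str=None):
    # Script-thread side: post the request, then block until a callback fires.
    done = threading.Event()
    value_ref = [None]

    def accepted(text):
        value_ref[0] = text
        done.set()

    def rejected():
        done.set()  # leaves value_ref[0] as None, signaling cancel

    # The real dialog posts pose_get_string_message_box here instead.
    ui_tasks.put((prompt, default_str, accepted, rejected))
    done.wait()
    if value_ref[0] is None:
        raise Exception("Cancel")
    return value_ref[0]

def ui_loop_once():
    # UI-thread side (simulated): pop one task and accept it with the default.
    prompt, default, accepted, rejected = ui_tasks.get()
    accepted(default or "user input")

threading.Thread(target=ui_loop_once).start()
print(get_string_from_ui("Name?", "nion"))  # -> "nion"
```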
nion-software/nionswift
nion/swift/ScriptsDialog.py
RunScriptDialog.__accept_reject
def __accept_reject(self, prompt, accepted_text, rejected_text, display_rejected): """Return a boolean value for accept/reject.""" accept_event = threading.Event() result_ref = [False] def perform(): def accepted(): result_ref[0] = True accept_event.set() def rejected(): result_ref[0] = False accept_event.set() self.__message_column.remove_all() pose_confirmation_message_box(self.ui, self.__message_column, prompt, accepted, rejected, accepted_text, rejected_text, display_rejected) #self.__message_column.add(self.__make_cancel_row()) with self.__lock: self.__q.append(perform) self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q) accept_event.wait() def update_message_column(): self.__message_column.remove_all() self.__message_column.add(self.__make_cancel_row()) self.document_controller.add_task("ui_" + str(id(self)), update_message_column) return result_ref[0]
python
def __accept_reject(self, prompt, accepted_text, rejected_text, display_rejected): """Return a boolean value for accept/reject.""" accept_event = threading.Event() result_ref = [False] def perform(): def accepted(): result_ref[0] = True accept_event.set() def rejected(): result_ref[0] = False accept_event.set() self.__message_column.remove_all() pose_confirmation_message_box(self.ui, self.__message_column, prompt, accepted, rejected, accepted_text, rejected_text, display_rejected) #self.__message_column.add(self.__make_cancel_row()) with self.__lock: self.__q.append(perform) self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q) accept_event.wait() def update_message_column(): self.__message_column.remove_all() self.__message_column.add(self.__make_cancel_row()) self.document_controller.add_task("ui_" + str(id(self)), update_message_column) return result_ref[0]
[ "def", "__accept_reject", "(", "self", ",", "prompt", ",", "accepted_text", ",", "rejected_text", ",", "display_rejected", ")", ":", "accept_event", "=", "threading", ".", "Event", "(", ")", "result_ref", "=", "[", "False", "]", "def", "perform", "(", ")", ":", "def", "accepted", "(", ")", ":", "result_ref", "[", "0", "]", "=", "True", "accept_event", ".", "set", "(", ")", "def", "rejected", "(", ")", ":", "result_ref", "[", "0", "]", "=", "False", "accept_event", ".", "set", "(", ")", "self", ".", "__message_column", ".", "remove_all", "(", ")", "pose_confirmation_message_box", "(", "self", ".", "ui", ",", "self", ".", "__message_column", ",", "prompt", ",", "accepted", ",", "rejected", ",", "accepted_text", ",", "rejected_text", ",", "display_rejected", ")", "#self.__message_column.add(self.__make_cancel_row())", "with", "self", ".", "__lock", ":", "self", ".", "__q", ".", "append", "(", "perform", ")", "self", ".", "document_controller", ".", "add_task", "(", "\"ui_\"", "+", "str", "(", "id", "(", "self", ")", ")", ",", "self", ".", "__handle_output_and_q", ")", "accept_event", ".", "wait", "(", ")", "def", "update_message_column", "(", ")", ":", "self", ".", "__message_column", ".", "remove_all", "(", ")", "self", ".", "__message_column", ".", "add", "(", "self", ".", "__make_cancel_row", "(", ")", ")", "self", ".", "document_controller", ".", "add_task", "(", "\"ui_\"", "+", "str", "(", "id", "(", "self", ")", ")", ",", "update_message_column", ")", "return", "result_ref", "[", "0", "]" ]
Return a boolean value for accept/reject.
[ "Return", "a", "boolean", "value", "for", "accept", "/", "reject", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/ScriptsDialog.py#L444-L470
train
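A minimal, self-contained sketch of the cross-thread pattern used by __accept_reject above: the calling thread blocks on a threading.Event while callbacks run elsewhere write the result into a one-element list and set the event. The dialog, message column, and UI task queue are Nion Swift specifics and are replaced here by a plain worker thread.

import threading

def ask_on_other_thread(decide):
    # block until `decide`, run on another thread, calls accept or reject
    done = threading.Event()
    result_ref = [False]  # mutable cell so the callbacks can write the result

    def accepted():
        result_ref[0] = True
        done.set()

    def rejected():
        result_ref[0] = False
        done.set()

    # stand-in for posting a task onto the UI queue
    threading.Thread(target=decide, args=(accepted, rejected)).start()
    done.wait()
    return result_ref[0]

print(ask_on_other_thread(lambda accept, reject: accept()))  # True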
mgraffg/EvoDAG
EvoDAG/node.py
Variable.compute_weight
def compute_weight(self, r, ytr=None, mask=None):
    """Returns the weight (w) using OLS of r * w = gp._ytr """
    ytr = self._ytr if ytr is None else ytr
    mask = self._mask if mask is None else mask
    return compute_weight(r, ytr, mask)
python
def compute_weight(self, r, ytr=None, mask=None):
    """Returns the weight (w) using OLS of r * w = gp._ytr """
    ytr = self._ytr if ytr is None else ytr
    mask = self._mask if mask is None else mask
    return compute_weight(r, ytr, mask)
[ "def", "compute_weight", "(", "self", ",", "r", ",", "ytr", "=", "None", ",", "mask", "=", "None", ")", ":", "ytr", "=", "self", ".", "_ytr", "if", "ytr", "is", "None", "else", "ytr", "mask", "=", "self", ".", "_mask", "if", "mask", "is", "None", "else", "mask", "return", "compute_weight", "(", "r", ",", "ytr", ",", "mask", ")" ]
Returns the weight (w) using OLS of r * w = gp._ytr
[ "Returns", "the", "weight", "(", "w", ")", "using", "OLS", "of", "r", "*", "w", "=", "gp", ".", "_ytr" ]
e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/node.py#L129-L133
train
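The method delegates to a module-level compute_weight helper. A hedged sketch of what an OLS solve for w in r * w = ytr under a boolean mask could look like in plain numpy; EvoDAG's actual helper and its array types may differ.

import numpy as np

def ols_weight(r, ytr, mask):
    # least-squares solve of r @ w = ytr restricted to the masked rows
    w, *_ = np.linalg.lstsq(r[mask], ytr[mask], rcond=None)
    return w

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
y = X @ np.array([1.5, -2.0])
print(ols_weight(X, y, np.ones(100, dtype=bool)))  # close to [1.5, -2.0]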
mgraffg/EvoDAG
EvoDAG/node.py
Variable.isfinite
def isfinite(self):
    "Test whether the predicted values are finite"
    if self._multiple_outputs:
        if self.hy_test is not None:
            r = [(hy.isfinite() and (hyt is None or hyt.isfinite()))
                 for hy, hyt in zip(self.hy, self.hy_test)]
        else:
            r = [hy.isfinite() for hy in self.hy]
        return np.all(r)
    return self.hy.isfinite() and (self.hy_test is None or self.hy_test.isfinite())
python
def isfinite(self):
    "Test whether the predicted values are finite"
    if self._multiple_outputs:
        if self.hy_test is not None:
            r = [(hy.isfinite() and (hyt is None or hyt.isfinite()))
                 for hy, hyt in zip(self.hy, self.hy_test)]
        else:
            r = [hy.isfinite() for hy in self.hy]
        return np.all(r)
    return self.hy.isfinite() and (self.hy_test is None or self.hy_test.isfinite())
[ "def", "isfinite", "(", "self", ")", ":", "if", "self", ".", "_multiple_outputs", ":", "if", "self", ".", "hy_test", "is", "not", "None", ":", "r", "=", "[", "(", "hy", ".", "isfinite", "(", ")", "and", "(", "hyt", "is", "None", "or", "hyt", ".", "isfinite", "(", ")", ")", ")", "for", "hy", ",", "hyt", "in", "zip", "(", "self", ".", "hy", ",", "self", ".", "hy_test", ")", "]", "else", ":", "r", "=", "[", "hy", ".", "isfinite", "(", ")", "for", "hy", "in", "self", ".", "hy", "]", "return", "np", ".", "all", "(", "r", ")", "return", "self", ".", "hy", ".", "isfinite", "(", ")", "and", "(", "self", ".", "hy_test", "is", "None", "or", "self", ".", "hy_test", ".", "isfinite", "(", ")", ")" ]
Test whether the predicted values are finite
[ "Test", "whether", "the", "predicted", "values", "are", "finite" ]
e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/node.py#L192-L202
train
ajk8/hatchery
hatchery/helpers.py
value_of_named_argument_in_function
def value_of_named_argument_in_function(argument_name, function_name, search_str,
                                        resolve_varname=False):
    """ Parse an arbitrary block of python code to get the value of a named argument
    from inside a function call """
    try:
        search_str = unicode(search_str)
    except NameError:
        pass
    readline = StringIO(search_str).readline
    try:
        token_generator = tokenize.generate_tokens(readline)
        tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
    except tokenize.TokenError as e:
        raise ValueError('search_str is not parse-able python code: ' + str(e))
    in_function = False
    is_var = False
    for i in range(len(tokens)):
        if (
            not in_function and
            tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
            tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
        ):
            in_function = True
            continue
        elif (
            in_function and
            tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
            tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
        ):
            # value is set to another variable which we are going to attempt to resolve
            if resolve_varname and tokens[i+2].typenum == 1:
                is_var = True
                argument_name = tokens[i+2].value
                break
            # again, for a very specific usecase -- get the whole value and concatenate it
            # this will match something like _version.__version__
            j = 3
            while True:
                if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
                    break
                j += 1
            return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
    # this is very dumb logic, and only works if the function argument is set to a variable
    # which is set to a string value
    if is_var:
        for i in range(len(tokens)):
            if (
                tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
                tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
            ):
                return tokens[i+2].value.strip()
    return None
python
def value_of_named_argument_in_function(argument_name, function_name, search_str,
                                        resolve_varname=False):
    """ Parse an arbitrary block of python code to get the value of a named argument
    from inside a function call """
    try:
        search_str = unicode(search_str)
    except NameError:
        pass
    readline = StringIO(search_str).readline
    try:
        token_generator = tokenize.generate_tokens(readline)
        tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
    except tokenize.TokenError as e:
        raise ValueError('search_str is not parse-able python code: ' + str(e))
    in_function = False
    is_var = False
    for i in range(len(tokens)):
        if (
            not in_function and
            tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
            tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
        ):
            in_function = True
            continue
        elif (
            in_function and
            tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
            tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
        ):
            # value is set to another variable which we are going to attempt to resolve
            if resolve_varname and tokens[i+2].typenum == 1:
                is_var = True
                argument_name = tokens[i+2].value
                break
            # again, for a very specific usecase -- get the whole value and concatenate it
            # this will match something like _version.__version__
            j = 3
            while True:
                if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
                    break
                j += 1
            return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
    # this is very dumb logic, and only works if the function argument is set to a variable
    # which is set to a string value
    if is_var:
        for i in range(len(tokens)):
            if (
                tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
                tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
            ):
                return tokens[i+2].value.strip()
    return None
[ "def", "value_of_named_argument_in_function", "(", "argument_name", ",", "function_name", ",", "search_str", ",", "resolve_varname", "=", "False", ")", ":", "try", ":", "search_str", "=", "unicode", "(", "search_str", ")", "except", "NameError", ":", "pass", "readline", "=", "StringIO", "(", "search_str", ")", ".", "readline", "try", ":", "token_generator", "=", "tokenize", ".", "generate_tokens", "(", "readline", ")", "tokens", "=", "[", "SimplifiedToken", "(", "toknum", ",", "tokval", ")", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "token_generator", "]", "except", "tokenize", ".", "TokenError", "as", "e", ":", "raise", "ValueError", "(", "'search_str is not parse-able python code: '", "+", "str", "(", "e", ")", ")", "in_function", "=", "False", "is_var", "=", "False", "for", "i", "in", "range", "(", "len", "(", "tokens", ")", ")", ":", "if", "(", "not", "in_function", "and", "tokens", "[", "i", "]", ".", "typenum", "==", "tokenize", ".", "NAME", "and", "tokens", "[", "i", "]", ".", "value", "==", "function_name", "and", "tokens", "[", "i", "+", "1", "]", ".", "typenum", "==", "tokenize", ".", "OP", "and", "tokens", "[", "i", "+", "1", "]", ".", "value", "==", "'('", ")", ":", "in_function", "=", "True", "continue", "elif", "(", "in_function", "and", "tokens", "[", "i", "]", ".", "typenum", "==", "tokenize", ".", "NAME", "and", "tokens", "[", "i", "]", ".", "value", "==", "argument_name", "and", "tokens", "[", "i", "+", "1", "]", ".", "typenum", "==", "tokenize", ".", "OP", "and", "tokens", "[", "i", "+", "1", "]", ".", "value", "==", "'='", ")", ":", "# value is set to another variable which we are going to attempt to resolve", "if", "resolve_varname", "and", "tokens", "[", "i", "+", "2", "]", ".", "typenum", "==", "1", ":", "is_var", "=", "True", "argument_name", "=", "tokens", "[", "i", "+", "2", "]", ".", "value", "break", "# again, for a very specific usecase -- get the whole value and concatenate it", "# this will match something like _version.__version__", "j", "=", "3", "while", "True", ":", "if", "tokens", "[", "i", "+", "j", "]", ".", "value", "in", "(", "','", ",", "')'", ")", "or", "tokens", "[", "i", "+", "j", "]", ".", "typenum", "==", "58", ":", "break", "j", "+=", "1", "return", "''", ".", "join", "(", "[", "t", ".", "value", "for", "t", "in", "tokens", "[", "i", "+", "2", ":", "i", "+", "j", "]", "]", ")", ".", "strip", "(", ")", "# this is very dumb logic, and only works if the function argument is set to a variable", "# which is set to a string value", "if", "is_var", ":", "for", "i", "in", "range", "(", "len", "(", "tokens", ")", ")", ":", "if", "(", "tokens", "[", "i", "]", ".", "typenum", "==", "tokenize", ".", "NAME", "and", "tokens", "[", "i", "]", ".", "value", "==", "argument_name", "and", "tokens", "[", "i", "+", "1", "]", ".", "typenum", "==", "tokenize", ".", "OP", "and", "tokens", "[", "i", "+", "1", "]", ".", "value", "==", "'='", ")", ":", "return", "tokens", "[", "i", "+", "2", "]", ".", "value", ".", "strip", "(", ")", "return", "None" ]
Parse an arbitrary block of python code to get the value of a named argument from inside a function call
[ "Parse", "an", "arbitrary", "block", "of", "python", "code", "to", "get", "the", "value", "of", "a", "named", "argument", "from", "inside", "a", "function", "call" ]
e068c9f5366d2c98225babb03d4cde36c710194f
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L17-L73
train
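Hypothetical usage of the helper above, assuming hatchery is importable; the setup() snippet is invented for illustration. Note that the return value is the raw token text, so string literals keep their quotes.

from hatchery import helpers

source = "from setuptools import setup\nsetup(name='demo', version='1.2.3')"
print(helpers.value_of_named_argument_in_function('version', 'setup', source))
# -> "'1.2.3'"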
ajk8/hatchery
hatchery/helpers.py
regex_in_file
def regex_in_file(regex, filepath, return_match=False):
    """ Search for a regex in a file

    If return_match is True, return the found object instead of a boolean
    """
    file_content = get_file_content(filepath)
    re_method = funcy.re_find if return_match else funcy.re_test
    return re_method(regex, file_content)
python
def regex_in_file(regex, filepath, return_match=False):
    """ Search for a regex in a file

    If return_match is True, return the found object instead of a boolean
    """
    file_content = get_file_content(filepath)
    re_method = funcy.re_find if return_match else funcy.re_test
    return re_method(regex, file_content)
[ "def", "regex_in_file", "(", "regex", ",", "filepath", ",", "return_match", "=", "False", ")", ":", "file_content", "=", "get_file_content", "(", "filepath", ")", "re_method", "=", "funcy", ".", "re_find", "if", "return_match", "else", "funcy", ".", "re_test", "return", "re_method", "(", "regex", ",", "file_content", ")" ]
Search for a regex in a file If return_match is True, return the found object instead of a boolean
[ "Search", "for", "a", "regex", "in", "a", "file" ]
e068c9f5366d2c98225babb03d4cde36c710194f
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L95-L102
train
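Hypothetical usage, assuming hatchery is installed and a setup.py exists in the working directory; the pattern is invented for illustration.

from hatchery import helpers

# boolean test first, then pull the actual matched text
if helpers.regex_in_file(r'__version__', 'setup.py'):
    print(helpers.regex_in_file(r"__version__\s*=\s*\S+", 'setup.py',
                                return_match=True))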
ajk8/hatchery
hatchery/helpers.py
regex_in_package_file
def regex_in_package_file(regex, filename, package_name, return_match=False):
    """ Search for a regex in a file contained within the package directory

    If return_match is True, return the found object instead of a boolean
    """
    filepath = package_file_path(filename, package_name)
    return regex_in_file(regex, filepath, return_match=return_match)
python
def regex_in_package_file(regex, filename, package_name, return_match=False):
    """ Search for a regex in a file contained within the package directory

    If return_match is True, return the found object instead of a boolean
    """
    filepath = package_file_path(filename, package_name)
    return regex_in_file(regex, filepath, return_match=return_match)
[ "def", "regex_in_package_file", "(", "regex", ",", "filename", ",", "package_name", ",", "return_match", "=", "False", ")", ":", "filepath", "=", "package_file_path", "(", "filename", ",", "package_name", ")", "return", "regex_in_file", "(", "regex", ",", "filepath", ",", "return_match", "=", "return_match", ")" ]
Search for a regex in a file contained within the package directory If return_match is True, return the found object instead of a boolean
[ "Search", "for", "a", "regex", "in", "a", "file", "contained", "within", "the", "package", "directory" ]
e068c9f5366d2c98225babb03d4cde36c710194f
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L106-L112
train
ajk8/hatchery
hatchery/helpers.py
string_is_url
def string_is_url(test_str):
    """ Test to see if a string is a URL or not, defined in this case as a
    string for which urlparse returns a scheme component

    >>> string_is_url('somestring')
    False
    >>> string_is_url('https://some.domain.org/path')
    True
    """
    parsed = urlparse.urlparse(test_str)
    return parsed.scheme is not None and parsed.scheme != ''
python
def string_is_url(test_str):
    """ Test to see if a string is a URL or not, defined in this case as a
    string for which urlparse returns a scheme component

    >>> string_is_url('somestring')
    False
    >>> string_is_url('https://some.domain.org/path')
    True
    """
    parsed = urlparse.urlparse(test_str)
    return parsed.scheme is not None and parsed.scheme != ''
[ "def", "string_is_url", "(", "test_str", ")", ":", "parsed", "=", "urlparse", ".", "urlparse", "(", "test_str", ")", "return", "parsed", ".", "scheme", "is", "not", "None", "and", "parsed", ".", "scheme", "!=", "''" ]
Test to see if a string is a URL or not, defined in this case as a string for which urlparse returns a scheme component >>> string_is_url('somestring') False >>> string_is_url('https://some.domain.org/path') True
[ "Test", "to", "see", "if", "a", "string", "is", "a", "URL", "or", "not", "defined", "in", "this", "case", "as", "a", "string", "for", "which", "urlparse", "returns", "a", "scheme", "component" ]
e068c9f5366d2c98225babb03d4cde36c710194f
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/helpers.py#L116-L126
train
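The helper relies on a urlparse name imported elsewhere in the module (kept py2/py3 compatible). A self-contained Python 3 equivalent, assuming nothing beyond the standard library:

from urllib.parse import urlparse

def string_is_url(test_str):
    # a string counts as a URL when urlparse finds a scheme component
    return bool(urlparse(test_str).scheme)

assert not string_is_url('somestring')
assert string_is_url('https://some.domain.org/path')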
nion-software/nionswift
nion/swift/model/DocumentModel.py
TransactionManager.item_transaction
def item_transaction(self, item) -> Transaction:
    """Begin transaction state for item.

    A transaction state exists to prevent writing out to disk, mainly for performance reasons.
    All changes to the object are delayed until the transaction state exits.

    This method is thread safe.
    """
    items = self.__build_transaction_items(item)
    transaction = Transaction(self, item, items)
    self.__transactions.append(transaction)
    return transaction
python
def item_transaction(self, item) -> Transaction:
    """Begin transaction state for item.

    A transaction state exists to prevent writing out to disk, mainly for performance reasons.
    All changes to the object are delayed until the transaction state exits.

    This method is thread safe.
    """
    items = self.__build_transaction_items(item)
    transaction = Transaction(self, item, items)
    self.__transactions.append(transaction)
    return transaction
[ "def", "item_transaction", "(", "self", ",", "item", ")", "->", "Transaction", ":", "items", "=", "self", ".", "__build_transaction_items", "(", "item", ")", "transaction", "=", "Transaction", "(", "self", ",", "item", ",", "items", ")", "self", ".", "__transactions", ".", "append", "(", "transaction", ")", "return", "transaction" ]
Begin transaction state for item. A transaction state exists to prevent writing out to disk, mainly for performance reasons. All changes to the object are delayed until the transaction state exits. This method is thread safe.
[ "Begin", "transaction", "state", "for", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L174-L185
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.insert_data_item
def insert_data_item(self, before_index, data_item, auto_display: bool = True) -> None:
    """Insert a new data item into document model.

    This method is NOT threadsafe.
    """
    assert data_item is not None
    assert data_item not in self.data_items
    assert before_index <= len(self.data_items) and before_index >= 0
    assert data_item.uuid not in self.__uuid_to_data_item
    # update the session
    data_item.session_id = self.session_id
    # insert in internal list
    self.__insert_data_item(before_index, data_item, do_write=True)
    # automatically add a display
    if auto_display:
        display_item = DisplayItem.DisplayItem(data_item=data_item)
        self.append_display_item(display_item)
python
def insert_data_item(self, before_index, data_item, auto_display: bool = True) -> None:
    """Insert a new data item into document model.

    This method is NOT threadsafe.
    """
    assert data_item is not None
    assert data_item not in self.data_items
    assert before_index <= len(self.data_items) and before_index >= 0
    assert data_item.uuid not in self.__uuid_to_data_item
    # update the session
    data_item.session_id = self.session_id
    # insert in internal list
    self.__insert_data_item(before_index, data_item, do_write=True)
    # automatically add a display
    if auto_display:
        display_item = DisplayItem.DisplayItem(data_item=data_item)
        self.append_display_item(display_item)
[ "def", "insert_data_item", "(", "self", ",", "before_index", ",", "data_item", ",", "auto_display", ":", "bool", "=", "True", ")", "->", "None", ":", "assert", "data_item", "is", "not", "None", "assert", "data_item", "not", "in", "self", ".", "data_items", "assert", "before_index", "<=", "len", "(", "self", ".", "data_items", ")", "and", "before_index", ">=", "0", "assert", "data_item", ".", "uuid", "not", "in", "self", ".", "__uuid_to_data_item", "# update the session", "data_item", ".", "session_id", "=", "self", ".", "session_id", "# insert in internal list", "self", ".", "__insert_data_item", "(", "before_index", ",", "data_item", ",", "do_write", "=", "True", ")", "# automatically add a display", "if", "auto_display", ":", "display_item", "=", "DisplayItem", ".", "DisplayItem", "(", "data_item", "=", "data_item", ")", "self", ".", "append_display_item", "(", "display_item", ")" ]
Insert a new data item into document model. This method is NOT threadsafe.
[ "Insert", "a", "new", "data", "item", "into", "document", "model", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L541-L557
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.remove_data_item
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool=False) -> typing.Optional[typing.Sequence]:
    """Remove data item from document model.

    This method is NOT threadsafe.
    """
    # remove data item from any computations
    return self.__cascade_delete(data_item, safe=safe)
python
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool=False) -> typing.Optional[typing.Sequence]:
    """Remove data item from document model.

    This method is NOT threadsafe.
    """
    # remove data item from any computations
    return self.__cascade_delete(data_item, safe=safe)
[ "def", "remove_data_item", "(", "self", ",", "data_item", ":", "DataItem", ".", "DataItem", ",", "*", ",", "safe", ":", "bool", "=", "False", ")", "->", "typing", ".", "Optional", "[", "typing", ".", "Sequence", "]", ":", "# remove data item from any computations", "return", "self", ".", "__cascade_delete", "(", "data_item", ",", "safe", "=", "safe", ")" ]
Remove data item from document model. This method is NOT threadsafe.
[ "Remove", "data", "item", "from", "document", "model", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L586-L592
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.__cascade_delete_inner
def __cascade_delete_inner(self, master_item, safe: bool=False) -> typing.Optional[typing.Sequence]:
    """Cascade delete an item.

    Returns an undelete log that can be used to undo the cascade deletion.

    Builds a cascade of items to be deleted and dependencies to be removed when the passed item is deleted.
    Then removes computations that are no longer valid. Removing a computation may result in more deletions,
    so the process is repeated until nothing more gets removed.

    Next remove dependencies.

    Next remove individual items (from the most distant from the root item to the root item).
    """
    # print(f"cascade {master_item}")
    # this horrible little hack ensures that computation changed messages are delayed until the end of the cascade
    # delete; otherwise there are cases where dependencies can be reestablished during the changed messages while
    # this method is partially finished. ugh. see test_computation_deletes_when_source_cycle_deletes.
    if self.__computation_changed_delay_list is None:
        computation_changed_delay_list = list()
        self.__computation_changed_delay_list = computation_changed_delay_list
    else:
        computation_changed_delay_list = None
    undelete_log = list()
    try:
        items = list()
        dependencies = list()
        self.__build_cascade(master_item, items, dependencies)
        cascaded = True
        while cascaded:
            cascaded = False
            # adjust computation bookkeeping to remove deleted items, then delete unused computations
            items_set = set(items)
            for computation in copy.copy(self.computations):
                output_deleted = master_item in computation._outputs
                computation._inputs -= items_set
                computation._outputs -= items_set
                if computation not in items and computation != self.__current_computation:
                    # computations are auto deleted if all inputs are deleted or any output is deleted
                    if output_deleted or all(input in items for input in computation._inputs):
                        self.__build_cascade(computation, items, dependencies)
                        cascaded = True
        # print(list(reversed(items)))
        # print(list(reversed(dependencies)))
        for source, target in reversed(dependencies):
            self.__remove_dependency(source, target)
        # now delete the actual items
        for item in reversed(items):
            for computation in self.computations:
                new_entries = computation.list_item_removed(item)
                undelete_log.extend(new_entries)
            container = item.container
            if isinstance(item, DataItem.DataItem):
                name = "data_items"
            elif isinstance(item, DisplayItem.DisplayItem):
                name = "display_items"
            elif isinstance(item, Graphics.Graphic):
                name = "graphics"
            elif isinstance(item, DataStructure.DataStructure):
                name = "data_structures"
            elif isinstance(item, Symbolic.Computation):
                name = "computations"
            elif isinstance(item, Connection.Connection):
                name = "connections"
            elif isinstance(item, DisplayItem.DisplayDataChannel):
                name = "display_data_channels"
            else:
                name = None
                assert False, "Unable to cascade delete type " + str(type(item))
            assert name
            # print(container, name, item)
            if container is self and name == "data_items":
                # call the version of __remove_data_item that doesn't cascade again
                index = getattr(container, name).index(item)
                item_dict = item.write_to_dict()
                # NOTE: __remove_data_item will notify_remove_item
                undelete_log.extend(self.__remove_data_item(item, safe=safe))
                undelete_log.append({"type": name, "index": index, "properties": item_dict})
            elif container is self and name == "display_items":
                # call the version of __remove_data_item that doesn't cascade again
                index = getattr(container, name).index(item)
                item_dict = item.write_to_dict()
                # NOTE: __remove_display_item will notify_remove_item
                undelete_log.extend(self.__remove_display_item(item, safe=safe))
                undelete_log.append({"type": name, "index": index, "properties": item_dict})
            elif container:
                container_ref = str(container.uuid)
                index = getattr(container, name).index(item)
                item_dict = item.write_to_dict()
                container_properties = container.save_properties() if hasattr(container, "save_properties") else dict()
                undelete_log.append({"type": name, "container": container_ref, "index": index, "properties": item_dict, "container_properties": container_properties})
                container.remove_item(name, item)
                # handle top level 'remove item' notifications for data structures, computations, and display items here
                # since they're not handled elsewhere.
                if container == self and name in ("data_structures", "computations"):
                    self.notify_remove_item(name, item, index)
    except Exception as e:
        import sys, traceback
        traceback.print_exc()
        traceback.format_exception(*sys.exc_info())
    finally:
        # check whether this call of __cascade_delete is the top level one that will finish the computation
        # changed messages.
        if computation_changed_delay_list is not None:
            self.__finish_computation_changed()
    return undelete_log
python
def __cascade_delete_inner(self, master_item, safe: bool=False) -> typing.Optional[typing.Sequence]:
    """Cascade delete an item.

    Returns an undelete log that can be used to undo the cascade deletion.

    Builds a cascade of items to be deleted and dependencies to be removed when the passed item is deleted.
    Then removes computations that are no longer valid. Removing a computation may result in more deletions,
    so the process is repeated until nothing more gets removed.

    Next remove dependencies.

    Next remove individual items (from the most distant from the root item to the root item).
    """
    # print(f"cascade {master_item}")
    # this horrible little hack ensures that computation changed messages are delayed until the end of the cascade
    # delete; otherwise there are cases where dependencies can be reestablished during the changed messages while
    # this method is partially finished. ugh. see test_computation_deletes_when_source_cycle_deletes.
    if self.__computation_changed_delay_list is None:
        computation_changed_delay_list = list()
        self.__computation_changed_delay_list = computation_changed_delay_list
    else:
        computation_changed_delay_list = None
    undelete_log = list()
    try:
        items = list()
        dependencies = list()
        self.__build_cascade(master_item, items, dependencies)
        cascaded = True
        while cascaded:
            cascaded = False
            # adjust computation bookkeeping to remove deleted items, then delete unused computations
            items_set = set(items)
            for computation in copy.copy(self.computations):
                output_deleted = master_item in computation._outputs
                computation._inputs -= items_set
                computation._outputs -= items_set
                if computation not in items and computation != self.__current_computation:
                    # computations are auto deleted if all inputs are deleted or any output is deleted
                    if output_deleted or all(input in items for input in computation._inputs):
                        self.__build_cascade(computation, items, dependencies)
                        cascaded = True
        # print(list(reversed(items)))
        # print(list(reversed(dependencies)))
        for source, target in reversed(dependencies):
            self.__remove_dependency(source, target)
        # now delete the actual items
        for item in reversed(items):
            for computation in self.computations:
                new_entries = computation.list_item_removed(item)
                undelete_log.extend(new_entries)
            container = item.container
            if isinstance(item, DataItem.DataItem):
                name = "data_items"
            elif isinstance(item, DisplayItem.DisplayItem):
                name = "display_items"
            elif isinstance(item, Graphics.Graphic):
                name = "graphics"
            elif isinstance(item, DataStructure.DataStructure):
                name = "data_structures"
            elif isinstance(item, Symbolic.Computation):
                name = "computations"
            elif isinstance(item, Connection.Connection):
                name = "connections"
            elif isinstance(item, DisplayItem.DisplayDataChannel):
                name = "display_data_channels"
            else:
                name = None
                assert False, "Unable to cascade delete type " + str(type(item))
            assert name
            # print(container, name, item)
            if container is self and name == "data_items":
                # call the version of __remove_data_item that doesn't cascade again
                index = getattr(container, name).index(item)
                item_dict = item.write_to_dict()
                # NOTE: __remove_data_item will notify_remove_item
                undelete_log.extend(self.__remove_data_item(item, safe=safe))
                undelete_log.append({"type": name, "index": index, "properties": item_dict})
            elif container is self and name == "display_items":
                # call the version of __remove_data_item that doesn't cascade again
                index = getattr(container, name).index(item)
                item_dict = item.write_to_dict()
                # NOTE: __remove_display_item will notify_remove_item
                undelete_log.extend(self.__remove_display_item(item, safe=safe))
                undelete_log.append({"type": name, "index": index, "properties": item_dict})
            elif container:
                container_ref = str(container.uuid)
                index = getattr(container, name).index(item)
                item_dict = item.write_to_dict()
                container_properties = container.save_properties() if hasattr(container, "save_properties") else dict()
                undelete_log.append({"type": name, "container": container_ref, "index": index, "properties": item_dict, "container_properties": container_properties})
                container.remove_item(name, item)
                # handle top level 'remove item' notifications for data structures, computations, and display items here
                # since they're not handled elsewhere.
                if container == self and name in ("data_structures", "computations"):
                    self.notify_remove_item(name, item, index)
    except Exception as e:
        import sys, traceback
        traceback.print_exc()
        traceback.format_exception(*sys.exc_info())
    finally:
        # check whether this call of __cascade_delete is the top level one that will finish the computation
        # changed messages.
        if computation_changed_delay_list is not None:
            self.__finish_computation_changed()
    return undelete_log
[ "def", "__cascade_delete_inner", "(", "self", ",", "master_item", ",", "safe", ":", "bool", "=", "False", ")", "->", "typing", ".", "Optional", "[", "typing", ".", "Sequence", "]", ":", "# print(f\"cascade {master_item}\")", "# this horrible little hack ensures that computation changed messages are delayed until the end of the cascade", "# delete; otherwise there are cases where dependencies can be reestablished during the changed messages while", "# this method is partially finished. ugh. see test_computation_deletes_when_source_cycle_deletes.", "if", "self", ".", "__computation_changed_delay_list", "is", "None", ":", "computation_changed_delay_list", "=", "list", "(", ")", "self", ".", "__computation_changed_delay_list", "=", "computation_changed_delay_list", "else", ":", "computation_changed_delay_list", "=", "None", "undelete_log", "=", "list", "(", ")", "try", ":", "items", "=", "list", "(", ")", "dependencies", "=", "list", "(", ")", "self", ".", "__build_cascade", "(", "master_item", ",", "items", ",", "dependencies", ")", "cascaded", "=", "True", "while", "cascaded", ":", "cascaded", "=", "False", "# adjust computation bookkeeping to remove deleted items, then delete unused computations", "items_set", "=", "set", "(", "items", ")", "for", "computation", "in", "copy", ".", "copy", "(", "self", ".", "computations", ")", ":", "output_deleted", "=", "master_item", "in", "computation", ".", "_outputs", "computation", ".", "_inputs", "-=", "items_set", "computation", ".", "_outputs", "-=", "items_set", "if", "computation", "not", "in", "items", "and", "computation", "!=", "self", ".", "__current_computation", ":", "# computations are auto deleted if all inputs are deleted or any output is deleted", "if", "output_deleted", "or", "all", "(", "input", "in", "items", "for", "input", "in", "computation", ".", "_inputs", ")", ":", "self", ".", "__build_cascade", "(", "computation", ",", "items", ",", "dependencies", ")", "cascaded", "=", "True", "# print(list(reversed(items)))", "# print(list(reversed(dependencies)))", "for", "source", ",", "target", "in", "reversed", "(", "dependencies", ")", ":", "self", ".", "__remove_dependency", "(", "source", ",", "target", ")", "# now delete the actual items", "for", "item", "in", "reversed", "(", "items", ")", ":", "for", "computation", "in", "self", ".", "computations", ":", "new_entries", "=", "computation", ".", "list_item_removed", "(", "item", ")", "undelete_log", ".", "extend", "(", "new_entries", ")", "container", "=", "item", ".", "container", "if", "isinstance", "(", "item", ",", "DataItem", ".", "DataItem", ")", ":", "name", "=", "\"data_items\"", "elif", "isinstance", "(", "item", ",", "DisplayItem", ".", "DisplayItem", ")", ":", "name", "=", "\"display_items\"", "elif", "isinstance", "(", "item", ",", "Graphics", ".", "Graphic", ")", ":", "name", "=", "\"graphics\"", "elif", "isinstance", "(", "item", ",", "DataStructure", ".", "DataStructure", ")", ":", "name", "=", "\"data_structures\"", "elif", "isinstance", "(", "item", ",", "Symbolic", ".", "Computation", ")", ":", "name", "=", "\"computations\"", "elif", "isinstance", "(", "item", ",", "Connection", ".", "Connection", ")", ":", "name", "=", "\"connections\"", "elif", "isinstance", "(", "item", ",", "DisplayItem", ".", "DisplayDataChannel", ")", ":", "name", "=", "\"display_data_channels\"", "else", ":", "name", "=", "None", "assert", "False", ",", "\"Unable to cascade delete type \"", "+", "str", "(", "type", "(", "item", ")", ")", "assert", "name", "# print(container, name, item)", 
"if", "container", "is", "self", "and", "name", "==", "\"data_items\"", ":", "# call the version of __remove_data_item that doesn't cascade again", "index", "=", "getattr", "(", "container", ",", "name", ")", ".", "index", "(", "item", ")", "item_dict", "=", "item", ".", "write_to_dict", "(", ")", "# NOTE: __remove_data_item will notify_remove_item", "undelete_log", ".", "extend", "(", "self", ".", "__remove_data_item", "(", "item", ",", "safe", "=", "safe", ")", ")", "undelete_log", ".", "append", "(", "{", "\"type\"", ":", "name", ",", "\"index\"", ":", "index", ",", "\"properties\"", ":", "item_dict", "}", ")", "elif", "container", "is", "self", "and", "name", "==", "\"display_items\"", ":", "# call the version of __remove_data_item that doesn't cascade again", "index", "=", "getattr", "(", "container", ",", "name", ")", ".", "index", "(", "item", ")", "item_dict", "=", "item", ".", "write_to_dict", "(", ")", "# NOTE: __remove_display_item will notify_remove_item", "undelete_log", ".", "extend", "(", "self", ".", "__remove_display_item", "(", "item", ",", "safe", "=", "safe", ")", ")", "undelete_log", ".", "append", "(", "{", "\"type\"", ":", "name", ",", "\"index\"", ":", "index", ",", "\"properties\"", ":", "item_dict", "}", ")", "elif", "container", ":", "container_ref", "=", "str", "(", "container", ".", "uuid", ")", "index", "=", "getattr", "(", "container", ",", "name", ")", ".", "index", "(", "item", ")", "item_dict", "=", "item", ".", "write_to_dict", "(", ")", "container_properties", "=", "container", ".", "save_properties", "(", ")", "if", "hasattr", "(", "container", ",", "\"save_properties\"", ")", "else", "dict", "(", ")", "undelete_log", ".", "append", "(", "{", "\"type\"", ":", "name", ",", "\"container\"", ":", "container_ref", ",", "\"index\"", ":", "index", ",", "\"properties\"", ":", "item_dict", ",", "\"container_properties\"", ":", "container_properties", "}", ")", "container", ".", "remove_item", "(", "name", ",", "item", ")", "# handle top level 'remove item' notifications for data structures, computations, and display items here", "# since they're not handled elsewhere.", "if", "container", "==", "self", "and", "name", "in", "(", "\"data_structures\"", ",", "\"computations\"", ")", ":", "self", ".", "notify_remove_item", "(", "name", ",", "item", ",", "index", ")", "except", "Exception", "as", "e", ":", "import", "sys", ",", "traceback", "traceback", ".", "print_exc", "(", ")", "traceback", ".", "format_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "finally", ":", "# check whether this call of __cascade_delete is the top level one that will finish the computation", "# changed messages.", "if", "computation_changed_delay_list", "is", "not", "None", ":", "self", ".", "__finish_computation_changed", "(", ")", "return", "undelete_log" ]
Cascade delete an item. Returns an undelete log that can be used to undo the cascade deletion. Builds a cascade of items to be deleted and dependencies to be removed when the passed item is deleted. Then removes computations that are no longer valid. Removing a computation may result in more deletions, so the process is repeated until nothing more gets removed. Next remove dependencies. Next remove individual items (from the most distant from the root item to the root item).
[ "Cascade", "delete", "an", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L852-L956
train
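A toy illustration of the undelete-log idea used by __cascade_delete_inner: every removal appends enough information to reinsert the item, and undo replays the log newest-first. The plain list container stands in for the real item containers.

def remove_with_log(container, index, log):
    item = container.pop(index)
    log.append({"index": index, "item": item})  # enough info to restore
    return item

def undelete(container, log):
    for entry in reversed(log):  # replay newest removal first
        container.insert(entry["index"], entry["item"])

items = ["a", "b", "c"]
log = []
remove_with_log(items, 1, log)
remove_with_log(items, 0, log)
assert items == ["c"]
undelete(items, log)
assert items == ["a", "b", "c"]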
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.get_dependent_items
def get_dependent_items(self, item) -> typing.List:
    """Return the list of data items containing data that directly depends on data in this item."""
    with self.__dependency_tree_lock:
        return copy.copy(self.__dependency_tree_source_to_target_map.get(weakref.ref(item), list()))
python
def get_dependent_items(self, item) -> typing.List:
    """Return the list of data items containing data that directly depends on data in this item."""
    with self.__dependency_tree_lock:
        return copy.copy(self.__dependency_tree_source_to_target_map.get(weakref.ref(item), list()))
[ "def", "get_dependent_items", "(", "self", ",", "item", ")", "->", "typing", ".", "List", ":", "with", "self", ".", "__dependency_tree_lock", ":", "return", "copy", ".", "copy", "(", "self", ".", "__dependency_tree_source_to_target_map", ".", "get", "(", "weakref", ".", "ref", "(", "item", ")", ",", "list", "(", ")", ")", ")" ]
Return the list of data items containing data that directly depends on data in this item.
[ "Return", "the", "list", "of", "data", "items", "containing", "data", "that", "directly", "depends", "on", "data", "in", "this", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1151-L1154
train
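A minimal sketch of the weakref-keyed dependency map pattern above: keying on weakref.ref(source) lets the map avoid keeping source items alive, and ref objects compare equal while their referent lives. The Item class and helper names are invented.

import weakref

class Item:
    pass

source_to_targets = {}  # weakref.ref(source) -> list of dependent items

def add_dependency(source, target):
    source_to_targets.setdefault(weakref.ref(source), []).append(target)

def get_dependent_items(item):
    # return a copy, as in the original, so callers cannot mutate the map's lists
    return list(source_to_targets.get(weakref.ref(item), []))

a, b = Item(), Item()
add_dependency(a, b)
assert get_dependent_items(a) == [b]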
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.__get_deep_dependent_item_set
def __get_deep_dependent_item_set(self, item, item_set) -> None:
    """Return the list of data items containing data that directly depends on data in this item."""
    if not item in item_set:
        item_set.add(item)
        with self.__dependency_tree_lock:
            for dependent in self.get_dependent_items(item):
                self.__get_deep_dependent_item_set(dependent, item_set)
python
def __get_deep_dependent_item_set(self, item, item_set) -> None:
    """Return the list of data items containing data that directly depends on data in this item."""
    if not item in item_set:
        item_set.add(item)
        with self.__dependency_tree_lock:
            for dependent in self.get_dependent_items(item):
                self.__get_deep_dependent_item_set(dependent, item_set)
[ "def", "__get_deep_dependent_item_set", "(", "self", ",", "item", ",", "item_set", ")", "->", "None", ":", "if", "not", "item", "in", "item_set", ":", "item_set", ".", "add", "(", "item", ")", "with", "self", ".", "__dependency_tree_lock", ":", "for", "dependent", "in", "self", ".", "get_dependent_items", "(", "item", ")", ":", "self", ".", "__get_deep_dependent_item_set", "(", "dependent", ",", "item_set", ")" ]
Return the list of data items containing data that directly depends on data in this item.
[ "Return", "the", "list", "of", "data", "items", "containing", "data", "that", "directly", "depends", "on", "data", "in", "this", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1156-L1162
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.get_dependent_data_items
def get_dependent_data_items(self, data_item: DataItem.DataItem) -> typing.List[DataItem.DataItem]:
    """Return the list of data items containing data that directly depends on data in this item."""
    with self.__dependency_tree_lock:
        return [data_item for data_item in self.__dependency_tree_source_to_target_map.get(weakref.ref(data_item), list()) if isinstance(data_item, DataItem.DataItem)]
python
def get_dependent_data_items(self, data_item: DataItem.DataItem) -> typing.List[DataItem.DataItem]:
    """Return the list of data items containing data that directly depends on data in this item."""
    with self.__dependency_tree_lock:
        return [data_item for data_item in self.__dependency_tree_source_to_target_map.get(weakref.ref(data_item), list()) if isinstance(data_item, DataItem.DataItem)]
[ "def", "get_dependent_data_items", "(", "self", ",", "data_item", ":", "DataItem", ".", "DataItem", ")", "->", "typing", ".", "List", "[", "DataItem", ".", "DataItem", "]", ":", "with", "self", ".", "__dependency_tree_lock", ":", "return", "[", "data_item", "for", "data_item", "in", "self", ".", "__dependency_tree_source_to_target_map", ".", "get", "(", "weakref", ".", "ref", "(", "data_item", ")", ",", "list", "(", ")", ")", "if", "isinstance", "(", "data_item", ",", "DataItem", ".", "DataItem", ")", "]" ]
Return the list of data items containing data that directly depends on data in this item.
[ "Return", "the", "list", "of", "data", "items", "containing", "data", "that", "directly", "depends", "on", "data", "in", "this", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1168-L1171
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.transaction_context
def transaction_context(self):
    """Return a context object for a document-wide transaction."""
    class DocumentModelTransaction:

        def __init__(self, document_model):
            self.__document_model = document_model

        def __enter__(self):
            self.__document_model.persistent_object_context.enter_write_delay(self.__document_model)
            return self

        def __exit__(self, type, value, traceback):
            self.__document_model.persistent_object_context.exit_write_delay(self.__document_model)
            self.__document_model.persistent_object_context.rewrite_item(self.__document_model)

    return DocumentModelTransaction(self)
python
def transaction_context(self):
    """Return a context object for a document-wide transaction."""
    class DocumentModelTransaction:

        def __init__(self, document_model):
            self.__document_model = document_model

        def __enter__(self):
            self.__document_model.persistent_object_context.enter_write_delay(self.__document_model)
            return self

        def __exit__(self, type, value, traceback):
            self.__document_model.persistent_object_context.exit_write_delay(self.__document_model)
            self.__document_model.persistent_object_context.rewrite_item(self.__document_model)

    return DocumentModelTransaction(self)
[ "def", "transaction_context", "(", "self", ")", ":", "class", "DocumentModelTransaction", ":", "def", "__init__", "(", "self", ",", "document_model", ")", ":", "self", ".", "__document_model", "=", "document_model", "def", "__enter__", "(", "self", ")", ":", "self", ".", "__document_model", ".", "persistent_object_context", ".", "enter_write_delay", "(", "self", ".", "__document_model", ")", "return", "self", "def", "__exit__", "(", "self", ",", "type", ",", "value", ",", "traceback", ")", ":", "self", ".", "__document_model", ".", "persistent_object_context", ".", "exit_write_delay", "(", "self", ".", "__document_model", ")", "self", ".", "__document_model", ".", "persistent_object_context", ".", "rewrite_item", "(", "self", ".", "__document_model", ")", "return", "DocumentModelTransaction", "(", "self", ")" ]
Return a context object for a document-wide transaction.
[ "Return", "a", "context", "object", "for", "a", "document", "-", "wide", "transaction", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1195-L1209
train
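A self-contained sketch of the delayed-write behavior this context object provides: changes accumulate while the context is held and a single rewrite happens on exit. The class below is invented to illustrate the pattern only.

class WriteDelayed:
    def __init__(self):
        self.pending = []
        self.writes = 0

    def change(self, value):
        self.pending.append(value)

    def __enter__(self):
        return self  # enter write delay

    def __exit__(self, exc_type, exc, tb):
        self.writes += 1  # one rewrite for the whole batch
        self.pending.clear()

doc = WriteDelayed()
with doc:
    doc.change(1)
    doc.change(2)
assert doc.writes == 1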
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.data_item_live
def data_item_live(self, data_item):
    """ Return a context manager to put the data item in a 'live state'. """
    class LiveContextManager:

        def __init__(self, manager, object):
            self.__manager = manager
            self.__object = object

        def __enter__(self):
            self.__manager.begin_data_item_live(self.__object)
            return self

        def __exit__(self, type, value, traceback):
            self.__manager.end_data_item_live(self.__object)

    return LiveContextManager(self, data_item)
python
def data_item_live(self, data_item):
    """ Return a context manager to put the data item in a 'live state'. """
    class LiveContextManager:

        def __init__(self, manager, object):
            self.__manager = manager
            self.__object = object

        def __enter__(self):
            self.__manager.begin_data_item_live(self.__object)
            return self

        def __exit__(self, type, value, traceback):
            self.__manager.end_data_item_live(self.__object)

    return LiveContextManager(self, data_item)
[ "def", "data_item_live", "(", "self", ",", "data_item", ")", ":", "class", "LiveContextManager", ":", "def", "__init__", "(", "self", ",", "manager", ",", "object", ")", ":", "self", ".", "__manager", "=", "manager", "self", ".", "__object", "=", "object", "def", "__enter__", "(", "self", ")", ":", "self", ".", "__manager", ".", "begin_data_item_live", "(", "self", ".", "__object", ")", "return", "self", "def", "__exit__", "(", "self", ",", "type", ",", "value", ",", "traceback", ")", ":", "self", ".", "__manager", ".", "end_data_item_live", "(", "self", ".", "__object", ")", "return", "LiveContextManager", "(", "self", ",", "data_item", ")" ]
Return a context manager to put the data item in a 'live state'.
[ "Return", "a", "context", "manager", "to", "put", "the", "data", "item", "in", "a", "live", "state", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1227-L1238
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.begin_data_item_live
def begin_data_item_live(self, data_item):
    """Begins a live state for the data item.

    The live state is propagated to dependent data items.

    This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive.
    """
    with self.__live_data_items_lock:
        old_live_count = self.__live_data_items.get(data_item.uuid, 0)
        self.__live_data_items[data_item.uuid] = old_live_count + 1
    if old_live_count == 0:
        data_item._enter_live_state()
        for dependent_data_item in self.get_dependent_data_items(data_item):
            self.begin_data_item_live(dependent_data_item)
python
def begin_data_item_live(self, data_item):
    """Begins a live state for the data item.

    The live state is propagated to dependent data items.

    This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive.
    """
    with self.__live_data_items_lock:
        old_live_count = self.__live_data_items.get(data_item.uuid, 0)
        self.__live_data_items[data_item.uuid] = old_live_count + 1
    if old_live_count == 0:
        data_item._enter_live_state()
        for dependent_data_item in self.get_dependent_data_items(data_item):
            self.begin_data_item_live(dependent_data_item)
[ "def", "begin_data_item_live", "(", "self", ",", "data_item", ")", ":", "with", "self", ".", "__live_data_items_lock", ":", "old_live_count", "=", "self", ".", "__live_data_items", ".", "get", "(", "data_item", ".", "uuid", ",", "0", ")", "self", ".", "__live_data_items", "[", "data_item", ".", "uuid", "]", "=", "old_live_count", "+", "1", "if", "old_live_count", "==", "0", ":", "data_item", ".", "_enter_live_state", "(", ")", "for", "dependent_data_item", "in", "self", ".", "get_dependent_data_items", "(", "data_item", ")", ":", "self", ".", "begin_data_item_live", "(", "dependent_data_item", ")" ]
Begins a live state for the data item. The live state is propagated to dependent data items. This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive.
[ "Begins", "a", "live", "state", "for", "the", "data", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1240-L1253
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.end_data_item_live
def end_data_item_live(self, data_item):
    """Ends a live state for the data item.

    The live-ness property is propagated to dependent data items, similar to the transactions.

    This method is thread safe.
    """
    with self.__live_data_items_lock:
        live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
        assert live_count >= 0
        self.__live_data_items[data_item.uuid] = live_count
    if live_count == 0:
        data_item._exit_live_state()
        for dependent_data_item in self.get_dependent_data_items(data_item):
            self.end_data_item_live(dependent_data_item)
python
def end_data_item_live(self, data_item):
    """Ends a live state for the data item.

    The live-ness property is propagated to dependent data items, similar to the transactions.

    This method is thread safe.
    """
    with self.__live_data_items_lock:
        live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
        assert live_count >= 0
        self.__live_data_items[data_item.uuid] = live_count
    if live_count == 0:
        data_item._exit_live_state()
        for dependent_data_item in self.get_dependent_data_items(data_item):
            self.end_data_item_live(dependent_data_item)
[ "def", "end_data_item_live", "(", "self", ",", "data_item", ")", ":", "with", "self", ".", "__live_data_items_lock", ":", "live_count", "=", "self", ".", "__live_data_items", ".", "get", "(", "data_item", ".", "uuid", ",", "0", ")", "-", "1", "assert", "live_count", ">=", "0", "self", ".", "__live_data_items", "[", "data_item", ".", "uuid", "]", "=", "live_count", "if", "live_count", "==", "0", ":", "data_item", ".", "_exit_live_state", "(", ")", "for", "dependent_data_item", "in", "self", ".", "get_dependent_data_items", "(", "data_item", ")", ":", "self", ".", "end_data_item_live", "(", "dependent_data_item", ")" ]
Ends a live state for the data item. The live-ness property is propagated to dependent data items, similar to the transactions. This method is thread safe.
[ "Ends", "a", "live", "state", "for", "the", "data", "item", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L1255-L1269
train
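A self-contained sketch of the reference counting behind begin/end_data_item_live: the live state is entered on the 0 -> 1 transition and exited on the 1 -> 0 transition, under a lock for thread safety. Names are invented; propagation to dependents is omitted.

import threading

class LiveCounter:
    def __init__(self):
        self._lock = threading.RLock()
        self._counts = {}   # item id -> live count
        self.live = set()   # ids currently in the live state

    def begin(self, item_id):
        with self._lock:
            old = self._counts.get(item_id, 0)
            self._counts[item_id] = old + 1
            if old == 0:
                self.live.add(item_id)  # 0 -> 1: enter live state

    def end(self, item_id):
        with self._lock:
            count = self._counts.get(item_id, 0) - 1
            assert count >= 0
            self._counts[item_id] = count
            if count == 0:
                self.live.discard(item_id)  # 1 -> 0: exit live state

c = LiveCounter()
c.begin("x"); c.begin("x"); c.end("x")
assert "x" in c.live
c.end("x")
assert "x" not in c.live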
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.__construct_data_item_reference
def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel):
    """Construct a data item reference.

    Construct a data item reference and assign a data item to it. Update data item session id and session metadata.
    Also connect the data channel processor.

    This method is thread safe.
    """
    session_id = self.session_id
    key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id)
    data_item_reference = self.get_data_item_reference(key)
    with data_item_reference.mutex:
        data_item = data_item_reference.data_item
        # if we still don't have a data item, create it.
        if data_item is None:
            data_item = DataItem.DataItem()
            data_item.ensure_data_source()
            data_item.title = "%s (%s)" % (hardware_source.display_name, data_channel.name) if data_channel.name else hardware_source.display_name
            data_item.category = "temporary"
            data_item_reference.data_item = data_item

            def append_data_item():
                self.append_data_item(data_item)
                self._update_data_item_reference(key, data_item)

            self.__call_soon(append_data_item)

        def update_session():
            # update the session, but only if necessary (this is an optimization to prevent unnecessary display updates)
            if data_item.session_id != session_id:
                data_item.session_id = session_id
            session_metadata = ApplicationData.get_session_metadata_dict()
            if data_item.session_metadata != session_metadata:
                data_item.session_metadata = session_metadata
            if data_channel.processor:
                src_data_channel = hardware_source.data_channels[data_channel.src_channel_index]
                src_data_item_reference = self.get_data_item_reference(self.make_data_item_reference_key(hardware_source.hardware_source_id, src_data_channel.channel_id))
                data_channel.processor.connect_data_item_reference(src_data_item_reference)

        self.__call_soon(update_session)
        return data_item_reference
python
def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel):
    """Construct a data item reference.

    Construct a data item reference and assign a data item to it. Update data item session id and session metadata.
    Also connect the data channel processor.

    This method is thread safe.
    """
    session_id = self.session_id
    key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id)
    data_item_reference = self.get_data_item_reference(key)
    with data_item_reference.mutex:
        data_item = data_item_reference.data_item
        # if we still don't have a data item, create it.
        if data_item is None:
            data_item = DataItem.DataItem()
            data_item.ensure_data_source()
            data_item.title = "%s (%s)" % (hardware_source.display_name, data_channel.name) if data_channel.name else hardware_source.display_name
            data_item.category = "temporary"
            data_item_reference.data_item = data_item

            def append_data_item():
                self.append_data_item(data_item)
                self._update_data_item_reference(key, data_item)

            self.__call_soon(append_data_item)

        def update_session():
            # update the session, but only if necessary (this is an optimization to prevent unnecessary display updates)
            if data_item.session_id != session_id:
                data_item.session_id = session_id
            session_metadata = ApplicationData.get_session_metadata_dict()
            if data_item.session_metadata != session_metadata:
                data_item.session_metadata = session_metadata
            if data_channel.processor:
                src_data_channel = hardware_source.data_channels[data_channel.src_channel_index]
                src_data_item_reference = self.get_data_item_reference(self.make_data_item_reference_key(hardware_source.hardware_source_id, src_data_channel.channel_id))
                data_channel.processor.connect_data_item_reference(src_data_item_reference)

        self.__call_soon(update_session)
        return data_item_reference
[ "def", "__construct_data_item_reference", "(", "self", ",", "hardware_source", ":", "HardwareSource", ".", "HardwareSource", ",", "data_channel", ":", "HardwareSource", ".", "DataChannel", ")", ":", "session_id", "=", "self", ".", "session_id", "key", "=", "self", ".", "make_data_item_reference_key", "(", "hardware_source", ".", "hardware_source_id", ",", "data_channel", ".", "channel_id", ")", "data_item_reference", "=", "self", ".", "get_data_item_reference", "(", "key", ")", "with", "data_item_reference", ".", "mutex", ":", "data_item", "=", "data_item_reference", ".", "data_item", "# if we still don't have a data item, create it.", "if", "data_item", "is", "None", ":", "data_item", "=", "DataItem", ".", "DataItem", "(", ")", "data_item", ".", "ensure_data_source", "(", ")", "data_item", ".", "title", "=", "\"%s (%s)\"", "%", "(", "hardware_source", ".", "display_name", ",", "data_channel", ".", "name", ")", "if", "data_channel", ".", "name", "else", "hardware_source", ".", "display_name", "data_item", ".", "category", "=", "\"temporary\"", "data_item_reference", ".", "data_item", "=", "data_item", "def", "append_data_item", "(", ")", ":", "self", ".", "append_data_item", "(", "data_item", ")", "self", ".", "_update_data_item_reference", "(", "key", ",", "data_item", ")", "self", ".", "__call_soon", "(", "append_data_item", ")", "def", "update_session", "(", ")", ":", "# update the session, but only if necessary (this is an optimization to prevent unnecessary display updates)", "if", "data_item", ".", "session_id", "!=", "session_id", ":", "data_item", ".", "session_id", "=", "session_id", "session_metadata", "=", "ApplicationData", ".", "get_session_metadata_dict", "(", ")", "if", "data_item", ".", "session_metadata", "!=", "session_metadata", ":", "data_item", ".", "session_metadata", "=", "session_metadata", "if", "data_channel", ".", "processor", ":", "src_data_channel", "=", "hardware_source", ".", "data_channels", "[", "data_channel", ".", "src_channel_index", "]", "src_data_item_reference", "=", "self", ".", "get_data_item_reference", "(", "self", ".", "make_data_item_reference_key", "(", "hardware_source", ".", "hardware_source_id", ",", "src_data_channel", ".", "channel_id", ")", ")", "data_channel", ".", "processor", ".", "connect_data_item_reference", "(", "src_data_item_reference", ")", "self", ".", "__call_soon", "(", "update_session", ")", "return", "data_item_reference" ]
Construct a data item reference. Construct a data item reference and assign a data item to it. Update data item session id and session metadata. Also connect the data channel processor. This method is thread safe.
[ "Construct", "a", "data", "item", "reference", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L2072-L2113
train
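Usage sketch (not part of the original record): a minimal, self-contained illustration of the thread-safe get-or-create pattern used above. The classes below are hypothetical stand-ins, not the nionswift API; only the mutex-guarded reference lookup and the key format are mirrored from the source.

import threading

class DataItemReference:
    def __init__(self):
        self.mutex = threading.RLock()
        self.data_item = None

class ReferenceRegistry:
    def __init__(self):
        self._lock = threading.Lock()
        self._refs = {}

    @staticmethod
    def make_key(hardware_source_id, channel_id):
        # assumed key format: join the non-empty id parts
        return "_".join(p for p in (hardware_source_id, channel_id) if p)

    def get_data_item_reference(self, key):
        # create-on-first-use under a lock, so concurrent callers share one reference
        with self._lock:
            return self._refs.setdefault(key, DataItemReference())

registry = ReferenceRegistry()
ref = registry.get_data_item_reference(ReferenceRegistry.make_key("camera", "0"))
with ref.mutex:
    if ref.data_item is None:
        ref.data_item = {"title": "Camera (0)", "category": "temporary"}
print(ref.data_item["title"])  # -> Camera (0)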
nion-software/nionswift
nion/swift/model/DocumentModel.py
DocumentModel.__make_computation
def __make_computation(self, processing_id: str, inputs: typing.List[typing.Tuple[DisplayItem.DisplayItem, typing.Optional[Graphics.Graphic]]], region_list_map: typing.Mapping[str, typing.List[Graphics.Graphic]]=None, parameters: typing.Mapping[str, typing.Any]=None) -> DataItem.DataItem: """Create a new data item with computation specified by processing_id, inputs, and region_list_map. The region_list_map associates a list of graphics corresponding to the required regions with a computation source (key). """ region_list_map = region_list_map or dict() parameters = parameters or dict() processing_descriptions = self._processing_descriptions processing_description = processing_descriptions[processing_id] # first process the sources in the description. match them to the inputs (which are data item/crop graphic tuples) src_dicts = processing_description.get("sources", list()) assert len(inputs) == len(src_dicts) src_names = list() src_texts = list() src_labels = list() regions = list() region_map = dict() for i, (src_dict, input) in enumerate(zip(src_dicts, inputs)): display_item = input[0] data_item = display_item.data_items[0] if display_item and len(display_item.data_items) > 0 else None if not data_item: return None # each source can have a list of requirements, check through them requirements = src_dict.get("requirements", list()) for requirement in requirements: requirement_type = requirement["type"] if requirement_type == "dimensionality": min_dimension = requirement.get("min") max_dimension = requirement.get("max") dimensionality = len(data_item.dimensional_shape) if min_dimension is not None and dimensionality < min_dimension: return None if max_dimension is not None and dimensionality > max_dimension: return None if requirement_type == "is_sequence": if not data_item.is_sequence: return None src_name = src_dict["name"] src_label = src_dict["label"] use_display_data = src_dict.get("use_display_data", True) xdata_property = "display_xdata" if use_display_data else "xdata" if src_dict.get("croppable"): xdata_property = "cropped_" + xdata_property elif src_dict.get("use_filtered_data", False): xdata_property = "filtered_" + xdata_property src_text = "{}.{}".format(src_name, xdata_property) src_names.append(src_name) src_texts.append(src_text) src_labels.append(src_label) # each source can have a list of regions to be matched to arguments or created on the source region_dict_list = src_dict.get("regions", list()) src_region_list = region_list_map.get(src_name, list()) assert len(region_dict_list) == len(src_region_list) for region_dict, region in zip(region_dict_list, src_region_list): region_params = region_dict.get("params", dict()) region_type = region_dict["type"] region_name = region_dict["name"] region_label = region_params.get("label") if region_type == "point": if region: assert isinstance(region, Graphics.PointGraphic) point_region = region else: point_region = Graphics.PointGraphic() for k, v in region_params.items(): setattr(point_region, k, v) if display_item: display_item.add_graphic(point_region) regions.append((region_name, point_region, region_label)) region_map[region_name] = point_region elif region_type == "line": if region: assert isinstance(region, Graphics.LineProfileGraphic) line_region = region else: line_region = Graphics.LineProfileGraphic() line_region.start = 0.25, 0.25 line_region.end = 0.75, 0.75 for k, v in region_params.items(): setattr(line_region, k, v) if display_item: display_item.add_graphic(line_region) regions.append((region_name, line_region, 
region_params.get("label"))) region_map[region_name] = line_region elif region_type == "rectangle": if region: assert isinstance(region, Graphics.RectangleGraphic) rect_region = region else: rect_region = Graphics.RectangleGraphic() rect_region.center = 0.5, 0.5 rect_region.size = 0.5, 0.5 for k, v in region_params.items(): setattr(rect_region, k, v) if display_item: display_item.add_graphic(rect_region) regions.append((region_name, rect_region, region_params.get("label"))) region_map[region_name] = rect_region elif region_type == "ellipse": if region: assert isinstance(region, Graphics.EllipseGraphic) ellipse_region = region else: ellipse_region = Graphics.RectangleGraphic() ellipse_region.center = 0.5, 0.5 ellipse_region.size = 0.5, 0.5 for k, v in region_params.items(): setattr(ellipse_region, k, v) if display_item: display_item.add_graphic(ellipse_region) regions.append((region_name, ellipse_region, region_params.get("label"))) region_map[region_name] = ellipse_region elif region_type == "spot": if region: assert isinstance(region, Graphics.SpotGraphic) spot_region = region else: spot_region = Graphics.SpotGraphic() spot_region.center = 0.25, 0.75 spot_region.size = 0.1, 0.1 for k, v in region_params.items(): setattr(spot_region, k, v) if display_item: display_item.add_graphic(spot_region) regions.append((region_name, spot_region, region_params.get("label"))) region_map[region_name] = spot_region elif region_type == "interval": if region: assert isinstance(region, Graphics.IntervalGraphic) interval_region = region else: interval_region = Graphics.IntervalGraphic() for k, v in region_params.items(): setattr(interval_region, k, v) if display_item: display_item.add_graphic(interval_region) regions.append((region_name, interval_region, region_params.get("label"))) region_map[region_name] = interval_region elif region_type == "channel": if region: assert isinstance(region, Graphics.ChannelGraphic) channel_region = region else: channel_region = Graphics.ChannelGraphic() for k, v in region_params.items(): setattr(channel_region, k, v) if display_item: display_item.add_graphic(channel_region) regions.append((region_name, channel_region, region_params.get("label"))) region_map[region_name] = channel_region # now extract the script (full script) or expression (implied imports and return statement) script = processing_description.get("script") if not script: expression = processing_description.get("expression") if expression: script = Symbolic.xdata_expression(expression) assert script # construct the computation script = script.format(**dict(zip(src_names, src_texts))) computation = self.create_computation(script) computation.label = processing_description["title"] computation.processing_id = processing_id # process the data item inputs for src_dict, src_name, src_label, input in zip(src_dicts, src_names, src_labels, inputs): in_display_item = input[0] secondary_specifier = None if src_dict.get("croppable", False): secondary_specifier = self.get_object_specifier(input[1]) display_data_channel = in_display_item.display_data_channel computation.create_object(src_name, self.get_object_specifier(display_data_channel), label=src_label, secondary_specifier=secondary_specifier) # process the regions for region_name, region, region_label in regions: computation.create_object(region_name, self.get_object_specifier(region), label=region_label) # next process the parameters for param_dict in processing_description.get("parameters", list()): parameter_value = parameters.get(param_dict["name"], 
param_dict["value"]) computation.create_variable(param_dict["name"], param_dict["type"], parameter_value, value_default=param_dict.get("value_default"), value_min=param_dict.get("value_min"), value_max=param_dict.get("value_max"), control_type=param_dict.get("control_type"), label=param_dict["label"]) data_item0 = inputs[0][0].data_items[0] new_data_item = DataItem.new_data_item() prefix = "{} of ".format(processing_description["title"]) new_data_item.title = prefix + data_item0.title new_data_item.category = data_item0.category self.append_data_item(new_data_item) new_display_item = self.get_display_item_for_data_item(new_data_item) # next come the output regions that get created on the target itself new_regions = dict() for out_region_dict in processing_description.get("out_regions", list()): region_type = out_region_dict["type"] region_name = out_region_dict["name"] region_params = out_region_dict.get("params", dict()) if region_type == "interval": interval_region = Graphics.IntervalGraphic() for k, v in region_params.items(): setattr(interval_region, k, v) new_display_item.add_graphic(interval_region) new_regions[region_name] = interval_region # now come the connections between the source and target for connection_dict in processing_description.get("connections", list()): connection_type = connection_dict["type"] connection_src = connection_dict["src"] connection_src_prop = connection_dict.get("src_prop") connection_dst = connection_dict["dst"] connection_dst_prop = connection_dict.get("dst_prop") if connection_type == "property": if connection_src == "display_data_channel": # TODO: how to refer to the data_items? hardcode to data_item0 for now. display_item0 = self.get_display_item_for_data_item(data_item0) display_data_channel0 = display_item0.display_data_channel if display_item0 else None connection = Connection.PropertyConnection(display_data_channel0, connection_src_prop, new_regions[connection_dst], connection_dst_prop, parent=new_data_item) self.append_connection(connection) elif connection_type == "interval_list": connection = Connection.IntervalListConnection(new_display_item, region_map[connection_dst], parent=new_data_item) self.append_connection(connection) # save setting the computation until last to work around threaded clone/merge operation bug. # the bug is that setting the computation triggers the recompute to occur on a thread. # the recompute clones the data item and runs the operation. meanwhile this thread # updates the connection. now the recompute finishes and merges back the data item # which was cloned before the connection was established, effectively reversing the # update that matched the graphic interval to the slice interval on the display. # the result is that the slice interval on the display would get set to the default # value of the graphic interval. so don't actually update the computation until after # everything is configured. permanent solution would be to improve the clone/merge to # only update data that had been changed. alternative implementation would only track # changes to the data item and then apply them again to the original during merge. self.set_data_item_computation(new_data_item, computation) return new_data_item
python
def __make_computation(self, processing_id: str, inputs: typing.List[typing.Tuple[DisplayItem.DisplayItem, typing.Optional[Graphics.Graphic]]], region_list_map: typing.Mapping[str, typing.List[Graphics.Graphic]]=None, parameters: typing.Mapping[str, typing.Any]=None) -> DataItem.DataItem: """Create a new data item with computation specified by processing_id, inputs, and region_list_map. The region_list_map associates a list of graphics corresponding to the required regions with a computation source (key). """ region_list_map = region_list_map or dict() parameters = parameters or dict() processing_descriptions = self._processing_descriptions processing_description = processing_descriptions[processing_id] # first process the sources in the description. match them to the inputs (which are data item/crop graphic tuples) src_dicts = processing_description.get("sources", list()) assert len(inputs) == len(src_dicts) src_names = list() src_texts = list() src_labels = list() regions = list() region_map = dict() for i, (src_dict, input) in enumerate(zip(src_dicts, inputs)): display_item = input[0] data_item = display_item.data_items[0] if display_item and len(display_item.data_items) > 0 else None if not data_item: return None # each source can have a list of requirements, check through them requirements = src_dict.get("requirements", list()) for requirement in requirements: requirement_type = requirement["type"] if requirement_type == "dimensionality": min_dimension = requirement.get("min") max_dimension = requirement.get("max") dimensionality = len(data_item.dimensional_shape) if min_dimension is not None and dimensionality < min_dimension: return None if max_dimension is not None and dimensionality > max_dimension: return None if requirement_type == "is_sequence": if not data_item.is_sequence: return None src_name = src_dict["name"] src_label = src_dict["label"] use_display_data = src_dict.get("use_display_data", True) xdata_property = "display_xdata" if use_display_data else "xdata" if src_dict.get("croppable"): xdata_property = "cropped_" + xdata_property elif src_dict.get("use_filtered_data", False): xdata_property = "filtered_" + xdata_property src_text = "{}.{}".format(src_name, xdata_property) src_names.append(src_name) src_texts.append(src_text) src_labels.append(src_label) # each source can have a list of regions to be matched to arguments or created on the source region_dict_list = src_dict.get("regions", list()) src_region_list = region_list_map.get(src_name, list()) assert len(region_dict_list) == len(src_region_list) for region_dict, region in zip(region_dict_list, src_region_list): region_params = region_dict.get("params", dict()) region_type = region_dict["type"] region_name = region_dict["name"] region_label = region_params.get("label") if region_type == "point": if region: assert isinstance(region, Graphics.PointGraphic) point_region = region else: point_region = Graphics.PointGraphic() for k, v in region_params.items(): setattr(point_region, k, v) if display_item: display_item.add_graphic(point_region) regions.append((region_name, point_region, region_label)) region_map[region_name] = point_region elif region_type == "line": if region: assert isinstance(region, Graphics.LineProfileGraphic) line_region = region else: line_region = Graphics.LineProfileGraphic() line_region.start = 0.25, 0.25 line_region.end = 0.75, 0.75 for k, v in region_params.items(): setattr(line_region, k, v) if display_item: display_item.add_graphic(line_region) regions.append((region_name, line_region, 
region_params.get("label"))) region_map[region_name] = line_region elif region_type == "rectangle": if region: assert isinstance(region, Graphics.RectangleGraphic) rect_region = region else: rect_region = Graphics.RectangleGraphic() rect_region.center = 0.5, 0.5 rect_region.size = 0.5, 0.5 for k, v in region_params.items(): setattr(rect_region, k, v) if display_item: display_item.add_graphic(rect_region) regions.append((region_name, rect_region, region_params.get("label"))) region_map[region_name] = rect_region elif region_type == "ellipse": if region: assert isinstance(region, Graphics.EllipseGraphic) ellipse_region = region else: ellipse_region = Graphics.RectangleGraphic() ellipse_region.center = 0.5, 0.5 ellipse_region.size = 0.5, 0.5 for k, v in region_params.items(): setattr(ellipse_region, k, v) if display_item: display_item.add_graphic(ellipse_region) regions.append((region_name, ellipse_region, region_params.get("label"))) region_map[region_name] = ellipse_region elif region_type == "spot": if region: assert isinstance(region, Graphics.SpotGraphic) spot_region = region else: spot_region = Graphics.SpotGraphic() spot_region.center = 0.25, 0.75 spot_region.size = 0.1, 0.1 for k, v in region_params.items(): setattr(spot_region, k, v) if display_item: display_item.add_graphic(spot_region) regions.append((region_name, spot_region, region_params.get("label"))) region_map[region_name] = spot_region elif region_type == "interval": if region: assert isinstance(region, Graphics.IntervalGraphic) interval_region = region else: interval_region = Graphics.IntervalGraphic() for k, v in region_params.items(): setattr(interval_region, k, v) if display_item: display_item.add_graphic(interval_region) regions.append((region_name, interval_region, region_params.get("label"))) region_map[region_name] = interval_region elif region_type == "channel": if region: assert isinstance(region, Graphics.ChannelGraphic) channel_region = region else: channel_region = Graphics.ChannelGraphic() for k, v in region_params.items(): setattr(channel_region, k, v) if display_item: display_item.add_graphic(channel_region) regions.append((region_name, channel_region, region_params.get("label"))) region_map[region_name] = channel_region # now extract the script (full script) or expression (implied imports and return statement) script = processing_description.get("script") if not script: expression = processing_description.get("expression") if expression: script = Symbolic.xdata_expression(expression) assert script # construct the computation script = script.format(**dict(zip(src_names, src_texts))) computation = self.create_computation(script) computation.label = processing_description["title"] computation.processing_id = processing_id # process the data item inputs for src_dict, src_name, src_label, input in zip(src_dicts, src_names, src_labels, inputs): in_display_item = input[0] secondary_specifier = None if src_dict.get("croppable", False): secondary_specifier = self.get_object_specifier(input[1]) display_data_channel = in_display_item.display_data_channel computation.create_object(src_name, self.get_object_specifier(display_data_channel), label=src_label, secondary_specifier=secondary_specifier) # process the regions for region_name, region, region_label in regions: computation.create_object(region_name, self.get_object_specifier(region), label=region_label) # next process the parameters for param_dict in processing_description.get("parameters", list()): parameter_value = parameters.get(param_dict["name"], 
param_dict["value"]) computation.create_variable(param_dict["name"], param_dict["type"], parameter_value, value_default=param_dict.get("value_default"), value_min=param_dict.get("value_min"), value_max=param_dict.get("value_max"), control_type=param_dict.get("control_type"), label=param_dict["label"]) data_item0 = inputs[0][0].data_items[0] new_data_item = DataItem.new_data_item() prefix = "{} of ".format(processing_description["title"]) new_data_item.title = prefix + data_item0.title new_data_item.category = data_item0.category self.append_data_item(new_data_item) new_display_item = self.get_display_item_for_data_item(new_data_item) # next come the output regions that get created on the target itself new_regions = dict() for out_region_dict in processing_description.get("out_regions", list()): region_type = out_region_dict["type"] region_name = out_region_dict["name"] region_params = out_region_dict.get("params", dict()) if region_type == "interval": interval_region = Graphics.IntervalGraphic() for k, v in region_params.items(): setattr(interval_region, k, v) new_display_item.add_graphic(interval_region) new_regions[region_name] = interval_region # now come the connections between the source and target for connection_dict in processing_description.get("connections", list()): connection_type = connection_dict["type"] connection_src = connection_dict["src"] connection_src_prop = connection_dict.get("src_prop") connection_dst = connection_dict["dst"] connection_dst_prop = connection_dict.get("dst_prop") if connection_type == "property": if connection_src == "display_data_channel": # TODO: how to refer to the data_items? hardcode to data_item0 for now. display_item0 = self.get_display_item_for_data_item(data_item0) display_data_channel0 = display_item0.display_data_channel if display_item0 else None connection = Connection.PropertyConnection(display_data_channel0, connection_src_prop, new_regions[connection_dst], connection_dst_prop, parent=new_data_item) self.append_connection(connection) elif connection_type == "interval_list": connection = Connection.IntervalListConnection(new_display_item, region_map[connection_dst], parent=new_data_item) self.append_connection(connection) # save setting the computation until last to work around threaded clone/merge operation bug. # the bug is that setting the computation triggers the recompute to occur on a thread. # the recompute clones the data item and runs the operation. meanwhile this thread # updates the connection. now the recompute finishes and merges back the data item # which was cloned before the connection was established, effectively reversing the # update that matched the graphic interval to the slice interval on the display. # the result is that the slice interval on the display would get set to the default # value of the graphic interval. so don't actually update the computation until after # everything is configured. permanent solution would be to improve the clone/merge to # only update data that had been changed. alternative implementation would only track # changes to the data item and then apply them again to the original during merge. self.set_data_item_computation(new_data_item, computation) return new_data_item
[ "def", "__make_computation", "(", "self", ",", "processing_id", ":", "str", ",", "inputs", ":", "typing", ".", "List", "[", "typing", ".", "Tuple", "[", "DisplayItem", ".", "DisplayItem", ",", "typing", ".", "Optional", "[", "Graphics", ".", "Graphic", "]", "]", "]", ",", "region_list_map", ":", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "List", "[", "Graphics", ".", "Graphic", "]", "]", "=", "None", ",", "parameters", ":", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", "=", "None", ")", "->", "DataItem", ".", "DataItem", ":", "region_list_map", "=", "region_list_map", "or", "dict", "(", ")", "parameters", "=", "parameters", "or", "dict", "(", ")", "processing_descriptions", "=", "self", ".", "_processing_descriptions", "processing_description", "=", "processing_descriptions", "[", "processing_id", "]", "# first process the sources in the description. match them to the inputs (which are data item/crop graphic tuples)", "src_dicts", "=", "processing_description", ".", "get", "(", "\"sources\"", ",", "list", "(", ")", ")", "assert", "len", "(", "inputs", ")", "==", "len", "(", "src_dicts", ")", "src_names", "=", "list", "(", ")", "src_texts", "=", "list", "(", ")", "src_labels", "=", "list", "(", ")", "regions", "=", "list", "(", ")", "region_map", "=", "dict", "(", ")", "for", "i", ",", "(", "src_dict", ",", "input", ")", "in", "enumerate", "(", "zip", "(", "src_dicts", ",", "inputs", ")", ")", ":", "display_item", "=", "input", "[", "0", "]", "data_item", "=", "display_item", ".", "data_items", "[", "0", "]", "if", "display_item", "and", "len", "(", "display_item", ".", "data_items", ")", ">", "0", "else", "None", "if", "not", "data_item", ":", "return", "None", "# each source can have a list of requirements, check through them", "requirements", "=", "src_dict", ".", "get", "(", "\"requirements\"", ",", "list", "(", ")", ")", "for", "requirement", "in", "requirements", ":", "requirement_type", "=", "requirement", "[", "\"type\"", "]", "if", "requirement_type", "==", "\"dimensionality\"", ":", "min_dimension", "=", "requirement", ".", "get", "(", "\"min\"", ")", "max_dimension", "=", "requirement", ".", "get", "(", "\"max\"", ")", "dimensionality", "=", "len", "(", "data_item", ".", "dimensional_shape", ")", "if", "min_dimension", "is", "not", "None", "and", "dimensionality", "<", "min_dimension", ":", "return", "None", "if", "max_dimension", "is", "not", "None", "and", "dimensionality", ">", "max_dimension", ":", "return", "None", "if", "requirement_type", "==", "\"is_sequence\"", ":", "if", "not", "data_item", ".", "is_sequence", ":", "return", "None", "src_name", "=", "src_dict", "[", "\"name\"", "]", "src_label", "=", "src_dict", "[", "\"label\"", "]", "use_display_data", "=", "src_dict", ".", "get", "(", "\"use_display_data\"", ",", "True", ")", "xdata_property", "=", "\"display_xdata\"", "if", "use_display_data", "else", "\"xdata\"", "if", "src_dict", ".", "get", "(", "\"croppable\"", ")", ":", "xdata_property", "=", "\"cropped_\"", "+", "xdata_property", "elif", "src_dict", ".", "get", "(", "\"use_filtered_data\"", ",", "False", ")", ":", "xdata_property", "=", "\"filtered_\"", "+", "xdata_property", "src_text", "=", "\"{}.{}\"", ".", "format", "(", "src_name", ",", "xdata_property", ")", "src_names", ".", "append", "(", "src_name", ")", "src_texts", ".", "append", "(", "src_text", ")", "src_labels", ".", "append", "(", "src_label", ")", "# each source can have a list of regions to be matched to arguments or created on the source", 
"region_dict_list", "=", "src_dict", ".", "get", "(", "\"regions\"", ",", "list", "(", ")", ")", "src_region_list", "=", "region_list_map", ".", "get", "(", "src_name", ",", "list", "(", ")", ")", "assert", "len", "(", "region_dict_list", ")", "==", "len", "(", "src_region_list", ")", "for", "region_dict", ",", "region", "in", "zip", "(", "region_dict_list", ",", "src_region_list", ")", ":", "region_params", "=", "region_dict", ".", "get", "(", "\"params\"", ",", "dict", "(", ")", ")", "region_type", "=", "region_dict", "[", "\"type\"", "]", "region_name", "=", "region_dict", "[", "\"name\"", "]", "region_label", "=", "region_params", ".", "get", "(", "\"label\"", ")", "if", "region_type", "==", "\"point\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "PointGraphic", ")", "point_region", "=", "region", "else", ":", "point_region", "=", "Graphics", ".", "PointGraphic", "(", ")", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "point_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "point_region", ")", "regions", ".", "append", "(", "(", "region_name", ",", "point_region", ",", "region_label", ")", ")", "region_map", "[", "region_name", "]", "=", "point_region", "elif", "region_type", "==", "\"line\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "LineProfileGraphic", ")", "line_region", "=", "region", "else", ":", "line_region", "=", "Graphics", ".", "LineProfileGraphic", "(", ")", "line_region", ".", "start", "=", "0.25", ",", "0.25", "line_region", ".", "end", "=", "0.75", ",", "0.75", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "line_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "line_region", ")", "regions", ".", "append", "(", "(", "region_name", ",", "line_region", ",", "region_params", ".", "get", "(", "\"label\"", ")", ")", ")", "region_map", "[", "region_name", "]", "=", "line_region", "elif", "region_type", "==", "\"rectangle\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "RectangleGraphic", ")", "rect_region", "=", "region", "else", ":", "rect_region", "=", "Graphics", ".", "RectangleGraphic", "(", ")", "rect_region", ".", "center", "=", "0.5", ",", "0.5", "rect_region", ".", "size", "=", "0.5", ",", "0.5", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "rect_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "rect_region", ")", "regions", ".", "append", "(", "(", "region_name", ",", "rect_region", ",", "region_params", ".", "get", "(", "\"label\"", ")", ")", ")", "region_map", "[", "region_name", "]", "=", "rect_region", "elif", "region_type", "==", "\"ellipse\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "EllipseGraphic", ")", "ellipse_region", "=", "region", "else", ":", "ellipse_region", "=", "Graphics", ".", "RectangleGraphic", "(", ")", "ellipse_region", ".", "center", "=", "0.5", ",", "0.5", "ellipse_region", ".", "size", "=", "0.5", ",", "0.5", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "ellipse_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "ellipse_region", ")", "regions", ".", "append", "(", "(", "region_name", 
",", "ellipse_region", ",", "region_params", ".", "get", "(", "\"label\"", ")", ")", ")", "region_map", "[", "region_name", "]", "=", "ellipse_region", "elif", "region_type", "==", "\"spot\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "SpotGraphic", ")", "spot_region", "=", "region", "else", ":", "spot_region", "=", "Graphics", ".", "SpotGraphic", "(", ")", "spot_region", ".", "center", "=", "0.25", ",", "0.75", "spot_region", ".", "size", "=", "0.1", ",", "0.1", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "spot_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "spot_region", ")", "regions", ".", "append", "(", "(", "region_name", ",", "spot_region", ",", "region_params", ".", "get", "(", "\"label\"", ")", ")", ")", "region_map", "[", "region_name", "]", "=", "spot_region", "elif", "region_type", "==", "\"interval\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "IntervalGraphic", ")", "interval_region", "=", "region", "else", ":", "interval_region", "=", "Graphics", ".", "IntervalGraphic", "(", ")", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "interval_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "interval_region", ")", "regions", ".", "append", "(", "(", "region_name", ",", "interval_region", ",", "region_params", ".", "get", "(", "\"label\"", ")", ")", ")", "region_map", "[", "region_name", "]", "=", "interval_region", "elif", "region_type", "==", "\"channel\"", ":", "if", "region", ":", "assert", "isinstance", "(", "region", ",", "Graphics", ".", "ChannelGraphic", ")", "channel_region", "=", "region", "else", ":", "channel_region", "=", "Graphics", ".", "ChannelGraphic", "(", ")", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "channel_region", ",", "k", ",", "v", ")", "if", "display_item", ":", "display_item", ".", "add_graphic", "(", "channel_region", ")", "regions", ".", "append", "(", "(", "region_name", ",", "channel_region", ",", "region_params", ".", "get", "(", "\"label\"", ")", ")", ")", "region_map", "[", "region_name", "]", "=", "channel_region", "# now extract the script (full script) or expression (implied imports and return statement)", "script", "=", "processing_description", ".", "get", "(", "\"script\"", ")", "if", "not", "script", ":", "expression", "=", "processing_description", ".", "get", "(", "\"expression\"", ")", "if", "expression", ":", "script", "=", "Symbolic", ".", "xdata_expression", "(", "expression", ")", "assert", "script", "# construct the computation", "script", "=", "script", ".", "format", "(", "*", "*", "dict", "(", "zip", "(", "src_names", ",", "src_texts", ")", ")", ")", "computation", "=", "self", ".", "create_computation", "(", "script", ")", "computation", ".", "label", "=", "processing_description", "[", "\"title\"", "]", "computation", ".", "processing_id", "=", "processing_id", "# process the data item inputs", "for", "src_dict", ",", "src_name", ",", "src_label", ",", "input", "in", "zip", "(", "src_dicts", ",", "src_names", ",", "src_labels", ",", "inputs", ")", ":", "in_display_item", "=", "input", "[", "0", "]", "secondary_specifier", "=", "None", "if", "src_dict", ".", "get", "(", "\"croppable\"", ",", "False", ")", ":", "secondary_specifier", "=", "self", ".", "get_object_specifier", "(", "input", "[", "1", 
"]", ")", "display_data_channel", "=", "in_display_item", ".", "display_data_channel", "computation", ".", "create_object", "(", "src_name", ",", "self", ".", "get_object_specifier", "(", "display_data_channel", ")", ",", "label", "=", "src_label", ",", "secondary_specifier", "=", "secondary_specifier", ")", "# process the regions", "for", "region_name", ",", "region", ",", "region_label", "in", "regions", ":", "computation", ".", "create_object", "(", "region_name", ",", "self", ".", "get_object_specifier", "(", "region", ")", ",", "label", "=", "region_label", ")", "# next process the parameters", "for", "param_dict", "in", "processing_description", ".", "get", "(", "\"parameters\"", ",", "list", "(", ")", ")", ":", "parameter_value", "=", "parameters", ".", "get", "(", "param_dict", "[", "\"name\"", "]", ",", "param_dict", "[", "\"value\"", "]", ")", "computation", ".", "create_variable", "(", "param_dict", "[", "\"name\"", "]", ",", "param_dict", "[", "\"type\"", "]", ",", "parameter_value", ",", "value_default", "=", "param_dict", ".", "get", "(", "\"value_default\"", ")", ",", "value_min", "=", "param_dict", ".", "get", "(", "\"value_min\"", ")", ",", "value_max", "=", "param_dict", ".", "get", "(", "\"value_max\"", ")", ",", "control_type", "=", "param_dict", ".", "get", "(", "\"control_type\"", ")", ",", "label", "=", "param_dict", "[", "\"label\"", "]", ")", "data_item0", "=", "inputs", "[", "0", "]", "[", "0", "]", ".", "data_items", "[", "0", "]", "new_data_item", "=", "DataItem", ".", "new_data_item", "(", ")", "prefix", "=", "\"{} of \"", ".", "format", "(", "processing_description", "[", "\"title\"", "]", ")", "new_data_item", ".", "title", "=", "prefix", "+", "data_item0", ".", "title", "new_data_item", ".", "category", "=", "data_item0", ".", "category", "self", ".", "append_data_item", "(", "new_data_item", ")", "new_display_item", "=", "self", ".", "get_display_item_for_data_item", "(", "new_data_item", ")", "# next come the output regions that get created on the target itself", "new_regions", "=", "dict", "(", ")", "for", "out_region_dict", "in", "processing_description", ".", "get", "(", "\"out_regions\"", ",", "list", "(", ")", ")", ":", "region_type", "=", "out_region_dict", "[", "\"type\"", "]", "region_name", "=", "out_region_dict", "[", "\"name\"", "]", "region_params", "=", "out_region_dict", ".", "get", "(", "\"params\"", ",", "dict", "(", ")", ")", "if", "region_type", "==", "\"interval\"", ":", "interval_region", "=", "Graphics", ".", "IntervalGraphic", "(", ")", "for", "k", ",", "v", "in", "region_params", ".", "items", "(", ")", ":", "setattr", "(", "interval_region", ",", "k", ",", "v", ")", "new_display_item", ".", "add_graphic", "(", "interval_region", ")", "new_regions", "[", "region_name", "]", "=", "interval_region", "# now come the connections between the source and target", "for", "connection_dict", "in", "processing_description", ".", "get", "(", "\"connections\"", ",", "list", "(", ")", ")", ":", "connection_type", "=", "connection_dict", "[", "\"type\"", "]", "connection_src", "=", "connection_dict", "[", "\"src\"", "]", "connection_src_prop", "=", "connection_dict", ".", "get", "(", "\"src_prop\"", ")", "connection_dst", "=", "connection_dict", "[", "\"dst\"", "]", "connection_dst_prop", "=", "connection_dict", ".", "get", "(", "\"dst_prop\"", ")", "if", "connection_type", "==", "\"property\"", ":", "if", "connection_src", "==", "\"display_data_channel\"", ":", "# TODO: how to refer to the data_items? 
hardcode to data_item0 for now.", "display_item0", "=", "self", ".", "get_display_item_for_data_item", "(", "data_item0", ")", "display_data_channel0", "=", "display_item0", ".", "display_data_channel", "if", "display_item0", "else", "None", "connection", "=", "Connection", ".", "PropertyConnection", "(", "display_data_channel0", ",", "connection_src_prop", ",", "new_regions", "[", "connection_dst", "]", ",", "connection_dst_prop", ",", "parent", "=", "new_data_item", ")", "self", ".", "append_connection", "(", "connection", ")", "elif", "connection_type", "==", "\"interval_list\"", ":", "connection", "=", "Connection", ".", "IntervalListConnection", "(", "new_display_item", ",", "region_map", "[", "connection_dst", "]", ",", "parent", "=", "new_data_item", ")", "self", ".", "append_connection", "(", "connection", ")", "# save setting the computation until last to work around threaded clone/merge operation bug.", "# the bug is that setting the computation triggers the recompute to occur on a thread.", "# the recompute clones the data item and runs the operation. meanwhile this thread", "# updates the connection. now the recompute finishes and merges back the data item", "# which was cloned before the connection was established, effectively reversing the", "# update that matched the graphic interval to the slice interval on the display.", "# the result is that the slice interval on the display would get set to the default", "# value of the graphic interval. so don't actually update the computation until after", "# everything is configured. permanent solution would be to improve the clone/merge to", "# only update data that had been changed. alternative implementation would only track", "# changes to the data item and then apply them again to the original during merge.", "self", ".", "set_data_item_computation", "(", "new_data_item", ",", "computation", ")", "return", "new_data_item" ]
Create a new data item with computation specified by processing_id, inputs, and region_list_map. The region_list_map associates a list of graphics corresponding to the required regions with a computation source (key).
[ "Create", "a", "new", "data", "item", "with", "computation", "specified", "by", "processing_id", "inputs", "and", "region_list_map", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L2389-L2633
train
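For orientation (not part of the original record): __make_computation is driven by a processing description dict. The sketch below is a hypothetical description shaped only by the keys the parser above reads ("title", "expression", "sources", "requirements", "regions", "parameters"); the expression body and the xd.line_profile name are illustrative assumptions, not taken from nionswift's registered descriptions. Note that "{src}" is expanded via script.format() into "src.display_xdata" (or a cropped/filtered variant).

processing_description = {
    "title": "Line Profile",
    # {src} is substituted with the computed xdata property text for source "src"
    "expression": "xd.line_profile({src}, line_region.vector, line_region.integration_width)",
    "sources": [
        {
            "name": "src",
            "label": "Source",
            "use_display_data": True,
            "requirements": [{"type": "dimensionality", "min": 2, "max": 2}],
            "regions": [{"name": "line_region", "type": "line", "params": {"label": "Line"}}],
        }
    ],
    "parameters": [],
}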
nion-software/nionswift
nion/swift/model/ColorMaps.py
interpolate_colors
def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray:
    """
    Creates a color map for values in array
    :param array: color map to interpolate
    :param x: number of colors
    :return: interpolated color map
    """
    out_array = []
    for i in range(x):
        if i % (x / (len(array) - 1)) == 0:
            index = i / (x / (len(array) - 1))
            out_array.append(array[int(index)])
        else:
            start_marker = array[math.floor(i / (x / (len(array) - 1)))]
            stop_marker = array[math.ceil(i / (x / (len(array) - 1)))]
            interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1))
            interp_color = numpy.rint(start_marker + ((stop_marker - start_marker) * interp_amount))
            out_array.append(interp_color)
    out_array[-1] = array[-1]
    return numpy.array(out_array).astype(numpy.uint8)
python
def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray:
    """
    Creates a color map for values in array
    :param array: color map to interpolate
    :param x: number of colors
    :return: interpolated color map
    """
    out_array = []
    for i in range(x):
        if i % (x / (len(array) - 1)) == 0:
            index = i / (x / (len(array) - 1))
            out_array.append(array[int(index)])
        else:
            start_marker = array[math.floor(i / (x / (len(array) - 1)))]
            stop_marker = array[math.ceil(i / (x / (len(array) - 1)))]
            interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1))
            interp_color = numpy.rint(start_marker + ((stop_marker - start_marker) * interp_amount))
            out_array.append(interp_color)
    out_array[-1] = array[-1]
    return numpy.array(out_array).astype(numpy.uint8)
[ "def", "interpolate_colors", "(", "array", ":", "numpy", ".", "ndarray", ",", "x", ":", "int", ")", "->", "numpy", ".", "ndarray", ":", "out_array", "=", "[", "]", "for", "i", "in", "range", "(", "x", ")", ":", "if", "i", "%", "(", "x", "/", "(", "len", "(", "array", ")", "-", "1", ")", ")", "==", "0", ":", "index", "=", "i", "/", "(", "x", "/", "(", "len", "(", "array", ")", "-", "1", ")", ")", "out_array", ".", "append", "(", "array", "[", "int", "(", "index", ")", "]", ")", "else", ":", "start_marker", "=", "array", "[", "math", ".", "floor", "(", "i", "/", "(", "x", "/", "(", "len", "(", "array", ")", "-", "1", ")", ")", ")", "]", "stop_marker", "=", "array", "[", "math", ".", "ceil", "(", "i", "/", "(", "x", "/", "(", "len", "(", "array", ")", "-", "1", ")", ")", ")", "]", "interp_amount", "=", "i", "%", "(", "x", "/", "(", "len", "(", "array", ")", "-", "1", ")", ")", "/", "(", "x", "/", "(", "len", "(", "array", ")", "-", "1", ")", ")", "interp_color", "=", "numpy", ".", "rint", "(", "start_marker", "+", "(", "(", "stop_marker", "-", "start_marker", ")", "*", "interp_amount", ")", ")", "out_array", ".", "append", "(", "interp_color", ")", "out_array", "[", "-", "1", "]", "=", "array", "[", "-", "1", "]", "return", "numpy", ".", "array", "(", "out_array", ")", ".", "astype", "(", "numpy", ".", "uint8", ")" ]
Creates a color map for values in array :param array: color map to interpolate :param x: number of colors :return: interpolated color map
[ "Creates", "a", "color", "map", "for", "values", "in", "array", ":", "param", "array", ":", "color", "map", "to", "interpolate", ":", "param", "x", ":", "number", "of", "colors", ":", "return", ":", "interpolated", "color", "map" ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/ColorMaps.py#L23-L42
train
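Usage sketch (not part of the original record): expanding three control colors into a 256-entry lookup table, assuming interpolate_colors from the record above and its math/numpy imports are in scope.

import numpy

# three control colors: black -> red -> white
control_points = numpy.array([[0, 0, 0], [255, 0, 0], [255, 255, 255]], dtype=float)
lut = interpolate_colors(control_points, 256)
print(lut.shape)                   # (256, 3), dtype uint8
print(lut[0], lut[128], lut[255])  # [0 0 0] [255 0 0] [255 255 255]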
softvar/simplegist
simplegist/do.py
Do.star
def star(self, **args):
    '''
    star any gist by providing gistID or gistname(for authenticated user)
    '''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be starred')

    r = requests.put(
        '%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
        headers=self.gist.header
    )
    if (r.status_code == 204):
        response = {
            'id': self.gist_id
        }
        return response
    raise Exception('Gist can\'t be starred')
python
def star(self, **args):
    '''
    star any gist by providing gistID or gistname(for authenticated user)
    '''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be starred')

    r = requests.put(
        '%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
        headers=self.gist.header
    )
    if (r.status_code == 204):
        response = {
            'id': self.gist_id
        }
        return response
    raise Exception('Gist can\'t be starred')
[ "def", "star", "(", "self", ",", "*", "*", "args", ")", ":", "if", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Either provide authenticated user\\'s Unambigious Gistname or any unique Gistid to be starred'", ")", "r", "=", "requests", ".", "put", "(", "'%s'", "%", "BASE_URL", "+", "'/gists/%s/star'", "%", "self", ".", "gist_id", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "if", "(", "r", ".", "status_code", "==", "204", ")", ":", "response", "=", "{", "'id'", ":", "self", ".", "gist_id", "}", "return", "response", "raise", "Exception", "(", "'Gist can\\'t be starred'", ")" ]
star any gist by providing gistID or gistname(for authenticated user)
[ "star", "any", "gist", "by", "providing", "gistID", "or", "gistname", "(", "for", "authenticated", "user", ")" ]
8d53edd15d76c7b10fb963a659c1cf9f46f5345d
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/do.py#L28-L50
train
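For reference (not part of the original record): the PUT that star() wraps maps onto the public GitHub gists API, which returns 204 on success. A standalone sketch; GIST_ID and TOKEN are placeholders, not values from this record.

import requests

BASE_URL = 'https://api.github.com'
GIST_ID = 'a1b2c3d4'   # placeholder gist id
TOKEN = 'YOUR_TOKEN'   # placeholder personal access token with gist scope

r = requests.put('%s/gists/%s/star' % (BASE_URL, GIST_ID),
                 headers={'Authorization': 'token %s' % TOKEN})
print(r.status_code == 204)  # True once the gist is starred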
softvar/simplegist
simplegist/do.py
Do.fork
def fork(self, **args):
    '''
    fork any gist by providing gistID or gistname(for authenticated user)
    '''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be forked')

    r = requests.post(
        '%s'%BASE_URL+'/gists/%s/forks' % self.gist_id,
        headers=self.gist.header
    )
    if (r.status_code == 201):
        response = {
            'id': self.gist_id,
            'description': r.json()['description'],
            'public': r.json()['public'],
            'comments': r.json()['comments']
        }
        return response
    raise Exception('Gist can\'t be forked')
python
def fork(self, **args):
    '''
    fork any gist by providing gistID or gistname(for authenticated user)
    '''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be forked')

    r = requests.post(
        '%s'%BASE_URL+'/gists/%s/forks' % self.gist_id,
        headers=self.gist.header
    )
    if (r.status_code == 201):
        response = {
            'id': self.gist_id,
            'description': r.json()['description'],
            'public': r.json()['public'],
            'comments': r.json()['comments']
        }
        return response
    raise Exception('Gist can\'t be forked')
[ "def", "fork", "(", "self", ",", "*", "*", "args", ")", ":", "if", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Either provide authenticated user\\'s Unambigious Gistname or any unique Gistid to be forked'", ")", "r", "=", "requests", ".", "post", "(", "'%s'", "%", "BASE_URL", "+", "'/gists/%s/forks'", "%", "self", ".", "gist_id", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "if", "(", "r", ".", "status_code", "==", "201", ")", ":", "response", "=", "{", "'id'", ":", "self", ".", "gist_id", ",", "'description'", ":", "r", ".", "json", "(", ")", "[", "'description'", "]", ",", "'public'", ":", "r", ".", "json", "(", ")", "[", "'public'", "]", ",", "'comments'", ":", "r", ".", "json", "(", ")", "[", "'comments'", "]", "}", "return", "response", "raise", "Exception", "(", "'Gist can\\'t be forked'", ")" ]
fork any gist by providing gistID or gistname(for authenticated user)
[ "fork", "any", "gist", "by", "providing", "gistID", "or", "gistname", "(", "for", "authenticated", "user", ")" ]
8d53edd15d76c7b10fb963a659c1cf9f46f5345d
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/do.py#L76-L101
train
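For reference (not part of the original record): the POST that fork() wraps maps onto the public GitHub gists API, which returns 201 with the new fork's JSON body. A standalone sketch; GIST_ID and TOKEN are placeholders.

import requests

BASE_URL = 'https://api.github.com'
GIST_ID = 'a1b2c3d4'   # placeholder gist id
TOKEN = 'YOUR_TOKEN'   # placeholder personal access token with gist scope

r = requests.post('%s/gists/%s/forks' % (BASE_URL, GIST_ID),
                  headers={'Authorization': 'token %s' % TOKEN})
if r.status_code == 201:
    body = r.json()
    # the same three fields the record above pulls out of the response
    print(body['description'], body['public'], body['comments'])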
softvar/simplegist
simplegist/do.py
Do.checkifstar
def checkifstar(self, **args):
    '''
    Check a gist if starred by providing gistID or gistname(for authenticated user)
    '''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be checked for star')

    r = requests.get(
        '%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
        headers=self.gist.header
    )
    if (r.status_code == 204):
        response = {
            'starred': 'True',
            'id': self.gist_id
        }
    else:
        response = {
            'starred': 'False'
        }
    return response
python
def checkifstar(self, **args):
    '''
    Check a gist if starred by providing gistID or gistname(for authenticated user)
    '''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be checked for star')

    r = requests.get(
        '%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
        headers=self.gist.header
    )
    if (r.status_code == 204):
        response = {
            'starred': 'True',
            'id': self.gist_id
        }
    else:
        response = {
            'starred': 'False'
        }
    return response
[ "def", "checkifstar", "(", "self", ",", "*", "*", "args", ")", ":", "if", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Either provide authenticated user\\'s Unambigious Gistname or any unique Gistid to be checked for star'", ")", "r", "=", "requests", ".", "get", "(", "'%s'", "%", "BASE_URL", "+", "'/gists/%s/star'", "%", "self", ".", "gist_id", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "if", "(", "r", ".", "status_code", "==", "204", ")", ":", "response", "=", "{", "'starred'", ":", "'True'", ",", "'id'", ":", "self", ".", "gist_id", "}", "else", ":", "response", "=", "{", "'starred'", ":", "'False'", "}", "return", "response" ]
Check a gist if starred by providing gistID or gistname(for authenticated user)
[ "Check", "a", "gist", "if", "starred", "by", "providing", "gistID", "or", "gistname", "(", "for", "authenticated", "user", ")" ]
8d53edd15d76c7b10fb963a659c1cf9f46f5345d
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/do.py#L103-L130
train
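For reference (not part of the original record): the GET that checkifstar() wraps returns 204 when the gist is starred and 404 otherwise, which is why the record above branches only on 204. A standalone sketch; GIST_ID and TOKEN are placeholders.

import requests

BASE_URL = 'https://api.github.com'
GIST_ID = 'a1b2c3d4'   # placeholder gist id
TOKEN = 'YOUR_TOKEN'   # placeholder personal access token with gist scope

r = requests.get('%s/gists/%s/star' % (BASE_URL, GIST_ID),
                 headers={'Authorization': 'token %s' % TOKEN})
print({'starred': 'True'} if r.status_code == 204 else {'starred': 'False'})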
base4sistemas/satcfe
satcfe/resposta/extrairlogs.py
RespostaExtrairLogs.salvar
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'):
    """Saves the decoded log file.

    :param str destino: (Optional) Full path to the file where the log data
        should be saved. If not given, a temporary file will be created via
        :func:`tempfile.mkstemp`.

    :param str prefix: (Optional) Prefix for the file name. If not given,
        ``"tmp"`` will be used.

    :param str suffix: (Optional) Suffix for the file name. If not given,
        ``"-sat.log"`` will be used.

    :return: Returns the full path to the saved file.
    :rtype: str

    :raises IOError: If a destination is given and the file already exists.
    """
    if destino:
        if os.path.exists(destino):
            raise IOError((errno.EEXIST, 'File exists', destino,))
        destino = os.path.abspath(destino)
        fd = os.open(destino, os.O_EXCL|os.O_CREAT|os.O_WRONLY)
    else:
        fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)
    os.write(fd, self.conteudo())
    os.fsync(fd)
    os.close(fd)
    return os.path.abspath(destino)
python
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'):
    """Saves the decoded log file.

    :param str destino: (Optional) Full path to the file where the log data
        should be saved. If not given, a temporary file will be created via
        :func:`tempfile.mkstemp`.

    :param str prefix: (Optional) Prefix for the file name. If not given,
        ``"tmp"`` will be used.

    :param str suffix: (Optional) Suffix for the file name. If not given,
        ``"-sat.log"`` will be used.

    :return: Returns the full path to the saved file.
    :rtype: str

    :raises IOError: If a destination is given and the file already exists.
    """
    if destino:
        if os.path.exists(destino):
            raise IOError((errno.EEXIST, 'File exists', destino,))
        destino = os.path.abspath(destino)
        fd = os.open(destino, os.O_EXCL|os.O_CREAT|os.O_WRONLY)
    else:
        fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)
    os.write(fd, self.conteudo())
    os.fsync(fd)
    os.close(fd)
    return os.path.abspath(destino)
[ "def", "salvar", "(", "self", ",", "destino", "=", "None", ",", "prefix", "=", "'tmp'", ",", "suffix", "=", "'-sat.log'", ")", ":", "if", "destino", ":", "if", "os", ".", "path", ".", "exists", "(", "destino", ")", ":", "raise", "IOError", "(", "(", "errno", ".", "EEXIST", ",", "'File exists'", ",", "destino", ",", ")", ")", "destino", "=", "os", ".", "path", ".", "abspath", "(", "destino", ")", "fd", "=", "os", ".", "open", "(", "destino", ",", "os", ".", "O_EXCL", "|", "os", ".", "O_CREAT", "|", "os", ".", "O_WRONLY", ")", "else", ":", "fd", ",", "destino", "=", "tempfile", ".", "mkstemp", "(", "prefix", "=", "prefix", ",", "suffix", "=", "suffix", ")", "os", ".", "write", "(", "fd", ",", "self", ".", "conteudo", "(", ")", ")", "os", ".", "fsync", "(", "fd", ")", "os", ".", "close", "(", "fd", ")", "return", "os", ".", "path", ".", "abspath", "(", "destino", ")" ]
Saves the decoded log file. :param str destino: (Optional) Full path to the file where the log data should be saved. If not given, a temporary file will be created via :func:`tempfile.mkstemp`. :param str prefix: (Optional) Prefix for the file name. If not given, ``"tmp"`` will be used. :param str suffix: (Optional) Suffix for the file name. If not given, ``"-sat.log"`` will be used. :return: Returns the full path to the saved file. :rtype: str :raises IOError: If a destination is given and the file already exists.
[ "Salva", "o", "arquivo", "de", "log", "decodificado", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/extrairlogs.py#L55-L85
train
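Standalone sketch (not part of the original record) of the exclusive-create logic in salvar: os.O_EXCL makes os.open fail if the destination already exists, and mkstemp supplies a safe temporary path otherwise. Pure stdlib, so it runs as-is.

import errno
import os
import tempfile

def save_bytes(content: bytes, destino=None, prefix='tmp', suffix='-sat.log'):
    if destino:
        if os.path.exists(destino):
            raise IOError((errno.EEXIST, 'File exists', destino))
        # O_EXCL guarantees we never clobber an existing file
        fd = os.open(os.path.abspath(destino), os.O_EXCL | os.O_CREAT | os.O_WRONLY)
    else:
        fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)
    os.write(fd, content)
    os.fsync(fd)   # flush to disk before closing
    os.close(fd)
    return os.path.abspath(destino)

path = save_bytes(b'log line 1\n')
print(path)  # e.g. /tmp/tmpXXXXXXXX-sat.log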
base4sistemas/satcfe
satcfe/resposta/extrairlogs.py
RespostaExtrairLogs.analisar
def analisar(retorno):
    """Builds a :class:`RespostaExtrairLogs` from the given return value.

    :param unicode retorno: Return value of the ``ExtrairLogs`` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ExtrairLogs',
            classe_resposta=RespostaExtrairLogs,
            campos=RespostaSAT.CAMPOS + (
                    ('arquivoLog', unicode),
                ),
            campos_alternativos=[
                    # if log extraction fails, the standard set of fields
                    # is expected in the return value...
                    RespostaSAT.CAMPOS,
                ]
        )
    if resposta.EEEEE not in ('15000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def analisar(retorno):
    """Builds a :class:`RespostaExtrairLogs` from the given return value.

    :param unicode retorno: Return value of the ``ExtrairLogs`` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ExtrairLogs',
            classe_resposta=RespostaExtrairLogs,
            campos=RespostaSAT.CAMPOS + (
                    ('arquivoLog', unicode),
                ),
            campos_alternativos=[
                    # if log extraction fails, the standard set of fields
                    # is expected in the return value...
                    RespostaSAT.CAMPOS,
                ]
        )
    if resposta.EEEEE not in ('15000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "analisar", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'ExtrairLogs'", ",", "classe_resposta", "=", "RespostaExtrairLogs", ",", "campos", "=", "RespostaSAT", ".", "CAMPOS", "+", "(", "(", "'arquivoLog'", ",", "unicode", ")", ",", ")", ",", "campos_alternativos", "=", "[", "# se a extração dos logs falhar espera-se o padrão de", "# campos no retorno...", "RespostaSAT", ".", "CAMPOS", ",", "]", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'15000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Builds a :class:`RespostaExtrairLogs` from the given return value. :param unicode retorno: Return value of the ``ExtrairLogs`` function.
[ "Constrói", "uma", ":", "class", ":", "RespostaExtrairLogs", "a", "partir", "do", "retorno", "informado", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/extrairlogs.py#L89-L109
train
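Hedged sketch (not part of the original record) of how a successful ExtrairLogs return might be parsed. The pipe-separated layout (numeroSessao|EEEEE|mensagem|cod|mensagemSEFAZ|arquivoLog) and the base64-encoded log payload are assumptions based on the fields this class declares, not values from a real SAT device.

# 15000 is the success code analisar() checks for; the last field is the
# (assumed) base64-encoded log, here 'log de teste'
retorno = u'123456|15000|Transfer\u00eancia completa|||bG9nIGRlIHRlc3Rl'
resposta = RespostaExtrairLogs.analisar(retorno)
print(resposta.EEEEE)        # '15000'
caminho = resposta.salvar()  # writes the decoded log to a temporary file
print(caminho)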
djgagne/hagelslag
hagelslag/data/ModelGrid.py
ModelGrid.load_data_old
def load_data_old(self):
    """
    Loads time series of 2D data grids from each opened file. The code handles loading a full time series from
    one file or individual time steps from multiple files. Missing files are supported.
    """
    units = ""
    if len(self.file_objects) == 1 and self.file_objects[0] is not None:
        data = self.file_objects[0].variables[self.variable][self.forecast_hours]
        if hasattr(self.file_objects[0].variables[self.variable], "units"):
            units = self.file_objects[0].variables[self.variable].units
    elif len(self.file_objects) > 1:
        grid_shape = [len(self.file_objects), 1, 1]
        for file_object in self.file_objects:
            if file_object is not None:
                if self.variable in file_object.variables.keys():
                    grid_shape = file_object.variables[self.variable].shape
                elif self.variable.ljust(6, "_") in file_object.variables.keys():
                    grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
                else:
                    print("{0} not found".format(self.variable))
                    raise KeyError
                break
        data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
        for f, file_object in enumerate(self.file_objects):
            if file_object is not None:
                if self.variable in file_object.variables.keys():
                    var_name = self.variable
                elif self.variable.ljust(6, "_") in file_object.variables.keys():
                    var_name = self.variable.ljust(6, "_")
                else:
                    print("{0} not found".format(self.variable))
                    raise KeyError
                data[f] = file_object.variables[var_name][0]
                if units == "" and hasattr(file_object.variables[var_name], "units"):
                    units = file_object.variables[var_name].units
    else:
        data = None
    return data, units
python
def load_data_old(self):
    """
    Loads time series of 2D data grids from each opened file. The code handles loading a full time series from
    one file or individual time steps from multiple files. Missing files are supported.
    """
    units = ""
    if len(self.file_objects) == 1 and self.file_objects[0] is not None:
        data = self.file_objects[0].variables[self.variable][self.forecast_hours]
        if hasattr(self.file_objects[0].variables[self.variable], "units"):
            units = self.file_objects[0].variables[self.variable].units
    elif len(self.file_objects) > 1:
        grid_shape = [len(self.file_objects), 1, 1]
        for file_object in self.file_objects:
            if file_object is not None:
                if self.variable in file_object.variables.keys():
                    grid_shape = file_object.variables[self.variable].shape
                elif self.variable.ljust(6, "_") in file_object.variables.keys():
                    grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
                else:
                    print("{0} not found".format(self.variable))
                    raise KeyError
                break
        data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
        for f, file_object in enumerate(self.file_objects):
            if file_object is not None:
                if self.variable in file_object.variables.keys():
                    var_name = self.variable
                elif self.variable.ljust(6, "_") in file_object.variables.keys():
                    var_name = self.variable.ljust(6, "_")
                else:
                    print("{0} not found".format(self.variable))
                    raise KeyError
                data[f] = file_object.variables[var_name][0]
                if units == "" and hasattr(file_object.variables[var_name], "units"):
                    units = file_object.variables[var_name].units
    else:
        data = None
    return data, units
[ "def", "load_data_old", "(", "self", ")", ":", "units", "=", "\"\"", "if", "len", "(", "self", ".", "file_objects", ")", "==", "1", "and", "self", ".", "file_objects", "[", "0", "]", "is", "not", "None", ":", "data", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", "[", "self", ".", "forecast_hours", "]", "if", "hasattr", "(", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", ",", "\"units\"", ")", ":", "units", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", ".", "units", "elif", "len", "(", "self", ".", "file_objects", ")", ">", "1", ":", "grid_shape", "=", "[", "len", "(", "self", ".", "file_objects", ")", ",", "1", ",", "1", "]", "for", "file_object", "in", "self", ".", "file_objects", ":", "if", "file_object", "is", "not", "None", ":", "if", "self", ".", "variable", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "grid_shape", "=", "file_object", ".", "variables", "[", "self", ".", "variable", "]", ".", "shape", "elif", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "grid_shape", "=", "file_object", ".", "variables", "[", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "]", ".", "shape", "else", ":", "print", "(", "\"{0} not found\"", ".", "format", "(", "self", ".", "variable", ")", ")", "raise", "KeyError", "break", "data", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "file_objects", ")", ",", "grid_shape", "[", "1", "]", ",", "grid_shape", "[", "2", "]", ")", ")", "for", "f", ",", "file_object", "in", "enumerate", "(", "self", ".", "file_objects", ")", ":", "if", "file_object", "is", "not", "None", ":", "if", "self", ".", "variable", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "var_name", "=", "self", ".", "variable", "elif", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "var_name", "=", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "else", ":", "print", "(", "\"{0} not found\"", ".", "format", "(", "self", ".", "variable", ")", ")", "raise", "KeyError", "data", "[", "f", "]", "=", "file_object", ".", "variables", "[", "var_name", "]", "[", "0", "]", "if", "units", "==", "\"\"", "and", "hasattr", "(", "file_object", ".", "variables", "[", "var_name", "]", ",", "\"units\"", ")", ":", "units", "=", "file_object", ".", "variables", "[", "var_name", "]", ".", "units", "else", ":", "data", "=", "None", "return", "data", ",", "units" ]
Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported.
[ "Loads", "time", "series", "of", "2D", "data", "grids", "from", "each", "opened", "file", ".", "The", "code", "handles", "loading", "a", "full", "time", "series", "from", "one", "file", "or", "individual", "time", "steps", "from", "multiple", "files", ".", "Missing", "files", "are", "supported", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/ModelGrid.py#L55-L94
train
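The lookup fallback above retries variable names padded to six characters with underscores. A minimal self-contained sketch of that behavior (the helper name and sample lists are hypothetical):

def resolve_name(variable, available):
    # mirror the two-step lookup used in load_data_old
    if variable in available:
        return variable
    padded = variable.ljust(6, "_")  # e.g. "cqg" -> "cqg___"
    if padded in available:
        return padded
    raise KeyError("{0} not found".format(variable))

print(resolve_name("cqg", ["cqg___", "uh_max"]))  # -> "cqg___"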
djgagne/hagelslag
hagelslag/data/ModelGrid.py
ModelGrid.load_data
def load_data(self):
    """
    Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats.

    Returns:
        Array of data loaded from files in (time, y, x) dimensions, Units
    """
    units = ""
    if self.file_objects[0] is None:
        raise IOError()
    var_name, z_index = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys()))
    ntimes = 0
    if 'time' in self.file_objects[0].variables[var_name].dimensions:
        ntimes = len(self.file_objects[0].dimensions['time'])
    if ntimes > 1:
        if z_index is None:
            data = self.file_objects[0].variables[var_name][self.forecast_hours].astype(np.float32)
        else:
            data = self.file_objects[0].variables[var_name][self.forecast_hours, z_index].astype(np.float32)
    else:
        y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:]
        data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32)
        for f, file_object in enumerate(self.file_objects):
            if file_object is not None:
                if z_index is None:
                    data[f] = file_object.variables[var_name][0]
                else:
                    data[f] = file_object.variables[var_name][0, z_index]
    if hasattr(self.file_objects[0].variables[var_name], "units"):
        units = self.file_objects[0].variables[var_name].units
    return data, units
python
def load_data(self):
    """
    Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats.

    Returns:
        Array of data loaded from files in (time, y, x) dimensions, Units
    """
    units = ""
    if self.file_objects[0] is None:
        raise IOError()
    var_name, z_index = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys()))
    ntimes = 0
    if 'time' in self.file_objects[0].variables[var_name].dimensions:
        ntimes = len(self.file_objects[0].dimensions['time'])
    if ntimes > 1:
        if z_index is None:
            data = self.file_objects[0].variables[var_name][self.forecast_hours].astype(np.float32)
        else:
            data = self.file_objects[0].variables[var_name][self.forecast_hours, z_index].astype(np.float32)
    else:
        y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:]
        data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32)
        for f, file_object in enumerate(self.file_objects):
            if file_object is not None:
                if z_index is None:
                    data[f] = file_object.variables[var_name][0]
                else:
                    data[f] = file_object.variables[var_name][0, z_index]
    if hasattr(self.file_objects[0].variables[var_name], "units"):
        units = self.file_objects[0].variables[var_name].units
    return data, units
[ "def", "load_data", "(", "self", ")", ":", "units", "=", "\"\"", "if", "self", ".", "file_objects", "[", "0", "]", "is", "None", ":", "raise", "IOError", "(", ")", "var_name", ",", "z_index", "=", "self", ".", "format_var_name", "(", "self", ".", "variable", ",", "list", "(", "self", ".", "file_objects", "[", "0", "]", ".", "variables", ".", "keys", "(", ")", ")", ")", "ntimes", "=", "0", "if", "'time'", "in", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "var_name", "]", ".", "dimensions", ":", "ntimes", "=", "len", "(", "self", ".", "file_objects", "[", "0", "]", ".", "dimensions", "[", "'time'", "]", ")", "if", "ntimes", ">", "1", ":", "if", "z_index", "is", "None", ":", "data", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "var_name", "]", "[", "self", ".", "forecast_hours", "]", ".", "astype", "(", "np", ".", "float32", ")", "else", ":", "data", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "var_name", "]", "[", "self", ".", "forecast_hours", ",", "z_index", "]", ".", "astype", "(", "np", ".", "float32", ")", "else", ":", "y_dim", ",", "x_dim", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "var_name", "]", ".", "shape", "[", "-", "2", ":", "]", "data", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "valid_dates", ")", ",", "y_dim", ",", "x_dim", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "f", ",", "file_object", "in", "enumerate", "(", "self", ".", "file_objects", ")", ":", "if", "file_object", "is", "not", "None", ":", "if", "z_index", "is", "None", ":", "data", "[", "f", "]", "=", "file_object", ".", "variables", "[", "var_name", "]", "[", "0", "]", "else", ":", "data", "[", "f", "]", "=", "file_object", ".", "variables", "[", "var_name", "]", "[", "0", ",", "z_index", "]", "if", "hasattr", "(", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "var_name", "]", ",", "\"units\"", ")", ":", "units", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "var_name", "]", ".", "units", "return", "data", ",", "units" ]
Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats. Returns: Array of data loaded from files in (time, y, x) dimensions, Units
[ "Load", "data", "from", "netCDF", "file", "objects", "or", "list", "of", "netCDF", "file", "objects", ".", "Handles", "special", "variable", "name", "formats", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/ModelGrid.py#L96-L127
train
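A hedged usage sketch for ``load_data``: the ModelGrid constructor arguments below are assumptions for illustration, not taken from this excerpt.

import pandas as pd
from hagelslag.data.ModelGrid import ModelGrid

run_date = pd.Timestamp("2015-05-20 00:00")
valid_dates = pd.date_range(run_date + pd.Timedelta(hours=12),
                            run_date + pd.Timedelta(hours=18), freq="1H")
# constructor arguments assumed for illustration
mg = ModelGrid(["member_1_20150520.nc"], run_date, valid_dates, "uh_max")
data, units = mg.load_data()  # data comes back in (time, y, x) order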
djgagne/hagelslag
hagelslag/data/ModelGrid.py
ModelGrid.format_var_name
def format_var_name(variable, var_list):
    """
    Searches var list for variable name, checks other variable name format options.

    Args:
        variable (str): Variable being loaded
        var_list (list): List of variables in file.

    Returns:
        Name of variable in file containing relevant data, and index of variable z-level if multiple variables
        contained in same array in file.
    """
    z_index = None
    if variable in var_list:
        var_name = variable
    elif variable.ljust(6, "_") in var_list:
        var_name = variable.ljust(6, "_")
    elif any([variable in v_sub.split("_") for v_sub in var_list]):
        var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)]
        z_index = var_name.split("_").index(variable)
    else:
        raise KeyError("{0} not found in {1}".format(variable, var_list))
    return var_name, z_index
python
def format_var_name(variable, var_list):
    """
    Searches var list for variable name, checks other variable name format options.

    Args:
        variable (str): Variable being loaded
        var_list (list): List of variables in file.

    Returns:
        Name of variable in file containing relevant data, and index of variable z-level if multiple variables
        contained in same array in file.
    """
    z_index = None
    if variable in var_list:
        var_name = variable
    elif variable.ljust(6, "_") in var_list:
        var_name = variable.ljust(6, "_")
    elif any([variable in v_sub.split("_") for v_sub in var_list]):
        var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)]
        z_index = var_name.split("_").index(variable)
    else:
        raise KeyError("{0} not found in {1}".format(variable, var_list))
    return var_name, z_index
[ "def", "format_var_name", "(", "variable", ",", "var_list", ")", ":", "z_index", "=", "None", "if", "variable", "in", "var_list", ":", "var_name", "=", "variable", "elif", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "var_list", ":", "var_name", "=", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "elif", "any", "(", "[", "variable", "in", "v_sub", ".", "split", "(", "\"_\"", ")", "for", "v_sub", "in", "var_list", "]", ")", ":", "var_name", "=", "var_list", "[", "[", "variable", "in", "v_sub", ".", "split", "(", "\"_\"", ")", "for", "v_sub", "in", "var_list", "]", ".", "index", "(", "True", ")", "]", "z_index", "=", "var_name", ".", "split", "(", "\"_\"", ")", ".", "index", "(", "variable", ")", "else", ":", "raise", "KeyError", "(", "\"{0} not found in {1}\"", ".", "format", "(", "variable", ",", "var_list", ")", ")", "return", "var_name", ",", "z_index" ]
Searches var list for variable name, checks other variable name format options. Args: variable (str): Variable being loaded var_list (list): List of variables in file. Returns: Name of variable in file containing relevant data, and index of variable z-level if multiple variables contained in same array in file.
[ "Searches", "var", "list", "for", "variable", "name", "checks", "other", "variable", "name", "format", "options", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/ModelGrid.py#L130-L152
train
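Worked examples of the three lookup branches in ``format_var_name`` (variable lists are hypothetical; return values traced from the code above):

from hagelslag.data.ModelGrid import ModelGrid

ModelGrid.format_var_name("uh_max", ["uh_max", "cqg___"])  # exact match      -> ("uh_max", None)
ModelGrid.format_var_name("cqg", ["uh_max", "cqg___"])     # padded match     -> ("cqg___", None)
ModelGrid.format_var_name("uh", ["uh_max_01", "cqg___"])   # component match  -> ("uh_max_01", 0)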
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.load_data
def load_data(self, mode="train", format="csv"):
    """
    Load data from flat data files containing total track information and information about each timestep.
    The two sets are combined using merge operations on the Track IDs. Additional member information is
    gathered from the appropriate member file.

    Args:
        mode: "train" or "forecast"
        format: file format being used. Default is "csv"
    """
    if mode in self.data.keys():
        run_dates = pd.DatetimeIndex(start=self.start_dates[mode], end=self.end_dates[mode], freq="1D")
        run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]
        print(run_date_str)
        all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*total_" +
                                            self.ensemble_name + "*." + format))
        all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*step_" +
                                           self.ensemble_name + "*." + format))
        total_track_files = []
        for track_file in all_total_track_files:
            file_date = track_file.split("_")[-1][:-4]
            if file_date in run_date_str:
                total_track_files.append(track_file)
        step_track_files = []
        for step_file in all_step_track_files:
            file_date = step_file.split("_")[-1][:-4]
            if file_date in run_date_str:
                step_track_files.append(step_file)
        self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files), ignore_index=True)
        self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0)
        self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0)
        self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files), ignore_index=True)
        self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0)
        self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0)
        if mode == "forecast":
            self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID")
        self.data[mode]["member"] = pd.read_csv(self.member_files[mode])
        self.data[mode]["combo"] = pd.merge(self.data[mode]["step"], self.data[mode]["total"],
                                            on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"])
        self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"], self.data[mode]["member"],
                                            on="Ensemble_Member")
        self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"], self.data[mode]["member"],
                                                  on="Ensemble_Member")
python
def load_data(self, mode="train", format="csv"):
    """
    Load data from flat data files containing total track information and information about each timestep.
    The two sets are combined using merge operations on the Track IDs. Additional member information is
    gathered from the appropriate member file.

    Args:
        mode: "train" or "forecast"
        format: file format being used. Default is "csv"
    """
    if mode in self.data.keys():
        run_dates = pd.DatetimeIndex(start=self.start_dates[mode], end=self.end_dates[mode], freq="1D")
        run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]
        print(run_date_str)
        all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*total_" +
                                            self.ensemble_name + "*." + format))
        all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*step_" +
                                           self.ensemble_name + "*." + format))
        total_track_files = []
        for track_file in all_total_track_files:
            file_date = track_file.split("_")[-1][:-4]
            if file_date in run_date_str:
                total_track_files.append(track_file)
        step_track_files = []
        for step_file in all_step_track_files:
            file_date = step_file.split("_")[-1][:-4]
            if file_date in run_date_str:
                step_track_files.append(step_file)
        self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files), ignore_index=True)
        self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0)
        self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0)
        self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files), ignore_index=True)
        self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0)
        self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0)
        if mode == "forecast":
            self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID")
        self.data[mode]["member"] = pd.read_csv(self.member_files[mode])
        self.data[mode]["combo"] = pd.merge(self.data[mode]["step"], self.data[mode]["total"],
                                            on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"])
        self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"], self.data[mode]["member"],
                                            on="Ensemble_Member")
        self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"], self.data[mode]["member"],
                                                  on="Ensemble_Member")
[ "def", "load_data", "(", "self", ",", "mode", "=", "\"train\"", ",", "format", "=", "\"csv\"", ")", ":", "if", "mode", "in", "self", ".", "data", ".", "keys", "(", ")", ":", "run_dates", "=", "pd", ".", "DatetimeIndex", "(", "start", "=", "self", ".", "start_dates", "[", "mode", "]", ",", "end", "=", "self", ".", "end_dates", "[", "mode", "]", ",", "freq", "=", "\"1D\"", ")", "run_date_str", "=", "[", "d", ".", "strftime", "(", "\"%Y%m%d-%H%M\"", ")", "for", "d", "in", "run_dates", ".", "date", "]", "print", "(", "run_date_str", ")", "all_total_track_files", "=", "sorted", "(", "glob", "(", "getattr", "(", "self", ",", "mode", "+", "\"_data_path\"", ")", "+", "\"*total_\"", "+", "self", ".", "ensemble_name", "+", "\"*.\"", "+", "format", ")", ")", "all_step_track_files", "=", "sorted", "(", "glob", "(", "getattr", "(", "self", ",", "mode", "+", "\"_data_path\"", ")", "+", "\"*step_\"", "+", "self", ".", "ensemble_name", "+", "\"*.\"", "+", "format", ")", ")", "total_track_files", "=", "[", "]", "for", "track_file", "in", "all_total_track_files", ":", "file_date", "=", "track_file", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", ":", "-", "4", "]", "if", "file_date", "in", "run_date_str", ":", "total_track_files", ".", "append", "(", "track_file", ")", "step_track_files", "=", "[", "]", "for", "step_file", "in", "all_step_track_files", ":", "file_date", "=", "step_file", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", ":", "-", "4", "]", "if", "file_date", "in", "run_date_str", ":", "step_track_files", ".", "append", "(", "step_file", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", "=", "pd", ".", "concat", "(", "map", "(", "pd", ".", "read_csv", ",", "total_track_files", ")", ",", "ignore_index", "=", "True", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ".", "fillna", "(", "value", "=", "0", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ".", "replace", "(", "[", "np", ".", "inf", ",", "-", "np", ".", "inf", "]", ",", "0", ")", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "pd", ".", "concat", "(", "map", "(", "pd", ".", "read_csv", ",", "step_track_files", ")", ",", "ignore_index", "=", "True", ")", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ".", "fillna", "(", "value", "=", "0", ")", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ".", "replace", "(", "[", "np", ".", "inf", ",", "-", "np", ".", "inf", "]", ",", "0", ")", "if", "mode", "==", "\"forecast\"", ":", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ".", "drop_duplicates", "(", "\"Step_ID\"", ")", "self", ".", "data", "[", "mode", "]", "[", "\"member\"", "]", "=", "pd", ".", "read_csv", "(", "self", ".", "member_files", "[", "mode", "]", ")", "self", ".", "data", "[", "mode", "]", "[", "\"combo\"", "]", "=", "pd", ".", "merge", "(", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ",", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ",", "on", "=", "[", "\"Track_ID\"", ",", "\"Ensemble_Name\"", ",", "\"Ensemble_Member\"", ",", "\"Run_Date\"", "]", ")", "self", ".", "data", "[", "mode", "]", "[", "\"combo\"", "]", "=", "pd", ".", 
"merge", "(", "self", ".", "data", "[", "mode", "]", "[", "\"combo\"", "]", ",", "self", ".", "data", "[", "mode", "]", "[", "\"member\"", "]", ",", "on", "=", "\"Ensemble_Member\"", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total_group\"", "]", "=", "pd", ".", "merge", "(", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ",", "self", ".", "data", "[", "mode", "]", "[", "\"member\"", "]", ",", "on", "=", "\"Ensemble_Member\"", ")" ]
Load data from flat data files containing total track information and information about each timestep. The two sets are combined using merge operations on the Track IDs. Additional member information is gathered from the appropriate member file. Args: mode: "train" or "forecast" format: file format being used. Default is "csv"
[ "Load", "data", "from", "flat", "data", "files", "containing", "total", "track", "information", "and", "information", "about", "each", "timestep", ".", "The", "two", "sets", "are", "combined", "using", "merge", "operations", "on", "the", "Track", "IDs", ".", "Additional", "member", "information", "is", "gathered", "from", "the", "appropriate", "member", "file", ".", "Args", ":", "mode", ":", "train", "or", "forecast", "format", ":", "file", "format", "being", "used", ".", "Default", "is", "csv" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L63-L111
train
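The date filter above recovers the run date from the last underscore-separated token of each file name, minus the four-character extension, and compares it against ``%Y%m%d-%H%M`` strings. A small sketch with a hypothetical file name; it uses ``pd.date_range``, the modern equivalent of the ``pd.DatetimeIndex(start=..., end=...)`` call, which only older pandas releases accept.

import pandas as pd

run_dates = pd.date_range(start="2015-05-20", end="2015-05-21", freq="1D")
run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]  # ["20150520-0000", ...]

track_file = "track_total_SSEF_20150520-0000.csv"  # hypothetical file name
file_date = track_file.split("_")[-1][:-4]         # strips ".csv" -> "20150520-0000"
print(file_date in run_date_str)                   # True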
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.calc_copulas
def calc_copulas(self, output_file,
                 model_names=("start-time", "translation-x", "translation-y"),
                 label_columns=("Start_Time_Error", "Translation_Error_X", "Translation_Error_Y")):
    """
    Calculate a copula multivariate normal distribution from the training data for each group of
    ensemble members. Distributions are written to a pickle file for later use.

    Args:
        output_file: Pickle file
        model_names: Names of the tracking models
        label_columns: Names of the data columns used for labeling

    Returns:
    """
    if len(self.data['train']) == 0:
        self.load_data()
    groups = self.data["train"]["member"][self.group_col].unique()
    copulas = {}
    label_columns = list(label_columns)
    for group in groups:
        print(group)
        group_data = self.data["train"]["total_group"].loc[
            self.data["train"]["total_group"][self.group_col] == group]
        group_data = group_data.dropna()
        group_data.reset_index(drop=True, inplace=True)
        copulas[group] = {}
        copulas[group]["mean"] = group_data[label_columns].mean(axis=0).values
        copulas[group]["cov"] = np.cov(group_data[label_columns].values.T)
        copulas[group]["model_names"] = list(model_names)
        del group_data
    # pickle requires a binary file handle under Python 3
    pickle.dump(copulas, open(output_file, "wb"), pickle.HIGHEST_PROTOCOL)
python
def calc_copulas(self, output_file,
                 model_names=("start-time", "translation-x", "translation-y"),
                 label_columns=("Start_Time_Error", "Translation_Error_X", "Translation_Error_Y")):
    """
    Calculate a copula multivariate normal distribution from the training data for each group of
    ensemble members. Distributions are written to a pickle file for later use.

    Args:
        output_file: Pickle file
        model_names: Names of the tracking models
        label_columns: Names of the data columns used for labeling

    Returns:
    """
    if len(self.data['train']) == 0:
        self.load_data()
    groups = self.data["train"]["member"][self.group_col].unique()
    copulas = {}
    label_columns = list(label_columns)
    for group in groups:
        print(group)
        group_data = self.data["train"]["total_group"].loc[
            self.data["train"]["total_group"][self.group_col] == group]
        group_data = group_data.dropna()
        group_data.reset_index(drop=True, inplace=True)
        copulas[group] = {}
        copulas[group]["mean"] = group_data[label_columns].mean(axis=0).values
        copulas[group]["cov"] = np.cov(group_data[label_columns].values.T)
        copulas[group]["model_names"] = list(model_names)
        del group_data
    # pickle requires a binary file handle under Python 3
    pickle.dump(copulas, open(output_file, "wb"), pickle.HIGHEST_PROTOCOL)
[ "def", "calc_copulas", "(", "self", ",", "output_file", ",", "model_names", "=", "(", "\"start-time\"", ",", "\"translation-x\"", ",", "\"translation-y\"", ")", ",", "label_columns", "=", "(", "\"Start_Time_Error\"", ",", "\"Translation_Error_X\"", ",", "\"Translation_Error_Y\"", ")", ")", ":", "if", "len", "(", "self", ".", "data", "[", "'train'", "]", ")", "==", "0", ":", "self", ".", "load_data", "(", ")", "groups", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ".", "unique", "(", ")", "copulas", "=", "{", "}", "label_columns", "=", "list", "(", "label_columns", ")", "for", "group", "in", "groups", ":", "print", "(", "group", ")", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"total_group\"", "]", ".", "loc", "[", "self", ".", "data", "[", "\"train\"", "]", "[", "\"total_group\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "group_data", "=", "group_data", ".", "dropna", "(", ")", "group_data", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")", "copulas", "[", "group", "]", "=", "{", "}", "copulas", "[", "group", "]", "[", "\"mean\"", "]", "=", "group_data", "[", "label_columns", "]", ".", "mean", "(", "axis", "=", "0", ")", ".", "values", "copulas", "[", "group", "]", "[", "\"cov\"", "]", "=", "np", ".", "cov", "(", "group_data", "[", "label_columns", "]", ".", "values", ".", "T", ")", "copulas", "[", "group", "]", "[", "\"model_names\"", "]", "=", "list", "(", "model_names", ")", "del", "group_data", "pickle", ".", "dump", "(", "copulas", ",", "open", "(", "output_file", ",", "\"w\"", ")", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")" ]
Calculate a copula multivariate normal distribution from the training data for each group of ensemble members. Distributions are written to a pickle file for later use. Args: output_file: Pickle file model_names: Names of the tracking models label_columns: Names of the data columns used for labeling Returns:
[ "Calculate", "a", "copula", "multivariate", "normal", "distribution", "from", "the", "training", "data", "for", "each", "group", "of", "ensemble", "members", ".", "Distributions", "are", "written", "to", "a", "pickle", "file", "for", "later", "use", ".", "Args", ":", "output_file", ":", "Pickle", "file", "model_names", ":", "Names", "of", "the", "tracking", "models", "label_columns", ":", "Names", "of", "the", "data", "columns", "used", "for", "labeling", "Returns", ":" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L113-L142
train
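A sketch of reading the stored distributions back and drawing correlated error samples from one group's fitted Gaussian; the file name is hypothetical, and the file must be opened in binary mode for pickle.

import pickle
import numpy as np

with open("copulas.pkl", "rb") as copula_file:  # hypothetical file name
    copulas = pickle.load(copula_file)
group = list(copulas.keys())[0]
# draw 100 correlated (start-time, translation-x, translation-y) error samples
samples = np.random.multivariate_normal(copulas[group]["mean"],
                                        copulas[group]["cov"], size=100)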
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.fit_condition_models
def fit_condition_models(self, model_names, model_objs, input_columns, output_column="Matched",
                         output_threshold=0.0):
    """
    Fit machine learning models to predict whether or not hail will occur.

    Args:
        model_names: List of strings with the names for the particular machine learning models
        model_objs: scikit-learn style machine learning model objects.
        input_columns: list of the names of the columns used as input for the machine learning model
        output_column: name of the column used for labeling whether or not the event occurs
        output_threshold: splitting threshold to determine if event has occurred. Default 0.0
    """
    print("Fitting condition models")
    groups = self.data["train"]["member"][self.group_col].unique()
    weights = None
    for group in groups:
        print(group)
        group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
        if self.sector:
            # rows for this ensemble group (group_data, not an undefined name)
            lon_obj = group_data.loc[:, 'Centroid_Lon']
            lat_obj = group_data.loc[:, 'Centroid_Lat']
            left_lon, right_lon = self.grid_dict["sw_lon"], self.grid_dict["ne_lon"]
            lower_lat, upper_lat = self.grid_dict["sw_lat"], self.grid_dict["ne_lat"]
            weights = np.where((left_lon <= lon_obj) & (right_lon >= lon_obj) &
                               (lower_lat <= lat_obj) & (upper_lat >= lat_obj), 1, 0.3)
        output_data = np.where(group_data[output_column] > output_threshold, 1, 0)
        print("Ones: ", np.count_nonzero(output_data > 0), "Zeros: ", np.count_nonzero(output_data == 0))
        self.condition_models[group] = {}
        for m, model_name in enumerate(model_names):
            print(model_name)
            self.condition_models[group][model_name] = deepcopy(model_objs[m])
            try:
                self.condition_models[group][model_name].fit(group_data[input_columns], output_data,
                                                             sample_weight=weights)
            except:
                # estimator does not accept sample_weight; fall back to an unweighted fit
                self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
            if hasattr(self.condition_models[group][model_name], "best_estimator_"):
                print(self.condition_models[group][model_name].best_estimator_)
                print(self.condition_models[group][model_name].best_score_)
python
def fit_condition_models(self, model_names, model_objs, input_columns, output_column="Matched",
                         output_threshold=0.0):
    """
    Fit machine learning models to predict whether or not hail will occur.

    Args:
        model_names: List of strings with the names for the particular machine learning models
        model_objs: scikit-learn style machine learning model objects.
        input_columns: list of the names of the columns used as input for the machine learning model
        output_column: name of the column used for labeling whether or not the event occurs
        output_threshold: splitting threshold to determine if event has occurred. Default 0.0
    """
    print("Fitting condition models")
    groups = self.data["train"]["member"][self.group_col].unique()
    weights = None
    for group in groups:
        print(group)
        group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
        if self.sector:
            # rows for this ensemble group (group_data, not an undefined name)
            lon_obj = group_data.loc[:, 'Centroid_Lon']
            lat_obj = group_data.loc[:, 'Centroid_Lat']
            left_lon, right_lon = self.grid_dict["sw_lon"], self.grid_dict["ne_lon"]
            lower_lat, upper_lat = self.grid_dict["sw_lat"], self.grid_dict["ne_lat"]
            weights = np.where((left_lon <= lon_obj) & (right_lon >= lon_obj) &
                               (lower_lat <= lat_obj) & (upper_lat >= lat_obj), 1, 0.3)
        output_data = np.where(group_data[output_column] > output_threshold, 1, 0)
        print("Ones: ", np.count_nonzero(output_data > 0), "Zeros: ", np.count_nonzero(output_data == 0))
        self.condition_models[group] = {}
        for m, model_name in enumerate(model_names):
            print(model_name)
            self.condition_models[group][model_name] = deepcopy(model_objs[m])
            try:
                self.condition_models[group][model_name].fit(group_data[input_columns], output_data,
                                                             sample_weight=weights)
            except:
                # estimator does not accept sample_weight; fall back to an unweighted fit
                self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
            if hasattr(self.condition_models[group][model_name], "best_estimator_"):
                print(self.condition_models[group][model_name].best_estimator_)
                print(self.condition_models[group][model_name].best_score_)
[ "def", "fit_condition_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_column", "=", "\"Matched\"", ",", "output_threshold", "=", "0.0", ")", ":", "print", "(", "\"Fitting condition models\"", ")", "groups", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ".", "unique", "(", ")", "weights", "=", "None", "for", "group", "in", "groups", ":", "print", "(", "group", ")", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "if", "self", ".", "sector", ":", "lon_obj", "=", "data", ".", "loc", "[", ":", ",", "'Centroid_Lon'", "]", "lat_obj", "=", "data", ".", "loc", "[", ":", ",", "'Centroid_Lat'", "]", "left_lon", ",", "right_lon", "=", "self", ".", "grid_dict", "[", "\"sw_lon\"", "]", ",", "self", ".", "grid_dict", "[", "\"ne_lon\"", "]", "lower_lat", ",", "upper_lat", "=", "self", ".", "grid_dict", "[", "\"sw_lat\"", "]", ",", "self", ".", "grid_dict", "[", "\"ne_lat\"", "]", "weights", "=", "np", ".", "where", "(", "(", "left_lon", "<=", "lon_obj", ")", "&", "(", "right_lon", ">=", "lon_obj", ")", "&", "(", "lower_lat", "<=", "lat_obj", ")", "&", "(", "upper_lat", ">=", "lat_obj", ")", ",", "1", ",", "0.3", ")", "output_data", "=", "np", ".", "where", "(", "group_data", "[", "output_column", "]", ">", "output_threshold", ",", "1", ",", "0", ")", "print", "(", "\"Ones: \"", ",", "np", ".", "count_nonzero", "(", "output_data", ">", "0", ")", ",", "\"Zeros: \"", ",", "np", ".", "count_nonzero", "(", "output_data", "==", "0", ")", ")", "self", ".", "condition_models", "[", "group", "]", "=", "{", "}", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "model_name", ")", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", "=", "deepcopy", "(", "model_objs", "[", "m", "]", ")", "try", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "output_data", ",", "sample_weight", "=", "weights", ")", "except", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "output_data", ")", "if", "hasattr", "(", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ",", "\"best_estimator_\"", ")", ":", "print", "(", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "best_estimator_", ")", "print", "(", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "best_score_", ")" ]
Fit machine learning models to predict whether or not hail will occur. Args: model_names: List of strings with the names for the particular machine learning models model_objs: scikit-learn style machine learning model objects. input_columns: list of the names of the columns used as input for the machine learning model output_column: name of the column used for labeling whether or not the event occurs output_threshold: splitting threshold to determine if event has occurred. Default 0.0
[ "Fit", "machine", "learning", "models", "to", "predict", "whether", "or", "not", "hail", "will", "occur", ".", "Args", ":", "model_names", ":", "List", "of", "strings", "with", "the", "names", "for", "the", "particular", "machine", "learning", "models", "model_objs", ":", "scikit", "-", "learn", "style", "machine", "learning", "model", "objects", ".", "input_columns", ":", "list", "of", "the", "names", "of", "the", "columns", "used", "as", "input", "for", "the", "machine", "learning", "model", "output_column", ":", "name", "of", "the", "column", "used", "for", "labeling", "whether", "or", "not", "the", "event", "occurs", "output_threshold", ":", "splitting", "threshold", "to", "determine", "if", "event", "has", "occurred", ".", "Default", "0", ".", "0" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L144-L191
train
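A hedged call sketch: ``track_modeler`` is assumed to be a TrackModeler whose ``load_data`` has already run, and the estimator choices and feature column names are illustrative only.

from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

track_modeler.fit_condition_models(
    model_names=["Random Forest", "Logistic Regression"],
    model_objs=[RandomForestClassifier(n_estimators=500), LogisticRegression()],
    input_columns=["REFL_COM_mean", "UP_HELI_MAX_max"],  # assumed feature names
    output_column="Matched",
    output_threshold=0.0)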
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.fit_condition_threshold_models
def fit_condition_threshold_models(self, model_names, model_objs, input_columns,
                                   output_column="Matched", output_threshold=0.5,
                                   num_folds=5, threshold_score="ets"):
    """
    Fit models to predict hail/no-hail and use cross-validation to determine the probability threshold
    that maximizes a skill score.

    Args:
        model_names: List of machine learning model names
        model_objs: List of Scikit-learn ML models
        input_columns: List of input variables in the training data
        output_column: Column used for prediction
        output_threshold: Values exceeding this threshold are considered positive events; below are nulls
        num_folds: Number of folds in the cross-validation procedure
        threshold_score: Score available in ContingencyTable used for determining the best probability threshold

    Returns:
        None
    """
    print("Fitting condition models")
    groups = self.data["train"]["member"][self.group_col].unique()
    weights = None
    for group in groups:
        print(group)
        group_data = self.data["train"]["combo"].iloc[
            np.where(self.data["train"]["combo"][self.group_col] == group)[0]]
        if self.sector:
            lon_obj = group_data.loc[:, 'Centroid_Lon']
            lat_obj = group_data.loc[:, 'Centroid_Lat']
            conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.values.ravel())
            center_lon, center_lat = self.proj_dict["lon_0"], self.proj_dict["lat_0"]
            distances = np.array([np.sqrt((x - center_lon) ** 2 +
                                          (y - center_lat) ** 2) for (x, y) in conus_lat_lon_points])
            min_dist, max_minus_min = min(distances), max(distances) - min(distances)
            distance_0_1 = [1.0 - ((d - min_dist) / (max_minus_min)) for d in distances]
            weights = np.array(distance_0_1)
        output_data = np.where(group_data.loc[:, output_column] > output_threshold, 1, 0)
        ones = np.count_nonzero(output_data > 0)
        print("Ones: ", ones, "Zeros: ", np.count_nonzero(output_data == 0))
        self.condition_models[group] = {}
        num_elements = group_data[input_columns].shape[0]
        for m, model_name in enumerate(model_names):
            print(model_name)
            roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.01))
            self.condition_models[group][model_name] = deepcopy(model_objs[m])
            try:
                kf = KFold(n_splits=num_folds)
                for train_index, test_index in kf.split(group_data[input_columns].values):
                    if np.count_nonzero(output_data[train_index]) > 0:
                        try:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index], sample_weight=weights[train_index])
                        except:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index])
                        cv_preds = self.condition_models[group][model_name].predict_proba(
                            group_data.iloc[test_index][input_columns])[:, 1]
                        roc.update(cv_preds, output_data[test_index])
                    else:
                        continue
            except TypeError:
                # older scikit-learn KFold API: construct with (n, n_folds) and iterate directly
                kf = KFold(num_elements, n_folds=num_folds)
                for train_index, test_index in kf:
                    if np.count_nonzero(output_data[train_index]) > 0:
                        try:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index], sample_weight=weights[train_index])
                        except:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index])
                        cv_preds = self.condition_models[group][model_name].predict_proba(
                            group_data.iloc[test_index][input_columns])[:, 1]
                        roc.update(cv_preds, output_data[test_index])
                    else:
                        continue
            self.condition_models[group][
                model_name + "_condition_threshold"], _ = roc.max_threshold_score(threshold_score)
            print(model_name + " condition threshold: {0:0.3f}".format(
                self.condition_models[group][model_name + "_condition_threshold"]))
            self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
python
def fit_condition_threshold_models(self, model_names, model_objs, input_columns,
                                   output_column="Matched", output_threshold=0.5,
                                   num_folds=5, threshold_score="ets"):
    """
    Fit models to predict hail/no-hail and use cross-validation to determine the probability threshold
    that maximizes a skill score.

    Args:
        model_names: List of machine learning model names
        model_objs: List of Scikit-learn ML models
        input_columns: List of input variables in the training data
        output_column: Column used for prediction
        output_threshold: Values exceeding this threshold are considered positive events; below are nulls
        num_folds: Number of folds in the cross-validation procedure
        threshold_score: Score available in ContingencyTable used for determining the best probability threshold

    Returns:
        None
    """
    print("Fitting condition models")
    groups = self.data["train"]["member"][self.group_col].unique()
    weights = None
    for group in groups:
        print(group)
        group_data = self.data["train"]["combo"].iloc[
            np.where(self.data["train"]["combo"][self.group_col] == group)[0]]
        if self.sector:
            lon_obj = group_data.loc[:, 'Centroid_Lon']
            lat_obj = group_data.loc[:, 'Centroid_Lat']
            conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.values.ravel())
            center_lon, center_lat = self.proj_dict["lon_0"], self.proj_dict["lat_0"]
            distances = np.array([np.sqrt((x - center_lon) ** 2 +
                                          (y - center_lat) ** 2) for (x, y) in conus_lat_lon_points])
            min_dist, max_minus_min = min(distances), max(distances) - min(distances)
            distance_0_1 = [1.0 - ((d - min_dist) / (max_minus_min)) for d in distances]
            weights = np.array(distance_0_1)
        output_data = np.where(group_data.loc[:, output_column] > output_threshold, 1, 0)
        ones = np.count_nonzero(output_data > 0)
        print("Ones: ", ones, "Zeros: ", np.count_nonzero(output_data == 0))
        self.condition_models[group] = {}
        num_elements = group_data[input_columns].shape[0]
        for m, model_name in enumerate(model_names):
            print(model_name)
            roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.01))
            self.condition_models[group][model_name] = deepcopy(model_objs[m])
            try:
                kf = KFold(n_splits=num_folds)
                for train_index, test_index in kf.split(group_data[input_columns].values):
                    if np.count_nonzero(output_data[train_index]) > 0:
                        try:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index], sample_weight=weights[train_index])
                        except:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index])
                        cv_preds = self.condition_models[group][model_name].predict_proba(
                            group_data.iloc[test_index][input_columns])[:, 1]
                        roc.update(cv_preds, output_data[test_index])
                    else:
                        continue
            except TypeError:
                # older scikit-learn KFold API: construct with (n, n_folds) and iterate directly
                kf = KFold(num_elements, n_folds=num_folds)
                for train_index, test_index in kf:
                    if np.count_nonzero(output_data[train_index]) > 0:
                        try:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index], sample_weight=weights[train_index])
                        except:
                            self.condition_models[group][model_name].fit(
                                group_data.iloc[train_index][input_columns],
                                output_data[train_index])
                        cv_preds = self.condition_models[group][model_name].predict_proba(
                            group_data.iloc[test_index][input_columns])[:, 1]
                        roc.update(cv_preds, output_data[test_index])
                    else:
                        continue
            self.condition_models[group][
                model_name + "_condition_threshold"], _ = roc.max_threshold_score(threshold_score)
            print(model_name + " condition threshold: {0:0.3f}".format(
                self.condition_models[group][model_name + "_condition_threshold"]))
            self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
[ "def", "fit_condition_threshold_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_column", "=", "\"Matched\"", ",", "output_threshold", "=", "0.5", ",", "num_folds", "=", "5", ",", "threshold_score", "=", "\"ets\"", ")", ":", "print", "(", "\"Fitting condition models\"", ")", "groups", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ".", "unique", "(", ")", "weights", "=", "None", "for", "group", "in", "groups", ":", "print", "(", "group", ")", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "iloc", "[", "np", ".", "where", "(", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", ")", "[", "0", "]", "]", "if", "self", ".", "sector", ":", "lon_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lon'", "]", "lat_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lat'", "]", "conus_lat_lon_points", "=", "zip", "(", "lon_obj", ".", "values", ".", "ravel", "(", ")", ",", "lat_obj", ".", "values", ".", "ravel", "(", ")", ")", "center_lon", ",", "center_lat", "=", "self", ".", "proj_dict", "[", "\"lon_0\"", "]", ",", "self", ".", "proj_dict", "[", "\"lat_0\"", "]", "distances", "=", "np", ".", "array", "(", "[", "np", ".", "sqrt", "(", "(", "x", "-", "center_lon", ")", "**", "2", "+", "(", "y", "-", "center_lat", ")", "**", "2", ")", "for", "(", "x", ",", "y", ")", "in", "conus_lat_lon_points", "]", ")", "min_dist", ",", "max_minus_min", "=", "min", "(", "distances", ")", ",", "max", "(", "distances", ")", "-", "min", "(", "distances", ")", "distance_0_1", "=", "[", "1.0", "-", "(", "(", "d", "-", "min_dist", ")", "/", "(", "max_minus_min", ")", ")", "for", "d", "in", "distances", "]", "weights", "=", "np", ".", "array", "(", "distance_0_1", ")", "output_data", "=", "np", ".", "where", "(", "group_data", ".", "loc", "[", ":", ",", "output_column", "]", ">", "output_threshold", ",", "1", ",", "0", ")", "ones", "=", "np", ".", "count_nonzero", "(", "output_data", ">", "0", ")", "print", "(", "\"Ones: \"", ",", "ones", ",", "\"Zeros: \"", ",", "np", ".", "count_nonzero", "(", "output_data", "==", "0", ")", ")", "self", ".", "condition_models", "[", "group", "]", "=", "{", "}", "num_elements", "=", "group_data", "[", "input_columns", "]", ".", "shape", "[", "0", "]", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "model_name", ")", "roc", "=", "DistributedROC", "(", "thresholds", "=", "np", ".", "arange", "(", "0", ",", "1.1", ",", "0.01", ")", ")", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", "=", "deepcopy", "(", "model_objs", "[", "m", "]", ")", "try", ":", "kf", "=", "KFold", "(", "n_splits", "=", "num_folds", ")", "for", "train_index", ",", "test_index", "in", "kf", ".", "split", "(", "group_data", "[", "input_columns", "]", ".", "values", ")", ":", "if", "np", ".", "count_nonzero", "(", "output_data", "[", "train_index", "]", ")", ">", "0", ":", "try", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", "train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ",", "sample_weight", "=", "weights", "[", "train_index", "]", ")", "except", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", 
"train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ")", "cv_preds", "=", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "predict_proba", "(", "group_data", ".", "iloc", "[", "test_index", "]", "[", "input_columns", "]", ")", "[", ":", ",", "1", "]", "roc", ".", "update", "(", "cv_preds", ",", "output_data", "[", "test_index", "]", ")", "else", ":", "continue", "except", "TypeError", ":", "kf", "=", "KFold", "(", "num_elements", ",", "n_folds", "=", "num_folds", ")", "for", "train_index", ",", "test_index", "in", "kf", ":", "if", "np", ".", "count_nonzero", "(", "output_data", "[", "train_index", "]", ")", ">", "0", ":", "try", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", "train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ",", "sample_weight", "=", "weights", "[", "train_index", "]", ")", "except", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", "train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ")", "cv_preds", "=", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "predict_proba", "(", "group_data", ".", "iloc", "[", "test_index", "]", "[", "input_columns", "]", ")", "[", ":", ",", "1", "]", "roc", ".", "update", "(", "cv_preds", ",", "output_data", "[", "test_index", "]", ")", "else", ":", "continue", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "+", "\"_condition_threshold\"", "]", ",", "_", "=", "roc", ".", "max_threshold_score", "(", "threshold_score", ")", "print", "(", "model_name", "+", "\" condition threshold: {0:0.3f}\"", ".", "format", "(", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "+", "\"_condition_threshold\"", "]", ")", ")", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "output_data", ")" ]
Fit models to predict hail/no-hail and use cross-validation to determine the probability threshold that maximizes a skill score. Args: model_names: List of machine learning model names model_objs: List of Scikit-learn ML models input_columns: List of input variables in the training data output_column: Column used for prediction output_threshold: Values exceeding this threshold are considered positive events; below are nulls num_folds: Number of folds in the cross-validation procedure threshold_score: Score available in ContingencyTable used for determining the best probability threshold Returns: None
[ "Fit", "models", "to", "predict", "hail", "/", "no", "-", "hail", "and", "use", "cross", "-", "validation", "to", "determine", "the", "probaility", "threshold", "that", "maximizes", "a", "skill", "score", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L193-L292
train
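The try/except TypeError block above is a version shim for scikit-learn's KFold API change. A self-contained sketch of the same pattern:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(20).reshape(10, 2)
try:
    kf = KFold(n_splits=5)          # modern API: splits come from kf.split(X)
    splits = kf.split(X)
except TypeError:
    kf = KFold(len(X), n_folds=5)   # legacy API: iterate the object directly
    splits = kf
for train_index, test_index in splits:
    pass  # fit on train_index rows, evaluate on test_index rows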
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.predict_condition_models
def predict_condition_models(self, model_names, input_columns, metadata_cols, data_mode="forecast"):
    """
    Apply condition models to forecast data.

    Args:
        model_names: List of names associated with each condition model used for prediction
        input_columns: List of columns in data used as input into the model
        metadata_cols: Columns from input data that should be included in the data frame with the predictions.
        data_mode: Which data subset to pull from for the predictions, "forecast" by default

    Returns:
        A dictionary of data frames containing probabilities of the event and specified metadata
    """
    groups = self.condition_models.keys()
    predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
    for group in groups:
        print(group)
        print(self.condition_models[group])
        g_idxs = self.data[data_mode]["combo"][self.group_col] == group
        group_count = np.count_nonzero(g_idxs)
        if group_count > 0:
            for m, model_name in enumerate(model_names):
                mn = model_name.replace(" ", "-")
                predictions.loc[g_idxs, mn + "_conditionprob"] = self.condition_models[group][
                    model_name].predict_proba(
                    self.data[data_mode]["combo"].loc[g_idxs, input_columns])[:, 1]
                predictions.loc[g_idxs, mn + "_conditionthresh"] = np.where(
                    predictions.loc[g_idxs, mn + "_conditionprob"] >=
                    self.condition_models[group][model_name + "_condition_threshold"], 1, 0)
    return predictions
python
def predict_condition_models(self, model_names, input_columns, metadata_cols, data_mode="forecast"):
    """
    Apply condition models to forecast data.

    Args:
        model_names: List of names associated with each condition model used for prediction
        input_columns: List of columns in data used as input into the model
        metadata_cols: Columns from input data that should be included in the data frame with the predictions.
        data_mode: Which data subset to pull from for the predictions, "forecast" by default

    Returns:
        A dictionary of data frames containing probabilities of the event and specified metadata
    """
    groups = self.condition_models.keys()
    predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
    for group in groups:
        print(group)
        print(self.condition_models[group])
        g_idxs = self.data[data_mode]["combo"][self.group_col] == group
        group_count = np.count_nonzero(g_idxs)
        if group_count > 0:
            for m, model_name in enumerate(model_names):
                mn = model_name.replace(" ", "-")
                predictions.loc[g_idxs, mn + "_conditionprob"] = self.condition_models[group][
                    model_name].predict_proba(
                    self.data[data_mode]["combo"].loc[g_idxs, input_columns])[:, 1]
                predictions.loc[g_idxs, mn + "_conditionthresh"] = np.where(
                    predictions.loc[g_idxs, mn + "_conditionprob"] >=
                    self.condition_models[group][model_name + "_condition_threshold"], 1, 0)
    return predictions
[ "def", "predict_condition_models", "(", "self", ",", "model_names", ",", "input_columns", ",", "metadata_cols", ",", "data_mode", "=", "\"forecast\"", ",", ")", ":", "groups", "=", "self", ".", "condition_models", ".", "keys", "(", ")", "predictions", "=", "pd", ".", "DataFrame", "(", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", "[", "metadata_cols", "]", ")", "for", "group", "in", "groups", ":", "print", "(", "group", ")", "print", "(", "self", ".", "condition_models", "[", "group", "]", ")", "g_idxs", "=", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "group_count", "=", "np", ".", "count_nonzero", "(", "g_idxs", ")", "if", "group_count", ">", "0", ":", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "mn", "=", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "predictions", ".", "loc", "[", "g_idxs", ",", "mn", "+", "\"_conditionprob\"", "]", "=", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "predict_proba", "(", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", ".", "loc", "[", "g_idxs", ",", "input_columns", "]", ")", "[", ":", ",", "1", "]", "predictions", ".", "loc", "[", "g_idxs", ",", "mn", "+", "\"_conditionthresh\"", "]", "=", "np", ".", "where", "(", "predictions", ".", "loc", "[", "g_idxs", ",", "mn", "+", "\"_conditionprob\"", "]", ">=", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "+", "\"_condition_threshold\"", "]", ",", "1", ",", "0", ")", "return", "predictions" ]
Apply condition models to forecast data. Args: model_names: List of names associated with each condition model used for prediction input_columns: List of columns in data used as input into the model metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Which data subset to pull from for the predictions, "forecast" by default Returns: A dictionary of data frames containing probabilities of the event and specified metadata
[ "Apply", "condition", "modelsto", "forecast", "data", ".", "Args", ":", "model_names", ":", "List", "of", "names", "associated", "with", "each", "condition", "model", "used", "for", "prediction", "input_columns", ":", "List", "of", "columns", "in", "data", "used", "as", "input", "into", "the", "model", "metadata_cols", ":", "Columns", "from", "input", "data", "that", "should", "be", "included", "in", "the", "data", "frame", "with", "the", "predictions", ".", "data_mode", ":", "Which", "data", "subset", "to", "pull", "from", "for", "the", "predictions", "forecast", "by", "default", "Returns", ":", "A", "dictionary", "of", "data", "frames", "containing", "probabilities", "of", "the", "event", "and", "specified", "metadata" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L294-L327
train
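A consumption sketch for the returned frame; note that spaces in model names become dashes in the prediction column names. ``track_modeler`` and the column names are assumptions.

preds = track_modeler.predict_condition_models(
    ["Random Forest"],
    input_columns=["REFL_COM_mean", "UP_HELI_MAX_max"],  # assumed feature names
    metadata_cols=["Track_ID", "Run_Date"])
hail_prob = preds["Random-Forest_conditionprob"]    # event probability per track step
hail_flag = preds["Random-Forest_conditionthresh"]  # 1 where prob >= tuned threshold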
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.fit_size_distribution_models
def fit_size_distribution_models(self, model_names, model_objs, input_columns, output_columns=None,
                                 calibrate=False):
    """
    Fits multitask machine learning models to predict the parameters of a size distribution

    Args:
        model_names: List of machine learning model names
        model_objs: scikit-learn style machine learning model objects
        input_columns: Training data columns used as input for ML model
        output_columns: Training data columns used for prediction
        calibrate: Whether or not to fit a log-linear regression to predictions from ML model
    """
    if output_columns is None:
        output_columns = ["Shape", "Location", "Scale"]
    groups = np.unique(self.data["train"]["member"][self.group_col])
    weights = None
    for group in groups:
        group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
        group_data = group_data.dropna()
        group_data = group_data[group_data[output_columns[-1]] > 0]
        if self.sector:
            lon_obj = group_data.loc[:, 'Centroid_Lon']
            lat_obj = group_data.loc[:, 'Centroid_Lat']
            conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.values.ravel())
            center_lon, center_lat = self.proj_dict["lon_0"], self.proj_dict["lat_0"]
            distances = np.array([np.sqrt((x - center_lon) ** 2 +
                                          (y - center_lat) ** 2) for (x, y) in conus_lat_lon_points])
            min_dist, max_minus_min = min(distances), max(distances) - min(distances)
            distance_0_1 = [1.0 - ((d - min_dist) / (max_minus_min)) for d in distances]
            weights = np.array(distance_0_1)
        self.size_distribution_models[group] = {"multi": {}, "lognorm": {}}
        if calibrate:
            self.size_distribution_models[group]["calshape"] = {}
            self.size_distribution_models[group]["calscale"] = {}
        log_labels = np.log(group_data[output_columns].values)
        log_means = log_labels.mean(axis=0)
        log_sds = log_labels.std(axis=0)
        self.size_distribution_models[group]['lognorm']['mean'] = log_means
        self.size_distribution_models[group]['lognorm']['sd'] = log_sds
        for m, model_name in enumerate(model_names):
            print(group, model_name)
            self.size_distribution_models[group]["multi"][model_name] = deepcopy(model_objs[m])
            try:
                self.size_distribution_models[group]["multi"][model_name].fit(
                    group_data[input_columns], (log_labels - log_means) / log_sds, sample_weight=weights)
            except:
                self.size_distribution_models[group]["multi"][model_name].fit(
                    group_data[input_columns], (log_labels - log_means) / log_sds)
            if calibrate:
                training_predictions = self.size_distribution_models[
                    group]["multi"][model_name].predict(group_data[input_columns])
                self.size_distribution_models[group]["calshape"][model_name] = LinearRegression()
                self.size_distribution_models[group]["calshape"][model_name].fit(
                    training_predictions[:, 0:1], (log_labels[:, 0] - log_means[0]) / log_sds[0],
                    sample_weight=weights)
                self.size_distribution_models[group]["calscale"][model_name] = LinearRegression()
                self.size_distribution_models[group]["calscale"][model_name].fit(
                    training_predictions[:, 1:], (log_labels[:, 1] - log_means[1]) / log_sds[1],
                    sample_weight=weights)
python
def fit_size_distribution_models(self, model_names, model_objs, input_columns, output_columns=None, calibrate=False): """ Fits multitask machine learning models to predict the parameters of a size distribution Args: model_names: List of machine learning model names model_objs: scikit-learn style machine learning model objects input_columns: Training data columns used as input for ML model output_columns: Training data columns used for prediction calibrate: Whether or not to fit a log-linear regression to predictions from ML model """ if output_columns is None: output_columns = ["Shape", "Location", "Scale"] groups = np.unique(self.data["train"]["member"][self.group_col]) weights=None for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data[group_data[output_columns[-1]] > 0] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) self.size_distribution_models[group] = {"multi": {}, "lognorm": {}} if calibrate: self.size_distribution_models[group]["calshape"] = {} self.size_distribution_models[group]["calscale"] = {} log_labels = np.log(group_data[output_columns].values) log_means = log_labels.mean(axis=0) log_sds = log_labels.std(axis=0) self.size_distribution_models[group]['lognorm']['mean'] = log_means self.size_distribution_models[group]['lognorm']['sd'] = log_sds for m, model_name in enumerate(model_names): print(group, model_name) self.size_distribution_models[group]["multi"][model_name] = deepcopy(model_objs[m]) try: self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns], (log_labels - log_means) / log_sds, sample_weight=weights) except: self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns], (log_labels - log_means) / log_sds) if calibrate: training_predictions = self.size_distribution_models[ group]["multi"][model_name].predict(group_data[input_columns]) self.size_distribution_models[group]["calshape"][model_name] = LinearRegression() self.size_distribution_models[group]["calshape"][model_name].fit(training_predictions[:, 0:1], (log_labels[:, 0] - log_means[0]) / log_sds[ 0], sample_weight=weights) self.size_distribution_models[group]["calscale"][model_name] = LinearRegression() self.size_distribution_models[group]["calscale"][model_name].fit(training_predictions[:, 1:], (log_labels[:, 1] - log_means[1]) / log_sds[ 1], sample_weight=weights)
[ "def", "fit_size_distribution_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_columns", "=", "None", ",", "calibrate", "=", "False", ")", ":", "if", "output_columns", "is", "None", ":", "output_columns", "=", "[", "\"Shape\"", ",", "\"Location\"", ",", "\"Scale\"", "]", "groups", "=", "np", ".", "unique", "(", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ")", "weights", "=", "None", "for", "group", "in", "groups", ":", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "group_data", "=", "group_data", ".", "dropna", "(", ")", "group_data", "=", "group_data", "[", "group_data", "[", "output_columns", "[", "-", "1", "]", "]", ">", "0", "]", "if", "self", ".", "sector", ":", "lon_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lon'", "]", "lat_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lat'", "]", "conus_lat_lon_points", "=", "zip", "(", "lon_obj", ".", "values", ".", "ravel", "(", ")", ",", "lat_obj", ".", "values", ".", "ravel", "(", ")", ")", "center_lon", ",", "center_lat", "=", "self", ".", "proj_dict", "[", "\"lon_0\"", "]", ",", "self", ".", "proj_dict", "[", "\"lat_0\"", "]", "distances", "=", "np", ".", "array", "(", "[", "np", ".", "sqrt", "(", "(", "x", "-", "center_lon", ")", "**", "2", "+", "(", "y", "-", "center_lat", ")", "**", "2", ")", "for", "(", "x", ",", "y", ")", "in", "conus_lat_lon_points", "]", ")", "min_dist", ",", "max_minus_min", "=", "min", "(", "distances", ")", ",", "max", "(", "distances", ")", "-", "min", "(", "distances", ")", "distance_0_1", "=", "[", "1.0", "-", "(", "(", "d", "-", "min_dist", ")", "/", "(", "max_minus_min", ")", ")", "for", "d", "in", "distances", "]", "weights", "=", "np", ".", "array", "(", "distance_0_1", ")", "self", ".", "size_distribution_models", "[", "group", "]", "=", "{", "\"multi\"", ":", "{", "}", ",", "\"lognorm\"", ":", "{", "}", "}", "if", "calibrate", ":", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calshape\"", "]", "=", "{", "}", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calscale\"", "]", "=", "{", "}", "log_labels", "=", "np", ".", "log", "(", "group_data", "[", "output_columns", "]", ".", "values", ")", "log_means", "=", "log_labels", ".", "mean", "(", "axis", "=", "0", ")", "log_sds", "=", "log_labels", ".", "std", "(", "axis", "=", "0", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "'lognorm'", "]", "[", "'mean'", "]", "=", "log_means", "self", ".", "size_distribution_models", "[", "group", "]", "[", "'lognorm'", "]", "[", "'sd'", "]", "=", "log_sds", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "group", ",", "model_name", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"multi\"", "]", "[", "model_name", "]", "=", "deepcopy", "(", "model_objs", "[", "m", "]", ")", "try", ":", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"multi\"", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "(", "log_labels", "-", "log_means", ")", "/", "log_sds", ",", "sample_weight", "=", "weights", ")", "except", ":", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"multi\"", "]", "[", "model_name", "]", 
".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "(", "log_labels", "-", "log_means", ")", "/", "log_sds", ")", "if", "calibrate", ":", "training_predictions", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"multi\"", "]", "[", "model_name", "]", ".", "predict", "(", "group_data", "[", "input_columns", "]", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calshape\"", "]", "[", "model_name", "]", "=", "LinearRegression", "(", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calshape\"", "]", "[", "model_name", "]", ".", "fit", "(", "training_predictions", "[", ":", ",", "0", ":", "1", "]", ",", "(", "log_labels", "[", ":", ",", "0", "]", "-", "log_means", "[", "0", "]", ")", "/", "log_sds", "[", "0", "]", ",", "sample_weight", "=", "weights", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calscale\"", "]", "[", "model_name", "]", "=", "LinearRegression", "(", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calscale\"", "]", "[", "model_name", "]", ".", "fit", "(", "training_predictions", "[", ":", ",", "1", ":", "]", ",", "(", "log_labels", "[", ":", ",", "1", "]", "-", "log_means", "[", "1", "]", ")", "/", "log_sds", "[", "1", "]", ",", "sample_weight", "=", "weights", ")" ]
Fits multitask machine learning models to predict the parameters of a size distribution Args: model_names: List of machine learning model names model_objs: scikit-learn style machine learning model objects input_columns: Training data columns used as input for ML model output_columns: Training data columns used for prediction calibrate: Whether or not to fit a log-linear regression to predictions from ML model
[ "Fits", "multitask", "machine", "learning", "models", "to", "predict", "the", "parameters", "of", "a", "size", "distribution", "Args", ":", "model_names", ":", "List", "of", "machine", "learning", "model", "names", "model_objs", ":", "scikit", "-", "learn", "style", "machine", "learning", "model", "objects", "input_columns", ":", "Training", "data", "columns", "used", "as", "input", "for", "ML", "model", "output_columns", ":", "Training", "data", "columns", "used", "for", "prediction", "calibrate", ":", "Whether", "or", "not", "to", "fit", "a", "log", "-", "linear", "regression", "to", "predictions", "from", "ML", "model" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L329-L399
train
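fit_size_distribution_models standardizes the log of the gamma-distribution parameters before fitting, and the try/except around fit exists because not every scikit-learn estimator accepts sample_weight. A small self-contained sketch of both ideas on synthetic data; catching TypeError is a slightly stricter stand-in for the bare except used above.

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 4))                     # synthetic input features
labels = np.exp(rng.normal(size=(100, 3)))        # positive Shape/Location/Scale values

log_labels = np.log(labels)
log_means = log_labels.mean(axis=0)
log_sds = log_labels.std(axis=0)
targets = (log_labels - log_means) / log_sds      # standardized log parameters

weights = rng.uniform(size=100)
model = LinearRegression()
try:
    model.fit(X, targets, sample_weight=weights)
except TypeError:
    # Fall back to an unweighted fit for estimators without sample_weight support.
    model.fit(X, targets)

# Undo the transform the same way the prediction step does:
params = np.exp(model.predict(X) * log_sds + log_means)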
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.fit_size_distribution_component_models
def fit_size_distribution_component_models(self, model_names, model_objs, input_columns, output_columns): """ This calculates 2 principal components for the hail size distribution between the shape and scale parameters. Separate machine learning models are fit to predict each component. Args: model_names: List of machine learning model names model_objs: List of machine learning model objects. input_columns: List of input variables output_columns: Output columns, should contain Shape and Scale. Returns: """ groups = np.unique(self.data["train"]["member"][self.group_col]) weights=None for group in groups: print(group) group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data.loc[group_data[output_columns[-1]] > 0] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) self.size_distribution_models[group] = {"lognorm": {}} self.size_distribution_models[group]["lognorm"]["pca"] = PCA(n_components=len(output_columns)) log_labels = np.log(group_data[output_columns].values) log_labels[:, np.where(output_columns == "Shape")[0]] *= -1 log_means = log_labels.mean(axis=0) log_sds = log_labels.std(axis=0) log_norm_labels = (log_labels - log_means) / log_sds out_pc_labels = self.size_distribution_models[group]["lognorm"]["pca"].fit_transform(log_norm_labels) self.size_distribution_models[group]['lognorm']['mean'] = log_means self.size_distribution_models[group]['lognorm']['sd'] = log_sds for comp in range(len(output_columns)): self.size_distribution_models[group]["pc_{0:d}".format(comp)] = dict() for m, model_name in enumerate(model_names): print(model_name, comp) self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name] = deepcopy(model_objs[m]) try: self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns], out_pc_labels[:, comp], sample_weight=weights) except: self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns], out_pc_labels[:, comp]) return
python
def fit_size_distribution_component_models(self, model_names, model_objs, input_columns, output_columns): """ This calculates 2 principal components for the hail size distribution between the shape and scale parameters. Separate machine learning models are fit to predict each component. Args: model_names: List of machine learning model names model_objs: List of machine learning model objects. input_columns: List of input variables output_columns: Output columns, should contain Shape and Scale. Returns: """ groups = np.unique(self.data["train"]["member"][self.group_col]) weights=None for group in groups: print(group) group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data.loc[group_data[output_columns[-1]] > 0] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) self.size_distribution_models[group] = {"lognorm": {}} self.size_distribution_models[group]["lognorm"]["pca"] = PCA(n_components=len(output_columns)) log_labels = np.log(group_data[output_columns].values) log_labels[:, np.where(output_columns == "Shape")[0]] *= -1 log_means = log_labels.mean(axis=0) log_sds = log_labels.std(axis=0) log_norm_labels = (log_labels - log_means) / log_sds out_pc_labels = self.size_distribution_models[group]["lognorm"]["pca"].fit_transform(log_norm_labels) self.size_distribution_models[group]['lognorm']['mean'] = log_means self.size_distribution_models[group]['lognorm']['sd'] = log_sds for comp in range(len(output_columns)): self.size_distribution_models[group]["pc_{0:d}".format(comp)] = dict() for m, model_name in enumerate(model_names): print(model_name, comp) self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name] = deepcopy(model_objs[m]) try: self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns], out_pc_labels[:, comp], sample_weight=weights) except: self.size_distribution_models[group][ "pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns], out_pc_labels[:, comp]) return
[ "def", "fit_size_distribution_component_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_columns", ")", ":", "groups", "=", "np", ".", "unique", "(", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ")", "weights", "=", "None", "for", "group", "in", "groups", ":", "print", "(", "group", ")", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "group_data", "=", "group_data", ".", "dropna", "(", ")", "group_data", "=", "group_data", ".", "loc", "[", "group_data", "[", "output_columns", "[", "-", "1", "]", "]", ">", "0", "]", "if", "self", ".", "sector", ":", "lon_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lon'", "]", "lat_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lat'", "]", "conus_lat_lon_points", "=", "zip", "(", "lon_obj", ".", "values", ".", "ravel", "(", ")", ",", "lat_obj", ".", "values", ".", "ravel", "(", ")", ")", "center_lon", ",", "center_lat", "=", "self", ".", "proj_dict", "[", "\"lon_0\"", "]", ",", "self", ".", "proj_dict", "[", "\"lat_0\"", "]", "distances", "=", "np", ".", "array", "(", "[", "np", ".", "sqrt", "(", "(", "x", "-", "center_lon", ")", "**", "2", "+", "(", "y", "-", "center_lat", ")", "**", "2", ")", "for", "(", "x", ",", "y", ")", "in", "conus_lat_lon_points", "]", ")", "min_dist", ",", "max_minus_min", "=", "min", "(", "distances", ")", ",", "max", "(", "distances", ")", "-", "min", "(", "distances", ")", "distance_0_1", "=", "[", "1.0", "-", "(", "(", "d", "-", "min_dist", ")", "/", "(", "max_minus_min", ")", ")", "for", "d", "in", "distances", "]", "weights", "=", "np", ".", "array", "(", "distance_0_1", ")", "self", ".", "size_distribution_models", "[", "group", "]", "=", "{", "\"lognorm\"", ":", "{", "}", "}", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"pca\"", "]", "=", "PCA", "(", "n_components", "=", "len", "(", "output_columns", ")", ")", "log_labels", "=", "np", ".", "log", "(", "group_data", "[", "output_columns", "]", ".", "values", ")", "log_labels", "[", ":", ",", "np", ".", "where", "(", "output_columns", "==", "\"Shape\"", ")", "[", "0", "]", "]", "*=", "-", "1", "log_means", "=", "log_labels", ".", "mean", "(", "axis", "=", "0", ")", "log_sds", "=", "log_labels", ".", "std", "(", "axis", "=", "0", ")", "log_norm_labels", "=", "(", "log_labels", "-", "log_means", ")", "/", "log_sds", "out_pc_labels", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"pca\"", "]", ".", "fit_transform", "(", "log_norm_labels", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "'lognorm'", "]", "[", "'mean'", "]", "=", "log_means", "self", ".", "size_distribution_models", "[", "group", "]", "[", "'lognorm'", "]", "[", "'sd'", "]", "=", "log_sds", "for", "comp", "in", "range", "(", "len", "(", "output_columns", ")", ")", ":", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"pc_{0:d}\"", ".", "format", "(", "comp", ")", "]", "=", "dict", "(", ")", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "model_name", ",", "comp", ")", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"pc_{0:d}\"", ".", "format", "(", "comp", ")", "]", "[", "model_name", "]", "=", 
"deepcopy", "(", "model_objs", "[", "m", "]", ")", "try", ":", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"pc_{0:d}\"", ".", "format", "(", "comp", ")", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "out_pc_labels", "[", ":", ",", "comp", "]", ",", "sample_weight", "=", "weights", ")", "except", ":", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"pc_{0:d}\"", ".", "format", "(", "comp", ")", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "out_pc_labels", "[", ":", ",", "comp", "]", ")", "return" ]
This calculates 2 principal components from the hail size distribution's shape and scale parameters. Separate machine learning models are fit to predict each component. Args: model_names: List of machine learning model names. model_objs: List of machine learning model objects. input_columns: List of input variables. output_columns: Output columns; should contain Shape and Scale. Returns: None.
[ "This", "calculates", "2", "principal", "components", "for", "the", "hail", "size", "distribution", "between", "the", "shape", "and", "scale", "parameters", ".", "Separate", "machine", "learning", "models", "are", "fit", "to", "predict", "each", "component", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L402-L468
train
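The component-model variant reduces the standardized log(Shape)/log(Scale) targets to principal components and fits one regressor per component. Because the PCA keeps as many components as there are output columns, the transform is lossless and inverse_transform recovers the original targets exactly, which is what prediction later relies on. A quick demonstration on synthetic data:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
log_norm_labels = rng.normal(size=(200, 2))   # standardized log(Shape), log(Scale)
log_norm_labels[:, 0] *= -1                   # the sign flip applied to the Shape column

pca = PCA(n_components=2)                     # as many components as output columns
pcs = pca.fit_transform(log_norm_labels)      # one regression target per component

# With no components discarded, the PCA round trip is exact (up to float error).
assert np.allclose(pca.inverse_transform(pcs), log_norm_labels)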
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.predict_size_distribution_models
def predict_size_distribution_models(self, model_names, input_columns, metadata_cols, data_mode="forecast", location=6, calibrate=False): """ Make predictions using fitted size distribution models. Args: model_names: Name of the models for predictions input_columns: Data columns used for input into ML models metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Set of data used as input for prediction models location: Value of fixed location parameter calibrate: Whether or not to apply calibration model Returns: Predictions in dictionary of data frames grouped by group type """ groups = self.size_distribution_models.keys() predictions = {} for group in groups: group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group] predictions[group] = group_data[metadata_cols] if group_data.shape[0] > 0: log_mean = self.size_distribution_models[group]["lognorm"]["mean"] log_sd = self.size_distribution_models[group]["lognorm"]["sd"] for m, model_name in enumerate(model_names): multi_predictions = self.size_distribution_models[group]["multi"][model_name].predict( group_data[input_columns]) if calibrate: multi_predictions[:, 0] = self.size_distribution_models[group]["calshape"][model_name].predict( multi_predictions[:, 0:1]) multi_predictions[:, 1] = self.size_distribution_models[group]["calscale"][model_name].predict( multi_predictions[:, 1:]) multi_predictions = np.exp(multi_predictions * log_sd + log_mean) if multi_predictions.shape[1] == 2: multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3)) multi_predictions_temp[:, 0] = multi_predictions[:, 0] multi_predictions_temp[:, 1] = location multi_predictions_temp[:, 2] = multi_predictions[:, 1] multi_predictions = multi_predictions_temp for p, pred_col in enumerate(["shape", "location", "scale"]): predictions[group][model_name].loc[:, model_name.replace(" ", "-") + "_" + pred_col] = \ multi_predictions[:, p] return predictions
python
def predict_size_distribution_models(self, model_names, input_columns, metadata_cols, data_mode="forecast", location=6, calibrate=False): """ Make predictions using fitted size distribution models. Args: model_names: Name of the models for predictions input_columns: Data columns used for input into ML models metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Set of data used as input for prediction models location: Value of fixed location parameter calibrate: Whether or not to apply calibration model Returns: Predictions in dictionary of data frames grouped by group type """ groups = self.size_distribution_models.keys() predictions = {} for group in groups: group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group] predictions[group] = group_data[metadata_cols] if group_data.shape[0] > 0: log_mean = self.size_distribution_models[group]["lognorm"]["mean"] log_sd = self.size_distribution_models[group]["lognorm"]["sd"] for m, model_name in enumerate(model_names): multi_predictions = self.size_distribution_models[group]["multi"][model_name].predict( group_data[input_columns]) if calibrate: multi_predictions[:, 0] = self.size_distribution_models[group]["calshape"][model_name].predict( multi_predictions[:, 0:1]) multi_predictions[:, 1] = self.size_distribution_models[group]["calscale"][model_name].predict( multi_predictions[:, 1:]) multi_predictions = np.exp(multi_predictions * log_sd + log_mean) if multi_predictions.shape[1] == 2: multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3)) multi_predictions_temp[:, 0] = multi_predictions[:, 0] multi_predictions_temp[:, 1] = location multi_predictions_temp[:, 2] = multi_predictions[:, 1] multi_predictions = multi_predictions_temp for p, pred_col in enumerate(["shape", "location", "scale"]): predictions[group][model_name].loc[:, model_name.replace(" ", "-") + "_" + pred_col] = \ multi_predictions[:, p] return predictions
[ "def", "predict_size_distribution_models", "(", "self", ",", "model_names", ",", "input_columns", ",", "metadata_cols", ",", "data_mode", "=", "\"forecast\"", ",", "location", "=", "6", ",", "calibrate", "=", "False", ")", ":", "groups", "=", "self", ".", "size_distribution_models", ".", "keys", "(", ")", "predictions", "=", "{", "}", "for", "group", "in", "groups", ":", "group_data", "=", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "predictions", "[", "group", "]", "=", "group_data", "[", "metadata_cols", "]", "if", "group_data", ".", "shape", "[", "0", "]", ">", "0", ":", "log_mean", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"mean\"", "]", "log_sd", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"sd\"", "]", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "multi_predictions", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"multi\"", "]", "[", "model_name", "]", ".", "predict", "(", "group_data", "[", "input_columns", "]", ")", "if", "calibrate", ":", "multi_predictions", "[", ":", ",", "0", "]", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calshape\"", "]", "[", "model_name", "]", ".", "predict", "(", "multi_predictions", "[", ":", ",", "0", ":", "1", "]", ")", "multi_predictions", "[", ":", ",", "1", "]", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"calscale\"", "]", "[", "model_name", "]", ".", "predict", "(", "multi_predictions", "[", ":", ",", "1", ":", "]", ")", "multi_predictions", "=", "np", ".", "exp", "(", "multi_predictions", "*", "log_sd", "+", "log_mean", ")", "if", "multi_predictions", ".", "shape", "[", "1", "]", "==", "2", ":", "multi_predictions_temp", "=", "np", ".", "zeros", "(", "(", "multi_predictions", ".", "shape", "[", "0", "]", ",", "3", ")", ")", "multi_predictions_temp", "[", ":", ",", "0", "]", "=", "multi_predictions", "[", ":", ",", "0", "]", "multi_predictions_temp", "[", ":", ",", "1", "]", "=", "location", "multi_predictions_temp", "[", ":", ",", "2", "]", "=", "multi_predictions", "[", ":", ",", "1", "]", "multi_predictions", "=", "multi_predictions_temp", "for", "p", ",", "pred_col", "in", "enumerate", "(", "[", "\"shape\"", ",", "\"location\"", ",", "\"scale\"", "]", ")", ":", "predictions", "[", "group", "]", "[", "model_name", "]", ".", "loc", "[", ":", ",", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "+", "\"_\"", "+", "pred_col", "]", "=", "multi_predictions", "[", ":", ",", "p", "]", "return", "predictions" ]
Make predictions using fitted size distribution models. Args: model_names: Name of the models for predictions input_columns: Data columns used for input into ML models metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Set of data used as input for prediction models location: Value of fixed location parameter calibrate: Whether or not to apply calibration model Returns: Predictions in dictionary of data frames grouped by group type
[ "Make", "predictions", "using", "fitted", "size", "distribution", "models", ".", "Args", ":", "model_names", ":", "Name", "of", "the", "models", "for", "predictions", "input_columns", ":", "Data", "columns", "used", "for", "input", "into", "ML", "models", "metadata_cols", ":", "Columns", "from", "input", "data", "that", "should", "be", "included", "in", "the", "data", "frame", "with", "the", "predictions", ".", "data_mode", ":", "Set", "of", "data", "used", "as", "input", "for", "prediction", "models", "location", ":", "Value", "of", "fixed", "location", "parameter", "calibrate", ":", "Whether", "or", "not", "to", "apply", "calibration", "model", "Returns", ":", "Predictions", "in", "dictionary", "of", "data", "frames", "grouped", "by", "group", "type" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L470-L510
train
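Prediction inverts the training transform: multiply by the stored log standard deviation, add the log mean, exponentiate, and, when the model only predicts shape and scale, splice a fixed location parameter into the middle column. A sketch with made-up numbers (the training statistics below are placeholders):

import numpy as np

rng = np.random.RandomState(0)
multi_predictions = rng.normal(size=(5, 2))   # standardized log(shape), log(scale)
log_mean = np.array([0.5, 1.0])               # placeholder training statistics
log_sd = np.array([0.2, 0.3])
location = 6                                  # fixed location parameter

# Undo the standardization, then the log transform.
multi_predictions = np.exp(multi_predictions * log_sd + log_mean)

# Splice the fixed location between the predicted shape and scale.
out = np.zeros((multi_predictions.shape[0], 3))
out[:, 0] = multi_predictions[:, 0]           # shape
out[:, 1] = location                          # location
out[:, 2] = multi_predictions[:, 1]           # scale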
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.predict_size_distribution_component_models
def predict_size_distribution_component_models(self, model_names, input_columns, output_columns, metadata_cols, data_mode="forecast", location=6): """ Make predictions using fitted size distribution models. Args: model_names: Name of the models for predictions input_columns: Data columns used for input into ML models output_columns: Names of output columns metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Set of data used as input for prediction models location: Value of fixed location parameter Returns: Predictions in dictionary of data frames grouped by group type """ groups = self.size_distribution_models.keys() predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols]) for group in groups: group_idxs = self.data[data_mode]["combo"][self.group_col] == group group_count = np.count_nonzero(group_idxs) print(self.size_distribution_models[group]) if group_count > 0: log_mean = self.size_distribution_models[group]["lognorm"]["mean"] log_sd = self.size_distribution_models[group]["lognorm"]["sd"] for m, model_name in enumerate(model_names): raw_preds = np.zeros((group_count, len(output_columns))) for c in range(len(output_columns)): raw_preds[:, c] = self.size_distribution_models[group][ "pc_{0:d}".format(c)][model_name].predict(self.data[data_mode]["combo"].loc[group_idxs, input_columns]) log_norm_preds = self.size_distribution_models[group]["lognorm"]["pca"].inverse_transform(raw_preds) log_norm_preds[:, 0] *= -1 multi_predictions = np.exp(log_norm_preds * log_sd + log_mean) if multi_predictions.shape[1] == 2: multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3)) multi_predictions_temp[:, 0] = multi_predictions[:, 0] multi_predictions_temp[:, 1] = location multi_predictions_temp[:, 2] = multi_predictions[:, 1] multi_predictions = multi_predictions_temp for p, pred_col in enumerate(["shape", "location", "scale"]): predictions.loc[group_idxs, model_name.replace(" ", "-") + "_" + pred_col] = \ multi_predictions[:, p] return predictions
python
def predict_size_distribution_component_models(self, model_names, input_columns, output_columns, metadata_cols, data_mode="forecast", location=6): """ Make predictions using fitted size distribution models. Args: model_names: Name of the models for predictions input_columns: Data columns used for input into ML models output_columns: Names of output columns metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Set of data used as input for prediction models location: Value of fixed location parameter Returns: Predictions in dictionary of data frames grouped by group type """ groups = self.size_distribution_models.keys() predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols]) for group in groups: group_idxs = self.data[data_mode]["combo"][self.group_col] == group group_count = np.count_nonzero(group_idxs) print(self.size_distribution_models[group]) if group_count > 0: log_mean = self.size_distribution_models[group]["lognorm"]["mean"] log_sd = self.size_distribution_models[group]["lognorm"]["sd"] for m, model_name in enumerate(model_names): raw_preds = np.zeros((group_count, len(output_columns))) for c in range(len(output_columns)): raw_preds[:, c] = self.size_distribution_models[group][ "pc_{0:d}".format(c)][model_name].predict(self.data[data_mode]["combo"].loc[group_idxs, input_columns]) log_norm_preds = self.size_distribution_models[group]["lognorm"]["pca"].inverse_transform(raw_preds) log_norm_preds[:, 0] *= -1 multi_predictions = np.exp(log_norm_preds * log_sd + log_mean) if multi_predictions.shape[1] == 2: multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3)) multi_predictions_temp[:, 0] = multi_predictions[:, 0] multi_predictions_temp[:, 1] = location multi_predictions_temp[:, 2] = multi_predictions[:, 1] multi_predictions = multi_predictions_temp for p, pred_col in enumerate(["shape", "location", "scale"]): predictions.loc[group_idxs, model_name.replace(" ", "-") + "_" + pred_col] = \ multi_predictions[:, p] return predictions
[ "def", "predict_size_distribution_component_models", "(", "self", ",", "model_names", ",", "input_columns", ",", "output_columns", ",", "metadata_cols", ",", "data_mode", "=", "\"forecast\"", ",", "location", "=", "6", ")", ":", "groups", "=", "self", ".", "size_distribution_models", ".", "keys", "(", ")", "predictions", "=", "pd", ".", "DataFrame", "(", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", "[", "metadata_cols", "]", ")", "for", "group", "in", "groups", ":", "group_idxs", "=", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "group_count", "=", "np", ".", "count_nonzero", "(", "group_idxs", ")", "print", "(", "self", ".", "size_distribution_models", "[", "group", "]", ")", "if", "group_count", ">", "0", ":", "log_mean", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"mean\"", "]", "log_sd", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"sd\"", "]", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "raw_preds", "=", "np", ".", "zeros", "(", "(", "group_count", ",", "len", "(", "output_columns", ")", ")", ")", "for", "c", "in", "range", "(", "len", "(", "output_columns", ")", ")", ":", "raw_preds", "[", ":", ",", "c", "]", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"pc_{0:d}\"", ".", "format", "(", "c", ")", "]", "[", "model_name", "]", ".", "predict", "(", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", ".", "loc", "[", "group_idxs", ",", "input_columns", "]", ")", "log_norm_preds", "=", "self", ".", "size_distribution_models", "[", "group", "]", "[", "\"lognorm\"", "]", "[", "\"pca\"", "]", ".", "inverse_transform", "(", "raw_preds", ")", "log_norm_preds", "[", ":", ",", "0", "]", "*=", "-", "1", "multi_predictions", "=", "np", ".", "exp", "(", "log_norm_preds", "*", "log_sd", "+", "log_mean", ")", "if", "multi_predictions", ".", "shape", "[", "1", "]", "==", "2", ":", "multi_predictions_temp", "=", "np", ".", "zeros", "(", "(", "multi_predictions", ".", "shape", "[", "0", "]", ",", "3", ")", ")", "multi_predictions_temp", "[", ":", ",", "0", "]", "=", "multi_predictions", "[", ":", ",", "0", "]", "multi_predictions_temp", "[", ":", ",", "1", "]", "=", "location", "multi_predictions_temp", "[", ":", ",", "2", "]", "=", "multi_predictions", "[", ":", ",", "1", "]", "multi_predictions", "=", "multi_predictions_temp", "for", "p", ",", "pred_col", "in", "enumerate", "(", "[", "\"shape\"", ",", "\"location\"", ",", "\"scale\"", "]", ")", ":", "predictions", ".", "loc", "[", "group_idxs", ",", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "+", "\"_\"", "+", "pred_col", "]", "=", "multi_predictions", "[", ":", ",", "p", "]", "return", "predictions" ]
Make predictions using fitted size distribution models. Args: model_names: Name of the models for predictions input_columns: Data columns used for input into ML models output_columns: Names of output columns metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Set of data used as input for prediction models location: Value of fixed location parameter Returns: Predictions in dictionary of data frames grouped by group type
[ "Make", "predictions", "using", "fitted", "size", "distribution", "models", ".", "Args", ":", "model_names", ":", "Name", "of", "the", "models", "for", "predictions", "input_columns", ":", "Data", "columns", "used", "for", "input", "into", "ML", "models", "output_columns", ":", "Names", "of", "output", "columns", "metadata_cols", ":", "Columns", "from", "input", "data", "that", "should", "be", "included", "in", "the", "data", "frame", "with", "the", "predictions", ".", "data_mode", ":", "Set", "of", "data", "used", "as", "input", "for", "prediction", "models", "location", ":", "Value", "of", "fixed", "location", "parameter", "Returns", ":", "Predictions", "in", "dictionary", "of", "data", "frames", "grouped", "by", "group", "type" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L512-L553
train
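The component-model prediction mirrors its fit in reverse: each per-component regressor fills one column of the raw prediction matrix, inverse_transform maps the components back to standardized log space, the Shape sign flip is undone, and exponentiation recovers the distribution parameters. A compact sketch on synthetic data (the log mean/sd values are placeholders, not repository values):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 4))                 # synthetic inputs
log_norm_labels = rng.normal(size=(100, 2))   # standardized log targets (Shape flipped)

pca = PCA(n_components=2).fit(log_norm_labels)
pc_targets = pca.transform(log_norm_labels)
pc_models = [LinearRegression().fit(X, pc_targets[:, c]) for c in range(2)]

# Prediction: one column per component, then invert each step of the pipeline.
raw_preds = np.column_stack([m.predict(X) for m in pc_models])
log_norm_preds = pca.inverse_transform(raw_preds)
log_norm_preds[:, 0] *= -1                    # undo the Shape sign flip
log_mean, log_sd = np.array([0.5, 1.0]), np.array([0.2, 0.3])   # placeholders
params = np.exp(log_norm_preds * log_sd + log_mean)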
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.fit_size_models
def fit_size_models(self, model_names, model_objs, input_columns, output_column="Hail_Size", output_start=5, output_step=5, output_stop=100): """ Fit size models to produce discrete pdfs of forecast hail sizes. Args: model_names: List of model names model_objs: List of model objects input_columns: List of input variables output_column: Output variable name output_start: Hail size bin start output_step: hail size bin step output_stop: hail size bin stop """ print("Fitting size models") groups = self.data["train"]["member"][self.group_col].unique() output_start = int(output_start) output_step = int(output_step) output_stop = int(output_stop) for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data.dropna(inplace=True) group_data = group_data[group_data[output_column] >= output_start] output_data = group_data[output_column].values.astype(int) output_data[output_data > output_stop] = output_stop discrete_data = ((output_data - output_start) // output_step) * output_step + output_start self.size_models[group] = {} self.size_models[group]["outputvalues"] = np.arange(output_start, output_stop + output_step, output_step, dtype=int) for m, model_name in enumerate(model_names): print("{0} {1}".format(group, model_name)) self.size_models[group][model_name] = deepcopy(model_objs[m]) self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)
python
def fit_size_models(self, model_names, model_objs, input_columns, output_column="Hail_Size", output_start=5, output_step=5, output_stop=100): """ Fit size models to produce discrete pdfs of forecast hail sizes. Args: model_names: List of model names model_objs: List of model objects input_columns: List of input variables output_column: Output variable name output_start: Hail size bin start output_step: hail size bin step output_stop: hail size bin stop """ print("Fitting size models") groups = self.data["train"]["member"][self.group_col].unique() output_start = int(output_start) output_step = int(output_step) output_stop = int(output_stop) for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data.dropna(inplace=True) group_data = group_data[group_data[output_column] >= output_start] output_data = group_data[output_column].values.astype(int) output_data[output_data > output_stop] = output_stop discrete_data = ((output_data - output_start) // output_step) * output_step + output_start self.size_models[group] = {} self.size_models[group]["outputvalues"] = np.arange(output_start, output_stop + output_step, output_step, dtype=int) for m, model_name in enumerate(model_names): print("{0} {1}".format(group, model_name)) self.size_models[group][model_name] = deepcopy(model_objs[m]) self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)
[ "def", "fit_size_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_column", "=", "\"Hail_Size\"", ",", "output_start", "=", "5", ",", "output_step", "=", "5", ",", "output_stop", "=", "100", ")", ":", "print", "(", "\"Fitting size models\"", ")", "groups", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ".", "unique", "(", ")", "output_start", "=", "int", "(", "output_start", ")", "output_step", "=", "int", "(", "output_step", ")", "output_stop", "=", "int", "(", "output_stop", ")", "for", "group", "in", "groups", ":", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "group_data", ".", "dropna", "(", "inplace", "=", "True", ")", "group_data", "=", "group_data", "[", "group_data", "[", "output_column", "]", ">=", "output_start", "]", "output_data", "=", "group_data", "[", "output_column", "]", ".", "values", ".", "astype", "(", "int", ")", "output_data", "[", "output_data", ">", "output_stop", "]", "=", "output_stop", "discrete_data", "=", "(", "(", "output_data", "-", "output_start", ")", "//", "output_step", ")", "*", "output_step", "+", "output_start", "self", ".", "size_models", "[", "group", "]", "=", "{", "}", "self", ".", "size_models", "[", "group", "]", "[", "\"outputvalues\"", "]", "=", "np", ".", "arange", "(", "output_start", ",", "output_stop", "+", "output_step", ",", "output_step", ",", "dtype", "=", "int", ")", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "\"{0} {1}\"", ".", "format", "(", "group", ",", "model_name", ")", ")", "self", ".", "size_models", "[", "group", "]", "[", "model_name", "]", "=", "deepcopy", "(", "model_objs", "[", "m", "]", ")", "self", ".", "size_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "discrete_data", ")" ]
Fit size models to produce discrete pdfs of forecast hail sizes. Args: model_names: List of model names model_objs: List of model objects input_columns: List of input variables output_column: Output variable name output_start: Hail size bin start output_step: hail size bin step output_stop: hail size bin stop
[ "Fit", "size", "models", "to", "produce", "discrete", "pdfs", "of", "forecast", "hail", "sizes", ".", "Args", ":", "model_names", ":", "List", "of", "model", "names", "model_objs", ":", "List", "of", "model", "objects", "input_columns", ":", "List", "of", "input", "variables", "output_column", ":", "Output", "variable", "name", "output_start", ":", "Hail", "size", "bin", "start", "output_step", ":", "hail", "size", "bin", "step", "output_stop", ":", "hail", "size", "bin", "stop" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L555-L591
train
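The discretization in fit_size_models maps each hail size onto the lower edge of its bin via ((x - start) // step) * step + start, after capping values at output_stop. The arithmetic is worth checking on concrete numbers:

import numpy as np

output_start, output_step, output_stop = 5, 5, 100
sizes = np.array([5, 7, 12, 99, 140])

sizes[sizes > output_stop] = output_stop      # cap at the last bin
bins = ((sizes - output_start) // output_step) * output_step + output_start
print(bins)                                   # -> [  5   5  10  95 100]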
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.predict_size_models
def predict_size_models(self, model_names, input_columns, metadata_cols, data_mode="forecast"): """ Apply size models to forecast data. Args: model_names: input_columns: metadata_cols: data_mode: """ groups = self.size_models.keys() predictions = {} for group in groups: group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group] if group_data.shape[0] > 0: predictions[group] = {} output_values = self.size_models[group]["outputvalues"].astype(int) for m, model_name in enumerate(model_names): print("{0} {1}".format(group, model_name)) pred_col_names = [model_name.replace(" ", "-") + "_{0:02d}".format(p) for p in output_values] predictions[group][model_name] = group_data[metadata_cols] pred_vals = self.size_models[group][model_name].predict_proba(group_data[input_columns]) pred_classes = self.size_models[group][model_name].classes_ pred_pdf = np.zeros((pred_vals.shape[0], output_values.size)) for pcv, pc in enumerate(pred_classes): idx = np.where(output_values == pc)[0][0] pred_pdf[:, idx] = pred_vals[:, pcv] for pcn, pred_col_name in enumerate(pred_col_names): predictions[group][model_name].loc[:, pred_col_name] = pred_pdf[:, pcn] return predictions
python
def predict_size_models(self, model_names, input_columns, metadata_cols, data_mode="forecast"): """ Apply size models to forecast data. Args: model_names: input_columns: metadata_cols: data_mode: """ groups = self.size_models.keys() predictions = {} for group in groups: group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group] if group_data.shape[0] > 0: predictions[group] = {} output_values = self.size_models[group]["outputvalues"].astype(int) for m, model_name in enumerate(model_names): print("{0} {1}".format(group, model_name)) pred_col_names = [model_name.replace(" ", "-") + "_{0:02d}".format(p) for p in output_values] predictions[group][model_name] = group_data[metadata_cols] pred_vals = self.size_models[group][model_name].predict_proba(group_data[input_columns]) pred_classes = self.size_models[group][model_name].classes_ pred_pdf = np.zeros((pred_vals.shape[0], output_values.size)) for pcv, pc in enumerate(pred_classes): idx = np.where(output_values == pc)[0][0] pred_pdf[:, idx] = pred_vals[:, pcv] for pcn, pred_col_name in enumerate(pred_col_names): predictions[group][model_name].loc[:, pred_col_name] = pred_pdf[:, pcn] return predictions
[ "def", "predict_size_models", "(", "self", ",", "model_names", ",", "input_columns", ",", "metadata_cols", ",", "data_mode", "=", "\"forecast\"", ")", ":", "groups", "=", "self", ".", "size_models", ".", "keys", "(", ")", "predictions", "=", "{", "}", "for", "group", "in", "groups", ":", "group_data", "=", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "data_mode", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "if", "group_data", ".", "shape", "[", "0", "]", ">", "0", ":", "predictions", "[", "group", "]", "=", "{", "}", "output_values", "=", "self", ".", "size_models", "[", "group", "]", "[", "\"outputvalues\"", "]", ".", "astype", "(", "int", ")", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "\"{0} {1}\"", ".", "format", "(", "group", ",", "model_name", ")", ")", "pred_col_names", "=", "[", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "+", "\"_{0:02d}\"", ".", "format", "(", "p", ")", "for", "p", "in", "output_values", "]", "predictions", "[", "group", "]", "[", "model_name", "]", "=", "group_data", "[", "metadata_cols", "]", "pred_vals", "=", "self", ".", "size_models", "[", "group", "]", "[", "model_name", "]", ".", "predict_proba", "(", "group_data", "[", "input_columns", "]", ")", "pred_classes", "=", "self", ".", "size_models", "[", "group", "]", "[", "model_name", "]", ".", "classes_", "pred_pdf", "=", "np", ".", "zeros", "(", "(", "pred_vals", ".", "shape", "[", "0", "]", ",", "output_values", ".", "size", ")", ")", "for", "pcv", ",", "pc", "in", "enumerate", "(", "pred_classes", ")", ":", "idx", "=", "np", ".", "where", "(", "output_values", "==", "pc", ")", "[", "0", "]", "[", "0", "]", "pred_pdf", "[", ":", ",", "idx", "]", "=", "pred_vals", "[", ":", ",", "pcv", "]", "for", "pcn", ",", "pred_col_name", "in", "enumerate", "(", "pred_col_names", ")", ":", "predictions", "[", "group", "]", "[", "model_name", "]", ".", "loc", "[", ":", ",", "pred_col_name", "]", "=", "pred_pdf", "[", ":", ",", "pcn", "]", "return", "predictions" ]
Apply size models to forecast data. Args: model_names: List of names associated with each size model used for prediction. input_columns: List of columns in data used as input into the models. metadata_cols: Columns from input data that should be included in the data frame with the predictions. data_mode: Which data subset to pull from for the predictions, "forecast" by default. Returns: A dictionary of data frames containing discrete hail size probabilities and specified metadata.
[ "Apply", "size", "models", "to", "forecast", "data", ".", "Args", ":", "model_names", ":", "input_columns", ":", "metadata_cols", ":", "data_mode", ":" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L593-L624
train
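predict_size_models has to cope with classifiers that only expose the size bins they actually saw in training: predict_proba columns are ordered by classes_, so each column is scattered into its position on the full bin grid and unseen bins keep zero probability. A sketch of that alignment step:

import numpy as np

output_values = np.arange(5, 105, 5)          # the full bin grid (20 bins)
pred_classes = np.array([5, 10, 25])          # bins the classifier saw in training
pred_vals = np.array([[0.7, 0.2, 0.1],        # rows from predict_proba, columns
                      [0.1, 0.3, 0.6]])       # ordered to match pred_classes

pred_pdf = np.zeros((pred_vals.shape[0], output_values.size))
for pcv, pc in enumerate(pred_classes):
    idx = np.where(output_values == pc)[0][0]
    pred_pdf[:, idx] = pred_vals[:, pcv]      # unseen bins keep zero probability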
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.fit_track_models
def fit_track_models(self, model_names, model_objs, input_columns, output_columns, output_ranges, ): """ Fit machine learning models to predict track error offsets. model_names: model_objs: input_columns: output_columns: output_ranges: """ print("Fitting track models") groups = self.data["train"]["member"][self.group_col].unique() for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data.loc[group_data["Duration_Step"] == 1] for model_type, model_dict in self.track_models.items(): model_dict[group] = {} output_data = group_data[output_columns[model_type]].values.astype(int) output_data[output_data < output_ranges[model_type][0]] = output_ranges[model_type][0] output_data[output_data > output_ranges[model_type][1]] = output_ranges[model_type][1] discrete_data = (output_data - output_ranges[model_type][0]) // output_ranges[model_type][2] * \ output_ranges[model_type][2] + output_ranges[model_type][0] model_dict[group]["outputvalues"] = np.arange(output_ranges[model_type][0], output_ranges[model_type][1] + output_ranges[model_type][2], output_ranges[model_type][2]) for m, model_name in enumerate(model_names): print("{0} {1} {2}".format(group, model_type, model_name)) model_dict[group][model_name] = deepcopy(model_objs[m]) model_dict[group][model_name].fit(group_data[input_columns], discrete_data)
python
def fit_track_models(self, model_names, model_objs, input_columns, output_columns, output_ranges, ): """ Fit machine learning models to predict track error offsets. model_names: model_objs: input_columns: output_columns: output_ranges: """ print("Fitting track models") groups = self.data["train"]["member"][self.group_col].unique() for group in groups: group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group] group_data = group_data.dropna() group_data = group_data.loc[group_data["Duration_Step"] == 1] for model_type, model_dict in self.track_models.items(): model_dict[group] = {} output_data = group_data[output_columns[model_type]].values.astype(int) output_data[output_data < output_ranges[model_type][0]] = output_ranges[model_type][0] output_data[output_data > output_ranges[model_type][1]] = output_ranges[model_type][1] discrete_data = (output_data - output_ranges[model_type][0]) // output_ranges[model_type][2] * \ output_ranges[model_type][2] + output_ranges[model_type][0] model_dict[group]["outputvalues"] = np.arange(output_ranges[model_type][0], output_ranges[model_type][1] + output_ranges[model_type][2], output_ranges[model_type][2]) for m, model_name in enumerate(model_names): print("{0} {1} {2}".format(group, model_type, model_name)) model_dict[group][model_name] = deepcopy(model_objs[m]) model_dict[group][model_name].fit(group_data[input_columns], discrete_data)
[ "def", "fit_track_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_columns", ",", "output_ranges", ",", ")", ":", "print", "(", "\"Fitting track models\"", ")", "groups", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ".", "unique", "(", ")", "for", "group", "in", "groups", ":", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "loc", "[", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", "]", "group_data", "=", "group_data", ".", "dropna", "(", ")", "group_data", "=", "group_data", ".", "loc", "[", "group_data", "[", "\"Duration_Step\"", "]", "==", "1", "]", "for", "model_type", ",", "model_dict", "in", "self", ".", "track_models", ".", "items", "(", ")", ":", "model_dict", "[", "group", "]", "=", "{", "}", "output_data", "=", "group_data", "[", "output_columns", "[", "model_type", "]", "]", ".", "values", ".", "astype", "(", "int", ")", "output_data", "[", "output_data", "<", "output_ranges", "[", "model_type", "]", "[", "0", "]", "]", "=", "output_ranges", "[", "model_type", "]", "[", "0", "]", "output_data", "[", "output_data", ">", "output_ranges", "[", "model_type", "]", "[", "1", "]", "]", "=", "output_ranges", "[", "model_type", "]", "[", "1", "]", "discrete_data", "=", "(", "output_data", "-", "output_ranges", "[", "model_type", "]", "[", "0", "]", ")", "//", "output_ranges", "[", "model_type", "]", "[", "2", "]", "*", "output_ranges", "[", "model_type", "]", "[", "2", "]", "+", "output_ranges", "[", "model_type", "]", "[", "0", "]", "model_dict", "[", "group", "]", "[", "\"outputvalues\"", "]", "=", "np", ".", "arange", "(", "output_ranges", "[", "model_type", "]", "[", "0", "]", ",", "output_ranges", "[", "model_type", "]", "[", "1", "]", "+", "output_ranges", "[", "model_type", "]", "[", "2", "]", ",", "output_ranges", "[", "model_type", "]", "[", "2", "]", ")", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "\"{0} {1} {2}\"", ".", "format", "(", "group", ",", "model_type", ",", "model_name", ")", ")", "model_dict", "[", "group", "]", "[", "model_name", "]", "=", "deepcopy", "(", "model_objs", "[", "m", "]", ")", "model_dict", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "discrete_data", ")" ]
Fit machine learning models to predict track error offsets. Args: model_names: List of machine learning model names. model_objs: List of machine learning model objects. input_columns: List of input variables. output_columns: Dictionary mapping each track model type to its output column. output_ranges: Dictionary mapping each track model type to its (start, stop, step) bin range.
[ "Fit", "machine", "learning", "models", "to", "predict", "track", "error", "offsets", ".", "model_names", ":", "model_objs", ":", "input_columns", ":", "output_columns", ":", "output_ranges", ":" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L626-L661
train
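fit_track_models applies the same clip-and-discretize pattern to track error offsets, with output_ranges supplying a (min, max, step) triple per track model type. np.clip is a terser equivalent of the two comparison assignments in the code above; the triple below is a hypothetical example, not a value from the repository:

import numpy as np

output_range = (-90, 90, 15)                  # hypothetical (min, max, step) triple
errors = np.array([-120, -7, 3, 44, 200])

clipped = np.clip(errors, output_range[0], output_range[1])
discrete = ((clipped - output_range[0]) // output_range[2]) * output_range[2] \
           + output_range[0]                  # -> [-90 -15   0  30  90]
out_values = np.arange(output_range[0], output_range[1] + output_range[2],
                       output_range[2])       # bin labels for the classifier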
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.save_models
def save_models(self, model_path): """ Save machine learning models to pickle files. """ for group, condition_model_set in self.condition_models.items(): for model_name, model_obj in condition_model_set.items(): out_filename = model_path + \ "{0}_{1}_condition.pkl".format(group, model_name.replace(" ", "-")) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) for group, size_model_set in self.size_models.items(): for model_name, model_obj in size_model_set.items(): out_filename = model_path + \ "{0}_{1}_size.pkl".format(group, model_name.replace(" ", "-")) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) for group, dist_model_set in self.size_distribution_models.items(): for model_type, model_objs in dist_model_set.items(): for model_name, model_obj in model_objs.items(): out_filename = model_path + \ "{0}_{1}_{2}_sizedist.pkl".format(group, model_name.replace(" ", "-"), model_type) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) for model_type, track_type_models in self.track_models.items(): for group, track_model_set in track_type_models.items(): for model_name, model_obj in track_model_set.items(): out_filename = model_path + \ "{0}_{1}_{2}_track.pkl".format(group, model_name.replace(" ", "-"), model_type) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) return
python
def save_models(self, model_path): """ Save machine learning models to pickle files. """ for group, condition_model_set in self.condition_models.items(): for model_name, model_obj in condition_model_set.items(): out_filename = model_path + \ "{0}_{1}_condition.pkl".format(group, model_name.replace(" ", "-")) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) for group, size_model_set in self.size_models.items(): for model_name, model_obj in size_model_set.items(): out_filename = model_path + \ "{0}_{1}_size.pkl".format(group, model_name.replace(" ", "-")) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) for group, dist_model_set in self.size_distribution_models.items(): for model_type, model_objs in dist_model_set.items(): for model_name, model_obj in model_objs.items(): out_filename = model_path + \ "{0}_{1}_{2}_sizedist.pkl".format(group, model_name.replace(" ", "-"), model_type) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) for model_type, track_type_models in self.track_models.items(): for group, track_model_set in track_type_models.items(): for model_name, model_obj in track_model_set.items(): out_filename = model_path + \ "{0}_{1}_{2}_track.pkl".format(group, model_name.replace(" ", "-"), model_type) with open(out_filename, "wb") as pickle_file: pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL) return
[ "def", "save_models", "(", "self", ",", "model_path", ")", ":", "for", "group", ",", "condition_model_set", "in", "self", ".", "condition_models", ".", "items", "(", ")", ":", "for", "model_name", ",", "model_obj", "in", "condition_model_set", ".", "items", "(", ")", ":", "out_filename", "=", "model_path", "+", "\"{0}_{1}_condition.pkl\"", ".", "format", "(", "group", ",", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", ")", "with", "open", "(", "out_filename", ",", "\"wb\"", ")", "as", "pickle_file", ":", "pickle", ".", "dump", "(", "model_obj", ",", "pickle_file", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", "for", "group", ",", "size_model_set", "in", "self", ".", "size_models", ".", "items", "(", ")", ":", "for", "model_name", ",", "model_obj", "in", "size_model_set", ".", "items", "(", ")", ":", "out_filename", "=", "model_path", "+", "\"{0}_{1}_size.pkl\"", ".", "format", "(", "group", ",", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", ")", "with", "open", "(", "out_filename", ",", "\"wb\"", ")", "as", "pickle_file", ":", "pickle", ".", "dump", "(", "model_obj", ",", "pickle_file", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", "for", "group", ",", "dist_model_set", "in", "self", ".", "size_distribution_models", ".", "items", "(", ")", ":", "for", "model_type", ",", "model_objs", "in", "dist_model_set", ".", "items", "(", ")", ":", "for", "model_name", ",", "model_obj", "in", "model_objs", ".", "items", "(", ")", ":", "out_filename", "=", "model_path", "+", "\"{0}_{1}_{2}_sizedist.pkl\"", ".", "format", "(", "group", ",", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", ",", "model_type", ")", "with", "open", "(", "out_filename", ",", "\"wb\"", ")", "as", "pickle_file", ":", "pickle", ".", "dump", "(", "model_obj", ",", "pickle_file", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", "for", "model_type", ",", "track_type_models", "in", "self", ".", "track_models", ".", "items", "(", ")", ":", "for", "group", ",", "track_model_set", "in", "track_type_models", ".", "items", "(", ")", ":", "for", "model_name", ",", "model_obj", "in", "track_model_set", ".", "items", "(", ")", ":", "out_filename", "=", "model_path", "+", "\"{0}_{1}_{2}_track.pkl\"", ".", "format", "(", "group", ",", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", ",", "model_type", ")", "with", "open", "(", "out_filename", ",", "\"wb\"", ")", "as", "pickle_file", ":", "pickle", ".", "dump", "(", "model_obj", ",", "pickle_file", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", "return" ]
Save machine learning models to pickle files.
[ "Save", "machine", "learning", "models", "to", "pickle", "files", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L700-L745
train
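A quick illustration of the filename scheme save_models uses; the group and model names below are hypothetical, chosen only to show how a space in a model name becomes a hyphen on disk (which load_models later reverses):

# Hypothetical group/model names; only the formatting logic comes from save_models.
model_path = "./models/"
group = "mem1"
model_name = "Random Forest"
out_filename = model_path + "{0}_{1}_condition.pkl".format(group, model_name.replace(" ", "-"))
print(out_filename)  # ./models/mem1_Random-Forest_condition.pkl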
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.load_models
def load_models(self, model_path):
    """
    Load models from pickle files.
    """
    condition_model_files = sorted(glob(model_path + "*_condition.pkl"))
    if len(condition_model_files) > 0:
        for condition_model_file in condition_model_files:
            model_comps = condition_model_file.split("/")[-1][:-4].split("_")
            if model_comps[0] not in self.condition_models.keys():
                self.condition_models[model_comps[0]] = {}
            model_name = model_comps[1].replace("-", " ")
            with open(condition_model_file, "rb") as cmf:
                if "condition_threshold" in condition_model_file:
                    self.condition_models[model_comps[0]][
                        model_name + "_condition_threshold"] = pickle.load(cmf)
                else:
                    self.condition_models[model_comps[0]][model_name] = pickle.load(cmf)
    size_model_files = sorted(glob(model_path + "*_size.pkl"))
    if len(size_model_files) > 0:
        for size_model_file in size_model_files:
            model_comps = size_model_file.split("/")[-1][:-4].split("_")
            if model_comps[0] not in self.size_models.keys():
                self.size_models[model_comps[0]] = {}
            model_name = model_comps[1].replace("-", " ")
            with open(size_model_file, "rb") as smf:
                self.size_models[model_comps[0]][model_name] = pickle.load(smf)
    size_dist_model_files = sorted(glob(model_path + "*_sizedist.pkl"))
    if len(size_dist_model_files) > 0:
        for dist_model_file in size_dist_model_files:
            model_comps = dist_model_file.split("/")[-1][:-4].split("_")
            if model_comps[0] not in self.size_distribution_models.keys():
                self.size_distribution_models[model_comps[0]] = {}
            if "_".join(model_comps[2:-1]) not in self.size_distribution_models[model_comps[0]].keys():
                self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])] = {}
            model_name = model_comps[1].replace("-", " ")
            with open(dist_model_file, "rb") as dmf:
                self.size_distribution_models[model_comps[0]][
                    "_".join(model_comps[2:-1])][model_name] = pickle.load(dmf)
    track_model_files = sorted(glob(model_path + "*_track.pkl"))
    if len(track_model_files) > 0:
        for track_model_file in track_model_files:
            model_comps = track_model_file.split("/")[-1][:-4].split("_")
            group = model_comps[0]
            model_name = model_comps[1].replace("-", " ")
            model_type = model_comps[2]
            if model_type not in self.track_models.keys():
                self.track_models[model_type] = {}
            if group not in self.track_models[model_type].keys():
                self.track_models[model_type][group] = {}
            with open(track_model_file, "rb") as tmf:
                self.track_models[model_type][group][model_name] = pickle.load(tmf)
python
Load models from pickle files.
[ "Load", "models", "from", "pickle", "files", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L747-L799
train
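load_models inverts that naming convention by splitting on "/" and "_". A minimal sketch of the same parsing on a hypothetical filename; note that an os.path-based variant like this one also handles Windows path separators, which the raw split("/") in the method does not:

import os

dist_model_file = "./models/mem1_Random-Forest_normal_sizedist.pkl"  # hypothetical path
model_comps = os.path.splitext(os.path.basename(dist_model_file))[0].split("_")
group = model_comps[0]                          # "mem1"
model_name = model_comps[1].replace("-", " ")   # "Random Forest"
model_type = "_".join(model_comps[2:-1])        # "normal"
print(group, model_name, model_type)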
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.output_forecasts_json
def output_forecasts_json(self, forecasts, condition_model_names, size_model_names, dist_model_names,
                          track_model_names, json_data_path, out_path):
    """
    Output forecast values to geoJSON file format.

    :param forecasts: Nested dictionary of forecast DataFrames keyed by model type and group.
    :param condition_model_names: List of condition model names.
    :param size_model_names: List of size model names.
    :param dist_model_names: List of size distribution model names.
    :param track_model_names: List of track model names.
    :param json_data_path: Path to the directory containing the input geoJSON track files.
    :param out_path: Path to the directory where the output geoJSON files are written.
    :return:
    """
    total_tracks = self.data["forecast"]["total"]
    for r in np.arange(total_tracks.shape[0]):
        track_id = total_tracks.loc[r, "Track_ID"]
        print(track_id)
        track_num = track_id.split("_")[-1]
        ensemble_name = total_tracks.loc[r, "Ensemble_Name"]
        member = total_tracks.loc[r, "Ensemble_Member"]
        group = self.data["forecast"]["member"].loc[
            self.data["forecast"]["member"]["Ensemble_Member"] == member, self.group_col].values[0]
        run_date = track_id.split("_")[-4][:8]
        step_forecasts = {}
        for ml_model in condition_model_names:
            step_forecasts["condition_" + ml_model.replace(" ", "-")] = forecasts["condition"][group].loc[
                forecasts["condition"][group]["Track_ID"] == track_id, ml_model]
        for ml_model in size_model_names:
            step_forecasts["size_" + ml_model.replace(" ", "-")] = forecasts["size"][group][ml_model].loc[
                forecasts["size"][group][ml_model]["Track_ID"] == track_id]
        for ml_model in dist_model_names:
            step_forecasts["dist_" + ml_model.replace(" ", "-")] = forecasts["dist"][group][ml_model].loc[
                forecasts["dist"][group][ml_model]["Track_ID"] == track_id]
        for model_type in forecasts["track"].keys():
            for ml_model in track_model_names:
                mframe = forecasts["track"][model_type][group][ml_model]
                step_forecasts[model_type + "_" + ml_model.replace(" ", "-")] = mframe.loc[
                    mframe["Track_ID"] == track_id]
        json_file_name = "{0}_{1}_{2}_model_track_{3}.json".format(ensemble_name, run_date, member, track_num)
        full_json_path = json_data_path + "/".join([run_date, member]) + "/" + json_file_name
        # Open inside the try block: FileNotFoundError is raised by open(),
        # not by json.load(), so wrapping only the load would let a missing
        # track file crash the loop instead of being skipped.
        try:
            with open(full_json_path) as json_file_obj:
                track_obj = json.load(json_file_obj)
        except FileNotFoundError:
            print(full_json_path + " not found")
            continue
        for f, feature in enumerate(track_obj['features']):
            del feature['properties']['attributes']
            for model_name, fdata in step_forecasts.items():
                ml_model_name = model_name.split("_")[1]
                if "condition" in model_name:
                    feature['properties'][model_name] = fdata.values[f]
                else:
                    predcols = []
                    for col in fdata.columns:
                        if ml_model_name in col:
                            predcols.append(col)
                    feature['properties'][model_name] = fdata.loc[:, predcols].values[f].tolist()
        full_path = []
        for part in [run_date, member]:
            full_path.append(part)
            if not os.access(out_path + "/".join(full_path), os.R_OK):
                try:
                    os.mkdir(out_path + "/".join(full_path))
                except OSError:
                    print("directory already created")
        out_json_filename = out_path + "/".join(full_path) + "/" + json_file_name
        with open(out_json_filename, "w") as out_json_obj:
            json.dump(track_obj, out_json_obj, indent=1, sort_keys=True)
    return
python
Output forecast values to geoJSON file format. :param forecasts: Nested dictionary of forecast DataFrames keyed by model type and group. :param condition_model_names: List of condition model names. :param size_model_names: List of size model names. :param dist_model_names: List of size distribution model names. :param track_model_names: List of track model names. :param json_data_path: Path to the directory containing the input geoJSON track files. :param out_path: Path to the directory where the output geoJSON files are written. :return:
[ "Output", "forecast", "values", "to", "geoJSON", "file", "format", ".", ":", "param", "forecasts", ":", ":", "param", "condition_model_names", ":", ":", "param", "size_model_names", ":", ":", "param", "track_model_names", ":", ":", "param", "json_data_path", ":", ":", "param", "out_path", ":", ":", "return", ":" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L801-L877
train
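output_forecasts_json builds the run_date/member directory levels one at a time with os.access and os.mkdir. A hedged alternative sketch (not the library's code): os.makedirs with exist_ok=True creates the whole chain in one call and avoids the create-vs-exists race, at the cost of not printing when a directory already exists.

import os

out_path = "./forecast_json/"   # hypothetical output root
run_date, member = "20150601", "mem1"
# Creates out_path/20150601/mem1 and any missing parents in one call.
os.makedirs(os.path.join(out_path, run_date, member), exist_ok=True)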
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
TrackModeler.output_forecasts_csv
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
    """
    Output hail forecast values to csv files by run date and ensemble member.

    Args:
        forecasts: Dictionary of forecast DataFrames with "condition" and "dist" keys.
        mode: Subset of the data to output, e.g. "forecast".
        csv_path: Path to the directory where the csv files are written.
        run_date_format: strftime format used for the run date in each output filename.

    Returns:

    """
    merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"],
                                on=["Step_ID", "Track_ID", "Ensemble_Member", "Forecast_Hour"])
    all_members = self.data[mode]["combo"]["Ensemble_Member"]
    members = np.unique(all_members)
    all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"])
    run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
    print(run_dates)
    for member in members:
        for run_date in run_dates:
            mem_run_index = (all_run_dates == run_date) & (all_members == member)
            member_forecast = merged_forecasts.loc[mem_run_index]
            member_forecast.to_csv(join(csv_path,
                                        "hail_forecasts_{0}_{1}_{2}.csv".format(self.ensemble_name,
                                                                                member,
                                                                                run_date.strftime(run_date_format))))
    return
python
Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: Dictionary of forecast DataFrames with "condition" and "dist" keys. mode: Subset of the data to output, e.g. "forecast". csv_path: Path to the directory where the csv files are written. run_date_format: strftime format used for the run date in each output filename. Returns:
[ "Output", "hail", "forecast", "values", "to", "csv", "files", "by", "run", "date", "and", "ensemble", "member", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L879-L905
train
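The csv filenames embed the run date through strftime, so run_date_format controls how much of the date appears. A small self-contained sketch with a hypothetical ensemble name and member:

import pandas as pd

run_date = pd.Timestamp("2015-06-01 00:00")
name = "hail_forecasts_{0}_{1}_{2}.csv".format("NCAR", "mem1",
                                               run_date.strftime("%Y%m%d-%H%M"))
print(name)  # hail_forecasts_NCAR_mem1_20150601-0000.csv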
base4sistemas/satcfe
satcfe/base.py
BibliotecaSAT._carregar
def _carregar(self):
    """Loads (or reloads) the SAT library. If the calling convention has
    not been defined yet, it will be determined from the extension of the
    library file.

    :raises ValueError: If the calling convention cannot be determined or
        is not a valid value.
    """
    if self._convencao is None:
        if self._caminho.endswith(('.DLL', '.dll')):
            self._convencao = constantes.WINDOWS_STDCALL
        else:
            self._convencao = constantes.STANDARD_C
    if self._convencao == constantes.STANDARD_C:
        loader = ctypes.CDLL
    elif self._convencao == constantes.WINDOWS_STDCALL:
        loader = ctypes.WinDLL
    else:
        raise ValueError('Convencao de chamada desconhecida: {!r}'.format(
                self._convencao))
    self._libsat = loader(self._caminho)
python
Loads (or reloads) the SAT library. If the calling convention has not been defined yet, it will be determined from the extension of the library file. :raises ValueError: If the calling convention cannot be determined or is not a valid value.
[ "Carrega", "(", "ou", "recarrega", ")", "a", "biblioteca", "SAT", ".", "Se", "a", "convenção", "de", "chamada", "ainda", "não", "tiver", "sido", "definida", "será", "determinada", "pela", "extensão", "do", "arquivo", "da", "biblioteca", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L81-L105
train
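The extension-based dispatch in _carregar can be reproduced outside the class. This standalone sketch mirrors the logic (constants replaced by the ctypes loaders themselves; no library is actually loaded). The getattr fallback is needed because ctypes only defines WinDLL on Windows:

import ctypes

def escolher_loader(caminho):
    # Mirrors _carregar: a .DLL/.dll extension implies the Windows stdcall
    # convention (ctypes.WinDLL); anything else uses cdecl (ctypes.CDLL).
    if caminho.endswith(('.DLL', '.dll')):
        return getattr(ctypes, 'WinDLL', ctypes.CDLL)
    return ctypes.CDLL

print(escolher_loader('sat.dll').__name__)    # WinDLL on Windows, CDLL elsewhere
print(escolher_loader('libsat.so').__name__)  # CDLL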
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.ativar_sat
def ativar_sat(self, tipo_certificado, cnpj, codigo_uf):
    """``AtivarSAT`` function as per ER SAT, item 6.1.1. Activation of the
    SAT equipment. Depending on the certificate type, the activation
    procedure is completed by sending the certificate issued by ICP-Brasil
    (:meth:`comunicar_certificado_icpbrasil`).

    :param int tipo_certificado: Must be one of the values
        :attr:`satcomum.constantes.CERTIFICADO_ACSAT_SEFAZ`,
        :attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL` or
        :attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL_RENOVACAO`, but no
        validation is performed before the activation function is actually
        invoked.

    :param str cnpj: CNPJ number of the taxpayer's establishment,
        containing digits only. No validation of the CNPJ number is
        performed before the activation function is actually invoked.

    :param int codigo_uf: Code of the federative unit where the SAT
        equipment will be activated (e.g. ``35`` for the State of São
        Paulo). No validation of the UF code is performed before the
        activation function is actually invoked.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    return self.invocar__AtivarSAT(
            self.gerar_numero_sessao(), tipo_certificado,
            self._codigo_ativacao, cnpj, codigo_uf)
python
``AtivarSAT`` function as per ER SAT, item 6.1.1. Activation of the SAT equipment. Depending on the certificate type, the activation procedure is completed by sending the certificate issued by ICP-Brasil (:meth:`comunicar_certificado_icpbrasil`). :param int tipo_certificado: Must be one of the values :attr:`satcomum.constantes.CERTIFICADO_ACSAT_SEFAZ`, :attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL` or :attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL_RENOVACAO`, but no validation is performed before the activation function is actually invoked. :param str cnpj: CNPJ number of the taxpayer's establishment, containing digits only. No validation of the CNPJ number is performed before the activation function is actually invoked. :param int codigo_uf: Code of the federative unit where the SAT equipment will be activated (e.g. ``35`` for the State of São Paulo). No validation of the UF code is performed before the activation function is actually invoked. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "AtivarSAT", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "1", ".", "Ativação", "do", "equipamento", "SAT", ".", "Dependendo", "do", "tipo", "do", "certificado", "o", "procedimento", "de", "ativação", "é", "complementado", "enviando", "-", "se", "o", "certificado", "emitido", "pela", "ICP", "-", "Brasil", "(", ":", "meth", ":", "comunicar_certificado_icpbrasil", ")", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L262-L290
train
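A hedged usage sketch for ativar_sat: it assumes satcomum is installed and that `sat` is an already-constructed FuncoesSAT subclass bound to real equipment, and the CNPJ below is a placeholder, so treat it as illustrative only.

from satcomum import constantes

def ativar(sat, cnpj='11111111111111', codigo_uf=35):
    # CERTIFICADO_ACSAT_SEFAZ is one of the three certificate types the
    # docstring lists; the CNPJ here is a placeholder, not a valid number.
    return sat.ativar_sat(constantes.CERTIFICADO_ACSAT_SEFAZ, cnpj, codigo_uf)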
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.comunicar_certificado_icpbrasil
def comunicar_certificado_icpbrasil(self, certificado):
    """``ComunicarCertificadoICPBRASIL`` function as per ER SAT, item
    6.1.2. Sends the certificate issued by ICP-Brasil.

    :param str certificado: Contents of the digital certificate issued by
        the ICP-Brasil certificate authority.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    return self.invocar__ComunicarCertificadoICPBRASIL(
            self.gerar_numero_sessao(), self._codigo_ativacao, certificado)
python
``ComunicarCertificadoICPBRASIL`` function as per ER SAT, item 6.1.2. Sends the certificate issued by ICP-Brasil. :param str certificado: Contents of the digital certificate issued by the ICP-Brasil certificate authority. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "ComunicarCertificadoICPBRASIL", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "2", ".", "Envio", "do", "certificado", "criado", "pela", "ICP", "-", "Brasil", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L293-L304
train
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.enviar_dados_venda
def enviar_dados_venda(self, dados_venda):
    """``EnviarDadosVenda`` function as per ER SAT, item 6.1.3. Sends the
    sale CF-e to the SAT equipment, which will forward it to SEFAZ for
    authorization.

    :param dados_venda: An instance of :class:`~satcfe.entidades.CFeVenda`
        or a string containing the XML of the sale CF-e.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    cfe_venda = dados_venda \
            if isinstance(dados_venda, basestring) \
            else dados_venda.documento()
    return self.invocar__EnviarDadosVenda(
            self.gerar_numero_sessao(), self._codigo_ativacao, cfe_venda)
python
``EnviarDadosVenda`` function as per ER SAT, item 6.1.3. Sends the sale CF-e to the SAT equipment, which will forward it to SEFAZ for authorization. :param dados_venda: An instance of :class:`~satcfe.entidades.CFeVenda` or a string containing the XML of the sale CF-e. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "EnviarDadosVenda", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "3", ".", "Envia", "o", "CF", "-", "e", "de", "venda", "para", "o", "equipamento", "SAT", "que", "o", "enviará", "para", "autorização", "pela", "SEFAZ", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L307-L323
train
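enviar_dados_venda (like the other XML-accepting wrappers here) dispatches on `basestring`, a Python 2 name. A hedged compatibility sketch for running the same isinstance test under Python 3; the helper name is hypothetical:

try:
    basestring  # defined on Python 2
except NameError:
    basestring = str  # Python 3: plain str covers the same use

def como_xml(dados):
    # Mirrors the dispatch in enviar_dados_venda: strings pass through,
    # entity objects are serialized via their documento() method.
    return dados if isinstance(dados, basestring) else dados.documento()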
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.cancelar_ultima_venda
def cancelar_ultima_venda(self, chave_cfe, dados_cancelamento):
    """``CancelarUltimaVenda`` function as per ER SAT, item 6.1.4. Sends
    the cancellation CF-e to the SAT equipment, which will forward it to
    SEFAZ for authorization and cancellation of the CF-e.

    :param chave_cfe: String containing the key of the CF-e to be
        cancelled, prefixed with the literal ``CFe``.

    :param dados_cancelamento: An instance of
        :class:`~satcfe.entidades.CFeCancelamento` or a string containing
        the XML of the cancellation CF-e.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    cfe_canc = dados_cancelamento \
            if isinstance(dados_cancelamento, basestring) \
            else dados_cancelamento.documento()
    return self.invocar__CancelarUltimaVenda(
            self.gerar_numero_sessao(), self._codigo_ativacao,
            chave_cfe, cfe_canc)
python
``CancelarUltimaVenda`` function as per ER SAT, item 6.1.4. Sends the cancellation CF-e to the SAT equipment, which will forward it to SEFAZ for authorization and cancellation of the CF-e. :param chave_cfe: String containing the key of the CF-e to be cancelled, prefixed with the literal ``CFe``. :param dados_cancelamento: An instance of :class:`~satcfe.entidades.CFeCancelamento` or a string containing the XML of the cancellation CF-e. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "CancelarUltimaVenda", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "4", ".", "Envia", "o", "CF", "-", "e", "de", "cancelamento", "para", "o", "equipamento", "SAT", "que", "o", "enviará", "para", "autorização", "e", "cancelamento", "do", "CF", "-", "e", "pela", "SEFAZ", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L326-L347
train
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.consultar_numero_sessao
def consultar_numero_sessao(self, numero_sessao):
    """``ConsultarNumeroSessao`` function as per ER SAT, item 6.1.8.
    Queries the SAT equipment for a specific session number.

    :param int numero_sessao: Number of the session to query.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    return self.invocar__ConsultarNumeroSessao(self.gerar_numero_sessao(),
            self._codigo_ativacao, numero_sessao)
python
``ConsultarNumeroSessao`` function as per ER SAT, item 6.1.8. Queries the SAT equipment for a specific session number. :param int numero_sessao: Number of the session to query. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "ConsultarNumeroSessao", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "8", ".", "Consulta", "o", "equipamento", "SAT", "por", "um", "número", "de", "sessão", "específico", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L389-L399
train
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.configurar_interface_de_rede
def configurar_interface_de_rede(self, configuracao):
    """``ConfigurarInterfaceDeRede`` function as per ER SAT, item 6.1.9.
    Configuration of the SAT equipment's communication interface.

    :param configuracao: An instance of
        :class:`~satcfe.rede.ConfiguracaoRede` or a string containing the
        XML with the network settings.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    conf_xml = configuracao \
            if isinstance(configuracao, basestring) \
            else configuracao.documento()
    return self.invocar__ConfigurarInterfaceDeRede(
            self.gerar_numero_sessao(), self._codigo_ativacao, conf_xml)
python
``ConfigurarInterfaceDeRede`` function as per ER SAT, item 6.1.9. Configuration of the SAT equipment's communication interface. :param configuracao: An instance of :class:`~satcfe.rede.ConfiguracaoRede` or a string containing the XML with the network settings. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "ConfigurarInterfaceDeRede", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "9", ".", "Configurção", "da", "interface", "de", "comunicação", "do", "equipamento", "SAT", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L402-L417
train
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.associar_assinatura
def associar_assinatura(self, sequencia_cnpj, assinatura_ac):
    """``AssociarAssinatura`` function as per ER SAT, item 6.1.10.
    Association of the commercial application's signature.

    :param sequencia_cnpj: 28-digit string composed of the CNPJ of the
        commercial application developer and the CNPJ of the taxpayer's
        commercial establishment, as per ER SAT, item 2.3.1.

    :param assinatura_ac: String containing the digital signature of the
        ``sequencia_cnpj`` parameter, encoded in base64.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string
    """
    return self.invocar__AssociarAssinatura(
            self.gerar_numero_sessao(), self._codigo_ativacao,
            sequencia_cnpj, assinatura_ac)
python
``AssociarAssinatura`` function as per ER SAT, item 6.1.10. Association of the commercial application's signature. :param sequencia_cnpj: 28-digit string composed of the CNPJ of the commercial application developer and the CNPJ of the taxpayer's commercial establishment, as per ER SAT, item 2.3.1. :param assinatura_ac: String containing the digital signature of the ``sequencia_cnpj`` parameter, encoded in base64. :return: Returns the SAT function response *verbatim*. :rtype: string
[ "Função", "AssociarAssinatura", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "10", ".", "Associação", "da", "assinatura", "do", "aplicativo", "comercial", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L420-L436
train
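The `sequencia_cnpj` argument of associar_assinatura is simply the two 14-digit CNPJs concatenated. A small sketch with placeholder digits (the real values come from the software house and the taxpayer):

cnpj_software_house = "11111111111111"  # placeholder developer CNPJ
cnpj_contribuinte = "22222222222222"    # placeholder establishment CNPJ
sequencia_cnpj = cnpj_software_house + cnpj_contribuinte
assert len(sequencia_cnpj) == 28  # as required by ER SAT, item 2.3.1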
base4sistemas/satcfe
satcfe/base.py
FuncoesSAT.trocar_codigo_de_ativacao
def trocar_codigo_de_ativacao(self, novo_codigo_ativacao,
        opcao=constantes.CODIGO_ATIVACAO_REGULAR,
        codigo_emergencia=None):
    """``TrocarCodigoDeAtivacao`` function as per ER SAT, item 6.1.15.
    Changes the activation code of the SAT equipment.

    :param str novo_codigo_ativacao: The new activation code chosen by the
        taxpayer.

    :param int opcao: Indicates whether the currently configured activation
        code, a regular activation code defined by the taxpayer, should be
        used, or whether an emergency code should be used instead. Must be
        the value of one of the constants
        :attr:`satcomum.constantes.CODIGO_ATIVACAO_REGULAR` (default) or
        :attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA`. No
        validation is performed before the function is actually invoked.
        However, if the indicated option is ``CODIGO_ATIVACAO_EMERGENCIA``,
        then the ``codigo_emergencia`` argument is checked and must
        evaluate to true.

    :param str codigo_emergencia: The emergency activation code, which is
        defined by the manufacturer of the SAT equipment. This code should
        be used when the user has lost the regular activation code and
        needs to define a new one. Note that the ``opcao`` argument must be
        given the value
        :attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA` for this
        emergency code to be considered.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string

    :raises ValueError: If the new activation code evaluates to false (for
        example, an empty string) or if the emergency code evaluates to
        false when the emergency activation code option is chosen.

    .. warning::

        The arguments of the ``TrocarCodigoDeAtivacao`` function require
        the new activation code to be given twice (two arguments with the
        same content, as confirmation). This method simply passes the
        ``novo_codigo_ativacao`` argument twice to the SAT function,
        keeping the activation code confirmation out of the scope of this
        API.
    """
    if not novo_codigo_ativacao:
        raise ValueError('Novo codigo de ativacao invalido: {!r}'.format(
                novo_codigo_ativacao))
    codigo_ativacao = self._codigo_ativacao
    if opcao == constantes.CODIGO_ATIVACAO_EMERGENCIA:
        if codigo_emergencia:
            codigo_ativacao = codigo_emergencia
        else:
            raise ValueError('Codigo de ativacao de emergencia invalido: '
                    '{!r} (opcao={!r})'.format(codigo_emergencia, opcao))
    return self.invocar__TrocarCodigoDeAtivacao(
            self.gerar_numero_sessao(), codigo_ativacao, opcao,
            novo_codigo_ativacao, novo_codigo_ativacao)
python
``TrocarCodigoDeAtivacao`` function as per ER SAT, item 6.1.15. Changes the activation code of the SAT equipment. :param str novo_codigo_ativacao: The new activation code chosen by the taxpayer. :param int opcao: Indicates whether the currently configured activation code, a regular activation code defined by the taxpayer, should be used, or whether an emergency code should be used instead. Must be the value of one of the constants :attr:`satcomum.constantes.CODIGO_ATIVACAO_REGULAR` (default) or :attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA`. No validation is performed before the function is actually invoked. However, if the indicated option is ``CODIGO_ATIVACAO_EMERGENCIA``, then the ``codigo_emergencia`` argument is checked and must evaluate to true. :param str codigo_emergencia: The emergency activation code, which is defined by the manufacturer of the SAT equipment. This code should be used when the user has lost the regular activation code and needs to define a new one. Note that the ``opcao`` argument must be given the value :attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA` for this emergency code to be considered. :return: Returns the SAT function response *verbatim*. :rtype: string :raises ValueError: If the new activation code evaluates to false (for example, an empty string) or if the emergency code evaluates to false when the emergency activation code option is chosen. .. warning:: The arguments of the ``TrocarCodigoDeAtivacao`` function require the new activation code to be given twice (two arguments with the same content, as confirmation). This method simply passes the ``novo_codigo_ativacao`` argument twice to the SAT function, keeping the activation code confirmation out of the scope of this API.
[ "Função", "TrocarCodigoDeAtivacao", "conforme", "ER", "SAT", "item", "6", ".", "1", ".", "15", ".", "Troca", "do", "código", "de", "ativação", "do", "equipamento", "SAT", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/base.py#L483-L545
train
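A hedged sketch of the two ways to call trocar_codigo_de_ativacao, assuming `sat` is a constructed FuncoesSAT subclass; the codes themselves are placeholders, so the calls are left commented out.

from satcomum import constantes

# Regular change: the current (regular) activation code authorizes the swap.
# resp = sat.trocar_codigo_de_ativacao('novo-codigo')

# Emergency change: the manufacturer-defined emergency code must be supplied,
# otherwise the method raises ValueError before touching the equipment.
# resp = sat.trocar_codigo_de_ativacao(
#         'novo-codigo',
#         opcao=constantes.CODIGO_ATIVACAO_EMERGENCIA,
#         codigo_emergencia='codigo-do-fabricante')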
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.load_forecasts
def load_forecasts(self):
    """
    Loads the forecast files and gathers the forecast information into pandas DataFrames.
    """
    forecast_path = self.forecast_json_path + "/{0}/{1}/".format(self.run_date.strftime("%Y%m%d"),
                                                                 self.ensemble_member)
    forecast_files = sorted(glob(forecast_path + "*.json"))
    for forecast_file in forecast_files:
        file_obj = open(forecast_file)
        json_obj = json.load(file_obj)
        file_obj.close()
        track_id = json_obj['properties']["id"]
        obs_track_id = json_obj['properties']["obs_track_id"]
        forecast_hours = json_obj['properties']['times']
        duration = json_obj['properties']['duration']
        for f, feature in enumerate(json_obj['features']):
            area = np.sum(feature["properties"]["masks"])
            step_id = track_id + "_{0:02d}".format(f)
            for model_type in self.model_types:
                for model_name in self.model_names[model_type]:
                    prediction = feature['properties'][model_type + "_" + model_name.replace(" ", "-")]
                    if model_type == "condition":
                        prediction = [prediction]
                    row = [track_id, obs_track_id, self.ensemble_name, self.ensemble_member,
                           forecast_hours[f], f + 1, duration, area] + prediction
                    self.forecasts[model_type][model_name].loc[step_id] = row
python
Loads the forecast files and gathers the forecast information into pandas DataFrames.
[ "Loads", "the", "forecast", "files", "and", "gathers", "the", "forecast", "information", "into", "pandas", "DataFrames", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L62-L87
train
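The loader above expects one JSON file per forecast track under ``forecast_json_path/YYYYMMDD/member/``. A quick sketch of the path construction it performs (the base directory and member name are illustrative):

>>> from datetime import datetime
>>> run_date = datetime(2015, 6, 4)
>>> "/data/track_forecasts" + "/{0}/{1}/".format(run_date.strftime("%Y%m%d"), "mem01")
'/data/track_forecasts/20150604/mem01/'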
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.load_obs
def load_obs(self): """ Loads the track total and step files and merges the information into a single data frame. """ track_total_file = self.track_data_csv_path + \ "track_total_{0}_{1}_{2}.csv".format(self.ensemble_name, self.ensemble_member, self.run_date.strftime("%Y%m%d")) track_step_file = self.track_data_csv_path + \ "track_step_{0}_{1}_{2}.csv".format(self.ensemble_name, self.ensemble_member, self.run_date.strftime("%Y%m%d")) track_total_cols = ["Track_ID", "Translation_Error_X", "Translation_Error_Y", "Start_Time_Error"] track_step_cols = ["Step_ID", "Track_ID", "Hail_Size", "Shape", "Location", "Scale"] track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols) track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols) obs_data = pd.merge(track_step_data, track_total_data, on="Track_ID", how="left") self.obs = obs_data
python
def load_obs(self): """ Loads the track total and step files and merges the information into a single data frame. """ track_total_file = self.track_data_csv_path + \ "track_total_{0}_{1}_{2}.csv".format(self.ensemble_name, self.ensemble_member, self.run_date.strftime("%Y%m%d")) track_step_file = self.track_data_csv_path + \ "track_step_{0}_{1}_{2}.csv".format(self.ensemble_name, self.ensemble_member, self.run_date.strftime("%Y%m%d")) track_total_cols = ["Track_ID", "Translation_Error_X", "Translation_Error_Y", "Start_Time_Error"] track_step_cols = ["Step_ID", "Track_ID", "Hail_Size", "Shape", "Location", "Scale"] track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols) track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols) obs_data = pd.merge(track_step_data, track_total_data, on="Track_ID", how="left") self.obs = obs_data
[ "def", "load_obs", "(", "self", ")", ":", "track_total_file", "=", "self", ".", "track_data_csv_path", "+", "\"track_total_{0}_{1}_{2}.csv\"", ".", "format", "(", "self", ".", "ensemble_name", ",", "self", ".", "ensemble_member", ",", "self", ".", "run_date", ".", "strftime", "(", "\"%Y%m%d\"", ")", ")", "track_step_file", "=", "self", ".", "track_data_csv_path", "+", "\"track_step_{0}_{1}_{2}.csv\"", ".", "format", "(", "self", ".", "ensemble_name", ",", "self", ".", "ensemble_member", ",", "self", ".", "run_date", ".", "strftime", "(", "\"%Y%m%d\"", ")", ")", "track_total_cols", "=", "[", "\"Track_ID\"", ",", "\"Translation_Error_X\"", ",", "\"Translation_Error_Y\"", ",", "\"Start_Time_Error\"", "]", "track_step_cols", "=", "[", "\"Step_ID\"", ",", "\"Track_ID\"", ",", "\"Hail_Size\"", ",", "\"Shape\"", ",", "\"Location\"", ",", "\"Scale\"", "]", "track_total_data", "=", "pd", ".", "read_csv", "(", "track_total_file", ",", "usecols", "=", "track_total_cols", ")", "track_step_data", "=", "pd", ".", "read_csv", "(", "track_step_file", ",", "usecols", "=", "track_step_cols", ")", "obs_data", "=", "pd", ".", "merge", "(", "track_step_data", ",", "track_total_data", ",", "on", "=", "\"Track_ID\"", ",", "how", "=", "\"left\"", ")", "self", ".", "obs", "=", "obs_data" ]
Loads the track total and step files and merges the information into a single data frame.
[ "Loads", "the", "track", "total", "and", "step", "files", "and", "merges", "the", "information", "into", "a", "single", "data", "frame", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L89-L106
train
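A sketch of the CSV filename pattern ``load_obs`` above resolves (the ensemble and member names are illustrative):

>>> from datetime import datetime
>>> "track_total_{0}_{1}_{2}.csv".format("SSEF", "wrf-core01",
...                                      datetime(2015, 6, 4).strftime("%Y%m%d"))
'track_total_SSEF_wrf-core01_20150604.csv'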
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.merge_obs
def merge_obs(self): """ Match forecasts and observations. """ for model_type in self.model_types: self.matched_forecasts[model_type] = {} for model_name in self.model_names[model_type]: self.matched_forecasts[model_type][model_name] = pd.merge(self.forecasts[model_type][model_name], self.obs, right_on="Step_ID", how="left", left_index=True)
python
def merge_obs(self): """ Match forecasts and observations. """ for model_type in self.model_types: self.matched_forecasts[model_type] = {} for model_name in self.model_names[model_type]: self.matched_forecasts[model_type][model_name] = pd.merge(self.forecasts[model_type][model_name], self.obs, right_on="Step_ID", how="left", left_index=True)
[ "def", "merge_obs", "(", "self", ")", ":", "for", "model_type", "in", "self", ".", "model_types", ":", "self", ".", "matched_forecasts", "[", "model_type", "]", "=", "{", "}", "for", "model_name", "in", "self", ".", "model_names", "[", "model_type", "]", ":", "self", ".", "matched_forecasts", "[", "model_type", "]", "[", "model_name", "]", "=", "pd", ".", "merge", "(", "self", ".", "forecasts", "[", "model_type", "]", "[", "model_name", "]", ",", "self", ".", "obs", ",", "right_on", "=", "\"Step_ID\"", ",", "how", "=", "\"left\"", ",", "left_index", "=", "True", ")" ]
Match forecasts and observations.
[ "Match", "forecasts", "and", "observations", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L108-L117
train
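The merge above keys the forecast rows by their index (the ``Step_ID`` values built in ``load_forecasts``) against the ``Step_ID`` column of the observations; steps with no matching observation keep NaN fields. A minimal pandas illustration with made-up values:

>>> import pandas as pd
>>> fcst = pd.DataFrame({"Area": [42]}, index=["T001_00"])
>>> obs = pd.DataFrame({"Step_ID": ["T001_00"], "Hail_Size": [25.0]})
>>> merged = pd.merge(fcst, obs, right_on="Step_ID", how="left", left_index=True)
>>> float(merged["Hail_Size"].iloc[0])
25.0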
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.crps
def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None): """ Calculates the continuous ranked probability score (CRPS) on the forecast data. Args: model_type: model type being evaluated. model_name: machine learning model being evaluated. condition_model_name: Name of the hail/no-hail model being evaluated condition_threshold: Threshold for using hail size CDF query: pandas query string to filter the forecasts based on the metadata Returns: a DistributedCRPS object """ def gamma_cdf(x, a, loc, b): if a == 0 or b == 0: cdf = np.ones(x.shape) else: cdf = gamma.cdf(x, a, loc, b) return cdf crps_obj = DistributedCRPS(self.dist_thresholds) if query is not None: sub_forecasts = self.matched_forecasts[model_type][model_name].query(query) sub_forecasts = sub_forecasts.reset_index(drop=True) condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query) condition_forecasts = condition_forecasts.reset_index(drop=True) else: sub_forecasts = self.matched_forecasts[model_type][model_name] condition_forecasts = self.matched_forecasts["condition"][condition_model_name] if sub_forecasts.shape[0] > 0: if model_type == "dist": forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.dist_thresholds.size)) for f in range(sub_forecasts.shape[0]): condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]] if condition_prob >= condition_threshold: f_params = [0, 0, 0] else: f_params = sub_forecasts[self.forecast_bins[model_type]].values[f] forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params[0], f_params[1], f_params[2]) obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params) for params in sub_forecasts[self.type_cols[model_type]].values]) crps_obj.update(forecast_cdfs, obs_cdfs) else: crps_obj.update(sub_forecasts[self.forecast_bins[model_type].astype(str)].values, sub_forecasts[self.type_cols[model_type]].values) return crps_obj
python
def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None): """ Calculates the continuous ranked probability score (CRPS) on the forecast data. Args: model_type: model type being evaluated. model_name: machine learning model being evaluated. condition_model_name: Name of the hail/no-hail model being evaluated condition_threshold: Threshold for using hail size CDF query: pandas query string to filter the forecasts based on the metadata Returns: a DistributedCRPS object """ def gamma_cdf(x, a, loc, b): if a == 0 or b == 0: cdf = np.ones(x.shape) else: cdf = gamma.cdf(x, a, loc, b) return cdf crps_obj = DistributedCRPS(self.dist_thresholds) if query is not None: sub_forecasts = self.matched_forecasts[model_type][model_name].query(query) sub_forecasts = sub_forecasts.reset_index(drop=True) condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query) condition_forecasts = condition_forecasts.reset_index(drop=True) else: sub_forecasts = self.matched_forecasts[model_type][model_name] condition_forecasts = self.matched_forecasts["condition"][condition_model_name] if sub_forecasts.shape[0] > 0: if model_type == "dist": forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.dist_thresholds.size)) for f in range(sub_forecasts.shape[0]): condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]] if condition_prob >= condition_threshold: f_params = [0, 0, 0] else: f_params = sub_forecasts[self.forecast_bins[model_type]].values[f] forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params[0], f_params[1], f_params[2]) obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params) for params in sub_forecasts[self.type_cols[model_type]].values]) crps_obj.update(forecast_cdfs, obs_cdfs) else: crps_obj.update(sub_forecasts[self.forecast_bins[model_type].astype(str)].values, sub_forecasts[self.type_cols[model_type]].values) return crps_obj
[ "def", "crps", "(", "self", ",", "model_type", ",", "model_name", ",", "condition_model_name", ",", "condition_threshold", ",", "query", "=", "None", ")", ":", "def", "gamma_cdf", "(", "x", ",", "a", ",", "loc", ",", "b", ")", ":", "if", "a", "==", "0", "or", "b", "==", "0", ":", "cdf", "=", "np", ".", "ones", "(", "x", ".", "shape", ")", "else", ":", "cdf", "=", "gamma", ".", "cdf", "(", "x", ",", "a", ",", "loc", ",", "b", ")", "return", "cdf", "crps_obj", "=", "DistributedCRPS", "(", "self", ".", "dist_thresholds", ")", "if", "query", "is", "not", "None", ":", "sub_forecasts", "=", "self", ".", "matched_forecasts", "[", "model_type", "]", "[", "model_name", "]", ".", "query", "(", "query", ")", "sub_forecasts", "=", "sub_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "condition_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"condition\"", "]", "[", "condition_model_name", "]", ".", "query", "(", "query", ")", "condition_forecasts", "=", "condition_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "else", ":", "sub_forecasts", "=", "self", ".", "matched_forecasts", "[", "model_type", "]", "[", "model_name", "]", "condition_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"condition\"", "]", "[", "condition_model_name", "]", "if", "sub_forecasts", ".", "shape", "[", "0", "]", ">", "0", ":", "if", "model_type", "==", "\"dist\"", ":", "forecast_cdfs", "=", "np", ".", "zeros", "(", "(", "sub_forecasts", ".", "shape", "[", "0", "]", ",", "self", ".", "dist_thresholds", ".", "size", ")", ")", "for", "f", "in", "range", "(", "sub_forecasts", ".", "shape", "[", "0", "]", ")", ":", "condition_prob", "=", "condition_forecasts", ".", "loc", "[", "f", ",", "self", ".", "forecast_bins", "[", "\"condition\"", "]", "[", "0", "]", "]", "if", "condition_prob", ">=", "condition_threshold", ":", "f_params", "=", "[", "0", ",", "0", ",", "0", "]", "else", ":", "f_params", "=", "sub_forecasts", "[", "self", ".", "forecast_bins", "[", "model_type", "]", "]", ".", "values", "[", "f", "]", "forecast_cdfs", "[", "f", "]", "=", "gamma_cdf", "(", "self", ".", "dist_thresholds", ",", "f_params", "[", "0", "]", ",", "f_params", "[", "1", "]", ",", "f_params", "[", "2", "]", ")", "obs_cdfs", "=", "np", ".", "array", "(", "[", "gamma_cdf", "(", "self", ".", "dist_thresholds", ",", "*", "params", ")", "for", "params", "in", "sub_forecasts", "[", "self", ".", "type_cols", "[", "model_type", "]", "]", ".", "values", "]", ")", "crps_obj", ".", "update", "(", "forecast_cdfs", ",", "obs_cdfs", ")", "else", ":", "crps_obj", ".", "update", "(", "sub_forecasts", "[", "self", ".", "forecast_bins", "[", "model_type", "]", ".", "astype", "(", "str", ")", "]", ".", "values", ",", "sub_forecasts", "[", "self", ".", "type_cols", "[", "model_type", "]", "]", ".", "values", ")", "return", "crps_obj" ]
Calculates the continuous ranked probability score (CRPS) on the forecast data. Args: model_type: model type being evaluated. model_name: machine learning model being evaluated. condition_model_name: Name of the hail/no-hail model being evaluated condition_threshold: Threshold for using hail size CDF query: pandas query string to filter the forecasts based on the metadata Returns: a DistributedCRPS object
[ "Calculates", "the", "cumulative", "ranked", "probability", "score", "(", "CRPS", ")", "on", "the", "forecast", "data", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L119-L168
train
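A sketch of the convention encoded by ``gamma_cdf`` above: all-zero gamma parameters stand in for a degenerate "no hail" forecast whose CDF is 1 at every threshold, otherwise scipy's gamma CDF is evaluated with (shape, loc, scale); the parameter values below are made up:

>>> import numpy as np
>>> from scipy.stats import gamma
>>> x = np.arange(5.0, 105.0, 5.0)
>>> a, loc, b = 2.0, 0.0, 8.0
>>> cdf = np.ones(x.shape) if a == 0 or b == 0 else gamma.cdf(x, a, loc, b)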
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.roc
def roc(self, model_type, model_name, intensity_threshold, prob_thresholds, query=None): """ Calculates a ROC curve at a specified intensity threshold. Args: model_type: type of model being evaluated (e.g. size). model_name: machine learning model being evaluated intensity_threshold: forecast bin used as the split point for evaluation prob_thresholds: Array of probability thresholds being evaluated. query: str to filter forecasts based on values of forecasts, obs, and metadata. Returns: A DistributedROC object """ roc_obj = DistributedROC(prob_thresholds, 0.5) if query is not None: sub_forecasts = self.matched_forecasts[model_type][model_name].query(query) sub_forecasts = sub_forecasts.reset_index(drop=True) else: sub_forecasts = self.matched_forecasts[model_type][model_name] obs_values = np.zeros(sub_forecasts.shape[0]) if sub_forecasts.shape[0] > 0: if model_type == "dist": forecast_values = np.array([gamma_sf(intensity_threshold, *params) for params in sub_forecasts[self.forecast_bins[model_type]].values]) obs_probs = np.array([gamma_sf(intensity_threshold, *params) for params in sub_forecasts[self.type_cols[model_type]].values]) obs_values[obs_probs >= 0.01] = 1 elif len(self.forecast_bins[model_type]) > 1: fbin = np.argmin(np.abs(self.forecast_bins[model_type] - intensity_threshold)) forecast_values = 1 - sub_forecasts[self.forecast_bins[model_type].astype(str)].values.cumsum(axis=1)[:, fbin] obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1 else: forecast_values = sub_forecasts[self.forecast_bins[model_type].astype(str)[0]].values obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1 roc_obj.update(forecast_values, obs_values) return roc_obj
python
def roc(self, model_type, model_name, intensity_threshold, prob_thresholds, query=None): """ Calculates a ROC curve at a specified intensity threshold. Args: model_type: type of model being evaluated (e.g. size). model_name: machine learning model being evaluated intensity_threshold: forecast bin used as the split point for evaluation prob_thresholds: Array of probability thresholds being evaluated. query: str to filter forecasts based on values of forecasts, obs, and metadata. Returns: A DistributedROC object """ roc_obj = DistributedROC(prob_thresholds, 0.5) if query is not None: sub_forecasts = self.matched_forecasts[model_type][model_name].query(query) sub_forecasts = sub_forecasts.reset_index(drop=True) else: sub_forecasts = self.matched_forecasts[model_type][model_name] obs_values = np.zeros(sub_forecasts.shape[0]) if sub_forecasts.shape[0] > 0: if model_type == "dist": forecast_values = np.array([gamma_sf(intensity_threshold, *params) for params in sub_forecasts[self.forecast_bins[model_type]].values]) obs_probs = np.array([gamma_sf(intensity_threshold, *params) for params in sub_forecasts[self.type_cols[model_type]].values]) obs_values[obs_probs >= 0.01] = 1 elif len(self.forecast_bins[model_type]) > 1: fbin = np.argmin(np.abs(self.forecast_bins[model_type] - intensity_threshold)) forecast_values = 1 - sub_forecasts[self.forecast_bins[model_type].astype(str)].values.cumsum(axis=1)[:, fbin] obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1 else: forecast_values = sub_forecasts[self.forecast_bins[model_type].astype(str)[0]].values obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1 roc_obj.update(forecast_values, obs_values) return roc_obj
[ "def", "roc", "(", "self", ",", "model_type", ",", "model_name", ",", "intensity_threshold", ",", "prob_thresholds", ",", "query", "=", "None", ")", ":", "roc_obj", "=", "DistributedROC", "(", "prob_thresholds", ",", "0.5", ")", "if", "query", "is", "not", "None", ":", "sub_forecasts", "=", "self", ".", "matched_forecasts", "[", "model_type", "]", "[", "model_name", "]", ".", "query", "(", "query", ")", "sub_forecasts", "=", "sub_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "else", ":", "sub_forecasts", "=", "self", ".", "matched_forecasts", "[", "model_type", "]", "[", "model_name", "]", "obs_values", "=", "np", ".", "zeros", "(", "sub_forecasts", ".", "shape", "[", "0", "]", ")", "if", "sub_forecasts", ".", "shape", "[", "0", "]", ">", "0", ":", "if", "model_type", "==", "\"dist\"", ":", "forecast_values", "=", "np", ".", "array", "(", "[", "gamma_sf", "(", "intensity_threshold", ",", "*", "params", ")", "for", "params", "in", "sub_forecasts", "[", "self", ".", "forecast_bins", "[", "model_type", "]", "]", ".", "values", "]", ")", "obs_probs", "=", "np", ".", "array", "(", "[", "gamma_sf", "(", "intensity_threshold", ",", "*", "params", ")", "for", "params", "in", "sub_forecasts", "[", "self", ".", "type_cols", "[", "model_type", "]", "]", ".", "values", "]", ")", "obs_values", "[", "obs_probs", ">=", "0.01", "]", "=", "1", "elif", "len", "(", "self", ".", "forecast_bins", "[", "model_type", "]", ")", ">", "1", ":", "fbin", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "self", ".", "forecast_bins", "[", "model_type", "]", "-", "intensity_threshold", ")", ")", "forecast_values", "=", "1", "-", "sub_forecasts", "[", "self", ".", "forecast_bins", "[", "model_type", "]", ".", "astype", "(", "str", ")", "]", ".", "values", ".", "cumsum", "(", "axis", "=", "1", ")", "[", ":", ",", "fbin", "]", "obs_values", "[", "sub_forecasts", "[", "self", ".", "type_cols", "[", "model_type", "]", "]", ".", "values", ">=", "intensity_threshold", "]", "=", "1", "else", ":", "forecast_values", "=", "sub_forecasts", "[", "self", ".", "forecast_bins", "[", "model_type", "]", ".", "astype", "(", "str", ")", "[", "0", "]", "]", ".", "values", "obs_values", "[", "sub_forecasts", "[", "self", ".", "type_cols", "[", "model_type", "]", "]", ".", "values", ">=", "intensity_threshold", "]", "=", "1", "roc_obj", ".", "update", "(", "forecast_values", ",", "obs_values", ")", "return", "roc_obj" ]
Calculates a ROC curve at a specified intensity threshold. Args: model_type: type of model being evaluated (e.g. size). model_name: machine learning model being evaluated intensity_threshold: forecast bin used as the split point for evaluation prob_thresholds: Array of probability thresholds being evaluated. query: str to filter forecasts based on values of forecasts, obs, and metadata. Returns: A DistributedROC object
[ "Calculates", "a", "ROC", "curve", "at", "a", "specified", "intensity", "threshold", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L170-L207
train
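For the ``dist`` model type above, exceedance probabilities come from the gamma survival function (``gamma_sf`` is presumably a thin wrapper over scipy's ``gamma.sf``). An illustrative exceedance probability for a 25 mm threshold under made-up (shape, loc, scale) parameters:

>>> from scipy.stats import gamma
>>> p_exceed = gamma.sf(25.0, 2.0, 0.0, 8.0)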
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
ObjectEvaluator.sample_forecast_max_hail
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None): """ Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object. """ if query is not None: dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query) dist_forecasts = dist_forecasts.reset_index(drop=True) condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query) condition_forecasts = condition_forecasts.reset_index(drop=True) else: dist_forecasts = self.matched_forecasts["dist"][dist_model_name] condition_forecasts = self.matched_forecasts["condition"][condition_model_name] max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples)) areas = dist_forecasts["Area"].values for f in np.arange(dist_forecasts.shape[0]): condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]] if condition_prob >= condition_threshold: max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values, size=(num_samples, areas[f])).max(axis=1)) return max_hail_samples
python
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None): """ Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object. """ if query is not None: dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query) dist_forecasts = dist_forecasts.reset_index(drop=True) condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query) condition_forecasts = condition_forecasts.reset_index(drop=True) else: dist_forecasts = self.matched_forecasts["dist"][dist_model_name] condition_forecasts = self.matched_forecasts["condition"][condition_model_name] max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples)) areas = dist_forecasts["Area"].values for f in np.arange(dist_forecasts.shape[0]): condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]] if condition_prob >= condition_threshold: max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values, size=(num_samples, areas[f])).max(axis=1)) return max_hail_samples
[ "def", "sample_forecast_max_hail", "(", "self", ",", "dist_model_name", ",", "condition_model_name", ",", "num_samples", ",", "condition_threshold", "=", "0.5", ",", "query", "=", "None", ")", ":", "if", "query", "is", "not", "None", ":", "dist_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"dist\"", "]", "[", "dist_model_name", "]", ".", "query", "(", "query", ")", "dist_forecasts", "=", "dist_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "condition_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"condition\"", "]", "[", "condition_model_name", "]", ".", "query", "(", "query", ")", "condition_forecasts", "=", "condition_forecasts", ".", "reset_index", "(", "drop", "=", "True", ")", "else", ":", "dist_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"dist\"", "]", "[", "dist_model_name", "]", "condition_forecasts", "=", "self", ".", "matched_forecasts", "[", "\"condition\"", "]", "[", "condition_model_name", "]", "max_hail_samples", "=", "np", ".", "zeros", "(", "(", "dist_forecasts", ".", "shape", "[", "0", "]", ",", "num_samples", ")", ")", "areas", "=", "dist_forecasts", "[", "\"Area\"", "]", ".", "values", "for", "f", "in", "np", ".", "arange", "(", "dist_forecasts", ".", "shape", "[", "0", "]", ")", ":", "condition_prob", "=", "condition_forecasts", ".", "loc", "[", "f", ",", "self", ".", "forecast_bins", "[", "\"condition\"", "]", "[", "0", "]", "]", "if", "condition_prob", ">=", "condition_threshold", ":", "max_hail_samples", "[", "f", "]", "=", "np", ".", "sort", "(", "gamma", ".", "rvs", "(", "*", "dist_forecasts", ".", "loc", "[", "f", ",", "self", ".", "forecast_bins", "[", "\"dist\"", "]", "]", ".", "values", ",", "size", "=", "(", "num_samples", ",", "areas", "[", "f", "]", ")", ")", ".", "max", "(", "axis", "=", "1", ")", ")", "return", "max_hail_samples" ]
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object.
[ "Samples", "every", "forecast", "hail", "object", "and", "returns", "an", "empirical", "distribution", "of", "possible", "maximum", "hail", "sizes", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L248-L282
train
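The sampling step above draws ``num_samples`` realizations of a whole object, one gamma sample per grid cell of its area, and keeps each realization's maximum. A self-contained sketch with made-up parameters:

>>> import numpy as np
>>> from scipy.stats import gamma
>>> num_samples, area = 100, 36
>>> samples = gamma.rvs(2.0, 0.0, 8.0, size=(num_samples, area))
>>> max_hail = np.sort(samples.max(axis=1))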
paymentwall/paymentwall-python
paymentwall/widget.py
Widget.get_params
def get_params(self): """Get signature and params """ params = { 'key': self.get_app_key(), 'uid': self.user_id, 'widget': self.widget_code } products_number = len(self.products) if self.get_api_type() == self.API_GOODS: if isinstance(self.products, list): if products_number == 1: product = self.products[0] if isinstance(product, Product): post_trial_product = None if isinstance(product.get_trial_product(), Product): post_trial_product = product product = product.get_trial_product() params['amount'] = product.get_amount() params['currencyCode'] = product.get_currency_code() params['ag_name'] = product.get_name() params['ag_external_id'] = product.get_id() params['ag_type'] = product.get_type() if product.get_type() == Product.TYPE_SUBSCRIPTION: params['ag_period_length'] = product.get_period_length() params['ag_period_type'] = product.get_period_type() if product.is_recurring(): params['ag_recurring'] = 1 if product.is_recurring() else 0 if post_trial_product: params['ag_trial'] = 1 params['ag_post_trial_external_id'] = post_trial_product.get_id() params['ag_post_trial_period_length'] = post_trial_product.get_period_length() params['ag_post_trial_period_type'] = post_trial_product.get_period_type() params['ag_post_trial_name'] = post_trial_product.get_name() params['post_trial_amount'] = post_trial_product.get_amount() params['post_trial_currencyCode'] = post_trial_product.get_currency_code() else: self.append_to_errors('Not a Product instance') else: self.append_to_errors('Only 1 product is allowed') elif self.get_api_type() == self.API_CART: index = 0 for product in self.products: params['external_ids[' + str(index) + ']'] = product.get_id() if product.get_amount() > 0: params['prices[' + str(index) + ']'] = product.get_amount() if product.get_currency_code() != '' and product.get_currency_code() is not None: params['currencies[' + str(index) + ']'] = product.get_currency_code() index += 1 params['sign_version'] = signature_version = str(self.get_default_widget_signature()) if not self.is_empty(self.extra_params, 'sign_version'): signature_version = params['sign_version'] = str(self.extra_params['sign_version']) params = self.array_merge(params, self.extra_params) params['sign'] = self.calculate_signature(params, self.get_secret_key(), int(signature_version)) return params
python
def get_params(self): """Get signature and params """ params = { 'key': self.get_app_key(), 'uid': self.user_id, 'widget': self.widget_code } products_number = len(self.products) if self.get_api_type() == self.API_GOODS: if isinstance(self.products, list): if products_number == 1: product = self.products[0] if isinstance(product, Product): post_trial_product = None if isinstance(product.get_trial_product(), Product): post_trial_product = product product = product.get_trial_product() params['amount'] = product.get_amount() params['currencyCode'] = product.get_currency_code() params['ag_name'] = product.get_name() params['ag_external_id'] = product.get_id() params['ag_type'] = product.get_type() if product.get_type() == Product.TYPE_SUBSCRIPTION: params['ag_period_length'] = product.get_period_length() params['ag_period_type'] = product.get_period_type() if product.is_recurring(): params['ag_recurring'] = 1 if product.is_recurring() else 0 if post_trial_product: params['ag_trial'] = 1 params['ag_post_trial_external_id'] = post_trial_product.get_id() params['ag_post_trial_period_length'] = post_trial_product.get_period_length() params['ag_post_trial_period_type'] = post_trial_product.get_period_type() params['ag_post_trial_name'] = post_trial_product.get_name() params['post_trial_amount'] = post_trial_product.get_amount() params['post_trial_currencyCode'] = post_trial_product.get_currency_code() else: self.append_to_errors('Not a Product instance') else: self.append_to_errors('Only 1 product is allowed') elif self.get_api_type() == self.API_CART: index = 0 for product in self.products: params['external_ids[' + str(index) + ']'] = product.get_id() if product.get_amount() > 0: params['prices[' + str(index) + ']'] = product.get_amount() if product.get_currency_code() != '' and product.get_currency_code() is not None: params['currencies[' + str(index) + ']'] = product.get_currency_code() index += 1 params['sign_version'] = signature_version = str(self.get_default_widget_signature()) if not self.is_empty(self.extra_params, 'sign_version'): signature_version = params['sign_version'] = str(self.extra_params['sign_version']) params = self.array_merge(params, self.extra_params) params['sign'] = self.calculate_signature(params, self.get_secret_key(), int(signature_version)) return params
[ "def", "get_params", "(", "self", ")", ":", "params", "=", "{", "'key'", ":", "self", ".", "get_app_key", "(", ")", ",", "'uid'", ":", "self", ".", "user_id", ",", "'widget'", ":", "self", ".", "widget_code", "}", "products_number", "=", "len", "(", "self", ".", "products", ")", "if", "self", ".", "get_api_type", "(", ")", "==", "self", ".", "API_GOODS", ":", "if", "isinstance", "(", "self", ".", "products", ",", "list", ")", ":", "if", "products_number", "==", "1", ":", "product", "=", "self", ".", "products", "[", "0", "]", "if", "isinstance", "(", "product", ",", "Product", ")", ":", "post_trial_product", "=", "None", "if", "isinstance", "(", "product", ".", "get_trial_product", "(", ")", ",", "Product", ")", ":", "post_trial_product", "=", "product", "product", "=", "product", ".", "get_trial_product", "(", ")", "params", "[", "'amount'", "]", "=", "product", ".", "get_amount", "(", ")", "params", "[", "'currencyCode'", "]", "=", "product", ".", "get_currency_code", "(", ")", "params", "[", "'ag_name'", "]", "=", "product", ".", "get_name", "(", ")", "params", "[", "'ag_external_id'", "]", "=", "product", ".", "get_id", "(", ")", "params", "[", "'ag_type'", "]", "=", "product", ".", "get_type", "(", ")", "if", "product", ".", "get_type", "(", ")", "==", "Product", ".", "TYPE_SUBSCRIPTION", ":", "params", "[", "'ag_period_length'", "]", "=", "product", ".", "get_period_length", "(", ")", "params", "[", "'ag_period_type'", "]", "=", "product", ".", "get_period_type", "(", ")", "if", "product", ".", "is_recurring", "(", ")", ":", "params", "[", "'ag_recurring'", "]", "=", "1", "if", "product", ".", "is_recurring", "(", ")", "else", "0", "if", "post_trial_product", ":", "params", "[", "'ag_trial'", "]", "=", "1", "params", "[", "'ag_post_trial_external_id'", "]", "=", "post_trial_product", ".", "get_id", "(", ")", "params", "[", "'ag_post_trial_period_length'", "]", "=", "post_trial_product", ".", "get_period_length", "(", ")", "params", "[", "'ag_post_trial_period_type'", "]", "=", "post_trial_product", ".", "get_period_type", "(", ")", "params", "[", "'ag_post_trial_name'", "]", "=", "post_trial_product", ".", "get_name", "(", ")", "params", "[", "'post_trial_amount'", "]", "=", "post_trial_product", ".", "get_amount", "(", ")", "params", "[", "'post_trial_currencyCode'", "]", "=", "post_trial_product", ".", "get_currency_code", "(", ")", "else", ":", "self", ".", "append_to_errors", "(", "'Not a Product instance'", ")", "else", ":", "self", ".", "append_to_errors", "(", "'Only 1 product is allowed'", ")", "elif", "self", ".", "get_api_type", "(", ")", "==", "self", ".", "API_CART", ":", "index", "=", "0", "for", "product", "in", "self", ".", "products", ":", "params", "[", "'external_ids['", "+", "str", "(", "index", ")", "+", "']'", "]", "=", "product", ".", "get_id", "(", ")", "if", "product", ".", "get_amount", "(", ")", ">", "0", ":", "params", "[", "'prices['", "+", "str", "(", "index", ")", "+", "']'", "]", "=", "product", ".", "get_amount", "(", ")", "if", "product", ".", "get_currency_code", "(", ")", "!=", "''", "and", "product", ".", "get_currency_code", "(", ")", "is", "not", "None", ":", "params", "[", "'currencies['", "+", "str", "(", "index", ")", "+", "']'", "]", "=", "product", ".", "get_currency_code", "(", ")", "index", "+=", "1", "params", "[", "'sign_version'", "]", "=", "signature_version", "=", "str", "(", "self", ".", "get_default_widget_signature", "(", ")", ")", "if", "not", "self", ".", "is_empty", "(", "self", ".", "extra_params", ",", "'sign_version'", ")", 
":", "signature_version", "=", "params", "[", "'sign_version'", "]", "=", "str", "(", "self", ".", "extra_params", "[", "'sign_version'", "]", ")", "params", "=", "self", ".", "array_merge", "(", "params", ",", "self", ".", "extra_params", ")", "params", "[", "'sign'", "]", "=", "self", ".", "calculate_signature", "(", "params", ",", "self", ".", "get_secret_key", "(", ")", ",", "int", "(", "signature_version", ")", ")", "return", "params" ]
Get signature and params
[ "Get", "signature", "and", "params" ]
5f65cb4460074787bbf75b8f276ace5ca8480d17
https://github.com/paymentwall/paymentwall-python/blob/5f65cb4460074787bbf75b8f276ace5ca8480d17/paymentwall/widget.py#L25-L95
train
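In the cart branch above, each product is flattened into indexed request parameters, with prices and currencies emitted only when present. The naming scheme in isolation, with made-up product ids and prices:

>>> params = {}
>>> for index, (pid, price) in enumerate([("sku-1", 9.99), ("sku-2", 0)]):
...     params['external_ids[' + str(index) + ']'] = pid
...     if price > 0:
...         params['prices[' + str(index) + ']'] = price
>>> sorted(params)
['external_ids[0]', 'external_ids[1]', 'prices[0]']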
base4sistemas/satcfe
satcfe/util.py
hms
def hms(segundos): # TODO: move to util.py """ Returns the number of hours, minutes and seconds computed from the given total number of seconds. .. sourcecode:: python >>> hms(1) (0, 0, 1) >>> hms(60) (0, 1, 0) >>> hms(3600) (1, 0, 0) >>> hms(3601) (1, 0, 1) >>> hms(3661) (1, 1, 1) :param int segundos: The total number of seconds. :returns: A tuple with three elements representing, respectively, the number of hours, minutes and seconds computed from the total number of seconds. :rtype: tuple """ h = (segundos // 3600) m = (segundos - (3600 * h)) // 60 s = (segundos - (3600 * h) - (m * 60)) return (h, m, s)
python
def hms(segundos): # TODO: move to util.py """ Returns the number of hours, minutes and seconds computed from the given total number of seconds. .. sourcecode:: python >>> hms(1) (0, 0, 1) >>> hms(60) (0, 1, 0) >>> hms(3600) (1, 0, 0) >>> hms(3601) (1, 0, 1) >>> hms(3661) (1, 1, 1) :param int segundos: The total number of seconds. :returns: A tuple with three elements representing, respectively, the number of hours, minutes and seconds computed from the total number of seconds. :rtype: tuple """ h = (segundos // 3600) m = (segundos - (3600 * h)) // 60 s = (segundos - (3600 * h) - (m * 60)) return (h, m, s)
[ "def", "hms", "(", "segundos", ")", ":", "# TODO: mover para util.py", "h", "=", "(", "segundos", "/", "3600", ")", "m", "=", "(", "segundos", "-", "(", "3600", "*", "h", ")", ")", "/", "60", "s", "=", "(", "segundos", "-", "(", "3600", "*", "h", ")", "-", "(", "m", "*", "60", ")", ")", "return", "(", "h", ",", "m", ",", "s", ")" ]
Returns the number of hours, minutes and seconds computed from the given total number of seconds. .. sourcecode:: python >>> hms(1) (0, 0, 1) >>> hms(60) (0, 1, 0) >>> hms(3600) (1, 0, 0) >>> hms(3601) (1, 0, 1) >>> hms(3661) (1, 1, 1) :param int segundos: The total number of seconds. :returns: A tuple with three elements representing, respectively, the number of hours, minutes and seconds computed from the total number of seconds. :rtype: tuple
[ "Retorna", "o", "número", "de", "horas", "minutos", "e", "segundos", "a", "partir", "do", "total", "de", "segundos", "informado", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/util.py#L166-L199
train
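A quick check of the Python 3 pitfall fixed above: true division would leak floats into the result tuple, which is why floor division (``//``) is used:

>>> 3661 // 3600
1
>>> type(3661 / 3600) is float
True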
base4sistemas/satcfe
satcfe/util.py
hms_humanizado
def hms_humanizado(segundos): # TODO: move to util.py """ Returns a human-readable text describing the total of hours, minutes and seconds computed from the given total number of seconds. .. sourcecode:: python >>> hms_humanizado(0) 'zero segundos' >>> hms_humanizado(1) '1 segundo' >>> hms_humanizado(2) '2 segundos' >>> hms_humanizado(3600) '1 hora' >>> hms_humanizado(3602) '1 hora e 2 segundos' >>> hms_humanizado(3721) '1 hora, 2 minutos e 1 segundo' :rtype: str """ p = lambda n, s, p: p if n > 1 else s h, m, s = hms(segundos) tokens = [ '' if h == 0 else '{:d} {}'.format(h, p(h, 'hora', 'horas')), '' if m == 0 else '{:d} {}'.format(m, p(m, 'minuto', 'minutos')), '' if s == 0 else '{:d} {}'.format(s, p(s, 'segundo', 'segundos'))] tokens = [token for token in tokens if token] if len(tokens) == 1: return tokens[0] if len(tokens) > 1: return '{} e {}'.format(', '.join(tokens[:-1]), tokens[-1]) return 'zero segundos'
python
def hms_humanizado(segundos): # TODO: mover para util.py """ Retorna um texto legível que descreve o total de horas, minutos e segundos calculados a partir do total de segundos informados. .. sourcecode:: python >>> hms_humanizado(0) 'zero segundos' >>> hms_humanizado(1) '1 segundo' >>> hms_humanizado(2) '2 segundos' >>> hms_humanizado(3600) '1 hora' >>> hms_humanizado(3602) '1 hora e 2 segundos' >>> hms_humanizado(3721) '1 hora, 2 minutos e 1 segundo' :rtype: str """ p = lambda n, s, p: p if n > 1 else s h, m, s = hms(segundos) tokens = [ '' if h == 0 else '{:d} {}'.format(h, p(h, 'hora', 'horas')), '' if m == 0 else '{:d} {}'.format(m, p(m, 'minuto', 'minutos')), '' if s == 0 else '{:d} {}'.format(s, p(s, 'segundo', 'segundos'))] tokens = [token for token in tokens if token] if len(tokens) == 1: return tokens[0] if len(tokens) > 1: return '{} e {}'.format(', '.join(tokens[:-1]), tokens[-1]) return 'zero segundos'
[ "def", "hms_humanizado", "(", "segundos", ")", ":", "# TODO: mover para util.py", "p", "=", "lambda", "n", ",", "s", ",", "p", ":", "p", "if", "n", ">", "1", "else", "s", "h", ",", "m", ",", "s", "=", "hms", "(", "segundos", ")", "tokens", "=", "[", "''", "if", "h", "==", "0", "else", "'{:d} {}'", ".", "format", "(", "h", ",", "p", "(", "h", ",", "'hora'", ",", "'horas'", ")", ")", ",", "''", "if", "m", "==", "0", "else", "'{:d} {}'", ".", "format", "(", "m", ",", "p", "(", "m", ",", "'minuto'", ",", "'minutos'", ")", ")", ",", "''", "if", "s", "==", "0", "else", "'{:d} {}'", ".", "format", "(", "s", ",", "p", "(", "s", ",", "'segundo'", ",", "'segundos'", ")", ")", "]", "tokens", "=", "[", "token", "for", "token", "in", "tokens", "if", "token", "]", "if", "len", "(", "tokens", ")", "==", "1", ":", "return", "tokens", "[", "0", "]", "if", "len", "(", "tokens", ")", ">", "1", ":", "return", "'{} e {}'", ".", "format", "(", "', '", ".", "join", "(", "tokens", "[", ":", "-", "1", "]", ")", ",", "tokens", "[", "-", "1", "]", ")", "return", "'zero segundos'" ]
Returns a human-readable text describing the total of hours, minutes and seconds computed from the given total number of seconds. .. sourcecode:: python >>> hms_humanizado(0) 'zero segundos' >>> hms_humanizado(1) '1 segundo' >>> hms_humanizado(2) '2 segundos' >>> hms_humanizado(3600) '1 hora' >>> hms_humanizado(3602) '1 hora e 2 segundos' >>> hms_humanizado(3721) '1 hora, 2 minutos e 1 segundo' :rtype: str
[ "Retorna", "um", "texto", "legível", "que", "descreve", "o", "total", "de", "horas", "minutos", "e", "segundos", "calculados", "a", "partir", "do", "total", "de", "segundos", "informados", "." ]
cb8e8815f4133d3e3d94cf526fa86767b4521ed9
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/util.py#L202-L245
train
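The joining rule in ``hms_humanizado`` above separates all but the last token with commas and attaches the last with 'e'. The core expression, checked in isolation:

>>> tokens = ['1 hora', '2 minutos', '1 segundo']
>>> '{} e {}'.format(', '.join(tokens[:-1]), tokens[-1])
'1 hora, 2 minutos e 1 segundo'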
djgagne/hagelslag
hagelslag/data/HREFv2ModelGrid.py
ModelGrid.format_grib_name
def format_grib_name(self, selected_variable): """ Assigns name to grib2 message number with name 'unknown'. Names based on NOAA grib2 abbreviations. Args: selected_variable(str): name of selected variable for loading Names: 3: LCDC: Low Cloud Cover 4: MCDC: Medium Cloud Cover 5: HCDC: High Cloud Cover 197: RETOP: Echo Top 198: MAXREF: Hourly Maximum of Simulated Reflectivity at 1 km AGL 199: MXUPHL: Hourly Maximum of Updraft Helicity over Layer 2km to 5 km AGL, and 0km to 3km AGL examples: 'MXUPHL_5000' or 'MXUPHL_3000' 200: MNUPHL: Hourly Minimum of Updraft Helicity at same levels of MXUPHL examples: 'MNUPHL_5000' or 'MNUPHL_3000' 220: MAXUVV: Hourly Maximum of Upward Vertical Velocity in the lowest 400hPa 221: MAXDVV: Hourly Maximum of Downward Vertical Velocity in the lowest 400hPa 222: MAXUW: U Component of Hourly Maximum 10m Wind Speed 223: MAXVW: V Component of Hourly Maximum 10m Wind Speed Returns: Given an unknown string name of a variable, returns the grib2 message Id and units of the variable, based on the self.unknown_names and self.unknown_units dictionaries above. Allows access of data values of unknown variable name, given the ID. """ names = self.unknown_names units = self.unknown_units for key, value in names.items(): if selected_variable == value: Id = key u = units[key] return Id, u
python
def format_grib_name(self, selected_variable): """ Assigns name to grib2 message number with name 'unknown'. Names based on NOAA grib2 abbreviations. Args: selected_variable(str): name of selected variable for loading Names: 3: LCDC: Low Cloud Cover 4: MCDC: Medium Cloud Cover 5: HCDC: High Cloud Cover 197: RETOP: Echo Top 198: MAXREF: Hourly Maximum of Simulated Reflectivity at 1 km AGL 199: MXUPHL: Hourly Maximum of Updraft Helicity over Layer 2km to 5 km AGL, and 0km to 3km AGL examples: 'MXUPHL_5000' or 'MXUPHL_3000' 200: MNUPHL: Hourly Minimum of Updraft Helicity at same levels of MXUPHL examples: 'MNUPHL_5000' or 'MNUPHL_3000' 220: MAXUVV: Hourly Maximum of Upward Vertical Velocity in the lowest 400hPa 221: MAXDVV: Hourly Maximum of Downward Vertical Velocity in the lowest 400hPa 222: MAXUW: U Component of Hourly Maximum 10m Wind Speed 223: MAXVW: V Component of Hourly Maximum 10m Wind Speed Returns: Given an unknown string name of a variable, returns the grib2 message Id and units of the variable, based on the self.unknown_names and self.unknown_units dictionaries above. Allows access of data values of unknown variable name, given the ID. """ names = self.unknown_names units = self.unknown_units for key, value in names.items(): if selected_variable == value: Id = key u = units[key] return Id, u
[ "def", "format_grib_name", "(", "self", ",", "selected_variable", ")", ":", "names", "=", "self", ".", "unknown_names", "units", "=", "self", ".", "unknown_units", "for", "key", ",", "value", "in", "names", ".", "items", "(", ")", ":", "if", "selected_variable", "==", "value", ":", "Id", "=", "key", "u", "=", "units", "[", "key", "]", "return", "Id", ",", "u" ]
Assigns name to grib2 message number with name 'unknown'. Names based on NOAA grib2 abbreviations. Args: selected_variable(str): name of selected variable for loading Names: 3: LCDC: Low Cloud Cover 4: MCDC: Medium Cloud Cover 5: HCDC: High Cloud Cover 197: RETOP: Echo Top 198: MAXREF: Hourly Maximum of Simulated Reflectivity at 1 km AGL 199: MXUPHL: Hourly Maximum of Updraft Helicity over Layer 2km to 5 km AGL, and 0km to 3km AGL examples: 'MXUPHL_5000' or 'MXUPHL_3000' 200: MNUPHL: Hourly Minimum of Updraft Helicity at same levels of MXUPHL examples: 'MNUPHL_5000' or 'MNUPHL_3000' 220: MAXUVV: Hourly Maximum of Upward Vertical Velocity in the lowest 400hPa 221: MAXDVV: Hourly Maximum of Downward Vertical Velocity in the lowest 400hPa 222: MAXUW: U Component of Hourly Maximum 10m Wind Speed 223: MAXVW: V Component of Hourly Maximum 10m Wind Speed Returns: Given an unknown string name of a variable, returns the grib2 message Id and units of the variable, based on the self.unknown_names and self.unknown_units dictionaries above. Allows access of data values of unknown variable name, given the ID.
[ "Assigns", "name", "to", "grib2", "message", "number", "with", "name", "unknown", ".", "Names", "based", "on", "NOAA", "grib2", "abbreviations", ".", "Args", ":", "selected_variable", "(", "str", ")", ":", "name", "of", "selected", "variable", "for", "loading", "Names", ":", "3", ":", "LCDC", ":", "Low", "Cloud", "Cover", "4", ":", "MCDC", ":", "Medium", "Cloud", "Cover", "5", ":", "HCDC", ":", "High", "Cloud", "Cover", "197", ":", "RETOP", ":", "Echo", "Top", "198", ":", "MAXREF", ":", "Hourly", "Maximum", "of", "Simulated", "Reflectivity", "at", "1", "km", "AGL", "199", ":", "MXUPHL", ":", "Hourly", "Maximum", "of", "Updraft", "Helicity", "over", "Layer", "2km", "to", "5", "km", "AGL", "and", "0km", "to", "3km", "AGL", "examples", ":", "MXUPHL_5000", "or", "MXUPHL_3000", "200", ":", "MNUPHL", ":", "Hourly", "Minimum", "of", "Updraft", "Helicity", "at", "same", "levels", "of", "MXUPHL", "examples", ":", "MNUPHL_5000", "or", "MNUPHL_3000", "220", ":", "MAXUVV", ":", "Hourly", "Maximum", "of", "Upward", "Vertical", "Velocity", "in", "the", "lowest", "400hPa", "221", ":", "MAXDVV", ":", "Hourly", "Maximum", "of", "Downward", "Vertical", "Velocity", "in", "the", "lowest", "400hPa", "222", ":", "MAXUW", ":", "U", "Component", "of", "Hourly", "Maximum", "10m", "Wind", "Speed", "223", ":", "MAXVW", ":", "V", "Component", "of", "Hourly", "Maximum", "10m", "Wind", "Speed", "Returns", ":", "Given", "an", "uknown", "string", "name", "of", "a", "variable", "returns", "the", "grib2", "message", "Id", "and", "units", "of", "the", "variable", "based", "on", "the", "self", ".", "unknown_name", "and", "self", ".", "unknown_units", "dictonaries", "above", ".", "Allows", "access", "of", "data", "values", "of", "unknown", "variable", "name", "given", "the", "ID", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/HREFv2ModelGrid.py#L70-L101
train
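The reverse lookup in ``format_grib_name`` above scans the id-to-name table for a matching value and returns the message id with its units. The same pattern on a tiny stand-in mapping (the ids and units here are illustrative, not the module's actual tables):

>>> unknown_names = {198: 'MAXREF', 199: 'MXUPHL'}
>>> unknown_units = {198: 'dB', 199: 'm**2 s**-2'}
>>> next((k, unknown_units[k]) for k, v in unknown_names.items() if v == 'MXUPHL')
(199, 'm**2 s**-2')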
djgagne/hagelslag
hagelslag/data/HREFv2ModelGrid.py
ModelGrid.load_data
def load_data(self): """ Loads data from grib2 file objects or list of grib2 file objects. Handles specific grib2 variable names and grib2 message numbers. Returns: Array of data loaded from files in (time, y, x) dimensions, Units """ file_objects = self.file_objects var = self.variable valid_date = self.valid_dates data = self.data unknown_names = self.unknown_names unknown_units = self.unknown_units member = self.member lat = self.lat lon = self.lon if self.sector_ind_path: inds_file = pd.read_csv(self.sector_ind_path+'sector_data_indices.csv') inds = inds_file.loc[:,'indices'] out_x = self.mapping_data["x"] if not file_objects: print() print("No {0} model runs on {1}".format(member,self.run_date)) print() units = None return self.data, units for f, file in enumerate(file_objects): grib = pygrib.open(file) if type(var) is int: data_values = grib[var].values #lat, lon = grib[var].latlons() #proj = Proj(grib[var].projparams) if grib[var].units == 'unknown': Id = grib[var].parameterNumber units = self.unknown_units[Id] else: units = grib[var].units elif type(var) is str: if '_' in var: variable = var.split('_')[0] level = int(var.split('_')[1]) if variable in unknown_names.values(): Id, units = self.format_grib_name(variable) data_values = grib.select(parameterNumber=Id, level=level)[0].values #lat, lon = grib.select(parameterNumber=Id, level=level)[0].latlons() #proj = Proj(grib.select(parameterNumber=Id, level=level)[0].projparams) else: data_values = grib.select(name=variable, level=level)[0].values units = grib.select(name=variable, level=level)[0].units #lat, lon = grib.select(name=variable, level=level)[0].latlons() #proj = Proj(grib.select(name=variable, level=level)[0].projparams) else: if var in unknown_names.values(): Id, units = self.format_grib_name(var) data_values = grib.select(parameterNumber=Id)[0].values #lat, lon = grib.select(parameterNumber=Id)[0].latlons() #proj = Proj(grib.select(parameterNumber=Id)[0].projparams) elif len(grib.select(name=var)) > 1: raise NameError("Multiple '{0}' records found. Rename with level:'{0}_level'".format(var)) else: data_values = grib.select(name=var)[0].values units = grib.select(name=var)[0].units #lat, lon = grib.select(name=var)[0].latlons() #proj = Proj(grib.select(name=var)[0].projparams) if data is None: data = np.empty((len(valid_date), out_x.shape[0], out_x.shape[1]), dtype=float) if self.sector_ind_path: data[f] = data_values[:].flatten()[inds].reshape(out_x.shape) else: data[f]=data_values[:] else: if self.sector_ind_path: data[f] = data_values[:].flatten()[inds].reshape(out_x.shape) else: data[f]=data_values[:] return data, units
python
def load_data(self): """ Loads data from grib2 file objects or list of grib2 file objects. Handles specific grib2 variable names and grib2 message numbers. Returns: Array of data loaded from files in (time, y, x) dimensions, Units """ file_objects = self.file_objects var = self.variable valid_date = self.valid_dates data = self.data unknown_names = self.unknown_names unknown_units = self.unknown_units member = self.member lat = self.lat lon = self.lon if self.sector_ind_path: inds_file = pd.read_csv(self.sector_ind_path+'sector_data_indices.csv') inds = inds_file.loc[:,'indices'] out_x = self.mapping_data["x"] if not file_objects: print() print("No {0} model runs on {1}".format(member,self.run_date)) print() units = None return self.data, units for f, file in enumerate(file_objects): grib = pygrib.open(file) if type(var) is int: data_values = grib[var].values #lat, lon = grib[var].latlons() #proj = Proj(grib[var].projparams) if grib[var].units == 'unknown': Id = grib[var].parameterNumber units = self.unknown_units[Id] else: units = grib[var].units elif type(var) is str: if '_' in var: variable = var.split('_')[0] level = int(var.split('_')[1]) if variable in unknown_names.values(): Id, units = self.format_grib_name(variable) data_values = grib.select(parameterNumber=Id, level=level)[0].values #lat, lon = grib.select(parameterNumber=Id, level=level)[0].latlons() #proj = Proj(grib.select(parameterNumber=Id, level=level)[0].projparams) else: data_values = grib.select(name=variable, level=level)[0].values units = grib.select(name=variable, level=level)[0].units #lat, lon = grib.select(name=variable, level=level)[0].latlons() #proj = Proj(grib.select(name=variable, level=level)[0].projparams) else: if var in unknown_names.values(): Id, units = self.format_grib_name(var) data_values = grib.select(parameterNumber=Id)[0].values #lat, lon = grib.select(parameterNumber=Id)[0].latlons() #proj = Proj(grib.select(parameterNumber=Id)[0].projparams) elif len(grib.select(name=var)) > 1: raise NameError("Multiple '{0}' records found. Rename with level:'{0}_level'".format(var)) else: data_values = grib.select(name=var)[0].values units = grib.select(name=var)[0].units #lat, lon = grib.select(name=var)[0].latlons() #proj = Proj(grib.select(name=var)[0].projparams) if data is None: data = np.empty((len(valid_date), out_x.shape[0], out_x.shape[1]), dtype=float) if self.sector_ind_path: data[f] = data_values[:].flatten()[inds].reshape(out_x.shape) else: data[f]=data_values[:] else: if self.sector_ind_path: data[f] = data_values[:].flatten()[inds].reshape(out_x.shape) else: data[f]=data_values[:] return data, units
[ "def", "load_data", "(", "self", ")", ":", "file_objects", "=", "self", ".", "file_objects", "var", "=", "self", ".", "variable", "valid_date", "=", "self", ".", "valid_dates", "data", "=", "self", ".", "data", "unknown_names", "=", "self", ".", "unknown_names", "unknown_units", "=", "self", ".", "unknown_units", "member", "=", "self", ".", "member", "lat", "=", "self", ".", "lat", "lon", "=", "self", ".", "lon", "if", "self", ".", "sector_ind_path", ":", "inds_file", "=", "pd", ".", "read_csv", "(", "self", ".", "sector_ind_path", "+", "'sector_data_indices.csv'", ")", "inds", "=", "inds_file", ".", "loc", "[", ":", ",", "'indices'", "]", "out_x", "=", "self", ".", "mapping_data", "[", "\"x\"", "]", "if", "not", "file_objects", ":", "print", "(", ")", "print", "(", "\"No {0} model runs on {1}\"", ".", "format", "(", "member", ",", "self", ".", "run_date", ")", ")", "print", "(", ")", "units", "=", "None", "return", "self", ".", "data", ",", "units", "for", "f", ",", "file", "in", "enumerate", "(", "file_objects", ")", ":", "grib", "=", "pygrib", ".", "open", "(", "file", ")", "if", "type", "(", "var", ")", "is", "int", ":", "data_values", "=", "grib", "[", "var", "]", ".", "values", "#lat, lon = grib[var].latlons()", "#proj = Proj(grib[var].projparams)", "if", "grib", "[", "var", "]", ".", "units", "==", "'unknown'", ":", "Id", "=", "grib", "[", "var", "]", ".", "parameterNumber", "units", "=", "self", ".", "unknown_units", "[", "Id", "]", "else", ":", "units", "=", "grib", "[", "var", "]", ".", "units", "elif", "type", "(", "var", ")", "is", "str", ":", "if", "'_'", "in", "var", ":", "variable", "=", "var", ".", "split", "(", "'_'", ")", "[", "0", "]", "level", "=", "int", "(", "var", ".", "split", "(", "'_'", ")", "[", "1", "]", ")", "if", "variable", "in", "unknown_names", ".", "values", "(", ")", ":", "Id", ",", "units", "=", "self", ".", "format_grib_name", "(", "variable", ")", "data_values", "=", "grib", ".", "select", "(", "parameterNumber", "=", "Id", ",", "level", "=", "level", ")", "[", "0", "]", ".", "values", "#lat, lon = grib.select(parameterNumber=Id, level=level)[0].latlons()", "#proj = Proj(grib.select(parameterNumber=Id, level=level)[0].projparams)", "else", ":", "data_values", "=", "grib", ".", "select", "(", "name", "=", "variable", ",", "level", "=", "level", ")", "[", "0", "]", ".", "values", "units", "=", "grib", ".", "select", "(", "name", "=", "variable", ",", "level", "=", "level", ")", "[", "0", "]", ".", "units", "#lat, lon = grib.select(name=variable, level=level)[0].latlons()", "#proj = Proj(grib.select(name=variable, level=level)[0].projparams)", "else", ":", "if", "var", "in", "unknown_names", ".", "values", "(", ")", ":", "Id", ",", "units", "=", "self", ".", "format_grib_name", "(", "var", ")", "data_values", "=", "grib", ".", "select", "(", "parameterNumber", "=", "Id", ")", "[", "0", "]", ".", "values", "#lat, lon = grib.select(parameterNumber=Id)[0].latlons() ", "#proj = Proj(grib.select(parameterNumber=Id)[0].projparams)", "elif", "len", "(", "grib", ".", "select", "(", "name", "=", "var", ")", ")", ">", "1", ":", "raise", "NameError", "(", "\"Multiple '{0}' records found. 
Rename with level:'{0}_level'\"", ".", "format", "(", "var", ")", ")", "else", ":", "data_values", "=", "grib", ".", "select", "(", "name", "=", "var", ")", "[", "0", "]", ".", "values", "units", "=", "grib", ".", "select", "(", "name", "=", "var", ")", "[", "0", "]", ".", "units", "#lat, lon = grib.select(name=var)[0].latlons()", "#proj = Proj(grib.select(name=var)[0].projparams)", "if", "data", "is", "None", ":", "data", "=", "np", ".", "empty", "(", "(", "len", "(", "valid_date", ")", ",", "out_x", ".", "shape", "[", "0", "]", ",", "out_x", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "float", ")", "if", "self", ".", "sector_ind_path", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", ".", "flatten", "(", ")", "[", "inds", "]", ".", "reshape", "(", "out_x", ".", "shape", ")", "else", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", "else", ":", "if", "self", ".", "sector_ind_path", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", ".", "flatten", "(", ")", "[", "inds", "]", ".", "reshape", "(", "out_x", ".", "shape", ")", "else", ":", "data", "[", "f", "]", "=", "data_values", "[", ":", "]", "return", "data", ",", "units" ]
Loads data from grib2 file objects or a list of grib2 file objects. Handles specific grib2 variable names and grib2 message numbers. Returns: Array of data loaded from files in (time, y, x) dimensions, Units
[ "Loads", "data", "from", "grib2", "file", "objects", "or", "list", "of", "grib2", "file", "objects", ".", "Handles", "specific", "grib2", "variable", "names", "and", "grib2", "message", "numbers", ".", "Returns", ":", "Array", "of", "data", "loaded", "from", "files", "in", "(", "time", "y", "x", ")", "dimensions", "Units" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/HREFv2ModelGrid.py#L103-L187
train
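The least obvious step in load_data above is the sector subsetting: a full-grid field is flattened, indexed with a precomputed list of flat indices from sector_data_indices.csv, and reshaped onto the sector grid. A minimal numpy sketch of that pattern, with made-up indices and a hypothetical 2x3 sector:

import numpy as np

full_grid = np.arange(20.0).reshape(4, 5)   # stand-in for a full-domain field
inds = np.array([6, 7, 8, 11, 12, 13])      # hypothetical flat indices of the sector points
out_x = np.empty((2, 3))                    # the sector grid defines the output shape
sector = full_grid.flatten()[inds].reshape(out_x.shape)
print(sector)                               # [[ 6.  7.  8.] [11. 12. 13.]]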
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
GridEvaluator.load_forecasts
def load_forecasts(self): """ Load the forecast files into memory. """ run_date_str = self.run_date.strftime("%Y%m%d") for model_name in self.model_names: self.raw_forecasts[model_name] = {} forecast_file = self.forecast_path + run_date_str + "/" + \ model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member, run_date_str) forecast_obj = Dataset(forecast_file) forecast_hours = forecast_obj.variables["forecast_hour"][:] valid_hour_indices = np.where((self.start_hour <= forecast_hours) & (forecast_hours <= self.end_hour))[0] for size_threshold in self.size_thresholds: self.raw_forecasts[model_name][size_threshold] = \ forecast_obj.variables["prob_hail_{0:02d}_mm".format(size_threshold)][valid_hour_indices] forecast_obj.close()
python
def load_forecasts(self): """ Load the forecast files into memory. """ run_date_str = self.run_date.strftime("%Y%m%d") for model_name in self.model_names: self.raw_forecasts[model_name] = {} forecast_file = self.forecast_path + run_date_str + "/" + \ model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member, run_date_str) forecast_obj = Dataset(forecast_file) forecast_hours = forecast_obj.variables["forecast_hour"][:] valid_hour_indices = np.where((self.start_hour <= forecast_hours) & (forecast_hours <= self.end_hour))[0] for size_threshold in self.size_thresholds: self.raw_forecasts[model_name][size_threshold] = \ forecast_obj.variables["prob_hail_{0:02d}_mm".format(size_threshold)][valid_hour_indices] forecast_obj.close()
[ "def", "load_forecasts", "(", "self", ")", ":", "run_date_str", "=", "self", ".", "run_date", ".", "strftime", "(", "\"%Y%m%d\"", ")", "for", "model_name", "in", "self", ".", "model_names", ":", "self", ".", "raw_forecasts", "[", "model_name", "]", "=", "{", "}", "forecast_file", "=", "self", ".", "forecast_path", "+", "run_date_str", "+", "\"/\"", "+", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "+", "\"_hailprobs_{0}_{1}.nc\"", ".", "format", "(", "self", ".", "ensemble_member", ",", "run_date_str", ")", "forecast_obj", "=", "Dataset", "(", "forecast_file", ")", "forecast_hours", "=", "forecast_obj", ".", "variables", "[", "\"forecast_hour\"", "]", "[", ":", "]", "valid_hour_indices", "=", "np", ".", "where", "(", "(", "self", ".", "start_hour", "<=", "forecast_hours", ")", "&", "(", "forecast_hours", "<=", "self", ".", "end_hour", ")", ")", "[", "0", "]", "for", "size_threshold", "in", "self", ".", "size_thresholds", ":", "self", ".", "raw_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "=", "forecast_obj", ".", "variables", "[", "\"prob_hail_{0:02d}_mm\"", ".", "format", "(", "size_threshold", ")", "]", "[", "valid_hour_indices", "]", "forecast_obj", ".", "close", "(", ")" ]
Load the forecast files into memory.
[ "Load", "the", "forecast", "files", "into", "memory", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L77-L92
train
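The hour filtering in load_forecasts is a compact numpy idiom: build a boolean mask over the forecast_hour coordinate and keep the indices falling inside [start_hour, end_hour]. A sketch with hypothetical lead times:

import numpy as np

forecast_hours = np.arange(1, 37)           # hypothetical hourly lead times
start_hour, end_hour = 12, 18
valid_hour_indices = np.where((start_hour <= forecast_hours) &
                              (forecast_hours <= end_hour))[0]
print(forecast_hours[valid_hour_indices])   # [12 13 14 15 16 17 18]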
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
GridEvaluator.get_window_forecasts
def get_window_forecasts(self): """ Aggregate the forecasts within the specified time windows. """ for model_name in self.model_names: self.window_forecasts[model_name] = {} for size_threshold in self.size_thresholds: self.window_forecasts[model_name][size_threshold] = \ np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0) for sl in self.hour_windows])
python
def get_window_forecasts(self): """ Aggregate the forecasts within the specified time windows. """ for model_name in self.model_names: self.window_forecasts[model_name] = {} for size_threshold in self.size_thresholds: self.window_forecasts[model_name][size_threshold] = \ np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0) for sl in self.hour_windows])
[ "def", "get_window_forecasts", "(", "self", ")", ":", "for", "model_name", "in", "self", ".", "model_names", ":", "self", ".", "window_forecasts", "[", "model_name", "]", "=", "{", "}", "for", "size_threshold", "in", "self", ".", "size_thresholds", ":", "self", ".", "window_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "=", "np", ".", "array", "(", "[", "self", ".", "raw_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "[", "sl", "]", ".", "sum", "(", "axis", "=", "0", ")", "for", "sl", "in", "self", ".", "hour_windows", "]", ")" ]
Aggregate the forecasts within the specified time windows.
[ "Aggregate", "the", "forecasts", "within", "the", "specified", "time", "windows", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L94-L103
train
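get_window_forecasts collapses the hourly fields to one field per window by summing along the time axis over each slice in hour_windows; note that a sum of hourly probabilities is an aggregate score rather than a calibrated probability. A toy version with slice objects standing in for the hour windows:

import numpy as np

raw = np.random.random((6, 4, 4))           # (hour, y, x) hourly probability fields
hour_windows = [slice(0, 3), slice(3, 6)]   # two 3-hour windows
window_forecasts = np.array([raw[sl].sum(axis=0) for sl in hour_windows])
print(window_forecasts.shape)               # (2, 4, 4)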
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
GridEvaluator.load_obs
def load_obs(self, mask_threshold=0.5):
    """
    Loads observations and masking grid (if needed).

    :param mask_threshold: Values greater than or equal to the threshold are kept, others are masked.
    :return:
    """
    start_date = self.run_date + timedelta(hours=self.start_hour)
    end_date = self.run_date + timedelta(hours=self.end_hour)
    mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
    mrms_grid.load_data()
    if len(mrms_grid.data) > 0:
        self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
        self.window_obs[self.mrms_variable] = np.array([self.raw_obs[self.mrms_variable][sl].max(axis=0)
                                                        for sl in self.hour_windows])
        if self.obs_mask:
            mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
            mask_grid.load_data()
            self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
            self.window_obs[self.mask_variable] = np.array([self.raw_obs[self.mask_variable][sl].max(axis=0)
                                                            for sl in self.hour_windows])
python
def load_obs(self, mask_threshold=0.5):
    """
    Loads observations and masking grid (if needed).

    :param mask_threshold: Values greater than or equal to the threshold are kept, others are masked.
    :return:
    """
    start_date = self.run_date + timedelta(hours=self.start_hour)
    end_date = self.run_date + timedelta(hours=self.end_hour)
    mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
    mrms_grid.load_data()
    if len(mrms_grid.data) > 0:
        self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
        self.window_obs[self.mrms_variable] = np.array([self.raw_obs[self.mrms_variable][sl].max(axis=0)
                                                        for sl in self.hour_windows])
        if self.obs_mask:
            mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
            mask_grid.load_data()
            self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
            self.window_obs[self.mask_variable] = np.array([self.raw_obs[self.mask_variable][sl].max(axis=0)
                                                            for sl in self.hour_windows])
[ "def", "load_obs", "(", "self", ",", "mask_threshold", "=", "0.5", ")", ":", "start_date", "=", "self", ".", "run_date", "+", "timedelta", "(", "hours", "=", "self", ".", "start_hour", ")", "end_date", "=", "self", ".", "run_date", "+", "timedelta", "(", "hours", "=", "self", ".", "end_hour", ")", "mrms_grid", "=", "MRMSGrid", "(", "start_date", ",", "end_date", ",", "self", ".", "mrms_variable", ",", "self", ".", "mrms_path", ")", "mrms_grid", ".", "load_data", "(", ")", "if", "len", "(", "mrms_grid", ".", "data", ")", ">", "0", ":", "self", ".", "raw_obs", "[", "self", ".", "mrms_variable", "]", "=", "np", ".", "where", "(", "mrms_grid", ".", "data", ">", "100", ",", "100", ",", "mrms_grid", ".", "data", ")", "self", ".", "window_obs", "[", "self", ".", "mrms_variable", "]", "=", "np", ".", "array", "(", "[", "self", ".", "raw_obs", "[", "self", ".", "mrms_variable", "]", "[", "sl", "]", ".", "max", "(", "axis", "=", "0", ")", "for", "sl", "in", "self", ".", "hour_windows", "]", ")", "if", "self", ".", "obs_mask", ":", "mask_grid", "=", "MRMSGrid", "(", "start_date", ",", "end_date", ",", "self", ".", "mask_variable", ",", "self", ".", "mrms_path", ")", "mask_grid", ".", "load_data", "(", ")", "self", ".", "raw_obs", "[", "self", ".", "mask_variable", "]", "=", "np", ".", "where", "(", "mask_grid", ".", "data", ">=", "mask_threshold", ",", "1", ",", "0", ")", "self", ".", "window_obs", "[", "self", ".", "mask_variable", "]", "=", "np", ".", "array", "(", "[", "self", ".", "raw_obs", "[", "self", ".", "mask_variable", "]", "[", "sl", "]", ".", "max", "(", "axis", "=", "0", ")", "for", "sl", "in", "self", ".", "hour_windows", "]", ")" ]
Loads observations and masking grid (if needed). :param mask_threshold: Values greater than or equal to the threshold are kept, others are masked. :return:
[ "Loads", "observations", "and", "masking", "grid", "(", "if", "needed", ")", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L105-L125
train
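Two details in load_obs are easy to miss: MESH values are capped at 100 mm with np.where, and each window is reduced with max rather than sum because the observations are physical sizes, not probabilities. A sketch of both idioms on toy arrays:

import numpy as np

mrms = np.array([[5.0, 250.0], [80.0, 120.0]])
capped = np.where(mrms > 100, 100, mrms)    # cap unphysically large MESH values
print(capped)                               # [[  5. 100.] [ 80. 100.]]

mask = np.array([[0.2, 0.7], [0.9, 0.1]])
print(np.where(mask >= 0.5, 1, 0))          # binarize the masking grid: [[0 1] [1 0]]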
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
GridEvaluator.dilate_obs
def dilate_obs(self, dilation_radius): """ Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return: """ for s in self.size_thresholds: self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape) for t in range(self.dilated_obs[s].shape[0]): self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s, iterations=dilation_radius)] = 1
python
def dilate_obs(self, dilation_radius): """ Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return: """ for s in self.size_thresholds: self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape) for t in range(self.dilated_obs[s].shape[0]): self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s, iterations=dilation_radius)] = 1
[ "def", "dilate_obs", "(", "self", ",", "dilation_radius", ")", ":", "for", "s", "in", "self", ".", "size_thresholds", ":", "self", ".", "dilated_obs", "[", "s", "]", "=", "np", ".", "zeros", "(", "self", ".", "window_obs", "[", "self", ".", "mrms_variable", "]", ".", "shape", ")", "for", "t", "in", "range", "(", "self", ".", "dilated_obs", "[", "s", "]", ".", "shape", "[", "0", "]", ")", ":", "self", ".", "dilated_obs", "[", "s", "]", "[", "t", "]", "[", "binary_dilation", "(", "self", ".", "window_obs", "[", "self", ".", "mrms_variable", "]", "[", "t", "]", ">=", "s", ",", "iterations", "=", "dilation_radius", ")", "]", "=", "1" ]
Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return:
[ "Use", "a", "dilation", "filter", "to", "grow", "positive", "observation", "areas", "by", "a", "specified", "number", "of", "grid", "points" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L127-L137
train
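dilate_obs leans on scipy.ndimage.binary_dilation: each iteration grows the positive region by one grid point using the default cross-shaped structuring element, so dilation_radius is effectively a radius in grid points. A runnable sketch on a single simulated report:

import numpy as np
from scipy.ndimage import binary_dilation

obs = np.zeros((5, 5))
obs[2, 2] = 30.0                            # one 30 mm report
dilated = np.zeros(obs.shape)
dilated[binary_dilation(obs >= 25, iterations=1)] = 1
print(int(dilated.sum()))                   # 5: the report plus its four neighbors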
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
GridEvaluator.roc_curves
def roc_curves(self, prob_thresholds):
    """
    Generate ROC Curve objects for each machine learning model, size threshold, and time window.

    :param prob_thresholds: Probability thresholds for the ROC Curve
    :return: a dictionary of DistributedROC objects.
    """
    all_roc_curves = {}
    for model_name in self.model_names:
        all_roc_curves[model_name] = {}
        for size_threshold in self.size_thresholds:
            all_roc_curves[model_name][size_threshold] = {}
            for h, hour_window in enumerate(self.hour_windows):
                hour_range = (hour_window.start, hour_window.stop)
                all_roc_curves[model_name][size_threshold][hour_range] = \
                    DistributedROC(prob_thresholds, 1)
                if self.obs_mask:
                    all_roc_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h][
                            self.window_obs[self.mask_variable][h] > 0],
                        self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
                    )
                else:
                    all_roc_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h],
                        self.dilated_obs[size_threshold][h]
                    )
    return all_roc_curves
python
def roc_curves(self, prob_thresholds):
    """
    Generate ROC Curve objects for each machine learning model, size threshold, and time window.

    :param prob_thresholds: Probability thresholds for the ROC Curve
    :return: a dictionary of DistributedROC objects.
    """
    all_roc_curves = {}
    for model_name in self.model_names:
        all_roc_curves[model_name] = {}
        for size_threshold in self.size_thresholds:
            all_roc_curves[model_name][size_threshold] = {}
            for h, hour_window in enumerate(self.hour_windows):
                hour_range = (hour_window.start, hour_window.stop)
                all_roc_curves[model_name][size_threshold][hour_range] = \
                    DistributedROC(prob_thresholds, 1)
                if self.obs_mask:
                    all_roc_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h][
                            self.window_obs[self.mask_variable][h] > 0],
                        self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
                    )
                else:
                    all_roc_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h],
                        self.dilated_obs[size_threshold][h]
                    )
    return all_roc_curves
[ "def", "roc_curves", "(", "self", ",", "prob_thresholds", ")", ":", "all_roc_curves", "=", "{", "}", "for", "model_name", "in", "self", ".", "model_names", ":", "all_roc_curves", "[", "model_name", "]", "=", "{", "}", "for", "size_threshold", "in", "self", ".", "size_thresholds", ":", "all_roc_curves", "[", "model_name", "]", "[", "size_threshold", "]", "=", "{", "}", "for", "h", ",", "hour_window", "in", "enumerate", "(", "self", ".", "hour_windows", ")", ":", "hour_range", "=", "(", "hour_window", ".", "start", ",", "hour_window", ".", "stop", ")", "all_roc_curves", "[", "model_name", "]", "[", "size_threshold", "]", "[", "hour_range", "]", "=", "DistributedROC", "(", "prob_thresholds", ",", "1", ")", "if", "self", ".", "obs_mask", ":", "all_roc_curves", "[", "model_name", "]", "[", "size_threshold", "]", "[", "hour_range", "]", ".", "update", "(", "self", ".", "window_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "[", "h", "]", "[", "self", ".", "window_obs", "[", "self", ".", "mask_variable", "]", "[", "h", "]", ">", "0", "]", ",", "self", ".", "dilated_obs", "[", "size_threshold", "]", "[", "h", "]", "[", "self", ".", "window_obs", "[", "self", ".", "mask_variable", "]", "[", "h", "]", ">", "0", "]", ")", "else", ":", "all_roc_curves", "[", "model_name", "]", "[", "size_threshold", "]", "[", "hour_range", "]", ".", "update", "(", "self", ".", "window_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "[", "h", "]", ",", "self", ".", "dilated_obs", "[", "size_threshold", "]", "[", "h", "]", ")", "return", "all_roc_curves" ]
Generate ROC Curve objects for each machine learning model, size threshold, and time window. :param prob_thresholds: Probability thresholds for the ROC Curve :return: a dictionary of DistributedROC objects.
[ "Generate", "ROC", "Curve", "objects", "for", "each", "machine", "learning", "model", "size", "threshold", "and", "time", "window", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L139-L167
train
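The masking branch of roc_curves subsets the forecast grid and the dilated observation grid with the same boolean condition before calling update, so only points with, e.g., radar coverage are verified. A numpy-only sketch of that pairing (update itself receives the two matched 1-D arrays):

import numpy as np

forecasts = np.random.random((4, 4))
obs = (np.random.random((4, 4)) > 0.7).astype(int)
coverage = np.random.random((4, 4))         # stand-in for the mask variable field
valid = coverage > 0.5
masked_forecasts = forecasts[valid]         # matched 1-D samples
masked_obs = obs[valid]
assert masked_forecasts.shape == masked_obs.shape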
djgagne/hagelslag
hagelslag/evaluation/GridEvaluator.py
GridEvaluator.reliability_curves
def reliability_curves(self, prob_thresholds):
    """
    Output reliability curves for each machine learning model, size threshold, and time window.

    :param prob_thresholds: Probability thresholds for the reliability curves
    :return: a dictionary of DistributedReliability objects.
    """
    all_rel_curves = {}
    for model_name in self.model_names:
        all_rel_curves[model_name] = {}
        for size_threshold in self.size_thresholds:
            all_rel_curves[model_name][size_threshold] = {}
            for h, hour_window in enumerate(self.hour_windows):
                hour_range = (hour_window.start, hour_window.stop)
                all_rel_curves[model_name][size_threshold][hour_range] = \
                    DistributedReliability(prob_thresholds, 1)
                if self.obs_mask:
                    all_rel_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h][
                            self.window_obs[self.mask_variable][h] > 0],
                        self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
                    )
                else:
                    all_rel_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h],
                        self.dilated_obs[size_threshold][h]
                    )
    return all_rel_curves
python
def reliability_curves(self, prob_thresholds):
    """
    Output reliability curves for each machine learning model, size threshold, and time window.

    :param prob_thresholds: Probability thresholds for the reliability curves
    :return: a dictionary of DistributedReliability objects.
    """
    all_rel_curves = {}
    for model_name in self.model_names:
        all_rel_curves[model_name] = {}
        for size_threshold in self.size_thresholds:
            all_rel_curves[model_name][size_threshold] = {}
            for h, hour_window in enumerate(self.hour_windows):
                hour_range = (hour_window.start, hour_window.stop)
                all_rel_curves[model_name][size_threshold][hour_range] = \
                    DistributedReliability(prob_thresholds, 1)
                if self.obs_mask:
                    all_rel_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h][
                            self.window_obs[self.mask_variable][h] > 0],
                        self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
                    )
                else:
                    all_rel_curves[model_name][size_threshold][hour_range].update(
                        self.window_forecasts[model_name][size_threshold][h],
                        self.dilated_obs[size_threshold][h]
                    )
    return all_rel_curves
[ "def", "reliability_curves", "(", "self", ",", "prob_thresholds", ")", ":", "all_rel_curves", "=", "{", "}", "for", "model_name", "in", "self", ".", "model_names", ":", "all_rel_curves", "[", "model_name", "]", "=", "{", "}", "for", "size_threshold", "in", "self", ".", "size_thresholds", ":", "all_rel_curves", "[", "model_name", "]", "[", "size_threshold", "]", "=", "{", "}", "for", "h", ",", "hour_window", "in", "enumerate", "(", "self", ".", "hour_windows", ")", ":", "hour_range", "=", "(", "hour_window", ".", "start", ",", "hour_window", ".", "stop", ")", "all_rel_curves", "[", "model_name", "]", "[", "size_threshold", "]", "[", "hour_range", "]", "=", "DistributedReliability", "(", "prob_thresholds", ",", "1", ")", "if", "self", ".", "obs_mask", ":", "all_rel_curves", "[", "model_name", "]", "[", "size_threshold", "]", "[", "hour_range", "]", ".", "update", "(", "self", ".", "window_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "[", "h", "]", "[", "self", ".", "window_obs", "[", "self", ".", "mask_variable", "]", "[", "h", "]", ">", "0", "]", ",", "self", ".", "dilated_obs", "[", "size_threshold", "]", "[", "h", "]", "[", "self", ".", "window_obs", "[", "self", ".", "mask_variable", "]", "[", "h", "]", ">", "0", "]", ")", "else", ":", "all_rel_curves", "[", "model_name", "]", "[", "size_threshold", "]", "[", "hour_range", "]", ".", "update", "(", "self", ".", "window_forecasts", "[", "model_name", "]", "[", "size_threshold", "]", "[", "h", "]", ",", "self", ".", "dilated_obs", "[", "size_threshold", "]", "[", "h", "]", ")", "return", "all_rel_curves" ]
Output reliability curves for each machine learning model, size threshold, and time window. :param prob_thresholds: Probability thresholds for the reliability curves :return: a dictionary of DistributedReliability objects.
[ "Output", "reliability", "curves", "for", "each", "machine", "learning", "model", "size", "threshold", "and", "time", "window", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/GridEvaluator.py#L169-L197
train
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
load_map_coordinates
def load_map_coordinates(map_file):
    """
    Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.

    Args:
        map_file: Filename for the file containing coordinate information.

    Returns:
        Latitude and longitude grids as numpy arrays.
    """
    if map_file[-4:] == ".pkl":
        # Pickle files must be opened in binary mode under Python 3
        map_data = pickle.load(open(map_file, "rb"))
        lon = map_data['lon']
        lat = map_data['lat']
    else:
        map_data = Dataset(map_file)
        if "lon" in map_data.variables.keys():
            lon = map_data.variables['lon'][:]
            lat = map_data.variables['lat'][:]
        else:
            lon = map_data.variables["XLONG"][0]
            lat = map_data.variables["XLAT"][0]
    return lon, lat
python
def load_map_coordinates(map_file):
    """
    Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.

    Args:
        map_file: Filename for the file containing coordinate information.

    Returns:
        Latitude and longitude grids as numpy arrays.
    """
    if map_file[-4:] == ".pkl":
        # Pickle files must be opened in binary mode under Python 3
        map_data = pickle.load(open(map_file, "rb"))
        lon = map_data['lon']
        lat = map_data['lat']
    else:
        map_data = Dataset(map_file)
        if "lon" in map_data.variables.keys():
            lon = map_data.variables['lon'][:]
            lat = map_data.variables['lat'][:]
        else:
            lon = map_data.variables["XLONG"][0]
            lat = map_data.variables["XLAT"][0]
    return lon, lat
[ "def", "load_map_coordinates", "(", "map_file", ")", ":", "if", "map_file", "[", "-", "4", ":", "]", "==", "\".pkl\"", ":", "map_data", "=", "pickle", ".", "load", "(", "open", "(", "map_file", ")", ")", "lon", "=", "map_data", "[", "'lon'", "]", "lat", "=", "map_data", "[", "'lat'", "]", "else", ":", "map_data", "=", "Dataset", "(", "map_file", ")", "if", "\"lon\"", "in", "map_data", ".", "variables", ".", "keys", "(", ")", ":", "lon", "=", "map_data", ".", "variables", "[", "'lon'", "]", "[", ":", "]", "lat", "=", "map_data", ".", "variables", "[", "'lat'", "]", "[", ":", "]", "else", ":", "lon", "=", "map_data", ".", "variables", "[", "\"XLONG\"", "]", "[", "0", "]", "lat", "=", "map_data", ".", "variables", "[", "\"XLAT\"", "]", "[", "0", "]", "return", "lon", ",", "lat" ]
Loads map coordinates from netCDF or pickle file created by util.makeMapGrids. Args: map_file: Filename for the file containing coordinate information. Returns: Latitude and longitude grids as numpy arrays.
[ "Loads", "map", "coordinates", "from", "netCDF", "or", "pickle", "file", "created", "by", "util", ".", "makeMapGrids", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L56-L78
train
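The pickle branch of load_map_coordinates depends on the file being read in binary mode; under Python 3, pickle.load fails on a text-mode handle. A minimal sketch with a hypothetical file name, using a context manager so the handle is closed:

import pickle

with open("map_grid.pkl", "rb") as f:       # "rb" is required for pickle under Python 3
    map_data = pickle.load(f)
lon, lat = map_data["lon"], map_data["lat"]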
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
interpolate_mrms_day
def interpolate_mrms_day(start_date, variable, interp_type, mrms_path, map_filename, out_path):
    """
    For a given day, this function interpolates hourly MRMS data to a specified latitude and
    longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.

    Args:
        start_date (datetime.datetime): Date of data being interpolated
        variable (str): MRMS variable
        interp_type (str): Whether to use maximum neighbor or spline
        mrms_path (str): Path to top-level directory of MRMS GRIB2 files
        map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files
            containing latitude and longitude variables
        out_path (str): Path to location where interpolated netCDF4 files are saved.
    """
    try:
        print(start_date, variable)
        end_date = start_date + timedelta(hours=23)
        mrms = MRMSGrid(start_date, end_date, variable, mrms_path)
        if mrms.data is not None:
            if map_filename[-3:] == "map":
                mapping_data = make_proj_grids(*read_arps_map_file(map_filename))
                mrms.interpolate_to_netcdf(mapping_data['lon'], mapping_data['lat'], out_path,
                                           interp_type=interp_type)
            elif map_filename[-3:] == "txt":
                mapping_data = make_proj_grids(*read_ncar_map_file(map_filename))
                mrms.interpolate_to_netcdf(mapping_data["lon"], mapping_data["lat"], out_path,
                                           interp_type=interp_type)
            else:
                lon, lat = load_map_coordinates(map_filename)
                mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=interp_type)
    except Exception as e:
        # This exception catches any errors when run in multiprocessing, prints the stack trace,
        # and ends the process. Otherwise the process will stall.
        print(traceback.format_exc())
        raise e
python
def interpolate_mrms_day(start_date, variable, interp_type, mrms_path, map_filename, out_path):
    """
    For a given day, this function interpolates hourly MRMS data to a specified latitude and
    longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.

    Args:
        start_date (datetime.datetime): Date of data being interpolated
        variable (str): MRMS variable
        interp_type (str): Whether to use maximum neighbor or spline
        mrms_path (str): Path to top-level directory of MRMS GRIB2 files
        map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files
            containing latitude and longitude variables
        out_path (str): Path to location where interpolated netCDF4 files are saved.
    """
    try:
        print(start_date, variable)
        end_date = start_date + timedelta(hours=23)
        mrms = MRMSGrid(start_date, end_date, variable, mrms_path)
        if mrms.data is not None:
            if map_filename[-3:] == "map":
                mapping_data = make_proj_grids(*read_arps_map_file(map_filename))
                mrms.interpolate_to_netcdf(mapping_data['lon'], mapping_data['lat'], out_path,
                                           interp_type=interp_type)
            elif map_filename[-3:] == "txt":
                mapping_data = make_proj_grids(*read_ncar_map_file(map_filename))
                mrms.interpolate_to_netcdf(mapping_data["lon"], mapping_data["lat"], out_path,
                                           interp_type=interp_type)
            else:
                lon, lat = load_map_coordinates(map_filename)
                mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=interp_type)
    except Exception as e:
        # This exception catches any errors when run in multiprocessing, prints the stack trace,
        # and ends the process. Otherwise the process will stall.
        print(traceback.format_exc())
        raise e
[ "def", "interpolate_mrms_day", "(", "start_date", ",", "variable", ",", "interp_type", ",", "mrms_path", ",", "map_filename", ",", "out_path", ")", ":", "try", ":", "print", "(", "start_date", ",", "variable", ")", "end_date", "=", "start_date", "+", "timedelta", "(", "hours", "=", "23", ")", "mrms", "=", "MRMSGrid", "(", "start_date", ",", "end_date", ",", "variable", ",", "mrms_path", ")", "if", "mrms", ".", "data", "is", "not", "None", ":", "if", "map_filename", "[", "-", "3", ":", "]", "==", "\"map\"", ":", "mapping_data", "=", "make_proj_grids", "(", "*", "read_arps_map_file", "(", "map_filename", ")", ")", "mrms", ".", "interpolate_to_netcdf", "(", "mapping_data", "[", "'lon'", "]", ",", "mapping_data", "[", "'lat'", "]", ",", "out_path", ",", "interp_type", "=", "interp_type", ")", "elif", "map_filename", "[", "-", "3", ":", "]", "==", "\"txt\"", ":", "mapping_data", "=", "make_proj_grids", "(", "*", "read_ncar_map_file", "(", "map_filename", ")", ")", "mrms", ".", "interpolate_to_netcdf", "(", "mapping_data", "[", "\"lon\"", "]", ",", "mapping_data", "[", "\"lat\"", "]", ",", "out_path", ",", "interp_type", "=", "interp_type", ")", "else", ":", "lon", ",", "lat", "=", "load_map_coordinates", "(", "map_filename", ")", "mrms", ".", "interpolate_to_netcdf", "(", "lon", ",", "lat", ",", "out_path", ",", "interp_type", "=", "interp_type", ")", "except", "Exception", "as", "e", ":", "# This exception catches any errors when run in multiprocessing, prints the stack trace,", "# and ends the process. Otherwise the process will stall.", "print", "(", "traceback", ".", "format_exc", "(", ")", ")", "raise", "e" ]
For a given day, this function interpolates hourly MRMS data to a specified latitude and longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files. Args: start_date (datetime.datetime): Date of data being interpolated variable (str): MRMS variable interp_type (str): Whether to use maximum neighbor or spline mrms_path (str): Path to top-level directory of MRMS GRIB2 files map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude and longitude variables out_path (str): Path to location where interpolated netCDF4 files are saved.
[ "For", "a", "given", "day", "this", "module", "interpolates", "hourly", "MRMS", "data", "to", "a", "specified", "latitude", "and", "longitude", "grid", "and", "saves", "the", "interpolated", "grids", "to", "CF", "-", "compliant", "netCDF4", "files", ".", "Args", ":", "start_date", "(", "datetime", ".", "datetime", ")", ":", "Date", "of", "data", "being", "interpolated", "variable", "(", "str", ")", ":", "MRMS", "variable", "interp_type", "(", "str", ")", ":", "Whether", "to", "use", "maximum", "neighbor", "or", "spline", "mrms_path", "(", "str", ")", ":", "Path", "to", "top", "-", "level", "directory", "of", "MRMS", "GRIB2", "files", "map_filename", "(", "str", ")", ":", "Name", "of", "the", "map", "filename", ".", "Supports", "ARPS", "map", "file", "format", "and", "netCDF", "files", "containing", "latitude", "and", "longitude", "variables", "out_path", "(", "str", ")", ":", "Path", "to", "location", "where", "interpolated", "netCDF4", "files", "are", "saved", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L81-L113
train
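The try/except wrapper in interpolate_mrms_day exists because an exception raised inside a multiprocessing worker can otherwise surface without a stack trace or leave the pool stalled; printing traceback.format_exc() before re-raising makes the failure visible in the worker's output. The pattern generalizes; a minimal sketch with a hypothetical worker:

import traceback
from multiprocessing import Pool

def worker(day):
    try:
        return 24 / day                     # stand-in for the real per-day processing
    except Exception as e:
        print(traceback.format_exc())       # show the full trace in the worker's output
        raise e

if __name__ == "__main__":
    with Pool(2) as pool:
        print(pool.map(worker, [1, 2, 3]))  # [24.0, 12.0, 8.0]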
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.load_data
def load_data(self): """ Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. """ data = [] loaded_dates = [] loaded_indices = [] for t, timestamp in enumerate(self.all_dates): date_str = timestamp.date().strftime("%Y%m%d") full_path = self.path_start + date_str + "/" if self.variable in os.listdir(full_path): full_path += self.variable + "/" data_files = sorted(os.listdir(full_path)) file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files]) if timestamp in file_dates: data_file = data_files[np.where(timestamp==file_dates)[0][0]] print(full_path + data_file) if data_file[-2:] == "gz": subprocess.call(["gunzip", full_path + data_file]) file_obj = Nio.open_file(full_path + data_file[:-3]) else: file_obj = Nio.open_file(full_path + data_file) var_name = sorted(file_obj.variables.keys())[0] data.append(file_obj.variables[var_name][:]) if self.lon is None: self.lon = file_obj.variables["lon_0"][:] # Translates longitude values from 0:360 to -180:180 if np.count_nonzero(self.lon > 180) > 0: self.lon -= 360 self.lat = file_obj.variables["lat_0"][:] file_obj.close() if data_file[-2:] == "gz": subprocess.call(["gzip", full_path + data_file[:-3]]) else: subprocess.call(["gzip", full_path + data_file]) loaded_dates.append(timestamp) loaded_indices.append(t) if len(loaded_dates) > 0: self.loaded_dates = pd.DatetimeIndex(loaded_dates) self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999 self.data[loaded_indices] = np.array(data)
python
def load_data(self): """ Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. """ data = [] loaded_dates = [] loaded_indices = [] for t, timestamp in enumerate(self.all_dates): date_str = timestamp.date().strftime("%Y%m%d") full_path = self.path_start + date_str + "/" if self.variable in os.listdir(full_path): full_path += self.variable + "/" data_files = sorted(os.listdir(full_path)) file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files]) if timestamp in file_dates: data_file = data_files[np.where(timestamp==file_dates)[0][0]] print(full_path + data_file) if data_file[-2:] == "gz": subprocess.call(["gunzip", full_path + data_file]) file_obj = Nio.open_file(full_path + data_file[:-3]) else: file_obj = Nio.open_file(full_path + data_file) var_name = sorted(file_obj.variables.keys())[0] data.append(file_obj.variables[var_name][:]) if self.lon is None: self.lon = file_obj.variables["lon_0"][:] # Translates longitude values from 0:360 to -180:180 if np.count_nonzero(self.lon > 180) > 0: self.lon -= 360 self.lat = file_obj.variables["lat_0"][:] file_obj.close() if data_file[-2:] == "gz": subprocess.call(["gzip", full_path + data_file[:-3]]) else: subprocess.call(["gzip", full_path + data_file]) loaded_dates.append(timestamp) loaded_indices.append(t) if len(loaded_dates) > 0: self.loaded_dates = pd.DatetimeIndex(loaded_dates) self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999 self.data[loaded_indices] = np.array(data)
[ "def", "load_data", "(", "self", ")", ":", "data", "=", "[", "]", "loaded_dates", "=", "[", "]", "loaded_indices", "=", "[", "]", "for", "t", ",", "timestamp", "in", "enumerate", "(", "self", ".", "all_dates", ")", ":", "date_str", "=", "timestamp", ".", "date", "(", ")", ".", "strftime", "(", "\"%Y%m%d\"", ")", "full_path", "=", "self", ".", "path_start", "+", "date_str", "+", "\"/\"", "if", "self", ".", "variable", "in", "os", ".", "listdir", "(", "full_path", ")", ":", "full_path", "+=", "self", ".", "variable", "+", "\"/\"", "data_files", "=", "sorted", "(", "os", ".", "listdir", "(", "full_path", ")", ")", "file_dates", "=", "pd", ".", "to_datetime", "(", "[", "d", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", "0", ":", "13", "]", "for", "d", "in", "data_files", "]", ")", "if", "timestamp", "in", "file_dates", ":", "data_file", "=", "data_files", "[", "np", ".", "where", "(", "timestamp", "==", "file_dates", ")", "[", "0", "]", "[", "0", "]", "]", "print", "(", "full_path", "+", "data_file", ")", "if", "data_file", "[", "-", "2", ":", "]", "==", "\"gz\"", ":", "subprocess", ".", "call", "(", "[", "\"gunzip\"", ",", "full_path", "+", "data_file", "]", ")", "file_obj", "=", "Nio", ".", "open_file", "(", "full_path", "+", "data_file", "[", ":", "-", "3", "]", ")", "else", ":", "file_obj", "=", "Nio", ".", "open_file", "(", "full_path", "+", "data_file", ")", "var_name", "=", "sorted", "(", "file_obj", ".", "variables", ".", "keys", "(", ")", ")", "[", "0", "]", "data", ".", "append", "(", "file_obj", ".", "variables", "[", "var_name", "]", "[", ":", "]", ")", "if", "self", ".", "lon", "is", "None", ":", "self", ".", "lon", "=", "file_obj", ".", "variables", "[", "\"lon_0\"", "]", "[", ":", "]", "# Translates longitude values from 0:360 to -180:180", "if", "np", ".", "count_nonzero", "(", "self", ".", "lon", ">", "180", ")", ">", "0", ":", "self", ".", "lon", "-=", "360", "self", ".", "lat", "=", "file_obj", ".", "variables", "[", "\"lat_0\"", "]", "[", ":", "]", "file_obj", ".", "close", "(", ")", "if", "data_file", "[", "-", "2", ":", "]", "==", "\"gz\"", ":", "subprocess", ".", "call", "(", "[", "\"gzip\"", ",", "full_path", "+", "data_file", "[", ":", "-", "3", "]", "]", ")", "else", ":", "subprocess", ".", "call", "(", "[", "\"gzip\"", ",", "full_path", "+", "data_file", "]", ")", "loaded_dates", ".", "append", "(", "timestamp", ")", "loaded_indices", ".", "append", "(", "t", ")", "if", "len", "(", "loaded_dates", ")", ">", "0", ":", "self", ".", "loaded_dates", "=", "pd", ".", "DatetimeIndex", "(", "loaded_dates", ")", "self", ".", "data", "=", "np", ".", "ones", "(", "(", "self", ".", "all_dates", ".", "shape", "[", "0", "]", ",", "data", "[", "0", "]", ".", "shape", "[", "0", "]", ",", "data", "[", "0", "]", ".", "shape", "[", "1", "]", ")", ")", "*", "-", "9999", "self", ".", "data", "[", "loaded_indices", "]", "=", "np", ".", "array", "(", "data", ")" ]
Loads data from MRMS GRIB2 files and handles compression duties if files are compressed.
[ "Loads", "data", "from", "MRMS", "GRIB2", "files", "and", "handles", "compression", "duties", "if", "files", "are", "compressed", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L134-L174
train
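The bookkeeping at the end of MRMSGrid.load_data is the key pattern: the output array spans every hour in all_dates, is pre-filled with the -9999 missing flag, and only the hours actually found on disk are overwritten through their saved indices. A sketch:

import numpy as np

n_hours, ny, nx = 24, 3, 3
loaded_indices = [0, 5, 23]                 # hours that had files on disk
loaded = [np.full((ny, nx), float(h)) for h in loaded_indices]
data = np.ones((n_hours, ny, nx)) * -9999   # missing-data flag everywhere
data[loaded_indices] = np.array(loaded)     # fill only the loaded hours
print((data[1] == -9999).all(), data[5, 0, 0])  # True 5.0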
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.interpolate_grid
def interpolate_grid(self, in_lon, in_lat): """ Interpolates MRMS data to a different grid using cubic bivariate splines """ out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) for d in range(self.data.shape[0]): print("Loading ", d, self.variable, self.start_date) if self.data[d].max() > -999: step = self.data[d] step[step < 0] = 0 if self.lat[-1] < self.lat[0]: spline = RectBivariateSpline(self.lat[::-1], self.lon, step[::-1], kx=3, ky=3) else: spline = RectBivariateSpline(self.lat, self.lon, step, kx=3, ky=3) print("Evaluating", d, self.variable, self.start_date) flat_data = spline.ev(in_lat.ravel(), in_lon.ravel()) out_data[d] = flat_data.reshape(in_lon.shape) del spline else: print(d, " is missing") out_data[d] = -9999 return out_data
python
def interpolate_grid(self, in_lon, in_lat): """ Interpolates MRMS data to a different grid using cubic bivariate splines """ out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) for d in range(self.data.shape[0]): print("Loading ", d, self.variable, self.start_date) if self.data[d].max() > -999: step = self.data[d] step[step < 0] = 0 if self.lat[-1] < self.lat[0]: spline = RectBivariateSpline(self.lat[::-1], self.lon, step[::-1], kx=3, ky=3) else: spline = RectBivariateSpline(self.lat, self.lon, step, kx=3, ky=3) print("Evaluating", d, self.variable, self.start_date) flat_data = spline.ev(in_lat.ravel(), in_lon.ravel()) out_data[d] = flat_data.reshape(in_lon.shape) del spline else: print(d, " is missing") out_data[d] = -9999 return out_data
[ "def", "interpolate_grid", "(", "self", ",", "in_lon", ",", "in_lat", ")", ":", "out_data", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ",", "in_lon", ".", "shape", "[", "0", "]", ",", "in_lon", ".", "shape", "[", "1", "]", ")", ")", "for", "d", "in", "range", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ")", ":", "print", "(", "\"Loading \"", ",", "d", ",", "self", ".", "variable", ",", "self", ".", "start_date", ")", "if", "self", ".", "data", "[", "d", "]", ".", "max", "(", ")", ">", "-", "999", ":", "step", "=", "self", ".", "data", "[", "d", "]", "step", "[", "step", "<", "0", "]", "=", "0", "if", "self", ".", "lat", "[", "-", "1", "]", "<", "self", ".", "lat", "[", "0", "]", ":", "spline", "=", "RectBivariateSpline", "(", "self", ".", "lat", "[", ":", ":", "-", "1", "]", ",", "self", ".", "lon", ",", "step", "[", ":", ":", "-", "1", "]", ",", "kx", "=", "3", ",", "ky", "=", "3", ")", "else", ":", "spline", "=", "RectBivariateSpline", "(", "self", ".", "lat", ",", "self", ".", "lon", ",", "step", ",", "kx", "=", "3", ",", "ky", "=", "3", ")", "print", "(", "\"Evaluating\"", ",", "d", ",", "self", ".", "variable", ",", "self", ".", "start_date", ")", "flat_data", "=", "spline", ".", "ev", "(", "in_lat", ".", "ravel", "(", ")", ",", "in_lon", ".", "ravel", "(", ")", ")", "out_data", "[", "d", "]", "=", "flat_data", ".", "reshape", "(", "in_lon", ".", "shape", ")", "del", "spline", "else", ":", "print", "(", "d", ",", "\" is missing\"", ")", "out_data", "[", "d", "]", "=", "-", "9999", "return", "out_data" ]
Interpolates MRMS data to a different grid using cubic bivariate splines
[ "Interpolates", "MRMS", "data", "to", "a", "different", "grid", "using", "cubic", "bivariate", "splines" ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L176-L197
train
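RectBivariateSpline requires strictly ascending coordinate arrays, which is why interpolate_grid flips both the latitudes and the data rows when the source grid runs north to south. A runnable sketch of the flip-then-evaluate pattern on synthetic coordinates:

import numpy as np
from scipy.interpolate import RectBivariateSpline

lat = np.linspace(55, 20, 36)               # descending, as on many radar grids
lon = np.linspace(-130, -60, 71)
field = np.random.random((lat.size, lon.size))
spline = RectBivariateSpline(lat[::-1], lon, field[::-1], kx=3, ky=3)

in_lat = np.array([[35.2, 35.2], [35.4, 35.4]])   # hypothetical target grid
in_lon = np.array([[-97.5, -97.3], [-97.5, -97.3]])
out = spline.ev(in_lat.ravel(), in_lon.ravel()).reshape(in_lon.shape)
print(out.shape)                            # (2, 2)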
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.max_neighbor
def max_neighbor(self, in_lon, in_lat, radius=0.05): """ Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data """ out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T) out_indices = np.indices(out_data.shape[1:]) out_rows = out_indices[0].ravel() out_cols = out_indices[1].ravel() for d in range(self.data.shape[0]): nz_points = np.where(self.data[d] > 0) if len(nz_points[0]) > 0: nz_vals = self.data[d][nz_points] nz_rank = np.argsort(nz_vals) original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T) all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0) for n, neighbors in enumerate(all_neighbors): if len(neighbors) > 0: out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n] return out_data
python
def max_neighbor(self, in_lon, in_lat, radius=0.05): """ Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data """ out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1])) in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T) out_indices = np.indices(out_data.shape[1:]) out_rows = out_indices[0].ravel() out_cols = out_indices[1].ravel() for d in range(self.data.shape[0]): nz_points = np.where(self.data[d] > 0) if len(nz_points[0]) > 0: nz_vals = self.data[d][nz_points] nz_rank = np.argsort(nz_vals) original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T) all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0) for n, neighbors in enumerate(all_neighbors): if len(neighbors) > 0: out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n] return out_data
[ "def", "max_neighbor", "(", "self", ",", "in_lon", ",", "in_lat", ",", "radius", "=", "0.05", ")", ":", "out_data", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ",", "in_lon", ".", "shape", "[", "0", "]", ",", "in_lon", ".", "shape", "[", "1", "]", ")", ")", "in_tree", "=", "cKDTree", "(", "np", ".", "vstack", "(", "(", "in_lat", ".", "ravel", "(", ")", ",", "in_lon", ".", "ravel", "(", ")", ")", ")", ".", "T", ")", "out_indices", "=", "np", ".", "indices", "(", "out_data", ".", "shape", "[", "1", ":", "]", ")", "out_rows", "=", "out_indices", "[", "0", "]", ".", "ravel", "(", ")", "out_cols", "=", "out_indices", "[", "1", "]", ".", "ravel", "(", ")", "for", "d", "in", "range", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ")", ":", "nz_points", "=", "np", ".", "where", "(", "self", ".", "data", "[", "d", "]", ">", "0", ")", "if", "len", "(", "nz_points", "[", "0", "]", ")", ">", "0", ":", "nz_vals", "=", "self", ".", "data", "[", "d", "]", "[", "nz_points", "]", "nz_rank", "=", "np", ".", "argsort", "(", "nz_vals", ")", "original_points", "=", "cKDTree", "(", "np", ".", "vstack", "(", "(", "self", ".", "lat", "[", "nz_points", "[", "0", "]", "[", "nz_rank", "]", "]", ",", "self", ".", "lon", "[", "nz_points", "[", "1", "]", "[", "nz_rank", "]", "]", ")", ")", ".", "T", ")", "all_neighbors", "=", "original_points", ".", "query_ball_tree", "(", "in_tree", ",", "radius", ",", "p", "=", "2", ",", "eps", "=", "0", ")", "for", "n", ",", "neighbors", "in", "enumerate", "(", "all_neighbors", ")", ":", "if", "len", "(", "neighbors", ")", ">", "0", ":", "out_data", "[", "d", ",", "out_rows", "[", "neighbors", "]", ",", "out_cols", "[", "neighbors", "]", "]", "=", "nz_vals", "[", "nz_rank", "]", "[", "n", "]", "return", "out_data" ]
Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data
[ "Finds", "the", "largest", "value", "within", "a", "given", "radius", "of", "a", "point", "on", "the", "interpolated", "grid", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L199-L226
train
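max_neighbor hides an ordering trick: the nonzero source points are sorted ascending by value before the KD-tree query, so when several points reach the same output cell, larger values overwrite smaller ones and each cell keeps its maximum neighbor. A small sketch of the overwrite-in-rank-order idea:

import numpy as np
from scipy.spatial import cKDTree

out_pts = np.array([[35.0, -97.0]])         # one output grid point
src_pts = np.array([[35.01, -97.0], [34.99, -97.0]])
src_vals = np.array([40.0, 10.0])           # two reports near the same cell

rank = np.argsort(src_vals)                 # ascending: small values first
out_tree = cKDTree(out_pts)
src_tree = cKDTree(src_pts[rank])
out = np.zeros(1)
for n, neighbors in enumerate(src_tree.query_ball_tree(out_tree, 0.05, p=2, eps=0)):
    out[neighbors] = src_vals[rank][n]      # later (larger) values win
print(out)                                  # [40.]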
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.interpolate_to_netcdf
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00", interp_type="spline"): """ Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available. """ if interp_type == "spline": out_data = self.interpolate_grid(in_lon, in_lat) else: out_data = self.max_neighbor(in_lon, in_lat) if not os.access(out_path + self.variable, os.R_OK): try: os.mkdir(out_path + self.variable) except OSError: print(out_path + self.variable + " already created") out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable, self.start_date.strftime("%Y%m%d-%H:%M"), self.end_date.strftime("%Y%m%d-%H:%M")) out_obj = Dataset(out_file, "w") out_obj.createDimension("time", out_data.shape[0]) out_obj.createDimension("y", out_data.shape[1]) out_obj.createDimension("x", out_data.shape[2]) data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True, fill_value=-9999.0, least_significant_digit=3) data_var[:] = out_data data_var.long_name = self.variable data_var.coordinates = "latitude longitude" if "MESH" in self.variable or "QPE" in self.variable: data_var.units = "mm" elif "Reflectivity" in self.variable: data_var.units = "dBZ" elif "Rotation" in self.variable: data_var.units = "s-1" else: data_var.units = "" out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True) out_lon[:] = in_lon out_lon.units = "degrees_east" out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True) out_lat[:] = in_lat out_lat.units = "degrees_north" dates = out_obj.createVariable("time", "i8", ("time",), zlib=True) dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64) dates.long_name = "Valid date" dates.units = date_unit out_obj.Conventions="CF-1.6" out_obj.close() return
python
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00", interp_type="spline"): """ Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available. """ if interp_type == "spline": out_data = self.interpolate_grid(in_lon, in_lat) else: out_data = self.max_neighbor(in_lon, in_lat) if not os.access(out_path + self.variable, os.R_OK): try: os.mkdir(out_path + self.variable) except OSError: print(out_path + self.variable + " already created") out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable, self.start_date.strftime("%Y%m%d-%H:%M"), self.end_date.strftime("%Y%m%d-%H:%M")) out_obj = Dataset(out_file, "w") out_obj.createDimension("time", out_data.shape[0]) out_obj.createDimension("y", out_data.shape[1]) out_obj.createDimension("x", out_data.shape[2]) data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True, fill_value=-9999.0, least_significant_digit=3) data_var[:] = out_data data_var.long_name = self.variable data_var.coordinates = "latitude longitude" if "MESH" in self.variable or "QPE" in self.variable: data_var.units = "mm" elif "Reflectivity" in self.variable: data_var.units = "dBZ" elif "Rotation" in self.variable: data_var.units = "s-1" else: data_var.units = "" out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True) out_lon[:] = in_lon out_lon.units = "degrees_east" out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True) out_lat[:] = in_lat out_lat.units = "degrees_north" dates = out_obj.createVariable("time", "i8", ("time",), zlib=True) dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64) dates.long_name = "Valid date" dates.units = date_unit out_obj.Conventions="CF-1.6" out_obj.close() return
[ "def", "interpolate_to_netcdf", "(", "self", ",", "in_lon", ",", "in_lat", ",", "out_path", ",", "date_unit", "=", "\"seconds since 1970-01-01T00:00\"", ",", "interp_type", "=", "\"spline\"", ")", ":", "if", "interp_type", "==", "\"spline\"", ":", "out_data", "=", "self", ".", "interpolate_grid", "(", "in_lon", ",", "in_lat", ")", "else", ":", "out_data", "=", "self", ".", "max_neighbor", "(", "in_lon", ",", "in_lat", ")", "if", "not", "os", ".", "access", "(", "out_path", "+", "self", ".", "variable", ",", "os", ".", "R_OK", ")", ":", "try", ":", "os", ".", "mkdir", "(", "out_path", "+", "self", ".", "variable", ")", "except", "OSError", ":", "print", "(", "out_path", "+", "self", ".", "variable", "+", "\" already created\"", ")", "out_file", "=", "out_path", "+", "self", ".", "variable", "+", "\"/\"", "+", "\"{0}_{1}_{2}.nc\"", ".", "format", "(", "self", ".", "variable", ",", "self", ".", "start_date", ".", "strftime", "(", "\"%Y%m%d-%H:%M\"", ")", ",", "self", ".", "end_date", ".", "strftime", "(", "\"%Y%m%d-%H:%M\"", ")", ")", "out_obj", "=", "Dataset", "(", "out_file", ",", "\"w\"", ")", "out_obj", ".", "createDimension", "(", "\"time\"", ",", "out_data", ".", "shape", "[", "0", "]", ")", "out_obj", ".", "createDimension", "(", "\"y\"", ",", "out_data", ".", "shape", "[", "1", "]", ")", "out_obj", ".", "createDimension", "(", "\"x\"", ",", "out_data", ".", "shape", "[", "2", "]", ")", "data_var", "=", "out_obj", ".", "createVariable", "(", "self", ".", "variable", ",", "\"f4\"", ",", "(", "\"time\"", ",", "\"y\"", ",", "\"x\"", ")", ",", "zlib", "=", "True", ",", "fill_value", "=", "-", "9999.0", ",", "least_significant_digit", "=", "3", ")", "data_var", "[", ":", "]", "=", "out_data", "data_var", ".", "long_name", "=", "self", ".", "variable", "data_var", ".", "coordinates", "=", "\"latitude longitude\"", "if", "\"MESH\"", "in", "self", ".", "variable", "or", "\"QPE\"", "in", "self", ".", "variable", ":", "data_var", ".", "units", "=", "\"mm\"", "elif", "\"Reflectivity\"", "in", "self", ".", "variable", ":", "data_var", ".", "units", "=", "\"dBZ\"", "elif", "\"Rotation\"", "in", "self", ".", "variable", ":", "data_var", ".", "units", "=", "\"s-1\"", "else", ":", "data_var", ".", "units", "=", "\"\"", "out_lon", "=", "out_obj", ".", "createVariable", "(", "\"longitude\"", ",", "\"f4\"", ",", "(", "\"y\"", ",", "\"x\"", ")", ",", "zlib", "=", "True", ")", "out_lon", "[", ":", "]", "=", "in_lon", "out_lon", ".", "units", "=", "\"degrees_east\"", "out_lat", "=", "out_obj", ".", "createVariable", "(", "\"latitude\"", ",", "\"f4\"", ",", "(", "\"y\"", ",", "\"x\"", ")", ",", "zlib", "=", "True", ")", "out_lat", "[", ":", "]", "=", "in_lat", "out_lat", ".", "units", "=", "\"degrees_north\"", "dates", "=", "out_obj", ".", "createVariable", "(", "\"time\"", ",", "\"i8\"", ",", "(", "\"time\"", ",", ")", ",", "zlib", "=", "True", ")", "dates", "[", ":", "]", "=", "np", ".", "round", "(", "date2num", "(", "self", ".", "all_dates", ".", "to_pydatetime", "(", ")", ",", "date_unit", ")", ")", ".", "astype", "(", "np", ".", "int64", ")", "dates", ".", "long_name", "=", "\"Valid date\"", "dates", ".", "units", "=", "date_unit", "out_obj", ".", "Conventions", "=", "\"CF-1.6\"", "out_obj", ".", "close", "(", ")", "return" ]
Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available.
[ "Calls", "the", "interpolation", "function", "and", "then", "saves", "the", "MRMS", "data", "to", "a", "netCDF", "file", ".", "It", "will", "also", "create", "separate", "directories", "for", "each", "variable", "if", "they", "are", "not", "already", "available", "." ]
6fb6c3df90bf4867e13a97d3460b14471d107df1
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L228-L276
train
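The netCDF writing above follows a compact CF recipe: dimensions first, then a compressed data variable with a fill value and a coordinates attribute pointing at 2-D latitude/longitude auxiliaries, then global attributes. A stripped-down sketch of the core netCDF4 calls, with a hypothetical output file:

import numpy as np
from netCDF4 import Dataset

out_obj = Dataset("example.nc", "w")        # hypothetical output file
out_obj.createDimension("time", 2)
out_obj.createDimension("y", 3)
out_obj.createDimension("x", 3)
data_var = out_obj.createVariable("MESH", "f4", ("time", "y", "x"),
                                  zlib=True, fill_value=-9999.0)
data_var[:] = np.random.random((2, 3, 3))
data_var.units = "mm"
data_var.coordinates = "latitude longitude"
out_obj.Conventions = "CF-1.6"
out_obj.close()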
nion-software/nionswift
nion/swift/model/HardwareSource.py
get_data_generator_by_id
def get_data_generator_by_id(hardware_source_id, sync=True):
    """ Return a generator for data.

    :param bool sync: whether to wait for current frame to finish then collect next frame

    NOTE: a new ndarray is created for each call.
    """
    # NOTE: sync is currently unused; the returned function always waits for the
    # next frame to finish before returning its data.
    hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
    def get_last_data():
        return hardware_source.get_next_xdatas_to_finish()[0].data.copy()
    yield get_last_data
python
def get_data_generator_by_id(hardware_source_id, sync=True):
    """ Return a generator for data.

    :param bool sync: whether to wait for current frame to finish then collect next frame

    NOTE: a new ndarray is created for each call.
    """
    # NOTE: sync is currently unused; the returned function always waits for the
    # next frame to finish before returning its data.
    hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
    def get_last_data():
        return hardware_source.get_next_xdatas_to_finish()[0].data.copy()
    yield get_last_data
[ "def", "get_data_generator_by_id", "(", "hardware_source_id", ",", "sync", "=", "True", ")", ":", "hardware_source", "=", "HardwareSourceManager", "(", ")", ".", "get_hardware_source_for_hardware_source_id", "(", "hardware_source_id", ")", "def", "get_last_data", "(", ")", ":", "return", "hardware_source", ".", "get_next_xdatas_to_finish", "(", ")", "[", "0", "]", ".", "data", ".", "copy", "(", ")", "yield", "get_last_data" ]
Return a generator for data. :param bool sync: whether to wait for current frame to finish then collect next frame NOTE: a new ndarray is created for each call.
[ "Return", "a", "generator", "for", "data", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1053-L1064
train
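get_data_generator_by_id is an unusual generator: it yields exactly once, and the yielded value is a closure, so callers typically unpack it with next() and then call the returned function repeatedly. A generic sketch of that single-yield pattern:

def get_value_generator():
    state = {"n": 0}
    def get_next_value():
        state["n"] += 1                     # stand-in for grabbing the next frame
        return state["n"]
    yield get_next_value

get_next_value = next(get_value_generator())
print(get_next_value(), get_next_value())   # 1 2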
nion-software/nionswift
nion/swift/model/HardwareSource.py
parse_hardware_aliases_config_file
def parse_hardware_aliases_config_file(config_path):
    """
    Parse config file for aliases and automatically register them.

    Returns True if alias file was found and parsed (successfully or unsuccessfully).

    Returns False if alias file was not found.

    Config file is a standard .ini file with a section for each alias; each section supplies
    device, hardware_alias, and display_name entries.
    """
    if os.path.exists(config_path):
        logging.info("Parsing alias file {:s}".format(config_path))
        try:
            config = configparser.ConfigParser()
            config.read(config_path)
            for section in config.sections():
                device = config.get(section, "device")
                hardware_alias = config.get(section, "hardware_alias")
                display_name = config.get(section, "display_name")
                try:
                    logging.info("Adding alias {:s} for device {:s}, display name: {:s} ".format(hardware_alias, device, display_name))
                    HardwareSourceManager().make_instrument_alias(device, hardware_alias, _(display_name))
                except Exception as e:
                    logging.info("Error creating hardware alias {:s} for device {:s} ".format(hardware_alias, device))
                    logging.info(traceback.format_exc())
        except Exception as e:
            logging.info("Error reading alias file from: " + config_path)
            logging.info(traceback.format_exc())
        return True
    return False
python
def parse_hardware_aliases_config_file(config_path):
    """
    Parse config file for aliases and automatically register them.

    Returns True if alias file was found and parsed (successfully or unsuccessfully).

    Returns False if alias file was not found.

    Config file is a standard .ini file with a section for each alias; each section supplies
    device, hardware_alias, and display_name entries.
    """
    if os.path.exists(config_path):
        logging.info("Parsing alias file {:s}".format(config_path))
        try:
            config = configparser.ConfigParser()
            config.read(config_path)
            for section in config.sections():
                device = config.get(section, "device")
                hardware_alias = config.get(section, "hardware_alias")
                display_name = config.get(section, "display_name")
                try:
                    logging.info("Adding alias {:s} for device {:s}, display name: {:s} ".format(hardware_alias, device, display_name))
                    HardwareSourceManager().make_instrument_alias(device, hardware_alias, _(display_name))
                except Exception as e:
                    logging.info("Error creating hardware alias {:s} for device {:s} ".format(hardware_alias, device))
                    logging.info(traceback.format_exc())
        except Exception as e:
            logging.info("Error reading alias file from: " + config_path)
            logging.info(traceback.format_exc())
        return True
    return False
[ "def", "parse_hardware_aliases_config_file", "(", "config_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "logging", ".", "info", "(", "\"Parsing alias file {:s}\"", ".", "format", "(", "config_path", ")", ")", "try", ":", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "config_path", ")", "for", "section", "in", "config", ".", "sections", "(", ")", ":", "device", "=", "config", ".", "get", "(", "section", ",", "\"device\"", ")", "hardware_alias", "=", "config", ".", "get", "(", "section", ",", "\"hardware_alias\"", ")", "display_name", "=", "config", ".", "get", "(", "section", ",", "\"display_name\"", ")", "try", ":", "logging", ".", "info", "(", "\"Adding alias {:s} for device {:s}, display name: {:s} \"", ".", "format", "(", "hardware_alias", ",", "device", ",", "display_name", ")", ")", "HardwareSourceManager", "(", ")", ".", "make_instrument_alias", "(", "device", ",", "hardware_alias", ",", "_", "(", "display_name", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "info", "(", "\"Error creating hardware alias {:s} for device {:s} \"", ".", "format", "(", "hardware_alias", ",", "device", ")", ")", "logging", ".", "info", "(", "traceback", ".", "format_exc", "(", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "info", "(", "\"Error reading alias file from: \"", "+", "config_path", ")", "logging", ".", "info", "(", "traceback", ".", "format_exc", "(", ")", ")", "return", "True", "return", "False" ]
Parse config file for aliases and automatically register them. Returns True if alias file was found and parsed (successfully or unsuccessfully). Returns False if alias file was not found. Config file is a standard .ini file with a section
[ "Parse", "config", "file", "for", "aliases", "and", "automatically", "register", "them", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1072-L1101
train
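From the keys the parser reads (device, hardware_alias, and display_name per section), an alias file would look roughly like the sketch below; the section name and values are invented for illustration. This standalone snippet round-trips such a file through configparser the same way the function does.

```python
import configparser

# Hypothetical alias file content; section name and values are invented.
ALIASES_INI = """
[scan_controller]
device = superscan
hardware_alias = scan
display_name = Scan Controller
"""

config = configparser.ConfigParser()
config.read_string(ALIASES_INI)
for section in config.sections():
    # the same three keys parse_hardware_aliases_config_file reads
    print(config.get(section, "device"),
          config.get(section, "hardware_alias"),
          config.get(section, "display_name"))
```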
nion-software/nionswift
nion/swift/model/HardwareSource.py
HardwareSourceManager.make_instrument_alias
def make_instrument_alias(self, instrument_id, alias_instrument_id, display_name): """ Configure an alias. Callers can use the alias to refer to the instrument or hardware source. The alias should be lowercase, no spaces. The display name may be used to display the alias to the user. Neither the original instrument or hardware source id nor the alias id should ever be visible to end users. :param str instrument_id: the hardware source id (lowercase, no spaces) :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces) :param str display_name: the display name for the alias """ self.__aliases[alias_instrument_id] = (instrument_id, display_name) for f in self.aliases_updated: f()
python
def make_instrument_alias(self, instrument_id, alias_instrument_id, display_name): """ Configure an alias. Callers can use the alias to refer to the instrument or hardware source. The alias should be lowercase, no spaces. The display name may be used to display the alias to the user. Neither the original instrument or hardware source id nor the alias id should ever be visible to end users. :param str instrument_id: the hardware source id (lowercase, no spaces) :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces) :param str display_name: the display name for the alias """ self.__aliases[alias_instrument_id] = (instrument_id, display_name) for f in self.aliases_updated: f()
[ "def", "make_instrument_alias", "(", "self", ",", "instrument_id", ",", "alias_instrument_id", ",", "display_name", ")", ":", "self", ".", "__aliases", "[", "alias_instrument_id", "]", "=", "(", "instrument_id", ",", "display_name", ")", "for", "f", "in", "self", ".", "aliases_updated", ":", "f", "(", ")" ]
Configure an alias. Callers can use the alias to refer to the instrument or hardware source. The alias should be lowercase, no spaces. The display name may be used to display the alias to the user. Neither the original instrument or hardware source id nor the alias id should ever be visible to end users. :param str instrument_id: the hardware source id (lowercase, no spaces) :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces) :param str display_name: the display name for the alias
[ "Configure", "an", "alias", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L168-L182
train
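A short usage sketch with invented ids; per the docstring, ids should be lowercase with no spaces. The for f in self.aliases_updated loop in the method body suggests that aliases_updated is a plain list of zero-argument callables, so the callback registration below is an assumption about intended use.

```python
# Hypothetical ids and display name, for illustration only.
manager = HardwareSourceManager()
manager.aliases_updated.append(lambda: print("aliases changed"))
manager.make_instrument_alias("superscan", "scan", "Scan Controller")
# prints "aliases changed" once the alias is recorded
```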
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannel.update
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None: """Called from hardware source when new data arrives.""" self.__state = state self.__sub_area = sub_area hardware_source_id = self.__hardware_source.hardware_source_id channel_index = self.index channel_id = self.channel_id channel_name = self.name metadata = copy.deepcopy(data_and_metadata.metadata) hardware_source_metadata = dict() hardware_source_metadata["hardware_source_id"] = hardware_source_id hardware_source_metadata["channel_index"] = channel_index if channel_id is not None: hardware_source_metadata["reference_key"] = "_".join([hardware_source_id, channel_id]) hardware_source_metadata["channel_id"] = channel_id else: hardware_source_metadata["reference_key"] = hardware_source_id if channel_name is not None: hardware_source_metadata["channel_name"] = channel_name if view_id: hardware_source_metadata["view_id"] = view_id metadata.setdefault("hardware_source", dict()).update(hardware_source_metadata) data = data_and_metadata.data master_data = self.__data_and_metadata.data if self.__data_and_metadata else None data_matches = master_data is not None and data.shape == master_data.shape and data.dtype == master_data.dtype if data_matches and sub_area is not None: top = sub_area[0][0] bottom = sub_area[0][0] + sub_area[1][0] left = sub_area[0][1] right = sub_area[0][1] + sub_area[1][1] if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[1]: master_data = numpy.copy(master_data) master_data[top:bottom, left:right] = data[top:bottom, left:right] else: master_data = numpy.copy(data) else: master_data = data # numpy.copy(data). assume data does not need a copy. data_descriptor = data_and_metadata.data_descriptor intensity_calibration = data_and_metadata.intensity_calibration if data_and_metadata else None dimensional_calibrations = data_and_metadata.dimensional_calibrations if data_and_metadata else None timestamp = data_and_metadata.timestamp new_extended_data = DataAndMetadata.new_data_and_metadata(master_data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor) self.__data_and_metadata = new_extended_data self.data_channel_updated_event.fire(new_extended_data) self.is_dirty = True
python
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None: """Called from hardware source when new data arrives.""" self.__state = state self.__sub_area = sub_area hardware_source_id = self.__hardware_source.hardware_source_id channel_index = self.index channel_id = self.channel_id channel_name = self.name metadata = copy.deepcopy(data_and_metadata.metadata) hardware_source_metadata = dict() hardware_source_metadata["hardware_source_id"] = hardware_source_id hardware_source_metadata["channel_index"] = channel_index if channel_id is not None: hardware_source_metadata["reference_key"] = "_".join([hardware_source_id, channel_id]) hardware_source_metadata["channel_id"] = channel_id else: hardware_source_metadata["reference_key"] = hardware_source_id if channel_name is not None: hardware_source_metadata["channel_name"] = channel_name if view_id: hardware_source_metadata["view_id"] = view_id metadata.setdefault("hardware_source", dict()).update(hardware_source_metadata) data = data_and_metadata.data master_data = self.__data_and_metadata.data if self.__data_and_metadata else None data_matches = master_data is not None and data.shape == master_data.shape and data.dtype == master_data.dtype if data_matches and sub_area is not None: top = sub_area[0][0] bottom = sub_area[0][0] + sub_area[1][0] left = sub_area[0][1] right = sub_area[0][1] + sub_area[1][1] if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[1]: master_data = numpy.copy(master_data) master_data[top:bottom, left:right] = data[top:bottom, left:right] else: master_data = numpy.copy(data) else: master_data = data # numpy.copy(data). assume data does not need a copy. data_descriptor = data_and_metadata.data_descriptor intensity_calibration = data_and_metadata.intensity_calibration if data_and_metadata else None dimensional_calibrations = data_and_metadata.dimensional_calibrations if data_and_metadata else None timestamp = data_and_metadata.timestamp new_extended_data = DataAndMetadata.new_data_and_metadata(master_data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor) self.__data_and_metadata = new_extended_data self.data_channel_updated_event.fire(new_extended_data) self.is_dirty = True
[ "def", "update", "(", "self", ",", "data_and_metadata", ":", "DataAndMetadata", ".", "DataAndMetadata", ",", "state", ":", "str", ",", "sub_area", ",", "view_id", ")", "->", "None", ":", "self", ".", "__state", "=", "state", "self", ".", "__sub_area", "=", "sub_area", "hardware_source_id", "=", "self", ".", "__hardware_source", ".", "hardware_source_id", "channel_index", "=", "self", ".", "index", "channel_id", "=", "self", ".", "channel_id", "channel_name", "=", "self", ".", "name", "metadata", "=", "copy", ".", "deepcopy", "(", "data_and_metadata", ".", "metadata", ")", "hardware_source_metadata", "=", "dict", "(", ")", "hardware_source_metadata", "[", "\"hardware_source_id\"", "]", "=", "hardware_source_id", "hardware_source_metadata", "[", "\"channel_index\"", "]", "=", "channel_index", "if", "channel_id", "is", "not", "None", ":", "hardware_source_metadata", "[", "\"reference_key\"", "]", "=", "\"_\"", ".", "join", "(", "[", "hardware_source_id", ",", "channel_id", "]", ")", "hardware_source_metadata", "[", "\"channel_id\"", "]", "=", "channel_id", "else", ":", "hardware_source_metadata", "[", "\"reference_key\"", "]", "=", "hardware_source_id", "if", "channel_name", "is", "not", "None", ":", "hardware_source_metadata", "[", "\"channel_name\"", "]", "=", "channel_name", "if", "view_id", ":", "hardware_source_metadata", "[", "\"view_id\"", "]", "=", "view_id", "metadata", ".", "setdefault", "(", "\"hardware_source\"", ",", "dict", "(", ")", ")", ".", "update", "(", "hardware_source_metadata", ")", "data", "=", "data_and_metadata", ".", "data", "master_data", "=", "self", ".", "__data_and_metadata", ".", "data", "if", "self", ".", "__data_and_metadata", "else", "None", "data_matches", "=", "master_data", "is", "not", "None", "and", "data", ".", "shape", "==", "master_data", ".", "shape", "and", "data", ".", "dtype", "==", "master_data", ".", "dtype", "if", "data_matches", "and", "sub_area", "is", "not", "None", ":", "top", "=", "sub_area", "[", "0", "]", "[", "0", "]", "bottom", "=", "sub_area", "[", "0", "]", "[", "0", "]", "+", "sub_area", "[", "1", "]", "[", "0", "]", "left", "=", "sub_area", "[", "0", "]", "[", "1", "]", "right", "=", "sub_area", "[", "0", "]", "[", "1", "]", "+", "sub_area", "[", "1", "]", "[", "1", "]", "if", "top", ">", "0", "or", "left", ">", "0", "or", "bottom", "<", "data", ".", "shape", "[", "0", "]", "or", "right", "<", "data", ".", "shape", "[", "1", "]", ":", "master_data", "=", "numpy", ".", "copy", "(", "master_data", ")", "master_data", "[", "top", ":", "bottom", ",", "left", ":", "right", "]", "=", "data", "[", "top", ":", "bottom", ",", "left", ":", "right", "]", "else", ":", "master_data", "=", "numpy", ".", "copy", "(", "data", ")", "else", ":", "master_data", "=", "data", "# numpy.copy(data). 
assume data does not need a copy.", "data_descriptor", "=", "data_and_metadata", ".", "data_descriptor", "intensity_calibration", "=", "data_and_metadata", ".", "intensity_calibration", "if", "data_and_metadata", "else", "None", "dimensional_calibrations", "=", "data_and_metadata", ".", "dimensional_calibrations", "if", "data_and_metadata", "else", "None", "timestamp", "=", "data_and_metadata", ".", "timestamp", "new_extended_data", "=", "DataAndMetadata", ".", "new_data_and_metadata", "(", "master_data", ",", "intensity_calibration", "=", "intensity_calibration", ",", "dimensional_calibrations", "=", "dimensional_calibrations", ",", "metadata", "=", "metadata", ",", "timestamp", "=", "timestamp", ",", "data_descriptor", "=", "data_descriptor", ")", "self", ".", "__data_and_metadata", "=", "new_extended_data", "self", ".", "data_channel_updated_event", ".", "fire", "(", "new_extended_data", ")", "self", ".", "is_dirty", "=", "True" ]
Called from hardware source when new data arrives.
[ "Called", "from", "hardware", "source", "when", "new", "data", "arrives", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L487-L536
train
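The sub-area merge is the subtle part of DataChannel.update: sub_area is ((top, left), (height, width)), and when the incoming data matches the master array's shape and dtype, only that window is overwritten. A standalone NumPy sketch of just that step, with invented values:

```python
import numpy

# sub_area is ((top, left), (height, width)); only that window of the
# master array is replaced when shape and dtype match.
master = numpy.zeros((4, 4))
incoming = numpy.ones((4, 4))
(top, left), (height, width) = ((1, 1), (2, 2))
bottom, right = top + height, left + width
merged = numpy.copy(master)
merged[top:bottom, left:right] = incoming[top:bottom, left:right]
assert merged.sum() == 4.0   # only the 2x2 window was updated
```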
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannel.start
def start(self): """Called from hardware source when data starts streaming.""" old_start_count = self.__start_count self.__start_count += 1 if old_start_count == 0: self.data_channel_start_event.fire()
python
def start(self): """Called from hardware source when data starts streaming.""" old_start_count = self.__start_count self.__start_count += 1 if old_start_count == 0: self.data_channel_start_event.fire()
[ "def", "start", "(", "self", ")", ":", "old_start_count", "=", "self", ".", "__start_count", "self", ".", "__start_count", "+=", "1", "if", "old_start_count", "==", "0", ":", "self", ".", "data_channel_start_event", ".", "fire", "(", ")" ]
Called from hardware source when data starts streaming.
[ "Called", "from", "hardware", "source", "when", "data", "starts", "streaming", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L538-L543
train
nion-software/nionswift
nion/swift/model/HardwareSource.py
SumProcessor.connect_data_item_reference
def connect_data_item_reference(self, data_item_reference): """Connect to the data item reference, creating a crop graphic if necessary. If the data item reference does not yet have an associated data item, add a listener and wait for the data item to be set, then connect. """ display_item = data_item_reference.display_item data_item = display_item.data_item if display_item else None if data_item and display_item: self.__connect_display(display_item) else: def data_item_reference_changed(): self.__data_item_reference_changed_event_listener.close() self.connect_data_item_reference(data_item_reference) # ugh. recursive mess. self.__data_item_reference_changed_event_listener = data_item_reference.data_item_reference_changed_event.listen(data_item_reference_changed)
python
def connect_data_item_reference(self, data_item_reference): """Connect to the data item reference, creating a crop graphic if necessary. If the data item reference does not yet have an associated data item, add a listener and wait for the data item to be set, then connect. """ display_item = data_item_reference.display_item data_item = display_item.data_item if display_item else None if data_item and display_item: self.__connect_display(display_item) else: def data_item_reference_changed(): self.__data_item_reference_changed_event_listener.close() self.connect_data_item_reference(data_item_reference) # ugh. recursive mess. self.__data_item_reference_changed_event_listener = data_item_reference.data_item_reference_changed_event.listen(data_item_reference_changed)
[ "def", "connect_data_item_reference", "(", "self", ",", "data_item_reference", ")", ":", "display_item", "=", "data_item_reference", ".", "display_item", "data_item", "=", "display_item", ".", "data_item", "if", "display_item", "else", "None", "if", "data_item", "and", "display_item", ":", "self", ".", "__connect_display", "(", "display_item", ")", "else", ":", "def", "data_item_reference_changed", "(", ")", ":", "self", ".", "__data_item_reference_changed_event_listener", ".", "close", "(", ")", "self", ".", "connect_data_item_reference", "(", "data_item_reference", ")", "# ugh. recursive mess.", "self", ".", "__data_item_reference_changed_event_listener", "=", "data_item_reference", ".", "data_item_reference_changed_event", ".", "listen", "(", "data_item_reference_changed", ")" ]
Connect to the data item reference, creating a crop graphic if necessary. If the data item reference does not yet have an associated data item, add a listener and wait for the data item to be set, then connect.
[ "Connect", "to", "the", "data", "item", "reference", "creating", "a", "crop", "graphic", "if", "necessary", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1001-L1015
train
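The retry logic above is a small listen-until-ready pattern: if the reference has no data item yet, register a one-shot listener that closes itself and calls connect again when the reference changes. A self-contained sketch of that pattern with stand-in Event and Reference classes (all names invented):

```python
class Event:
    """Minimal stand-in for the event/listener objects used above."""
    def __init__(self):
        self._listeners = []

    def listen(self, fn):
        self._listeners.append(fn)
        outer = self
        class Listener:
            def close(self):
                outer._listeners.remove(fn)
        return Listener()

    def fire(self):
        for fn in list(self._listeners):
            fn()


class Reference:
    def __init__(self):
        self.target = None        # stands in for the display item
        self.changed = Event()


def connect(ref):
    if ref.target is not None:
        print("connected to", ref.target)
    else:
        def on_changed():
            listener.close()      # one-shot: detach before retrying
            connect(ref)          # recurse, as the original does
        listener = ref.changed.listen(on_changed)


ref = Reference()
connect(ref)                      # not ready yet: waits on the event
ref.target = "display"
ref.changed.fire()                # retry succeeds, prints the target
```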
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.grab_earliest
def grab_earliest(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the earliest data from the buffer, blocking until one is available.""" timeout = timeout if timeout is not None else 10.0 with self.__buffer_lock: if len(self.__buffer) == 0: done_event = threading.Event() self.__done_events.append(done_event) self.__buffer_lock.release() done = done_event.wait(timeout) self.__buffer_lock.acquire() if not done: raise Exception("Could not grab earliest.") return self.__buffer.pop(0)
python
def grab_earliest(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the earliest data from the buffer, blocking until one is available.""" timeout = timeout if timeout is not None else 10.0 with self.__buffer_lock: if len(self.__buffer) == 0: done_event = threading.Event() self.__done_events.append(done_event) self.__buffer_lock.release() done = done_event.wait(timeout) self.__buffer_lock.acquire() if not done: raise Exception("Could not grab earliest.") return self.__buffer.pop(0)
[ "def", "grab_earliest", "(", "self", ",", "timeout", ":", "float", "=", "None", ")", "->", "typing", ".", "List", "[", "DataAndMetadata", ".", "DataAndMetadata", "]", ":", "timeout", "=", "timeout", "if", "timeout", "is", "not", "None", "else", "10.0", "with", "self", ".", "__buffer_lock", ":", "if", "len", "(", "self", ".", "__buffer", ")", "==", "0", ":", "done_event", "=", "threading", ".", "Event", "(", ")", "self", ".", "__done_events", ".", "append", "(", "done_event", ")", "self", ".", "__buffer_lock", ".", "release", "(", ")", "done", "=", "done_event", ".", "wait", "(", "timeout", ")", "self", ".", "__buffer_lock", ".", "acquire", "(", ")", "if", "not", "done", ":", "raise", "Exception", "(", "\"Could not grab earliest.\"", ")", "return", "self", ".", "__buffer", ".", "pop", "(", "0", ")" ]
Grab the earliest data from the buffer, blocking until one is available.
[ "Grab", "the", "earliest", "data", "from", "the", "buffer", "blocking", "until", "one", "is", "available", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1194-L1206
train
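The interesting detail in grab_earliest is the release/re-acquire around the wait: the buffer lock is dropped while blocking on a per-caller Event so the producer can append data and signal without deadlock, then re-acquired to keep the with-block balanced. A standalone sketch of the same pattern, with all names invented:

```python
import threading

buffer, buffer_lock, done_events = [], threading.Lock(), []

def grab_earliest(timeout=10.0):
    with buffer_lock:
        if not buffer:
            done_event = threading.Event()
            done_events.append(done_event)
            buffer_lock.release()          # let the producer in
            done = done_event.wait(timeout)
            buffer_lock.acquire()          # restore the with-block invariant
            if not done:
                raise TimeoutError("no data arrived in time")
        return buffer.pop(0)

def produce(item):
    with buffer_lock:
        buffer.append(item)
        while done_events:
            done_events.pop().set()        # wake any waiting grabbers

threading.Timer(0.1, produce, args=("frame-0",)).start()
print(grab_earliest())                     # prints "frame-0"
```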
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.grab_next
def grab_next(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to finish from the buffer, blocking until one is available.""" with self.__buffer_lock: self.__buffer = list() return self.grab_latest(timeout)
python
def grab_next(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to finish from the buffer, blocking until one is available.""" with self.__buffer_lock: self.__buffer = list() return self.grab_latest(timeout)
[ "def", "grab_next", "(", "self", ",", "timeout", ":", "float", "=", "None", ")", "->", "typing", ".", "List", "[", "DataAndMetadata", ".", "DataAndMetadata", "]", ":", "with", "self", ".", "__buffer_lock", ":", "self", ".", "__buffer", "=", "list", "(", ")", "return", "self", ".", "grab_latest", "(", "timeout", ")" ]
Grab the next data to finish from the buffer, blocking until one is available.
[ "Grab", "the", "next", "data", "to", "finish", "from", "the", "buffer", "blocking", "until", "one", "is", "available", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1208-L1212
train
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.grab_following
def grab_following(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to start from the buffer, blocking until one is available.""" self.grab_next(timeout) return self.grab_next(timeout)
python
def grab_following(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to start from the buffer, blocking until one is available.""" self.grab_next(timeout) return self.grab_next(timeout)
[ "def", "grab_following", "(", "self", ",", "timeout", ":", "float", "=", "None", ")", "->", "typing", ".", "List", "[", "DataAndMetadata", ".", "DataAndMetadata", "]", ":", "self", ".", "grab_next", "(", "timeout", ")", "return", "self", ".", "grab_next", "(", "timeout", ")" ]
Grab the next data to start from the buffer, blocking until one is available.
[ "Grab", "the", "next", "data", "to", "start", "from", "the", "buffer", "blocking", "until", "one", "is", "available", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1214-L1217
train
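Taken together, the three grab methods differ only in how much buffered history they discard. A hypothetical usage against an already-started DataChannelBuffer instance (here called buffer):

```python
# Hypothetical usage; `buffer` is an assumed DataChannelBuffer instance.
oldest = buffer.grab_earliest(timeout=5.0)   # pop the oldest buffered data
newest = buffer.grab_next(timeout=5.0)       # clear the buffer, then wait
                                             # for the next data to finish
later = buffer.grab_following(timeout=5.0)   # discard one more, so the
                                             # result started after the call
```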
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.pause
def pause(self) -> None: """Pause recording. Thread safe and UI safe.""" with self.__state_lock: if self.__state == DataChannelBuffer.State.started: self.__state = DataChannelBuffer.State.paused
python
def pause(self) -> None: """Pause recording. Thread safe and UI safe.""" with self.__state_lock: if self.__state == DataChannelBuffer.State.started: self.__state = DataChannelBuffer.State.paused
[ "def", "pause", "(", "self", ")", "->", "None", ":", "with", "self", ".", "__state_lock", ":", "if", "self", ".", "__state", "==", "DataChannelBuffer", ".", "State", ".", "started", ":", "self", ".", "__state", "=", "DataChannelBuffer", ".", "State", ".", "paused" ]
Pause recording. Thread safe and UI safe.
[ "Pause", "recording", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1226-L1232
train
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.resume
def resume(self) -> None: """Resume recording after pause. Thread safe and UI safe.""" with self.__state_lock: if self.__state == DataChannelBuffer.State.paused: self.__state = DataChannelBuffer.State.started
python
def resume(self) -> None: """Resume recording after pause. Thread safe and UI safe.""" with self.__state_lock: if self.__state == DataChannelBuffer.State.paused: self.__state = DataChannelBuffer.State.started
[ "def", "resume", "(", "self", ")", "->", "None", ":", "with", "self", ".", "__state_lock", ":", "if", "self", ".", "__state", "==", "DataChannelBuffer", ".", "State", ".", "paused", ":", "self", ".", "__state", "=", "DataChannelBuffer", ".", "State", ".", "started" ]
Resume recording after pause. Thread safe and UI safe.
[ "Resume", "recording", "after", "pause", "." ]
d43693eaf057b8683b9638e575000f055fede452
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1234-L1240
train
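pause and resume are guarded transitions: each moves only between the two states it names, so pausing a stopped buffer or resuming an already-running one is a no-op. A standalone sketch of that guard; the State enum and class names are invented to mirror DataChannelBuffer.State:

```python
import enum
import threading

class State(enum.Enum):
    started = 1
    paused = 2

class Recorder:
    def __init__(self):
        self._state = State.started
        self._lock = threading.Lock()

    def pause(self):
        with self._lock:
            if self._state == State.started:
                self._state = State.paused

    def resume(self):
        with self._lock:
            if self._state == State.paused:
                self._state = State.started

r = Recorder()
r.pause(); r.pause()   # the second pause is a no-op
r.resume()
assert r._state == State.started
```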
nvictus/priority-queue-dictionary
pqdict/__init__.py
nlargest
def nlargest(n, mapping): """ Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))`` Returns ------- list of up to n keys from the mapping """ try: it = mapping.iteritems() except AttributeError: it = iter(mapping.items()) pq = minpq() try: for i in range(n): pq.additem(*next(it)) except StopIteration: pass try: while it: pq.pushpopitem(*next(it)) except StopIteration: pass out = list(pq.popkeys()) out.reverse() return out
python
def nlargest(n, mapping): """ Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))`` Returns ------- list of up to n keys from the mapping """ try: it = mapping.iteritems() except AttributeError: it = iter(mapping.items()) pq = minpq() try: for i in range(n): pq.additem(*next(it)) except StopIteration: pass try: while it: pq.pushpopitem(*next(it)) except StopIteration: pass out = list(pq.popkeys()) out.reverse() return out
[ "def", "nlargest", "(", "n", ",", "mapping", ")", ":", "try", ":", "it", "=", "mapping", ".", "iteritems", "(", ")", "except", "AttributeError", ":", "it", "=", "iter", "(", "mapping", ".", "items", "(", ")", ")", "pq", "=", "minpq", "(", ")", "try", ":", "for", "i", "in", "range", "(", "n", ")", ":", "pq", ".", "additem", "(", "*", "next", "(", "it", ")", ")", "except", "StopIteration", ":", "pass", "try", ":", "while", "it", ":", "pq", ".", "pushpopitem", "(", "*", "next", "(", "it", ")", ")", "except", "StopIteration", ":", "pass", "out", "=", "list", "(", "pq", ".", "popkeys", "(", ")", ")", "out", ".", "reverse", "(", ")", "return", "out" ]
Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))`` Returns ------- list of up to n keys from the mapping
[ "Takes", "a", "mapping", "and", "returns", "the", "n", "keys", "associated", "with", "the", "largest", "values", "in", "descending", "order", ".", "If", "the", "mapping", "has", "fewer", "than", "n", "items", "all", "its", "keys", "are", "returned", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L512-L543
train
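A usage sketch per the docstring above, assuming nlargest is importable from the pqdict package (the record's path places it in pqdict/__init__.py):

```python
from pqdict import nlargest

scores = {"a": 3, "b": 10, "c": 7, "d": 1}
print(nlargest(2, scores))   # ['b', 'c']: keys of the two largest values
print(nlargest(9, scores))   # all four keys, largest value first
```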
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.fromkeys
def fromkeys(cls, iterable, value, **kwargs): """ Return a new pqdict mapping keys from an iterable to the same value. """ return cls(((k, value) for k in iterable), **kwargs)
python
def fromkeys(cls, iterable, value, **kwargs): """ Return a new pqdict mapping keys from an iterable to the same value. """ return cls(((k, value) for k in iterable), **kwargs)
[ "def", "fromkeys", "(", "cls", ",", "iterable", ",", "value", ",", "*", "*", "kwargs", ")", ":", "return", "cls", "(", "(", "(", "k", ",", "value", ")", "for", "k", "in", "iterable", ")", ",", "*", "*", "kwargs", ")" ]
Return a new pqdict mapping keys from an iterable to the same value.
[ "Return", "a", "new", "pqdict", "mapping", "keys", "from", "an", "iterable", "to", "the", "same", "value", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L121-L126
train
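A usage sketch for the classmethod above; note that, unlike dict.fromkeys, the value argument is required here because it becomes each key's priority.

```python
from pqdict import pqdict

pq = pqdict.fromkeys(["red", "green", "blue"], 0)
print(len(pq))   # 3: every key carries the same priority, 0
```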
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.copy
def copy(self): """ Return a shallow copy of a pqdict. """ return self.__class__(self, key=self._keyfn, precedes=self._precedes)
python
def copy(self): """ Return a shallow copy of a pqdict. """ return self.__class__(self, key=self._keyfn, precedes=self._precedes)
[ "def", "copy", "(", "self", ")", ":", "return", "self", ".", "__class__", "(", "self", ",", "key", "=", "self", ".", "_keyfn", ",", "precedes", "=", "self", ".", "_precedes", ")" ]
Return a shallow copy of a pqdict.
[ "Return", "a", "shallow", "copy", "of", "a", "pqdict", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L201-L206
train
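A sketch of the shallow-copy semantics, assuming the package's default min-heap ordering: the copy gets its own heap, so later changes to one pqdict do not restructure the other.

```python
from pqdict import pqdict

pq = pqdict({"a": 1, "b": 2})
pq2 = pq.copy()
pq2["c"] = 0       # added only to the copy
print(pq2.pop())   # 'c': top of the copy (smallest value wins)
print(pq.pop())    # 'a': the original is unaffected
```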
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.pop
def pop(self, key=__marker, default=__marker): """ If ``key`` is in the pqdict, remove it and return its priority value, else return ``default``. If ``default`` is not provided and ``key`` is not in the pqdict, raise a ``KeyError``. If ``key`` is not provided, remove the top item and return its key, or raise ``KeyError`` if the pqdict is empty. """ heap = self._heap position = self._position # pq semantics: remove and return top *key* (value is discarded) if key is self.__marker: if not heap: raise KeyError('pqdict is empty') key = heap[0].key del self[key] return key # dict semantics: remove and return *value* mapped from key try: pos = position.pop(key) # raises KeyError except KeyError: if default is self.__marker: raise return default else: node_to_delete = heap[pos] end = heap.pop() if end is not node_to_delete: heap[pos] = end position[end.key] = pos self._reheapify(pos) value = node_to_delete.value del node_to_delete return value
python
def pop(self, key=__marker, default=__marker): """ If ``key`` is in the pqdict, remove it and return its priority value, else return ``default``. If ``default`` is not provided and ``key`` is not in the pqdict, raise a ``KeyError``. If ``key`` is not provided, remove the top item and return its key, or raise ``KeyError`` if the pqdict is empty. """ heap = self._heap position = self._position # pq semantics: remove and return top *key* (value is discarded) if key is self.__marker: if not heap: raise KeyError('pqdict is empty') key = heap[0].key del self[key] return key # dict semantics: remove and return *value* mapped from key try: pos = position.pop(key) # raises KeyError except KeyError: if default is self.__marker: raise return default else: node_to_delete = heap[pos] end = heap.pop() if end is not node_to_delete: heap[pos] = end position[end.key] = pos self._reheapify(pos) value = node_to_delete.value del node_to_delete return value
[ "def", "pop", "(", "self", ",", "key", "=", "__marker", ",", "default", "=", "__marker", ")", ":", "heap", "=", "self", ".", "_heap", "position", "=", "self", ".", "_position", "# pq semantics: remove and return top *key* (value is discarded)", "if", "key", "is", "self", ".", "__marker", ":", "if", "not", "heap", ":", "raise", "KeyError", "(", "'pqdict is empty'", ")", "key", "=", "heap", "[", "0", "]", ".", "key", "del", "self", "[", "key", "]", "return", "key", "# dict semantics: remove and return *value* mapped from key", "try", ":", "pos", "=", "position", ".", "pop", "(", "key", ")", "# raises KeyError", "except", "KeyError", ":", "if", "default", "is", "self", ".", "__marker", ":", "raise", "return", "default", "else", ":", "node_to_delete", "=", "heap", "[", "pos", "]", "end", "=", "heap", ".", "pop", "(", ")", "if", "end", "is", "not", "node_to_delete", ":", "heap", "[", "pos", "]", "=", "end", "position", "[", "end", ".", "key", "]", "=", "pos", "self", ".", "_reheapify", "(", "pos", ")", "value", "=", "node_to_delete", ".", "value", "del", "node_to_delete", "return", "value" ]
If ``key`` is in the pqdict, remove it and return its priority value, else return ``default``. If ``default`` is not provided and ``key`` is not in the pqdict, raise a ``KeyError``. If ``key`` is not provided, remove the top item and return its key, or raise ``KeyError`` if the pqdict is empty.
[ "If", "key", "is", "in", "the", "pqdict", "remove", "it", "and", "return", "its", "priority", "value", "else", "return", "default", ".", "If", "default", "is", "not", "provided", "and", "key", "is", "not", "in", "the", "pqdict", "raise", "a", "KeyError", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L208-L243
train
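The method above deliberately mixes dict and priority-queue semantics depending on whether a key is supplied. A short sketch of the three paths described in its docstring:

```python
from pqdict import pqdict

pq = pqdict({"a": 5, "b": 1})
print(pq.pop("a"))        # 5: dict semantics, the value mapped to the key
print(pq.pop())           # 'b': pq semantics, the key of the top item
print(pq.pop("zzz", -1))  # -1: missing key with a default, no KeyError
```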
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.popitem
def popitem(self): """ Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """ heap = self._heap position = self._position try: end = heap.pop(-1) except IndexError: raise KeyError('pqdict is empty') if heap: node = heap[0] heap[0] = end position[end.key] = 0 self._sink(0) else: node = end del position[node.key] return node.key, node.value
python
def popitem(self): """ Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """ heap = self._heap position = self._position try: end = heap.pop(-1) except IndexError: raise KeyError('pqdict is empty') if heap: node = heap[0] heap[0] = end position[end.key] = 0 self._sink(0) else: node = end del position[node.key] return node.key, node.value
[ "def", "popitem", "(", "self", ")", ":", "heap", "=", "self", ".", "_heap", "position", "=", "self", ".", "_position", "try", ":", "end", "=", "heap", ".", "pop", "(", "-", "1", ")", "except", "IndexError", ":", "raise", "KeyError", "(", "'pqdict is empty'", ")", "if", "heap", ":", "node", "=", "heap", "[", "0", "]", "heap", "[", "0", "]", "=", "end", "position", "[", "end", ".", "key", "]", "=", "0", "self", ".", "_sink", "(", "0", ")", "else", ":", "node", "=", "end", "del", "position", "[", "node", ".", "key", "]", "return", "node", ".", "key", ",", "node", ".", "value" ]
Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty.
[ "Remove", "and", "return", "the", "item", "with", "highest", "priority", ".", "Raises", "KeyError", "if", "pqdict", "is", "empty", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L260-L282
train
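Draining a pqdict with popitem yields (key, value) pairs in priority order; a sketch assuming the default min-heap ordering:

```python
from pqdict import pqdict

pq = pqdict({"a": 5, "b": 1, "c": 3})
while pq:
    print(pq.popitem())   # ('b', 1) then ('c', 3) then ('a', 5)
```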
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.topitem
def topitem(self): """ Return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """ try: node = self._heap[0] except IndexError: raise KeyError('pqdict is empty') return node.key, node.value
python
def topitem(self): """ Return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """ try: node = self._heap[0] except IndexError: raise KeyError('pqdict is empty') return node.key, node.value
[ "def", "topitem", "(", "self", ")", ":", "try", ":", "node", "=", "self", ".", "_heap", "[", "0", "]", "except", "IndexError", ":", "raise", "KeyError", "(", "'pqdict is empty'", ")", "return", "node", ".", "key", ",", "node", ".", "value" ]
Return the item with highest priority. Raises ``KeyError`` if pqdict is empty.
[ "Return", "the", "item", "with", "highest", "priority", ".", "Raises", "KeyError", "if", "pqdict", "is", "empty", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L284-L294
train
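topitem is the non-destructive peek counterpart to popitem:

```python
from pqdict import pqdict

pq = pqdict({"a": 5, "b": 1})
print(pq.topitem())   # ('b', 1): peek at the top item
print(len(pq))        # 2: nothing was removed
```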
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.additem
def additem(self, key, value): """ Add a new item. Raises ``KeyError`` if key is already in the pqdict. """ if key in self._position: raise KeyError('%s is already in the queue' % repr(key)) self[key] = value
python
def additem(self, key, value): """ Add a new item. Raises ``KeyError`` if key is already in the pqdict. """ if key in self._position: raise KeyError('%s is already in the queue' % repr(key)) self[key] = value
[ "def", "additem", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "self", ".", "_position", ":", "raise", "KeyError", "(", "'%s is already in the queue'", "%", "repr", "(", "key", ")", ")", "self", "[", "key", "]", "=", "value" ]
Add a new item. Raises ``KeyError`` if key is already in the pqdict.
[ "Add", "a", "new", "item", ".", "Raises", "KeyError", "if", "key", "is", "already", "in", "the", "pqdict", "." ]
577f9d3086058bec0e49cc2050dd9454b788d93b
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L296-L303
train
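additem is the insert-only variant of item assignment: it refuses to overwrite an existing key, whereas plain pq[key] = value inserts or updates. A short sketch:

```python
from pqdict import pqdict

pq = pqdict({"a": 1})
pq.additem("b", 2)        # fine: "b" is new
try:
    pq.additem("a", 9)    # KeyError: "a" is already in the queue
except KeyError as exc:
    print(exc)
pq["a"] = 9               # plain assignment updates the priority instead
```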