Return which inputs of this operation are variable (i.e. depend on the model inputs).
def _variable_inputs(self, op):
    """ Return which inputs of this operation are variable (i.e. depend on the model inputs).
    """
    if op.name not in self._vinputs:
        self._vinputs[op.name] = np.array(
            [t.op in self.between_ops or t in self.model_inputs for t in op.inputs]
        )
    return self._vinputs[op.name]
Get the SHAP value computation graph for a given model output.
def phi_symbolic(self, i):
    """ Get the SHAP value computation graph for a given model output.
    """
    if self.phi_symbolics[i] is None:

        # replace the gradients for all the non-linear activations
        # we do this by hacking our way into the registry (TODO: find a public API for this if it exists)
        reg = tf_ops._gradient_registry._registry
        for n in op_handlers:
            if n in reg:
                self.orig_grads[n] = reg[n]["type"]
                if op_handlers[n] is not passthrough:
                    reg[n]["type"] = self.custom_grad
            elif n in self.used_types:
                raise Exception(n + " was used in the model but is not in the gradient registry!")

        # In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped,
        # but unfortunately that includes the index of embedding layers, so we disable that check here
        if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
            orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
            tf_gradients_impl._IsBackpropagatable = lambda tensor: True

        # define the computation graph for the attribution values using a custom gradient-like computation
        try:
            out = self.model_output[:, i] if self.multi_output else self.model_output
            self.phi_symbolics[i] = tf.gradients(out, self.model_inputs)
        finally:
            # reinstate the backpropagatable check
            if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
                tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable

            # restore the original gradient definitions
            for n in op_handlers:
                if n in reg:
                    reg[n]["type"] = self.orig_grads[n]
    return self.phi_symbolics[i]
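The registry override above is the core trick here: temporarily swap each non-linear op's registered gradient for a custom one, build tf.gradients, then restore. A minimal standalone sketch of the same pattern, assuming a TF1-style graph; my_relu_grad is a hypothetical replacement gradient:

import tensorflow as tf
from tensorflow.python.framework import ops as tf_ops

def my_relu_grad(op, grad):
    # hypothetical replacement: a straight-through gradient that ignores the ReLU mask
    return grad

reg = tf_ops._gradient_registry._registry  # private API; layout may change between TF versions
orig = reg["Relu"]["type"]
reg["Relu"]["type"] = my_relu_grad
try:
    pass  # ... build tf.gradients(ys, xs) here; any Relu op now uses my_relu_grad ...
finally:
    reg["Relu"]["type"] = orig  # always restore the original gradient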
Runs the model while also setting the learning phase flags to False.
def run(self, out, model_inputs, X):
    """ Runs the model while also setting the learning phase flags to False.
    """
    feed_dict = dict(zip(model_inputs, X))
    for t in self.learning_phase_flags:
        feed_dict[t] = False
    return self.session.run(out, feed_dict)
Passes a gradient op creation request to the correct handler.
def custom_grad(self, op, *grads):
    """ Passes a gradient op creation request to the correct handler.
    """
    return op_handlers[op.type](self, op, *grads)
Use ssh to run the experiments on remote machines in parallel. Parameters ---------- experiments : iterable Output of shap.benchmark.experiments(...). thread_hosts : list of strings Each host has the format "host_name:path_to_python_binary" and can appear multiple times in the list (one for each parallel execution you want on that machine). rate_limit : int How many ssh connections we make per minute to each host (to avoid throttling issues).
def run_remote_experiments(experiments, thread_hosts, rate_limit=10):
    """ Use ssh to run the experiments on remote machines in parallel.

    Parameters
    ----------
    experiments : iterable
        Output of shap.benchmark.experiments(...).

    thread_hosts : list of strings
        Each host has the format "host_name:path_to_python_binary" and can appear multiple times
        in the list (one for each parallel execution you want on that machine).

    rate_limit : int
        How many ssh connections we make per minute to each host (to avoid throttling issues).
    """
    global ssh_conn_per_min_limit
    ssh_conn_per_min_limit = rate_limit

    # first we kill any remaining workers from previous runs
    # note we don't check_call because pkill kills our ssh call as well
    thread_hosts = copy.copy(thread_hosts)
    random.shuffle(thread_hosts)
    for host in set(thread_hosts):
        hostname, _ = host.split(":")
        try:
            subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
        except subprocess.TimeoutExpired:
            print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
            return

    experiments = copy.copy(list(experiments))
    random.shuffle(experiments)  # this way all the hard experiments don't get put on one machine

    global nexperiments, total_sent, total_done, total_failed, host_records
    nexperiments = len(experiments)
    total_sent = 0
    total_done = 0
    total_failed = 0
    host_records = {}

    q = Queue()
    for host in thread_hosts:
        worker = Thread(target=__thread_worker, args=(q, host))
        worker.daemon = True
        worker.start()

    for experiment in experiments:
        q.put(experiment)
    q.join()
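A hypothetical invocation sketch; the host names and Python paths are illustrative, and the arguments to experiments(...) depend on your benchmark setup. Listing a host twice runs two workers on it in parallel:

import shap

experiments = shap.benchmark.experiments(...)  # as in the docstring above
thread_hosts = [
    "gpu1:/home/user/anaconda3/bin/python",  # listed twice -> two parallel workers on gpu1
    "gpu1:/home/user/anaconda3/bin/python",
    "gpu2:/home/user/anaconda3/bin/python",
]
shap.benchmark.run_remote_experiments(experiments, thread_hosts, rate_limit=10)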
Create a SHAP monitoring plot. (Note this function is preliminary and subject to change!!) A SHAP monitoring plot is meant to display the behavior of a model over time. Often the shap_values given to this plot explain the loss of a model, so changes in a feature's impact on the model's loss over time can help in monitoring the model's performance. Parameters ---------- ind : int Index of the feature to plot. shap_values : numpy.array Matrix of SHAP values (# samples x # features) features : numpy.array or pandas.DataFrame Matrix of feature values (# samples x # features) feature_names : list Names of the features (length # features)
def monitoring_plot(ind, shap_values, features, feature_names=None):
    """ Create a SHAP monitoring plot.

    (Note this function is preliminary and subject to change!!)

    A SHAP monitoring plot is meant to display the behavior of a model
    over time. Often the shap_values given to this plot explain the loss
    of a model, so changes in a feature's impact on the model's loss over
    time can help in monitoring the model's performance.

    Parameters
    ----------
    ind : int
        Index of the feature to plot.

    shap_values : numpy.array
        Matrix of SHAP values (# samples x # features)

    features : numpy.array or pandas.DataFrame
        Matrix of feature values (# samples x # features)

    feature_names : list
        Names of the features (length # features)
    """
    if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
        if feature_names is None:
            feature_names = features.columns
        features = features.values

    pl.figure(figsize=(10, 3))
    ys = shap_values[:, ind]
    xs = np.arange(len(ys))

    # run a t-test at regular intervals to look for a shift in this feature's SHAP value distribution
    pvals = []
    inc = 50
    for i in range(inc, len(ys) - inc, inc):
        stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
        pvals.append(pval)
    min_pval = np.min(pvals)
    min_pval_ind = np.argmin(pvals) * inc + inc

    # mark the most significant shift if it passes a Bonferroni-corrected 0.05 threshold
    if min_pval < 0.05 / shap_values.shape[1]:
        pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)

    pl.scatter(xs, ys, s=10, c=features[:, ind], cmap=colors.red_blue)

    pl.xlabel("Sample index")
    pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('left')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    cb = pl.colorbar()
    cb.outline.set_visible(False)
    bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
    cb.ax.set_aspect((bbox.height - 0.7) * 20)
    cb.set_label(truncate_text(feature_names[ind], 30), size=13)
    pl.show()
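A short usage sketch, assuming an already-fitted tree model named "model" and a time-ordered matrix X_ordered_by_time; per the docstring, explaining the model's loss would use an explainer configured with the loss as the output:

import shap

explainer = shap.TreeExplainer(model)                      # "model" is an assumed fitted tree model
shap_values = explainer.shap_values(X_ordered_by_time)
shap.monitoring_plot(3, shap_values, X_ordered_by_time)    # watch feature index 3 over time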
Summarize a dataset with k mean samples weighted by the number of data points they each represent. Parameters ---------- X : numpy.array or pandas.DataFrame Matrix of data samples to summarize (# samples x # features) k : int Number of means to use for approximation. round_values : bool For all i, round the ith dimension of each mean sample to match the nearest value from X[:,i]. This ensures discrete features always get a valid value. Returns ------- DenseData object.
def kmeans(X, k, round_values=True):
    """ Summarize a dataset with k mean samples weighted by the number of data points they
    each represent.

    Parameters
    ----------
    X : numpy.array or pandas.DataFrame
        Matrix of data samples to summarize (# samples x # features)

    k : int
        Number of means to use for approximation.

    round_values : bool
        For all i, round the ith dimension of each mean sample to match the nearest value
        from X[:,i]. This ensures discrete features always get a valid value.

    Returns
    -------
    DenseData object.
    """
    group_names = [str(i) for i in range(X.shape[1])]
    if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        group_names = X.columns
        X = X.values

    kmeans = KMeans(n_clusters=k, random_state=0).fit(X)

    if round_values:
        # snap each cluster center coordinate to the nearest observed value of that feature
        for i in range(k):
            for j in range(X.shape[1]):
                ind = np.argmin(np.abs(X[:, j] - kmeans.cluster_centers_[i, j]))
                kmeans.cluster_centers_[i, j] = X[ind, j]

    # weight each center by the number of points assigned to its cluster
    return DenseData(kmeans.cluster_centers_, group_names, None, 1.0 * np.bincount(kmeans.labels_))
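Typical use: summarize the background data before building a KernelExplainer, which is far cheaper than passing the full training set. model, X_train, and X_test are assumed to exist:

import shap

background = shap.kmeans(X_train, 10)                      # 10 weighted means summarize X_train
explainer = shap.KernelExplainer(model.predict, background)
shap_values = explainer.shap_values(X_test[:5])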
Estimate the SHAP values for a set of samples. Parameters ---------- X : numpy.array or pandas.DataFrame or any scipy.sparse matrix A matrix of samples (# samples x # features) on which to explain the model's output. nsamples : "auto" or int Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The "auto" setting uses `nsamples = 2 * X.shape[1] + 2048`. l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The auto option currently uses "aic" when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto" WILL CHANGE in a future version to be based on num_features instead of AIC. The "aic" and "bic" options use the AIC and BIC rules for regularization. Using "num_features(int)" selects a fixed number of top features. Passing a float directly sets the "alpha" parameter of the sklearn.linear_model.Lasso model used for feature selection. Returns ------- For models with a single output this returns a matrix of SHAP values (# samples x # features). Each row sums to the difference between the model output for that sample and the expected value of the model output (which is stored as the expected_value attribute of the explainer). For models with vector outputs this returns a list of such matrices, one for each output.
def shap_values(self, X, **kwargs):
    """ Estimate the SHAP values for a set of samples.

    Parameters
    ----------
    X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
        A matrix of samples (# samples x # features) on which to explain the model's output.

    nsamples : "auto" or int
        Number of times to re-evaluate the model when explaining each prediction. More samples
        lead to lower variance estimates of the SHAP values. The "auto" setting uses
        `nsamples = 2 * X.shape[1] + 2048`.

    l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float
        The l1 regularization to use for feature selection (the estimation procedure is based on
        a debiased lasso). The auto option currently uses "aic" when less than 20% of the possible
        sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto"
        WILL CHANGE in a future version to be based on num_features instead of AIC. The "aic" and
        "bic" options use the AIC and BIC rules for regularization. Using "num_features(int)"
        selects a fixed number of top features. Passing a float directly sets the "alpha"
        parameter of the sklearn.linear_model.Lasso model used for feature selection.

    Returns
    -------
    For models with a single output this returns a matrix of SHAP values
    (# samples x # features). Each row sums to the difference between the model output
    for that sample and the expected value of the model output (which is stored as the
    expected_value attribute of the explainer). For models with vector outputs this returns
    a list of such matrices, one for each output.
    """
    # convert dataframes
    if str(type(X)).endswith("pandas.core.series.Series'>"):
        X = X.values
    elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        if self.keep_index:
            index_value = X.index.values
            index_name = X.index.name
            column_name = list(X.columns)
        X = X.values

    x_type = str(type(X))
    arr_type = "'numpy.ndarray'>"
    # if sparse, convert to lil for performance
    if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):
        X = X.tolil()
    assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), "Unknown instance type: " + x_type
    assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"

    # single instance
    if len(X.shape) == 1:
        data = X.reshape((1, X.shape[0]))
        if self.keep_index:
            # argument order matches the per-row call below: (val, column_name, index_value, index_name)
            data = convert_to_instance_with_index(data, column_name, index_value, index_name)
        explanation = self.explain(data, **kwargs)

        # vector-output
        s = explanation.shape
        if len(s) == 2:
            outs = [np.zeros(s[0]) for j in range(s[1])]
            for j in range(s[1]):
                outs[j] = explanation[:, j]
            return outs

        # single-output
        else:
            out = np.zeros(s[0])
            out[:] = explanation
            return out

    # explain the whole dataset
    elif len(X.shape) == 2:
        explanations = []
        for i in tqdm(range(X.shape[0]), disable=kwargs.get("silent", False)):
            data = X[i:i + 1, :]
            if self.keep_index:
                data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)
            explanations.append(self.explain(data, **kwargs))

        # vector-output
        s = explanations[0].shape
        if len(s) == 2:
            outs = [np.zeros((X.shape[0], s[0])) for j in range(s[1])]
            for i in range(X.shape[0]):
                for j in range(s[1]):
                    outs[j][i] = explanations[i][:, j]
            return outs

        # single-output
        else:
            out = np.zeros((X.shape[0], s[0]))
            for i in range(X.shape[0]):
                out[i] = explanations[i]
            return out
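A brief usage sketch, assuming a fitted model with a predict method and train/test matrices:

import shap

explainer = shap.KernelExplainer(model.predict, shap.kmeans(X_train, 10))
shap_values = explainer.shap_values(X_test, nsamples=500, l1_reg="num_features(10)")
# each row of shap_values sums to model.predict(x) - explainer.expected_value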
Use the SHAP values as an embedding which we project to 2D for visualization. Parameters ---------- ind : int or string If this is an int it is the index of the feature to use to color the embedding. If this is a string it is either the name of the feature, or it can have the form "rank(int)" to specify the feature with that rank (ordered by mean absolute SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values, which is the model's output (minus its expected value). shap_values : numpy.array Matrix of SHAP values (# samples x # features). feature_names : None or list The names of the features in the shap_values array. method : "pca" or numpy.array How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D PCA projection of shap_values is used. If a numpy array then it should be (# samples x 2) and represent the embedding of the values. alpha : float The transparency of the data points (between 0 and 1). This can be useful to show the density of the data points when using a large dataset.
def embedding_plot(ind, shap_values, feature_names=None, method="pca", alpha=1.0, show=True):
    """ Use the SHAP values as an embedding which we project to 2D for visualization.

    Parameters
    ----------
    ind : int or string
        If this is an int it is the index of the feature to use to color the embedding.
        If this is a string it is either the name of the feature, or it can have the
        form "rank(int)" to specify the feature with that rank (ordered by mean absolute
        SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values,
        which is the model's output (minus its expected value).

    shap_values : numpy.array
        Matrix of SHAP values (# samples x # features).

    feature_names : None or list
        The names of the features in the shap_values array.

    method : "pca" or numpy.array
        How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D
        PCA projection of shap_values is used. If a numpy array then it should be
        (# samples x 2) and represent the embedding of the values.

    alpha : float
        The transparency of the data points (between 0 and 1). This can be useful to show
        the density of the data points when using a large dataset.
    """
    if feature_names is None:
        feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]

    ind = convert_name(ind, shap_values, feature_names)
    if ind == "sum()":
        cvals = shap_values.sum(1)
        fname = "sum(SHAP values)"
    else:
        cvals = shap_values[:, ind]
        fname = feature_names[ind]

    # see if we need to compute the embedding
    if type(method) == str and method == "pca":
        pca = sklearn.decomposition.PCA(2)
        embedding_values = pca.fit_transform(shap_values)
    elif hasattr(method, "shape") and method.shape[1] == 2:
        embedding_values = method
    else:
        print("Unsupported embedding method:", method)

    pl.scatter(
        embedding_values[:, 0], embedding_values[:, 1], c=cvals,
        cmap=colors.red_blue, alpha=alpha, linewidth=0
    )
    pl.axis("off")
    cb = pl.colorbar()
    cb.set_label("SHAP value for\n" + fname, size=13)
    cb.outline.set_visible(False)

    pl.gcf().set_size_inches(7.5, 5)
    bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
    cb.ax.set_aspect((bbox.height - 0.7) * 10)
    cb.set_alpha(1)

    if show:
        pl.show()
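A brief usage sketch, assuming shap_values and feature_names from an earlier explainer run: color the 2D PCA embedding of the SHAP values by the top-ranked feature.

import shap

shap.embedding_plot("rank(0)", shap_values, feature_names=feature_names)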
Create a SHAP dependence plot, colored by an interaction feature. Plots the value of the feature on the x-axis and the SHAP value of the same feature on the y-axis. This shows how the model depends on the given feature, and is like a richer extension of the classical partial dependence plots. Vertical dispersion of the data points represents interaction effects. Grey ticks along the y-axis are data points where the feature's value was NaN. Parameters ---------- ind : int or string If this is an int it is the index of the feature to plot. If this is a string it is either the name of the feature to plot, or it can have the form "rank(int)" to specify the feature with that rank (ordered by mean absolute SHAP value over all the samples). shap_values : numpy.array Matrix of SHAP values (# samples x # features). features : numpy.array or pandas.DataFrame Matrix of feature values (# samples x # features). feature_names : list Names of the features (length # features). display_features : numpy.array or pandas.DataFrame Matrix of feature values for visual display (such as strings instead of coded values). interaction_index : "auto", None, int, or string The index of the feature used to color the plot. The name of a feature can also be passed as a string. If "auto" then shap.common.approximate_interactions is used to pick what seems to be the strongest interaction (note that to find the true strongest interaction you need to compute the SHAP interaction values). x_jitter : float (0 - 1) Adds random jitter to feature values. May increase plot readability when feature is discrete. alpha : float The transparency of the data points (between 0 and 1). This can be useful to show the density of the data points when using a large dataset. xmin : float or string Represents the lower bound of the plot's x-axis. It can be a string of the format "percentile(float)" to denote that percentile of the feature's value used on the x-axis. xmax : float or string Represents the upper bound of the plot's x-axis. It can be a string of the format "percentile(float)" to denote that percentile of the feature's value used on the x-axis.
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
                    interaction_index="auto", color="#1E88E5", axis_color="#333333",
                    cmap=colors.red_blue, dot_size=16, x_jitter=0, alpha=1, title=None,
                    xmin=None, xmax=None, show=True):
    """ Create a SHAP dependence plot, colored by an interaction feature.

    Plots the value of the feature on the x-axis and the SHAP value of the same feature
    on the y-axis. This shows how the model depends on the given feature, and is like a
    richer extension of the classical partial dependence plots. Vertical dispersion of the
    data points represents interaction effects. Grey ticks along the y-axis are data
    points where the feature's value was NaN.

    Parameters
    ----------
    ind : int or string
        If this is an int it is the index of the feature to plot. If this is a string it is
        either the name of the feature to plot, or it can have the form "rank(int)" to specify
        the feature with that rank (ordered by mean absolute SHAP value over all the samples).

    shap_values : numpy.array
        Matrix of SHAP values (# samples x # features).

    features : numpy.array or pandas.DataFrame
        Matrix of feature values (# samples x # features).

    feature_names : list
        Names of the features (length # features).

    display_features : numpy.array or pandas.DataFrame
        Matrix of feature values for visual display (such as strings instead of coded values).

    interaction_index : "auto", None, int, or string
        The index of the feature used to color the plot. The name of a feature can also be
        passed as a string. If "auto" then shap.common.approximate_interactions is used to
        pick what seems to be the strongest interaction (note that to find the true strongest
        interaction you need to compute the SHAP interaction values).

    x_jitter : float (0 - 1)
        Adds random jitter to feature values. May increase plot readability when feature
        is discrete.

    alpha : float
        The transparency of the data points (between 0 and 1). This can be useful to show
        the density of the data points when using a large dataset.

    xmin : float or string
        Represents the lower bound of the plot's x-axis. It can be a string of the format
        "percentile(float)" to denote that percentile of the feature's value used on the x-axis.

    xmax : float or string
        Represents the upper bound of the plot's x-axis. It can be a string of the format
        "percentile(float)" to denote that percentile of the feature's value used on the x-axis.
    """
    # convert from DataFrames if we got any
    if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
        if feature_names is None:
            feature_names = features.columns
        features = features.values
    if str(type(display_features)).endswith("'pandas.core.frame.DataFrame'>"):
        if feature_names is None:
            feature_names = display_features.columns
        display_features = display_features.values
    elif display_features is None:
        display_features = features

    if feature_names is None:
        feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]

    # allow vectors to be passed
    if len(shap_values.shape) == 1:
        shap_values = np.reshape(shap_values, (len(shap_values), 1))
    if len(features.shape) == 1:
        features = np.reshape(features, (len(features), 1))

    ind = convert_name(ind, shap_values, feature_names)

    # plotting SHAP interaction values
    if len(shap_values.shape) == 3 and len(ind) == 2:
        ind1 = convert_name(ind[0], shap_values, feature_names)
        ind2 = convert_name(ind[1], shap_values, feature_names)
        if ind1 == ind2:
            proj_shap_values = shap_values[:, ind2, :]
        else:
            proj_shap_values = shap_values[:, ind2, :] * 2  # off-diag values are split in half

        # TODO: remove recursion; generally the functions should be shorter for more maintainable code
        dependence_plot(
            ind1, proj_shap_values, features, feature_names=feature_names,
            interaction_index=ind2, display_features=display_features, show=False,
            xmin=xmin, xmax=xmax
        )
        if ind1 == ind2:
            pl.ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])
        else:
            pl.ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))

        if show:
            pl.show()
        return

    assert shap_values.shape[0] == features.shape[0], \
        "'shap_values' and 'features' values must have the same number of rows!"
    assert shap_values.shape[1] == features.shape[1], \
        "'shap_values' must have the same number of columns as 'features'!"

    # get both the raw and display feature values
    oinds = np.arange(shap_values.shape[0])  # we randomize the ordering so plotting overlaps are not related to data ordering
    np.random.shuffle(oinds)
    xv = features[oinds, ind].astype(np.float64)
    xd = display_features[oinds, ind]
    s = shap_values[oinds, ind]
    if type(xd[0]) == str:
        name_map = {}
        for i in range(len(xv)):
            name_map[xd[i]] = xv[i]
        xnames = list(name_map.keys())

    # allow a single feature name to be passed alone
    if type(feature_names) == str:
        feature_names = [feature_names]
    name = feature_names[ind]

    # guess which other feature has the strongest interaction with the plotted feature
    if interaction_index == "auto":
        interaction_index = approximate_interactions(ind, shap_values, features)[0]
    interaction_index = convert_name(interaction_index, shap_values, feature_names)
    categorical_interaction = False

    # get both the raw and display color values
    color_norm = None
    if interaction_index is not None:
        cv = features[:, interaction_index]
        cd = display_features[:, interaction_index]
        clow = np.nanpercentile(cv.astype(np.float64), 5)
        chigh = np.nanpercentile(cv.astype(np.float64), 95)
        if type(cd[0]) == str:
            cname_map = {}
            for i in range(len(cv)):
                cname_map[cd[i]] = cv[i]
            cnames = list(cname_map.keys())
            categorical_interaction = True
        elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
            categorical_interaction = True

        # discretize colors for categorical features
        if categorical_interaction and clow != chigh:
            clow = np.nanmin(cv.astype(np.float64))
            chigh = np.nanmax(cv.astype(np.float64))
            bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
            color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N - 1)

    # optionally add jitter to feature values
    if x_jitter > 0:
        if x_jitter > 1:
            x_jitter = 1
        xvals = xv.copy()
        if isinstance(xvals[0], float):
            xvals = xvals.astype(np.float64)
            xvals = xvals[~np.isnan(xvals)]
        xvals = np.unique(xvals)
        if len(xvals) >= 2:
            smallest_diff = np.min(np.diff(np.sort(xvals)))
            jitter_amount = x_jitter * smallest_diff
            xv += (np.random.ranf(size=len(xv)) * jitter_amount) - (jitter_amount / 2)

    # the actual scatter plot, TODO: adapt the dot_size to the number of data points?
    xv_nan = np.isnan(xv)
    xv_notnan = np.invert(xv_nan)
    if interaction_index is not None:

        # plot the nan values in the interaction feature as grey
        cvals = features[oinds, interaction_index].astype(np.float64)
        cvals_imp = cvals.copy()
        cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
        cvals[cvals_imp > chigh] = chigh
        cvals[cvals_imp < clow] = clow
        p = pl.scatter(
            xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],
            cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh, norm=color_norm,
            rasterized=len(xv) > 500
        )
        p.set_array(cvals[xv_notnan])
    else:
        pl.scatter(xv, s, s=dot_size, linewidth=0, color=color, alpha=alpha,
                   rasterized=len(xv) > 500)

    if interaction_index != ind and interaction_index is not None:
        # draw the color bar
        if type(cd[0]) == str:
            tick_positions = [cname_map[n] for n in cnames]
            if len(tick_positions) == 2:
                tick_positions[0] -= 0.25
                tick_positions[1] += 0.25
            cb = pl.colorbar(ticks=tick_positions)
            cb.set_ticklabels(cnames)
        else:
            cb = pl.colorbar()

        cb.set_label(feature_names[interaction_index], size=13)
        cb.ax.tick_params(labelsize=11)
        if categorical_interaction:
            cb.ax.tick_params(length=0)
        cb.set_alpha(1)
        cb.outline.set_visible(False)
        bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
        cb.ax.set_aspect((bbox.height - 0.7) * 20)

    # handles any setting of xmax and xmin
    # note that we handle None, float, or "percentile(float)" formats
    if xmin is not None or xmax is not None:
        if type(xmin) == str and xmin.startswith("percentile"):
            xmin = np.nanpercentile(xv, float(xmin[11:-1]))
        if type(xmax) == str and xmax.startswith("percentile"):
            xmax = np.nanpercentile(xv, float(xmax[11:-1]))

        if xmin is None or xmin == np.nanmin(xv):
            xmin = np.nanmin(xv) - (xmax - np.nanmin(xv)) / 20
        if xmax is None or xmax == np.nanmax(xv):
            xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin) / 20

        pl.xlim(xmin, xmax)

    # plot any nan feature values as tick marks along the y-axis
    xlim = pl.xlim()
    if interaction_index is not None:
        p = pl.scatter(
            xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
            linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,
            vmin=clow, vmax=chigh
        )
        p.set_array(cvals[xv_nan])
    else:
        pl.scatter(
            xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
            linewidth=2, color=color, alpha=alpha
        )
    pl.xlim(*xlim)

    # make the plot more readable
    if interaction_index != ind:
        pl.gcf().set_size_inches(7.5, 5)
    else:
        pl.gcf().set_size_inches(6, 5)
    pl.xlabel(name, color=axis_color, fontsize=13)
    pl.ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)
    if title is not None:
        pl.title(title, color=axis_color, fontsize=13)
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('left')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    pl.gca().tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
    for spine in pl.gca().spines.values():
        spine.set_edgecolor(axis_color)
    if type(xd[0]) == str:
        pl.xticks([name_map[n] for n in xnames], xnames, rotation='vertical', fontsize=11)
    if show:
        with warnings.catch_warnings():  # ignore expected matplotlib warnings
            warnings.simplefilter("ignore", RuntimeWarning)
            pl.show()
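A brief usage sketch; the feature name "RM" and the data matrix X are illustrative, and shap_values is assumed to come from an earlier explainer run:

import shap

shap.dependence_plot("RM", shap_values, X, interaction_index="auto",
                     xmin="percentile(1)", xmax="percentile(99)")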
Runtime transform = "negate" sort_order = 1
def runtime(X, y, model_generator, method_name):
    """ Runtime
    transform = "negate"
    sort_order = 1
    """
    # save and later restore the RNG state (np.random.seed() returns None, so the
    # original save/restore via its return value was a no-op)
    old_state = np.random.get_state()
    np.random.seed(3293)

    # average the method scores over several train/test splits
    method_reps = []
    for i in range(1):
        X_train, X_test, y_train, _ = train_test_split(__toarray(X), y, test_size=100, random_state=i)

        # define the model we are going to explain
        model = model_generator()
        model.fit(X_train, y_train)

        # evaluate each method
        start = time.time()
        explainer = getattr(methods, method_name)(model, X_train)
        build_time = time.time() - start

        start = time.time()
        explainer(X_test)
        explain_time = time.time() - start

        # we always normalize the explain time as though we were explaining 1000 samples
        # even if to reduce the runtime of the benchmark we do less (like just 100)
        method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0])
    np.random.set_state(old_state)

    return None, np.mean(method_reps)
Local Accuracy transform = "identity" sort_order = 2
def local_accuracy(X, y, model_generator, method_name):
    """ Local Accuracy
    transform = "identity"
    sort_order = 2
    """

    def score_map(true, pred):
        """ Converts local accuracy from % of standard deviation to numerical scores for coloring.
        """
        v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
        if v < 1e-6:
            return 1.0
        elif v < 0.01:
            return 0.9
        elif v < 0.05:
            return 0.75
        elif v < 0.1:
            return 0.6
        elif v < 0.2:
            return 0.4
        elif v < 0.3:
            return 0.3
        elif v < 0.5:
            return 0.2
        elif v < 0.7:
            return 0.1
        else:
            return 0.0

    def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):
        return measures.local_accuracy(
            X_train, y_train, X_test, y_test, attr_function(X_test),
            model_generator, score_map, trained_model
        )
    return None, __score_method(X, y, None, model_generator, score_function, method_name)
Keep Negative (mask) xlabel = "Max fraction of features kept" ylabel = "Negative mean model output" transform = "negate" sort_order = 5
def keep_negative_mask(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Negative (mask)
    xlabel = "Max fraction of features kept"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 5
    """
    return __run_measure(measures.keep_mask, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
Keep Absolute (mask) xlabel = "Max fraction of features kept" ylabel = "R^2" transform = "identity" sort_order = 6
def keep_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Absolute (mask)
    xlabel = "Max fraction of features kept"
    ylabel = "R^2"
    transform = "identity"
    sort_order = 6
    """
    return __run_measure(measures.keep_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
Remove Positive (mask) xlabel = "Max fraction of features removed" ylabel = "Negative mean model output" transform = "negate" sort_order = 7
def remove_positive_mask(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Positive (mask)
    xlabel = "Max fraction of features removed"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 7
    """
    return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
Remove Absolute (mask) xlabel = "Max fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 9
def remove_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Absolute (mask)
    xlabel = "Max fraction of features removed"
    ylabel = "1 - R^2"
    transform = "one_minus"
    sort_order = 9
    """
    return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
Keep Negative (resample) xlabel = "Max fraction of features kept" ylabel = "Negative mean model output" transform = "negate" sort_order = 11
def keep_negative_resample(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Negative (resample)
    xlabel = "Max fraction of features kept"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 11
    """
    return __run_measure(measures.keep_resample, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
Keep Absolute (resample) xlabel = "Max fraction of features kept" ylabel = "R^2" transform = "identity" sort_order = 12
def keep_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Absolute (resample)
    xlabel = "Max fraction of features kept"
    ylabel = "R^2"
    transform = "identity"
    sort_order = 12
    """
    return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
Keep Absolute (resample) xlabel = "Max fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 12
def keep_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Absolute (resample)
    xlabel = "Max fraction of features kept"
    ylabel = "ROC AUC"
    transform = "identity"
    sort_order = 12
    """
    return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
Remove Positive (resample) xlabel = "Max fraction of features removed" ylabel = "Negative mean model output" transform = "negate" sort_order = 13
def remove_positive_resample(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Positive (resample)
    xlabel = "Max fraction of features removed"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 13
    """
    return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
Remove Absolute (resample) xlabel = "Max fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 15
def remove_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Absolute (resample)
    xlabel = "Max fraction of features removed"
    ylabel = "1 - R^2"
    transform = "one_minus"
    sort_order = 15
    """
    return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
Remove Absolute (resample) xlabel = "Max fraction of features removed" ylabel = "1 - ROC AUC" transform = "one_minus" sort_order = 15
def remove_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Absolute (resample)
    xlabel = "Max fraction of features removed"
    ylabel = "1 - ROC AUC"
    transform = "one_minus"
    sort_order = 15
    """
    return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
Keep Negative (impute) xlabel = "Max fraction of features kept" ylabel = "Negative mean model output" transform = "negate" sort_order = 17
def keep_negative_impute(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Negative (impute)
    xlabel = "Max fraction of features kept"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 17
    """
    return __run_measure(measures.keep_impute, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
Keep Absolute (impute) xlabel = "Max fraction of features kept" ylabel = "R^2" transform = "identity" sort_order = 18
def keep_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Absolute (impute)
    xlabel = "Max fraction of features kept"
    ylabel = "R^2"
    transform = "identity"
    sort_order = 18
    """
    return __run_measure(measures.keep_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
Keep Absolute (impute) xlabel = "Max fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 19
def keep_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Absolute (impute)
    xlabel = "Max fraction of features kept"
    ylabel = "ROC AUC"
    transform = "identity"
    sort_order = 19
    """
    # use the impute measure to match the docstring (the original called keep_mask here)
    return __run_measure(measures.keep_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
Remove Positive (impute) xlabel = "Max fraction of features removed" ylabel = "Negative mean model output" transform = "negate" sort_order = 7
def remove_positive_impute(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Positive (impute)
    xlabel = "Max fraction of features removed"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 7
    """
    return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
Remove Absolute (impute) xlabel = "Max fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 9
def remove_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Absolute (impute)
    xlabel = "Max fraction of features removed"
    ylabel = "1 - R^2"
    transform = "one_minus"
    sort_order = 9
    """
    return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
Remove Absolute (impute) xlabel = "Max fraction of features removed" ylabel = "1 - ROC AUC" transform = "one_minus" sort_order = 9
def remove_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Absolute (impute)
    xlabel = "Max fraction of features removed"
    ylabel = "1 - ROC AUC"
    transform = "one_minus"
    sort_order = 9
    """
    # use the impute measure to match the docstring (the original called remove_mask here)
    return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
Keep Negative (retrain) xlabel = "Max fraction of features kept" ylabel = "Negative mean model output" transform = "negate" sort_order = 7
def keep_negative_retrain(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Negative (retrain)
    xlabel = "Max fraction of features kept"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 7
    """
    return __run_measure(measures.keep_retrain, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
Remove Positive (retrain) xlabel = "Max fraction of features removed" ylabel = "Negative mean model output" transform = "negate" sort_order = 11
def remove_positive_retrain(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Positive (retrain)
    xlabel = "Max fraction of features removed"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 11
    """
    return __run_measure(measures.remove_retrain, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Remove Absolute (retrain)
    xlabel = "Fraction of features removed"
    ylabel = "1 - R^2"
    transform = "one_minus"
    sort_order = 13
    """
    return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
Batch Keep Absolute (retrain) xlabel = "Fraction of features kept" ylabel = "R^2" transform = "identity" sort_order = 13
def batch_keep_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Keep Absolute (retrain)
    xlabel = "Fraction of features kept"
    ylabel = "R^2"
    transform = "identity"
    sort_order = 13
    """
    return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - ROC AUC" transform = "one_minus" sort_order = 13
def batch_remove_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Remove Absolute (retrain)
    xlabel = "Fraction of features removed"
    ylabel = "1 - ROC AUC"
    transform = "one_minus"
    sort_order = 13
    """
    return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
Batch Keep Absolute (retrain) xlabel = "Fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 13
def batch_keep_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Keep Absolute (retrain)
    xlabel = "Fraction of features kept"
    ylabel = "ROC AUC"
    transform = "identity"
    sort_order = 13
    """
    return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
Test an explanation method.
def __score_method(X, y, fcounts, model_generator, score_function, method_name,
                   nreps=10, test_size=100, cache_dir="/tmp"):
    """ Test an explanation method.
    """
    old_state = np.random.get_state()
    np.random.seed(3293)

    # average the method scores over several train/test splits
    method_reps = []

    data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest()
    for i in range(nreps):
        X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i)

        # define the model we are going to explain, caching it so we only build it once
        model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])
        cache_file = os.path.join(cache_dir, model_id + ".pickle")
        if os.path.isfile(cache_file):
            with open(cache_file, "rb") as f:
                model = pickle.load(f)
        else:
            model = model_generator()
            model.fit(X_train, y_train)
            with open(cache_file, "wb") as f:
                pickle.dump(model, f)

        attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash])

        def score(attr_function):
            def cached_attr_function(X_inner):
                if attr_key not in _attribution_cache:
                    _attribution_cache[attr_key] = attr_function(X_inner)
                return _attribution_cache[attr_key]

            if fcounts is None:
                return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i)
            else:
                scores = []
                for f in fcounts:
                    scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i))
                return np.array(scores)

        # evaluate the method (only building the attribution function if we need to)
        if attr_key not in _attribution_cache:
            method_reps.append(score(getattr(methods, method_name)(model, X_train)))
        else:
            method_reps.append(score(None))

    np.random.set_state(old_state)
    return np.array(method_reps).mean(0)
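The function keys its model and attribution caches on a content hash of the data plus the model and method names, so results are reused across runs. A minimal standalone sketch of the same disk-caching pattern, with a hypothetical fit_cached helper:

import hashlib
import os
import pickle

def fit_cached(model_factory, X, y, cache_dir="/tmp"):
    # the cache key changes whenever the data or the model class changes
    key = hashlib.sha256(X.tobytes()).hexdigest()[:16] + "__" + model_factory.__name__
    path = os.path.join(cache_dir, "model_cache__" + key + ".pickle")
    if os.path.isfile(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    model = model_factory()
    model.fit(X, y)
    with open(path, "wb") as f:
        pickle.dump(model, f)
    return model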
AND (false/false) This tests how well a feature attribution method agrees with human intuition for an AND operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever and cough: +6 points transform = "identity" sort_order = 0
def human_and_00(X, y, model_generator, method_name):
    """ AND (false/false)

    This tests how well a feature attribution method agrees with human intuition
    for an AND operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever and cough: +6 points

    transform = "identity"
    sort_order = 0
    """
    return _human_and(X, model_generator, method_name, False, False)
AND (false/true) This tests how well a feature attribution method agrees with human intuition for an AND operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever and cough: +6 points transform = "identity" sort_order = 1
def human_and_01(X, y, model_generator, method_name):
    """ AND (false/true)

    This tests how well a feature attribution method agrees with human intuition
    for an AND operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever and cough: +6 points

    transform = "identity"
    sort_order = 1
    """
    return _human_and(X, model_generator, method_name, False, True)
AND (true/true) This tests how well a feature attribution method agrees with human intuition for an AND operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever and cough: +6 points transform = "identity" sort_order = 2
def human_and_11(X, y, model_generator, method_name):
    """ AND (true/true)

    This tests how well a feature attribution method agrees with human intuition
    for an AND operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever and cough: +6 points

    transform = "identity"
    sort_order = 2
    """
    return _human_and(X, model_generator, method_name, True, True)
OR (false/false) This tests how well a feature attribution method agrees with human intuition for an OR operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever or cough: +6 points transform = "identity" sort_order = 0
def human_or_00(X, y, model_generator, method_name):
    """ OR (false/false)

    This tests how well a feature attribution method agrees with human intuition
    for an OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough: +6 points

    transform = "identity"
    sort_order = 0
    """
    return _human_or(X, model_generator, method_name, False, False)
OR (false/true) This tests how well a feature attribution method agrees with human intuition for an OR operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever or cough: +6 points transform = "identity" sort_order = 1
def human_or_01(X, y, model_generator, method_name):
    """ OR (false/true)

    This tests how well a feature attribution method agrees with human intuition
    for an OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough: +6 points

    transform = "identity"
    sort_order = 1
    """
    return _human_or(X, model_generator, method_name, False, True)
OR (true/true) This tests how well a feature attribution method agrees with human intuition for an OR operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever or cough: +6 points transform = "identity" sort_order = 2
def human_or_11(X, y, model_generator, method_name):
    """ OR (true/true)

    This tests how well a feature attribution method agrees with human intuition
    for an OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough: +6 points

    transform = "identity"
    sort_order = 2
    """
    return _human_or(X, model_generator, method_name, True, True)
XOR (false/false) This tests how well a feature attribution method agrees with human intuition for an eXclusive OR operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever or cough but not both: +6 points transform = "identity" sort_order = 3
def human_xor_00(X, y, model_generator, method_name):
    """ XOR (false/false)

    This tests how well a feature attribution method agrees with human intuition
    for an eXclusive OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough but not both: +6 points

    transform = "identity"
    sort_order = 3
    """
    return _human_xor(X, model_generator, method_name, False, False)
XOR (false/true) This tests how well a feature attribution method agrees with human intuition for an eXclusive OR operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever or cough but not both: +6 points transform = "identity" sort_order = 4
def human_xor_01(X, y, model_generator, method_name):
    """ XOR (false/true)

    This tests how well a feature attribution method agrees with human intuition
    for an eXclusive OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough but not both: +6 points

    transform = "identity"
    sort_order = 4
    """
    return _human_xor(X, model_generator, method_name, False, True)
XOR (true/true) This tests how well a feature attribution method agrees with human intuition for an eXclusive OR operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever or cough but not both: +6 points transform = "identity" sort_order = 5
def human_xor_11(X, y, model_generator, method_name):
    """ XOR (true/true)

    This tests how well a feature attribution method agrees with human intuition
    for an eXclusive OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough but not both: +6 points

    transform = "identity"
    sort_order = 5
    """
    return _human_xor(X, model_generator, method_name, True, True)
SUM (false/true) This tests how well a feature attribution method agrees with human intuition for a SUM operation. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points transform = "identity" sort_order = 1
def human_sum_01(X, y, model_generator, method_name):
    """ SUM (false/true)

    This tests how well a feature attribution method agrees with human intuition
    for a SUM operation. This metric deals specifically with the question of
    credit allocation for the following function when all three inputs are true:
    if fever: +2 points
    if cough: +2 points

    transform = "identity"
    sort_order = 1
    """
    return _human_sum(X, model_generator, method_name, False, True)
SUM (false/false) This tests how well a feature attribution method agrees with human intuition for a SUM operation. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points transform = "identity" sort_order = 0
def human_sum_00(X, y, model_generator, method_name):
    """ SUM (false/false)

    This tests how well a feature attribution method agrees with human intuition
    for a SUM operation. This metric deals specifically with the question of
    credit allocation for the following function when all three inputs are true:
    if fever: +2 points
    if cough: +2 points

    transform = "identity"
    sort_order = 0
    """
    return _human_sum(X, model_generator, method_name, False, False)
SUM (true/true) This tests how well a feature attribution method agrees with human intuition for a SUM operation. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points transform = "identity" sort_order = 2
def human_sum_11(X, y, model_generator, method_name):
    """ SUM (true/true)

    This tests how well a feature attribution method agrees with human intuition
    for a SUM operation. This metric deals specifically with the question of
    credit allocation for the following function when all three inputs are true:
    if fever: +2 points
    if cough: +2 points

    transform = "identity"
    sort_order = 2
    """
    return _human_sum(X, model_generator, method_name, True, True)
Uses block matrix inversion identities to quickly estimate transforms. After a bit of matrix math we can isolate a transform matrix (# features x # features) that is independent of any sample we are explaining. It is the result of averaging over all feature permutations, but we just use a fixed number of samples to estimate the value. TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could happen through a recursive method that uses the same block matrix inversion as below.
def _estimate_transforms(self, nsamples):
    """ Uses block matrix inversion identities to quickly estimate transforms.

    After a bit of matrix math we can isolate a transform matrix (# features x # features)
    that is independent of any sample we are explaining. It is the result of averaging over
    all feature permutations, but we just use a fixed number of samples to estimate the value.

    TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could
          happen through a recursive method that uses the same block matrix inversion as below.
    """
    M = len(self.coef)

    mean_transform = np.zeros((M, M))
    x_transform = np.zeros((M, M))
    inds = np.arange(M, dtype=int)
    for _ in tqdm(range(nsamples), "Estimating transforms"):
        np.random.shuffle(inds)
        cov_inv_SiSi = np.zeros((0, 0))
        cov_Si = np.zeros((M, 0))
        for j in range(M):
            i = inds[j]

            # use the last Si as the new S
            cov_S = cov_Si
            cov_inv_SS = cov_inv_SiSi

            # get the new cov_Si
            cov_Si = self.cov[:, inds[:j + 1]]

            # compute the new cov_inv_SiSi from cov_inv_SS
            d = cov_Si[i, :-1].T
            t = np.matmul(cov_inv_SS, d)
            Z = self.cov[i, i]
            u = Z - np.matmul(t.T, d)
            cov_inv_SiSi = np.zeros((j + 1, j + 1))
            if j > 0:
                cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u
                cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1, :-1] = -t / u
            cov_inv_SiSi[-1, -1] = 1 / u

            # + coef @ (Q(bar(Sui)) - Q(bar(S)))
            mean_transform[i, i] += self.coef[i]

            # + coef @ R(Sui)
            coef_R_Si = np.matmul(self.coef[inds[j + 1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j + 1:]])
            mean_transform[i, inds[:j + 1]] += coef_R_Si

            # - coef @ R(S)
            coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]])
            mean_transform[i, inds[:j]] -= coef_R_S

            # - coef @ (Q(Sui) - Q(S))
            x_transform[i, i] += self.coef[i]

            # + coef @ R(Sui)
            x_transform[i, inds[:j + 1]] += coef_R_Si

            # - coef @ R(S)
            x_transform[i, inds[:j]] -= coef_R_S

    mean_transform /= nsamples
    x_transform /= nsamples
    return mean_transform, x_transform
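The inner loop grows cov_inv_SiSi one feature at a time using the bordered-inverse (block matrix inversion) identity rather than re-inverting from scratch: for A = [[S, d], [d.T, z]] with t = S^-1 d and Schur complement u = z - t.T d, A^-1 = [[S^-1 + t t^T / u, -t/u], [-t^T/u, 1/u]]. A small numeric check of that identity:

import numpy as np

rng = np.random.default_rng(0)
B = rng.normal(size=(4, 4))
cov = B @ B.T + 4 * np.eye(4)            # a well-conditioned symmetric covariance
S, d, z = cov[:3, :3], cov[:3, 3], cov[3, 3]

S_inv = np.linalg.inv(S)
t = S_inv @ d
u = z - t @ d                            # Schur complement
A_inv = np.zeros((4, 4))
A_inv[:3, :3] = S_inv + np.outer(t, t) / u
A_inv[:3, 3] = A_inv[3, :3] = -t / u
A_inv[3, 3] = 1 / u

assert np.allclose(A_inv, np.linalg.inv(cov))   # matches the direct inverse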
Estimate the SHAP values for a set of samples. Parameters ---------- X : numpy.array or pandas.DataFrame A matrix of samples (# samples x # features) on which to explain the model's output. Returns ------- For models with a single output this returns a matrix of SHAP values (# samples x # features). Each row sums to the difference between the model output for that sample and the expected value of the model output (which is stored as expected_value attribute of the explainer).
def shap_values(self, X):
    """ Estimate the SHAP values for a set of samples.

    Parameters
    ----------
    X : numpy.array or pandas.DataFrame
        A matrix of samples (# samples x # features) on which to explain the model's output.

    Returns
    -------
    For models with a single output this returns a matrix of SHAP values
    (# samples x # features). Each row sums to the difference between the model output
    for that sample and the expected value of the model output (which is stored as the
    expected_value attribute of the explainer).
    """
    # convert dataframes
    if str(type(X)).endswith("pandas.core.series.Series'>"):
        X = X.values
    elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        X = X.values

    assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"

    if self.feature_dependence == "correlation":
        phi = np.matmul(np.matmul(X[:, self.valid_inds], self.avg_proj.T), self.x_transform.T) - self.mean_transformed
        phi = np.matmul(phi, self.avg_proj)

        full_phi = np.zeros((phi.shape[0], self.M))
        full_phi[:, self.valid_inds] = phi
        return full_phi

    elif self.feature_dependence == "independent":
        if len(self.coef.shape) == 1:
            return np.array(X - self.mean) * self.coef
        else:
            return [np.array(X - self.mean) * self.coef[i] for i in range(self.coef.shape[0])]
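For feature_dependence="independent" the attribution reduces to coef * (x - mean). A quick sketch verifying this with sklearn, assuming shap.LinearExplainer's (model, data) constructor from this era of the library:

import numpy as np
import shap
from sklearn.linear_model import LinearRegression

X = np.random.randn(200, 5)
y = X @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + np.random.randn(200) * 0.1
model = LinearRegression().fit(X, y)

explainer = shap.LinearExplainer(model, X, feature_dependence="independent")
shap_values = explainer.shap_values(X[:10])
assert np.allclose(shap_values, (X[:10] - X.mean(0)) * model.coef_, atol=1e-6)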
4-Layer Neural Network
def independentlinear60__ffnn(): """ 4-Layer Neural Network """ from keras.models import Sequential from keras.layers import Dense model = Sequential() model.add(Dense(32, activation='relu', input_dim=60)) model.add(Dense(20, activation='relu')) model.add(Dense(20, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error']) return KerasWrap(model, 30, flatten_output=True)
Lasso Regression
def cric__lasso():
    """ Lasso Regression
    """
    model = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002)

    # we want to explain the raw probability outputs of the model
    model.predict = lambda X: model.predict_proba(X)[:,1]

    return model
Ridge Regression
def cric__ridge():
    """ Ridge Regression
    """
    model = sklearn.linear_model.LogisticRegression(penalty="l2")

    # we want to explain the raw probability outputs of the model
    model.predict = lambda X: model.predict_proba(X)[:,1]

    return model
Decision Tree
def cric__decision_tree():
    """ Decision Tree
    """
    model = sklearn.tree.DecisionTreeClassifier(random_state=0, max_depth=4)

    # we want to explain the raw probability outputs of the tree
    model.predict = lambda X: model.predict_proba(X)[:,1]

    return model
Random Forest
def cric__random_forest(): """ Random Forest """ model = sklearn.ensemble.RandomForestClassifier(100, random_state=0) # we want to explain the raw probability outputs of the trees model.predict = lambda X: model.predict_proba(X)[:,1] return model
Gradient Boosted Trees
def cric__gbm(): """ Gradient Boosted Trees """ import xgboost # max_depth and subsample match the params used for the full cric data in the paper # learning_rate was set a bit higher to allow for faster runtimes # n_estimators was chosen based on a train/test split of the data model = xgboost.XGBClassifier(max_depth=5, n_estimators=400, learning_rate=0.01, subsample=0.2, n_jobs=8, random_state=0) # we want to explain the margin, not the transformed probability outputs model.__orig_predict = model.predict model.predict = lambda X: model.__orig_predict(X, output_margin=True) # pylint: disable=E1123 return model
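Because XGBoost's binary objective is additive in log-odds space, explaining the margin keeps the attributions exactly additive, and the probability is recoverable with a sigmoid. A hedged, self-contained check of that relationship (toy data, not the cric dataset):

import numpy as np
import scipy.special
import xgboost

# toy data just to exercise the margin/probability relationship
X = np.random.randn(500, 4)
y = (X[:, 0] + X[:, 1] > 0).astype(int)
model = xgboost.XGBClassifier(n_estimators=20).fit(X, y)

margin = model.predict(X, output_margin=True)  # the raw log-odds explained above
proba = scipy.special.expit(margin)            # sigmoid maps log-odds to probability
assert np.allclose(proba, model.predict_proba(X)[:, 1], atol=1e-6)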
Decision Tree
def human__decision_tree():
    """ Decision Tree
    """

    # build data
    N = 1000000
    M = 3
    X = np.zeros((N,M))
    y = np.zeros(N)
    X[0, 0] = 1
    y[0] = 8
    X[1, 1] = 1
    y[1] = 8
    X[2, 0:2] = 1
    y[2] = 4

    # fit model
    xor_model = sklearn.tree.DecisionTreeRegressor(max_depth=2)
    xor_model.fit(X, y)

    return xor_model
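Since the construction is symmetric in the first two features, a sanity probe with TreeExplainer should split credit roughly evenly between them for the all-active input. A hedged sketch:

import numpy as np
import shap

model = human__decision_tree()
phi = shap.TreeExplainer(model).shap_values(np.array([[1.0, 1.0, 0.0]]))
print(phi)  # features 0 and 1 should receive similar credit, feature 2 essentially none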
Create a SHAP summary plot, colored by feature values when they are provided. Parameters ---------- shap_values : numpy.array Matrix of SHAP values (# samples x # features) features : numpy.array or pandas.DataFrame or list Matrix of feature values (# samples x # features) or a feature_names list as shorthand feature_names : list Names of the features (length # features) max_display : int How many top features to include in the plot (default is 20, or 7 for interaction plots) plot_type : "dot" (default) or "violin" What type of summary plot to produce
def summary_plot(shap_values, features=None, feature_names=None, max_display=None, plot_type="dot", color=None, axis_color="#333333", title=None, alpha=1, show=True, sort=True, color_bar=True, auto_size_plot=True, layered_violin_max_num_bins=20, class_names=None): """Create a SHAP summary plot, colored by feature values when they are provided. Parameters ---------- shap_values : numpy.array Matrix of SHAP values (# samples x # features) features : numpy.array or pandas.DataFrame or list Matrix of feature values (# samples x # features) or a feature_names list as shorthand feature_names : list Names of the features (length # features) max_display : int How many top features to include in the plot (default is 20, or 7 for interaction plots) plot_type : "dot" (default) or "violin" What type of summary plot to produce """ multi_class = False if isinstance(shap_values, list): multi_class = True plot_type = "bar" # only type supported for now else: assert len(shap_values.shape) != 1, "Summary plots need a matrix of shap_values, not a vector." # default color: if color is None: if plot_type == 'layered_violin': color = "coolwarm" elif multi_class: color = lambda i: colors.red_blue_circle(i/len(shap_values)) else: color = colors.blue_rgb # convert from a DataFrame or other types if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>": if feature_names is None: feature_names = features.columns features = features.values elif isinstance(features, list): if feature_names is None: feature_names = features features = None elif (features is not None) and len(features.shape) == 1 and feature_names is None: feature_names = features features = None num_features = (shap_values[0].shape[1] if multi_class else shap_values.shape[1]) if feature_names is None: feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)]) # plotting SHAP interaction values if not multi_class and len(shap_values.shape) == 3: if max_display is None: max_display = 7 else: max_display = min(len(feature_names), max_display) sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0)) # get plotting limits delta = 1.0 / (shap_values.shape[1] ** 2) slow = np.nanpercentile(shap_values, delta) shigh = np.nanpercentile(shap_values, 100 - delta) v = max(abs(slow), abs(shigh)) slow = -v shigh = v pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1)) pl.subplot(1, max_display, 1) proj_shap_values = shap_values[:, sort_inds[0], sort_inds] proj_shap_values[:, 1:] *= 2 # because off diag effects are split in half summary_plot( proj_shap_values, features[:, sort_inds] if features is not None else None, feature_names=feature_names[sort_inds], sort=False, show=False, color_bar=False, auto_size_plot=False, max_display=max_display ) pl.xlim((slow, shigh)) pl.xlabel("") title_length_limit = 11 pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit)) for i in range(1, min(len(sort_inds), max_display)): ind = sort_inds[i] pl.subplot(1, max_display, i + 1) proj_shap_values = shap_values[:, ind, sort_inds] proj_shap_values *= 2 proj_shap_values[:, i] /= 2 # because only off diag effects are split in half summary_plot( proj_shap_values, features[:, sort_inds] if features is not None else None, sort=False, feature_names=["" for i in range(len(feature_names))], show=False, color_bar=False, auto_size_plot=False, max_display=max_display ) pl.xlim((slow, shigh)) pl.xlabel("") if i == min(len(sort_inds), max_display) // 2: pl.xlabel(labels['INTERACTION_VALUE']) pl.title(shorten_text(feature_names[ind], 
title_length_limit)) pl.tight_layout(pad=0, w_pad=0, h_pad=0.0) pl.subplots_adjust(hspace=0, wspace=0.1) if show: pl.show() return if max_display is None: max_display = 20 if sort: # order features by the sum of their effect magnitudes if multi_class: feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=0), axis=0)) else: feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0)) feature_order = feature_order[-min(max_display, len(feature_order)):] else: feature_order = np.flip(np.arange(min(max_display, num_features)), 0) row_height = 0.4 if auto_size_plot: pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5) pl.axvline(x=0, color="#999999", zorder=-1) if plot_type == "dot": for pos, i in enumerate(feature_order): pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1) shaps = shap_values[:, i] values = None if features is None else features[:, i] inds = np.arange(len(shaps)) np.random.shuffle(inds) if values is not None: values = values[inds] shaps = shaps[inds] colored_feature = True try: values = np.array(values, dtype=np.float64) # make sure this can be numeric except: colored_feature = False N = len(shaps) # hspacing = (np.max(shaps) - np.min(shaps)) / 200 # curr_bin = [] nbins = 100 quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-8)) inds = np.argsort(quant + np.random.randn(N) * 1e-6) layer = 0 last_bin = -1 ys = np.zeros(N) for ind in inds: if quant[ind] != last_bin: layer = 0 ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1) layer += 1 last_bin = quant[ind] ys *= 0.9 * (row_height / np.max(ys + 1)) if features is not None and colored_feature: # trim the color range, but prevent the color range from collapsing vmin = np.nanpercentile(values, 5) vmax = np.nanpercentile(values, 95) if vmin == vmax: vmin = np.nanpercentile(values, 1) vmax = np.nanpercentile(values, 99) if vmin == vmax: vmin = np.min(values) vmax = np.max(values) assert features.shape[0] == len(shaps), "Feature and SHAP matrices must have the same number of rows!" 
# plot the nan values in the interaction feature as grey
                nan_mask = np.isnan(values)
                pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color="#777777", vmin=vmin,
                           vmax=vmax, s=16, alpha=alpha, linewidth=0,
                           zorder=3, rasterized=len(shaps) > 500)

                # plot the non-nan values colored by the trimmed feature value
                cvals = values[np.invert(nan_mask)].astype(np.float64)
                cvals_imp = cvals.copy()
                cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
                cvals[cvals_imp > vmax] = vmax
                cvals[cvals_imp < vmin] = vmin
                pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)],
                           cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=16,
                           c=cvals, alpha=alpha, linewidth=0,
                           zorder=3, rasterized=len(shaps) > 500)
            else:
                pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3,
                           color=color if colored_feature else "#777777", rasterized=len(shaps) > 500)

    elif plot_type == "violin":
        for pos, i in enumerate(feature_order):
            pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)

        if features is not None:
            global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)
            global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)
            for pos, i in enumerate(feature_order):
                shaps = shap_values[:, i]
                shap_min, shap_max = np.min(shaps), np.max(shaps)
                rng = shap_max - shap_min
                xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)
                if np.std(shaps) < (global_high - global_low) / 100:
                    ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs)
                else:
                    ds = gaussian_kde(shaps)(xs)
                ds /= np.max(ds) * 3

                values = features[:, i]
                window_size = max(10, len(values) // 20)
                smooth_values = np.zeros(len(xs) - 1)
                sort_inds = np.argsort(shaps)
                trailing_pos = 0
                leading_pos = 0
                running_sum = 0
                back_fill = 0
                for j in range(len(xs) - 1):
                    while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:
                        running_sum += values[sort_inds[leading_pos]]
                        leading_pos += 1
                        if leading_pos - trailing_pos > 20:
                            running_sum -= values[sort_inds[trailing_pos]]
                            trailing_pos += 1
                    if leading_pos - trailing_pos > 0:
                        smooth_values[j] = running_sum / (leading_pos - trailing_pos)
                        for k in range(back_fill):
                            smooth_values[j - k - 1] = smooth_values[j]
                    else:
                        back_fill += 1

                vmin = np.nanpercentile(values, 5)
                vmax = np.nanpercentile(values, 95)
                if vmin == vmax:
                    vmin = np.nanpercentile(values, 1)
                    vmax = np.nanpercentile(values, 99)
                    if vmin == vmax:
                        vmin = np.min(values)
                        vmax = np.max(values)
                pl.scatter(shaps, np.ones(shap_values.shape[0]) * pos, s=9, cmap=colors.red_blue, vmin=vmin, vmax=vmax,
                           c=values, alpha=alpha, linewidth=0, zorder=1)
                # smooth_values -= np.nanpercentile(smooth_values, 5)
                # smooth_values /= np.nanpercentile(smooth_values, 95)
                smooth_values -= vmin
                if vmax - vmin > 0:
                    smooth_values /= vmax - vmin
                for i in range(len(xs) - 1):
                    if ds[i] > 0.05 or ds[i + 1] > 0.05:
                        pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]],
                                        [pos - ds[i], pos - ds[i + 1]], color=colors.red_blue(smooth_values[i]),
                                        zorder=2)

        else:
            parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False,
                                  widths=0.7, showmeans=False, showextrema=False, showmedians=False)

            for pc in parts['bodies']:
                pc.set_facecolor(color)
                pc.set_edgecolor('none')
                pc.set_alpha(alpha)

    elif plot_type == "layered_violin":  # courtesy of @kodonnell
        num_x_points = 200
        bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype(
            'int')  # the indices of the feature data corresponding to each bin
        shap_min, shap_max = 
np.min(shap_values), np.max(shap_values)
        x_points = np.linspace(shap_min, shap_max, num_x_points)

        # loop through each feature and plot:
        for pos, ind in enumerate(feature_order):
            # decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
            # to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
            feature = features[:, ind]
            unique, counts = np.unique(feature, return_counts=True)
            if unique.shape[0] <= layered_violin_max_num_bins:
                order = np.argsort(unique)
                thesebins = np.cumsum(counts[order])
                thesebins = np.insert(thesebins, 0, 0)
            else:
                thesebins = bins
            nbins = thesebins.shape[0] - 1
            # order the feature data so we can apply percentiling
            order = np.argsort(feature)
            # x axis is located at y0 = pos, with pos being there for offset
            y0 = np.ones(num_x_points) * pos
            # calculate kdes:
            ys = np.zeros((nbins, num_x_points))
            for i in range(nbins):
                # get shap values in this bin:
                shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
                # if there's only one element, then we can't fit a KDE, so skip this bin
                if shaps.shape[0] == 1:
                    warnings.warn(
                        "not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot."
                        % (i, feature_names[ind]))
                    # to ignore it, just set it to the previous y-values (so the area between them will be zero). Note ys is already 0, so there's
                    # nothing to do if i == 0
                    if i > 0:
                        ys[i, :] = ys[i - 1, :]
                    continue
                # save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
                ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
                # scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
                # do nothing, but when we've gone with the unique option, this will matter - e.g. if 99% are male and 1%
                # female, we want the 1% to appear a lot smaller.
                size = thesebins[i + 1] - thesebins[i]
                bin_size_if_even = features.shape[0] / nbins
                relative_bin_size = size / bin_size_if_even
                ys[i, :] *= relative_bin_size
            # now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
# instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no # whitespace ys = np.cumsum(ys, axis=0) width = 0.8 scale = ys.max() * 2 / width # 2 is here as we plot both sides of x axis for i in range(nbins - 1, -1, -1): y = ys[i, :] / scale c = pl.get_cmap(color)(i / ( nbins - 1)) if color in pl.cm.datad else color # if color is a cmap, use it, otherwise use a color pl.fill_between(x_points, pos - y, pos + y, facecolor=c) pl.xlim(shap_min, shap_max) elif not multi_class and plot_type == "bar": feature_inds = feature_order[:max_display] y_pos = np.arange(len(feature_inds)) global_shap_values = np.abs(shap_values).mean(0) pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color) pl.yticks(y_pos, fontsize=13) pl.gca().set_yticklabels([feature_names[i] for i in feature_inds]) elif multi_class and plot_type == "bar": if class_names is None: class_names = ["Class "+str(i) for i in range(len(shap_values))] feature_inds = feature_order[:max_display] y_pos = np.arange(len(feature_inds)) left_pos = np.zeros(len(feature_inds)) class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))]) for i,ind in enumerate(class_inds): global_shap_values = np.abs(shap_values[ind]).mean(0) pl.barh( y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center', color=color(i), label=class_names[ind] ) left_pos += global_shap_values[feature_inds] pl.yticks(y_pos, fontsize=13) pl.gca().set_yticklabels([feature_names[i] for i in feature_inds]) pl.legend(frameon=False, fontsize=12) # draw the color bar if color_bar and features is not None and plot_type != "bar" and \ (plot_type != "layered_violin" or color in pl.cm.datad): import matplotlib.cm as cm m = cm.ScalarMappable(cmap=colors.red_blue if plot_type != "layered_violin" else pl.get_cmap(color)) m.set_array([0, 1]) cb = pl.colorbar(m, ticks=[0, 1], aspect=1000) cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']]) cb.set_label(labels['FEATURE_VALUE'], size=12, labelpad=0) cb.ax.tick_params(labelsize=11, length=0) cb.set_alpha(1) cb.outline.set_visible(False) bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted()) cb.ax.set_aspect((bbox.height - 0.9) * 20) # cb.draw_all() pl.gca().xaxis.set_ticks_position('bottom') pl.gca().yaxis.set_ticks_position('none') pl.gca().spines['right'].set_visible(False) pl.gca().spines['top'].set_visible(False) pl.gca().spines['left'].set_visible(False) pl.gca().tick_params(color=axis_color, labelcolor=axis_color) pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13) if plot_type != "bar": pl.gca().tick_params('y', length=20, width=0.5, which='major') pl.gca().tick_params('x', labelsize=11) pl.ylim(-1, len(feature_order)) if plot_type == "bar": pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13) else: pl.xlabel(labels['VALUE'], fontsize=13) if show: pl.show()
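A minimal usage sketch of the function above (the dataset loader, model, and explainer choices are illustrative and assume the shap package of this era):

import shap
import xgboost

X, y = shap.datasets.boston()                       # any tabular dataset works here
model = xgboost.XGBRegressor().fit(X, y)
shap_values = shap.TreeExplainer(model).shap_values(X)

shap.summary_plot(shap_values, X)                   # beeswarm dot plot, colored by feature value
shap.summary_plot(shap_values, X, plot_type="bar")  # mean(|SHAP|) global importance bars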
Kernel SHAP 1000 mean ref. color = red_blue_circle(0.5) linestyle = solid
def kernel_shap_1000_meanref(model, data): """ Kernel SHAP 1000 mean ref. color = red_blue_circle(0.5) linestyle = solid """ return lambda X: KernelExplainer(model.predict, kmeans(data, 1)).shap_values(X, nsamples=1000, l1_reg=0)
IME 1000 color = red_blue_circle(0.5) linestyle = dashed
def sampling_shap_1000(model, data): """ IME 1000 color = red_blue_circle(0.5) linestyle = dashed """ return lambda X: SamplingExplainer(model.predict, data).shap_values(X, nsamples=1000)
TreeExplainer (independent) color = red_blue_circle(0) linestyle = dashed
def tree_shap_independent_200(model, data): """ TreeExplainer (independent) color = red_blue_circle(0) linestyle = dashed """ data_subsample = sklearn.utils.resample(data, replace=False, n_samples=min(200, data.shape[0]), random_state=0) return TreeExplainer(model, data_subsample, feature_dependence="independent").shap_values
mean(|TreeExplainer|) color = red_blue_circle(0.25) linestyle = solid
def mean_abs_tree_shap(model, data): """ mean(|TreeExplainer|) color = red_blue_circle(0.25) linestyle = solid """ def f(X): v = TreeExplainer(model).shap_values(X) if isinstance(v, list): return [np.tile(np.abs(sv).mean(0), (X.shape[0], 1)) for sv in v] else: return np.tile(np.abs(v).mean(0), (X.shape[0], 1)) return f
Saabas color = red_blue_circle(0) linestyle = dotted
def saabas(model, data): """ Saabas color = red_blue_circle(0) linestyle = dotted """ return lambda X: TreeExplainer(model).shap_values(X, approximate=True)
LIME Tabular 1000
def lime_tabular_regression_1000(model, data): """ LIME Tabular 1000 """ return lambda X: other.LimeTabularExplainer(model.predict, data, mode="regression").attributions(X, nsamples=1000)
Deep SHAP (DeepLIFT)
def deep_shap(model, data): """ Deep SHAP (DeepLIFT) """ if isinstance(model, KerasWrap): model = model.model explainer = DeepExplainer(model, kmeans(data, 1).data) def f(X): phi = explainer.shap_values(X) if type(phi) is list and len(phi) == 1: return phi[0] else: return phi return f
Expected Gradients
def expected_gradients(model, data): """ Expected Gradients """ if isinstance(model, KerasWrap): model = model.model explainer = GradientExplainer(model, data) def f(X): phi = explainer.shap_values(X) if type(phi) is list and len(phi) == 1: return phi[0] else: return phi return f
Return approximate SHAP values for the model applied to the data given by X.

Parameters
----------
X : list,
    if framework == 'tensorflow': numpy.array, or pandas.DataFrame
    if framework == 'pytorch': torch.tensor
    A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output.

ranked_outputs : None or int
    If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where shap_values is a list of numpy arrays for each of the output ranks, and indexes is a matrix that indicates for each sample which output indexes were chosen as "top".

output_rank_order : "max", "min", or "max_abs"
    How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value.

Returns
-------
For models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which is the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that indicates for each sample which output indexes were chosen as "top".
def shap_values(self, X, ranked_outputs=None, output_rank_order='max'):
    """ Return approximate SHAP values for the model applied to the data given by X.

    Parameters
    ----------
    X : list,
        if framework == 'tensorflow': numpy.array, or pandas.DataFrame
        if framework == 'pytorch': torch.tensor
        A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to
        explain the model's output.

    ranked_outputs : None or int
        If ranked_outputs is None then we explain all the outputs in a multi-output model. If
        ranked_outputs is a positive integer then we only explain that many of the top model
        outputs (where "top" is determined by output_rank_order). Note that this causes a pair
        of values to be returned (shap_values, indexes), where shap_values is a list of numpy
        arrays for each of the output ranks, and indexes is a matrix that indicates for each
        sample which output indexes were chosen as "top".

    output_rank_order : "max", "min", or "max_abs"
        How to order the model outputs when using ranked_outputs, either by maximum, minimum, or
        maximum absolute value.

    Returns
    -------
    For models with a single output this returns a tensor of SHAP values with the same shape
    as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of
    which is the same shape as X. If ranked_outputs is None then this list of tensors matches
    the number of model outputs. If ranked_outputs is a positive integer a pair is returned
    (shap_values, indexes), where shap_values is a list of tensors with a length of
    ranked_outputs, and indexes is a matrix that indicates for each sample which output indexes
    were chosen as "top".
    """
    return self.explainer.shap_values(X, ranked_outputs, output_rank_order)
Returns a dummy agent class for use when PyTorch etc. is not installed.
def _agent_import_failed(trace):
    """Returns a dummy agent class for use when PyTorch etc. is not installed."""

    class _AgentImportFailed(Trainer):
        _name = "AgentImportFailed"
        _default_config = with_common_config({})

        def _setup(self, config):
            raise ImportError(trace)

    return _AgentImportFailed
Executes training.

Args:
    run_or_experiment (function|class|str|Experiment): If function|class|str, this is the algorithm or model to train. This may refer to the name of a built-in algorithm (e.g. RLLib's DQN or PPO), a user-defined trainable function or class, or the string identifier of a trainable function or class registered in the tune registry. If Experiment, then Tune will execute training based on Experiment.spec.
    name (str): Name of experiment.
    stop (dict): The stopping criteria. The keys may be any field in the return result of 'train()', whichever is reached first. Defaults to empty dict.
    config (dict): Algorithm-specific configuration for Tune variant generation (e.g. env, hyperparams). Defaults to empty dict. Custom search algorithms may ignore this.
    resources_per_trial (dict): Machine resources to allocate per trial, e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be assigned unless you specify them here. Defaults to 1 CPU and 0 GPUs in ``Trainable.default_resource_request()``.
    num_samples (int): Number of times to sample from the hyperparameter space. Defaults to 1. If `grid_search` is provided as an argument, the grid will be repeated `num_samples` times.
    local_dir (str): Local dir to save training results to. Defaults to ``~/ray_results``.
    upload_dir (str): Optional URI to sync training results to (e.g. ``s3://bucket``).
    trial_name_creator (func): Optional function for generating the trial string representation.
    loggers (list): List of logger creators to be used with each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS. See `ray/tune/logger.py`.
    sync_function (func|str): Function for syncing the local_dir to upload_dir. If string, then it must be a string template for syncer to run. If not provided, the sync command defaults to standard S3 or gsutil sync commands.
    checkpoint_freq (int): How many training iterations between checkpoints. A value of 0 (default) disables checkpointing.
    checkpoint_at_end (bool): Whether to checkpoint at the end of the experiment regardless of the checkpoint_freq. Default is False.
    export_formats (list): List of formats that are exported at the end of the experiment. Default is None.
    max_failures (int): Try to recover a trial from its last checkpoint at least this many times. Only applies if checkpointing is enabled. Setting to -1 will lead to infinite recovery retries. Defaults to 3.
    restore (str): Path to checkpoint. Only makes sense to set if running 1 trial. Defaults to None.
    search_alg (SearchAlgorithm): Search Algorithm. Defaults to BasicVariantGenerator.
    scheduler (TrialScheduler): Scheduler for executing the experiment. Choose among FIFO (default), MedianStopping, AsyncHyperBand, and HyperBand.
    with_server (bool): Starts a background Tune server. Needed for using the Client API.
    server_port (int): Port number for launching TuneServer.
    verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and trial results.
    resume (bool|"prompt"): If checkpoint exists, the experiment will resume from there. If resume is "prompt", Tune will prompt if checkpoint detected.
    queue_trials (bool): Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.
    reuse_actors (bool): Whether to reuse actors between different trials when possible. This can drastically speed up experiments that start and stop actors often (e.g., PBT in time-multiplexing mode).
This requires trials to have the same resource requirements.
    trial_executor (TrialExecutor): Manage the execution of trials.
    raise_on_failed_trial (bool): Raise TuneError if there exists a failed trial (of ERROR state) when the experiments complete.

Returns:
    List of Trial objects.

Raises:
    TuneError if any trials failed and `raise_on_failed_trial` is True.

Examples:
    >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())

    >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)

    >>> tune.run(
            "PG",
            num_samples=5,
            config={
                "env": "CartPole-v0",
                "lr": tune.sample_from(lambda _: np.random.rand())
            }
        )
def run(run_or_experiment,
        name=None,
        stop=None,
        config=None,
        resources_per_trial=None,
        num_samples=1,
        local_dir=None,
        upload_dir=None,
        trial_name_creator=None,
        loggers=None,
        sync_function=None,
        checkpoint_freq=0,
        checkpoint_at_end=False,
        export_formats=None,
        max_failures=3,
        restore=None,
        search_alg=None,
        scheduler=None,
        with_server=False,
        server_port=TuneServer.DEFAULT_PORT,
        verbose=2,
        resume=False,
        queue_trials=False,
        reuse_actors=False,
        trial_executor=None,
        raise_on_failed_trial=True):
    """Executes training.

    Args:
        run_or_experiment (function|class|str|Experiment): If
            function|class|str, this is the algorithm or model to train.
            This may refer to the name of a built-in algorithm
            (e.g. RLLib's DQN or PPO), a user-defined trainable
            function or class, or the string identifier of a
            trainable function or class registered in the tune registry.
            If Experiment, then Tune will execute training based on
            Experiment.spec.
        name (str): Name of experiment.
        stop (dict): The stopping criteria. The keys may be any field in
            the return result of 'train()', whichever is reached first.
            Defaults to empty dict.
        config (dict): Algorithm-specific configuration for Tune variant
            generation (e.g. env, hyperparams). Defaults to empty dict.
            Custom search algorithms may ignore this.
        resources_per_trial (dict): Machine resources to allocate per trial,
            e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
            assigned unless you specify them here. Defaults to 1 CPU and 0
            GPUs in ``Trainable.default_resource_request()``.
        num_samples (int): Number of times to sample from the
            hyperparameter space. Defaults to 1. If `grid_search` is
            provided as an argument, the grid will be repeated
            `num_samples` times.
        local_dir (str): Local dir to save training results to.
            Defaults to ``~/ray_results``.
        upload_dir (str): Optional URI to sync training results
            to (e.g. ``s3://bucket``).
        trial_name_creator (func): Optional function for generating
            the trial string representation.
        loggers (list): List of logger creators to be used with
            each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
            See `ray/tune/logger.py`.
        sync_function (func|str): Function for syncing the local_dir to
            upload_dir. If string, then it must be a string template for
            syncer to run. If not provided, the sync command defaults
            to standard S3 or gsutil sync commands.
        checkpoint_freq (int): How many training iterations between
            checkpoints. A value of 0 (default) disables checkpointing.
        checkpoint_at_end (bool): Whether to checkpoint at the end of the
            experiment regardless of the checkpoint_freq. Default is False.
        export_formats (list): List of formats that are exported at the end
            of the experiment. Default is None.
        max_failures (int): Try to recover a trial from its last
            checkpoint at least this many times. Only applies if
            checkpointing is enabled. Setting to -1 will lead to infinite
            recovery retries. Defaults to 3.
        restore (str): Path to checkpoint. Only makes sense to set if
            running 1 trial. Defaults to None.
        search_alg (SearchAlgorithm): Search Algorithm. Defaults to
            BasicVariantGenerator.
        scheduler (TrialScheduler): Scheduler for executing
            the experiment. Choose among FIFO (default), MedianStopping,
            AsyncHyperBand, and HyperBand.
        with_server (bool): Starts a background Tune server. Needed for
            using the Client API.
        server_port (int): Port number for launching TuneServer.
        verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
            1 = only status updates, 2 = status and trial results.
        resume (bool|"prompt"): If checkpoint exists, the experiment will
            resume from there.
If resume is "prompt", Tune will prompt if checkpoint detected.
        queue_trials (bool): Whether to queue trials when the cluster does
            not currently have enough resources to launch one. This should
            be set to True when running on an autoscaling cluster to enable
            automatic scale-up.
        reuse_actors (bool): Whether to reuse actors between different trials
            when possible. This can drastically speed up experiments that start
            and stop actors often (e.g., PBT in time-multiplexing mode). This
            requires trials to have the same resource requirements.
        trial_executor (TrialExecutor): Manage the execution of trials.
        raise_on_failed_trial (bool): Raise TuneError if there exists a failed
            trial (of ERROR state) when the experiments complete.

    Returns:
        List of Trial objects.

    Raises:
        TuneError if any trials failed and `raise_on_failed_trial` is True.

    Examples:
        >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())

        >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)

        >>> tune.run(
                "PG",
                num_samples=5,
                config={
                    "env": "CartPole-v0",
                    "lr": tune.sample_from(lambda _: np.random.rand())
                }
            )
    """
    experiment = run_or_experiment
    if not isinstance(run_or_experiment, Experiment):
        experiment = Experiment(
            name, run_or_experiment, stop, config, resources_per_trial,
            num_samples, local_dir, upload_dir, trial_name_creator, loggers,
            sync_function, checkpoint_freq, checkpoint_at_end, export_formats,
            max_failures, restore)
    else:
        logger.debug("Ignoring some parameters passed into tune.run.")

    checkpoint_dir = _find_checkpoint_dir(experiment)
    should_restore = _prompt_restore(checkpoint_dir, resume)

    runner = None
    if should_restore:
        try:
            runner = TrialRunner.restore(checkpoint_dir, search_alg,
                                         scheduler, trial_executor)
        except Exception:
            logger.exception("Runner restore failed. Restarting experiment.")
    else:
        logger.info("Starting a new experiment.")

    if not runner:
        scheduler = scheduler or FIFOScheduler()
        search_alg = search_alg or BasicVariantGenerator()

        search_alg.add_configurations([experiment])

        runner = TrialRunner(
            search_alg,
            scheduler=scheduler,
            metadata_checkpoint_dir=checkpoint_dir,
            launch_web_server=with_server,
            server_port=server_port,
            verbose=bool(verbose > 1),
            queue_trials=queue_trials,
            reuse_actors=reuse_actors,
            trial_executor=trial_executor)

    if verbose:
        print(runner.debug_string(max_debug=99999))

    last_debug = 0
    while not runner.is_finished():
        runner.step()
        if time.time() - last_debug > DEBUG_PRINT_INTERVAL:
            if verbose:
                print(runner.debug_string())
            last_debug = time.time()

    if verbose:
        print(runner.debug_string(max_debug=99999))

    wait_for_log_sync()

    errored_trials = []
    for trial in runner.get_trials():
        if trial.status != Trial.TERMINATED:
            errored_trials += [trial]

    if errored_trials:
        if raise_on_failed_trial:
            raise TuneError("Trials did not complete", errored_trials)
        else:
            logger.error("Trials did not complete: %s", errored_trials)

    return runner.get_trials()
Runs and blocks until all trials finish. Examples: >>> experiment_spec = Experiment("experiment", my_func) >>> run_experiments(experiments=experiment_spec) >>> experiment_spec = {"experiment": {"run": my_func}} >>> run_experiments(experiments=experiment_spec) >>> run_experiments( >>> experiments=experiment_spec, >>> scheduler=MedianStoppingRule(...)) >>> run_experiments( >>> experiments=experiment_spec, >>> search_alg=SearchAlgorithm(), >>> scheduler=MedianStoppingRule(...)) Returns: List of Trial objects, holding data for each executed trial.
def run_experiments(experiments,
                    search_alg=None,
                    scheduler=None,
                    with_server=False,
                    server_port=TuneServer.DEFAULT_PORT,
                    verbose=2,
                    resume=False,
                    queue_trials=False,
                    reuse_actors=False,
                    trial_executor=None,
                    raise_on_failed_trial=True):
    """Runs and blocks until all trials finish.

    Examples:
        >>> experiment_spec = Experiment("experiment", my_func)
        >>> run_experiments(experiments=experiment_spec)

        >>> experiment_spec = {"experiment": {"run": my_func}}
        >>> run_experiments(experiments=experiment_spec)

        >>> run_experiments(
        >>>     experiments=experiment_spec,
        >>>     scheduler=MedianStoppingRule(...))

        >>> run_experiments(
        >>>     experiments=experiment_spec,
        >>>     search_alg=SearchAlgorithm(),
        >>>     scheduler=MedianStoppingRule(...))

    Returns:
        List of Trial objects, holding data for each executed trial.
    """
    # It is important to do this here because it validates and schematizes
    # the experiments and performs the implicit trainable registration.
    experiments = convert_to_experiment_list(experiments)

    trials = []
    for exp in experiments:
        trials += run(
            exp,
            search_alg=search_alg,
            scheduler=scheduler,
            with_server=with_server,
            server_port=server_port,
            verbose=verbose,
            resume=resume,
            queue_trials=queue_trials,
            reuse_actors=reuse_actors,
            trial_executor=trial_executor,
            raise_on_failed_trial=raise_on_failed_trial)
    return trials
Flushes remaining output records in the output queues to plasma.

None is used as a special type of record, propagated from sources to sinks, to signal the end of data in a stream.

Attributes:
    close (bool): A flag denoting whether the channel should also be marked as 'closed' (True) or not (False) after flushing.
def _flush(self, close=False):
    """Flushes remaining output records in the output queues to plasma.

    None is used as a special type of record, propagated from sources
    to sinks, to signal the end of data in a stream.

    Attributes:
        close (bool): A flag denoting whether the channel should
        also be marked as 'closed' (True) or not (False) after flushing.
    """
    for channel in self.forward_channels:
        if close is True:
            channel.queue.put_next(None)
        channel.queue._flush_writes()
    for channels in self.shuffle_channels:
        for channel in channels:
            if close is True:
                channel.queue.put_next(None)
            channel.queue._flush_writes()
    for channels in self.shuffle_key_channels:
        for channel in channels:
            if close is True:
                channel.queue.put_next(None)
            channel.queue._flush_writes()
    for channels in self.round_robin_channels:
        for channel in channels:
            if close is True:
                channel.queue.put_next(None)
            channel.queue._flush_writes()
Returns an appropriate preprocessor class for the given space.
def get_preprocessor(space): """Returns an appropriate preprocessor class for the given space.""" legacy_patch_shapes(space) obs_shape = space.shape if isinstance(space, gym.spaces.Discrete): preprocessor = OneHotPreprocessor elif obs_shape == ATARI_OBS_SHAPE: preprocessor = GenericPixelPreprocessor elif obs_shape == ATARI_RAM_OBS_SHAPE: preprocessor = AtariRamPreprocessor elif isinstance(space, gym.spaces.Tuple): preprocessor = TupleFlatteningPreprocessor elif isinstance(space, gym.spaces.Dict): preprocessor = DictFlatteningPreprocessor else: preprocessor = NoPreprocessor return preprocessor
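A rough illustration of the dispatch (assuming the preprocessor classes defined alongside this function, and the gym space constructors of this era):

import gym

# Discrete observations are one-hot encoded
assert get_preprocessor(gym.spaces.Discrete(4)) is OneHotPreprocessor
# the full-resolution Atari frame shape triggers the pixel preprocessor
assert get_preprocessor(gym.spaces.Box(0, 255, (210, 160, 3))) is GenericPixelPreprocessor
# anything unrecognized passes through untouched
assert get_preprocessor(gym.spaces.Box(-1.0, 1.0, (17,))) is NoPreprocessor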
Assigns shapes to spaces that don't have shapes. This is only needed for older gym versions that don't set shapes properly for Tuple and Discrete spaces.
def legacy_patch_shapes(space): """Assigns shapes to spaces that don't have shapes. This is only needed for older gym versions that don't set shapes properly for Tuple and Discrete spaces. """ if not hasattr(space, "shape"): if isinstance(space, gym.spaces.Discrete): space.shape = () elif isinstance(space, gym.spaces.Tuple): shapes = [] for s in space.spaces: shape = legacy_patch_shapes(s) shapes.append(shape) space.shape = tuple(shapes) return space.shape
Downsamples images from (210, 160, 3) by the configured factor.
def transform(self, observation):
    """Downsamples images from (210, 160, 3) by the configured factor."""
    self.check_shape(observation)
    scaled = observation[25:-25, :, :]
    if self._dim < 84:
        scaled = cv2.resize(scaled, (84, 84))
    # OpenAI: Resize by half, then down to 42x42 (essentially mipmapping).
    # If we resize directly we lose pixels that, when mapped to 42x42,
    # aren't close enough to the pixel boundary.
    scaled = cv2.resize(scaled, (self._dim, self._dim))
    if self._grayscale:
        scaled = scaled.mean(2)
        scaled = scaled.astype(np.float32)
        # Reshape needed to keep an explicit single channel dimension
        scaled = np.reshape(scaled, [self._dim, self._dim, 1])
    if self._zero_mean:
        scaled = (scaled - 128) / 128
    else:
        scaled *= 1.0 / 255.0
    return scaled
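A shape walk-through of the same pipeline for a 42x42 grayscale configuration (dummy frame; cv2 assumed available):

import numpy as np
import cv2

frame = np.zeros((210, 160, 3), dtype=np.uint8)    # dummy Atari frame
cropped = frame[25:-25, :, :]                      # strip score/border rows
assert cropped.shape == (160, 160, 3)
half = cv2.resize(cropped, (84, 84))               # intermediate resize, see comment above
small = cv2.resize(half, (42, 42))
gray = small.mean(2).astype(np.float32).reshape(42, 42, 1)
assert gray.shape == (42, 42, 1)
scaled = (gray - 128) / 128                        # the zero-mean variant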
Get a new batch from the internal ring buffer. Returns: buf: Data item saved from inqueue. released: True if the item is now removed from the ring buffer.
def get(self): """Get a new batch from the internal ring buffer. Returns: buf: Data item saved from inqueue. released: True if the item is now removed from the ring buffer. """ if self.ttl[self.idx] <= 0: self.buffers[self.idx] = self.inqueue.get(timeout=300.0) self.ttl[self.idx] = self.cur_max_ttl if self.cur_max_ttl < self.max_ttl: self.cur_max_ttl += 1 buf = self.buffers[self.idx] self.ttl[self.idx] -= 1 released = self.ttl[self.idx] <= 0 if released: self.buffers[self.idx] = None self.idx = (self.idx + 1) % len(self.buffers) return buf, released
Runs one logical iteration of training.

Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result:

    `done` (bool): training is terminated. Filled only if not provided.

    `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overridden in order to override the system-computed time difference.

    `time_total_s` (float): Accumulated time in seconds for this entire experiment.

    `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls.

    `training_iteration` (int): The index of this training iteration, e.g. call to train().

    `pid` (str): The pid of the training process.

    `date` (str): A formatted date of when the result was processed.

    `timestamp` (str): A UNIX timestamp of when the result was processed.

    `hostname` (str): Hostname of the machine hosting the training process.

    `node_ip` (str): Node ip of the machine hosting the training process.

Returns:
    A dict that describes training progress.
def train(self):
    """Runs one logical iteration of training.

    Subclasses should override ``_train()`` instead to return results.
    This class automatically fills the following fields in the result:

        `done` (bool): training is terminated. Filled only if not provided.

        `time_this_iter_s` (float): Time in seconds this iteration
        took to run. This may be overridden in order to override the
        system-computed time difference.

        `time_total_s` (float): Accumulated time in seconds for this
        entire experiment.

        `experiment_id` (str): Unique string identifier
        for this experiment. This id is preserved
        across checkpoint / restore calls.

        `training_iteration` (int): The index of this
        training iteration, e.g. call to train().

        `pid` (str): The pid of the training process.

        `date` (str): A formatted date of when the result was processed.

        `timestamp` (str): A UNIX timestamp of when the result
        was processed.

        `hostname` (str): Hostname of the machine hosting the training
        process.

        `node_ip` (str): Node ip of the machine hosting the training
        process.

    Returns:
        A dict that describes training progress.
    """
    start = time.time()
    result = self._train()
    assert isinstance(result, dict), "_train() needs to return a dict."

    # We do not modify internal state nor update this result if duplicate.
    if RESULT_DUPLICATE in result:
        return result

    result = result.copy()

    self._iteration += 1
    self._iterations_since_restore += 1

    if result.get(TIME_THIS_ITER_S) is not None:
        time_this_iter = result[TIME_THIS_ITER_S]
    else:
        time_this_iter = time.time() - start
    self._time_total += time_this_iter
    self._time_since_restore += time_this_iter

    result.setdefault(DONE, False)

    # self._timesteps_total should only be tracked if increments provided
    if result.get(TIMESTEPS_THIS_ITER) is not None:
        if self._timesteps_total is None:
            self._timesteps_total = 0
        self._timesteps_total += result[TIMESTEPS_THIS_ITER]
        self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]

    # self._episodes_total should only be tracked if increments provided
    if result.get(EPISODES_THIS_ITER) is not None:
        if self._episodes_total is None:
            self._episodes_total = 0
        self._episodes_total += result[EPISODES_THIS_ITER]

    # self._timesteps_total should not override user-provided total
    result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
    result.setdefault(EPISODES_TOTAL, self._episodes_total)
    result.setdefault(TRAINING_ITERATION, self._iteration)

    # Provides auto-filled neg_mean_loss for avoiding regressions
    if result.get("mean_loss"):
        result.setdefault("neg_mean_loss", -result["mean_loss"])

    now = datetime.today()
    result.update(
        experiment_id=self._experiment_id,
        date=now.strftime("%Y-%m-%d_%H-%M-%S"),
        timestamp=int(time.mktime(now.timetuple())),
        time_this_iter_s=time_this_iter,
        time_total_s=self._time_total,
        pid=os.getpid(),
        hostname=os.uname()[1],
        node_ip=self._local_ip,
        config=self.config,
        time_since_restore=self._time_since_restore,
        timesteps_since_restore=self._timesteps_since_restore,
        iterations_since_restore=self._iterations_since_restore)

    self._log_result(result)

    return result
Removes the subdirectory containing the given checkpoint.

Parameters
----------
checkpoint_dir : str
    Path to the checkpoint.
def delete_checkpoint(self, checkpoint_dir):
    """Removes the subdirectory containing the given checkpoint.

    Parameters
    ----------
    checkpoint_dir : str
        Path to the checkpoint.
    """
    if os.path.isfile(checkpoint_dir):
        shutil.rmtree(os.path.dirname(checkpoint_dir))
    else:
        shutil.rmtree(checkpoint_dir)
Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore().
def save(self, checkpoint_dir=None): """Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore(). """ checkpoint_dir = os.path.join(checkpoint_dir or self.logdir, "checkpoint_{}".format(self._iteration)) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint = self._save(checkpoint_dir) saved_as_dict = False if isinstance(checkpoint, string_types): if (not checkpoint.startswith(checkpoint_dir) or checkpoint == checkpoint_dir): raise ValueError( "The returned checkpoint path must be within the " "given checkpoint dir {}: {}".format( checkpoint_dir, checkpoint)) if not os.path.exists(checkpoint): raise ValueError( "The returned checkpoint path does not exist: {}".format( checkpoint)) checkpoint_path = checkpoint elif isinstance(checkpoint, dict): saved_as_dict = True checkpoint_path = os.path.join(checkpoint_dir, "checkpoint") with open(checkpoint_path, "wb") as f: pickle.dump(checkpoint, f) else: raise ValueError( "`_save` must return a dict or string type: {}".format( str(type(checkpoint)))) with open(checkpoint_path + ".tune_metadata", "wb") as f: pickle.dump({ "experiment_id": self._experiment_id, "iteration": self._iteration, "timesteps_total": self._timesteps_total, "time_total": self._time_total, "episodes_total": self._episodes_total, "saved_as_dict": saved_as_dict }, f) return checkpoint_path
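The contract with ``_save()`` is that it returns either a path inside ``checkpoint_dir`` or a plain dict, which ``save()`` pickles for you and ``restore()`` later hands back to ``_restore()``. A hedged sketch of a dict-style subclass (class and field names are hypothetical):

class MyTrainable(Trainable):
    def _setup(self, config):
        self.weights = {"w": 0.0}

    def _train(self):
        self.weights["w"] += 1.0   # stand-in for one unit of real work
        return {"mean_loss": 1.0 / self.weights["w"]}

    def _save(self, checkpoint_dir):
        # returning a dict lets save() handle the pickling and metadata
        return {"weights": self.weights}

    def _restore(self, checkpoint):
        # receives the same dict back because saved_as_dict was True
        self.weights = checkpoint["weights"]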
Saves the current model state to a Python object. It also saves to disk but does not return the checkpoint path. Returns: Object holding checkpoint data.
def save_to_object(self): """Saves the current model state to a Python object. It also saves to disk but does not return the checkpoint path. Returns: Object holding checkpoint data. """ tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir) checkpoint_prefix = self.save(tmpdir) data = {} base_dir = os.path.dirname(checkpoint_prefix) for path in os.listdir(base_dir): path = os.path.join(base_dir, path) if path.startswith(checkpoint_prefix): with open(path, "rb") as f: data[os.path.basename(path)] = f.read() out = io.BytesIO() data_dict = pickle.dumps({ "checkpoint_name": os.path.basename(checkpoint_prefix), "data": data, }) if len(data_dict) > 10e6: # getting pretty large logger.info("Checkpoint size is {} bytes".format(len(data_dict))) out.write(data_dict) shutil.rmtree(tmpdir) return out.getvalue()
Restores training state from a given model checkpoint. These checkpoints are returned from calls to save(). Subclasses should override ``_restore()`` instead to restore state. This method restores additional metadata saved with the checkpoint.
def restore(self, checkpoint_path): """Restores training state from a given model checkpoint. These checkpoints are returned from calls to save(). Subclasses should override ``_restore()`` instead to restore state. This method restores additional metadata saved with the checkpoint. """ with open(checkpoint_path + ".tune_metadata", "rb") as f: metadata = pickle.load(f) self._experiment_id = metadata["experiment_id"] self._iteration = metadata["iteration"] self._timesteps_total = metadata["timesteps_total"] self._time_total = metadata["time_total"] self._episodes_total = metadata["episodes_total"] saved_as_dict = metadata["saved_as_dict"] if saved_as_dict: with open(checkpoint_path, "rb") as loaded_state: checkpoint_dict = pickle.load(loaded_state) self._restore(checkpoint_dict) else: self._restore(checkpoint_path) self._time_since_restore = 0.0 self._timesteps_since_restore = 0 self._iterations_since_restore = 0 self._restored = True
Restores training state from a checkpoint object. These checkpoints are returned from calls to save_to_object().
def restore_from_object(self, obj): """Restores training state from a checkpoint object. These checkpoints are returned from calls to save_to_object(). """ info = pickle.loads(obj) data = info["data"] tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir) checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"]) for file_name, file_contents in data.items(): with open(os.path.join(tmpdir, file_name), "wb") as f: f.write(file_contents) self.restore(checkpoint_path) shutil.rmtree(tmpdir)
Exports model based on export_formats.

Subclasses should override _export_model() to actually export model to local directory.

Args:
    export_formats (list): List of formats that should be exported.
    export_dir (str): Optional dir to place the exported model. Defaults to self.logdir.

Returns:
    A dict that maps ExportFormats to successfully exported models.
def export_model(self, export_formats, export_dir=None):
    """Exports model based on export_formats.

    Subclasses should override _export_model() to actually
    export model to local directory.

    Args:
        export_formats (list): List of formats that should be exported.
        export_dir (str): Optional dir to place the exported model.
            Defaults to self.logdir.

    Returns:
        A dict that maps ExportFormats to successfully exported models.
    """
    export_dir = export_dir or self.logdir
    return self._export_model(export_formats, export_dir)
See Schedule.value
def value(self, t): """See Schedule.value""" fraction = min(float(t) / max(1, self.schedule_timesteps), 1.0) return self.initial_p + fraction * (self.final_p - self.initial_p)
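For example, annealing epsilon from 1.0 to 0.1 over 100 timesteps. The constructor arguments below mirror the fields used in ``value()`` and are assumed, not confirmed:

sched = LinearSchedule(schedule_timesteps=100, initial_p=1.0, final_p=0.1)
assert sched.value(0) == 1.0                   # start of the schedule
assert abs(sched.value(50) - 0.55) < 1e-9      # halfway: 1.0 + 0.5 * (0.1 - 1.0)
assert abs(sched.value(500) - 0.1) < 1e-9      # clamped at final_p afterwards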
Dump a whole json record into the given file.

Overwrite the file if the overwrite flag is set, otherwise append to it.

Args:
    json_info (dict): Information dict to be dumped.
    json_file (str): File path to be dumped to.
    overwrite (bool): Whether to overwrite the file instead of appending.
def dump_json(json_info, json_file, overwrite=True):
    """Dump a whole json record into the given file.

    Overwrite the file if the overwrite flag is set, otherwise append to it.

    Args:
        json_info (dict): Information dict to be dumped.
        json_file (str): File path to be dumped to.
        overwrite (bool): Whether to overwrite the file instead of appending.
    """
    if overwrite:
        mode = "w"
    else:
        # "w+" would also truncate the file, so append instead
        mode = "a"

    try:
        with open(json_file, mode) as f:
            f.write(json.dumps(json_info))
    except BaseException as e:
        logging.error(str(e))
Parse a whole json record from the given file.

Return None if the json file does not exist or an exception occurs.

Args:
    json_file (str): File path to be parsed.

Returns:
    A dict of json info.
def parse_json(json_file):
    """Parse a whole json record from the given file.

    Return None if the json file does not exist or an exception occurs.

    Args:
        json_file (str): File path to be parsed.

    Returns:
        A dict of json info.
    """
    if not os.path.exists(json_file):
        return None

    try:
        with open(json_file, "r") as f:
            info_str = f.read()
            json_info = json.loads(info_str)
            return unicode2str(json_info)
    except BaseException as e:
        logging.error(str(e))
        return None
Parse multiple json records from the given file.

Seek to the offset as the start point before parsing if offset is set. Return an empty list if the json file does not exist or an exception occurs.

Args:
    json_file (str): File path to be parsed.
    offset (int): Initial seek position of the file.

Returns:
    A list of parsed json records and the new offset after parsing.
def parse_multiple_json(json_file, offset=None):
    """Parse multiple json records from the given file.

    Seek to the offset as the start point before parsing if offset is set.
    Return an empty list if the json file does not exist or an exception
    occurs.

    Args:
        json_file (str): File path to be parsed.
        offset (int): Initial seek position of the file.

    Returns:
        A list of parsed json records and the new offset after parsing.
    """
    json_info_list = []
    if offset is None:
        offset = 0
    if not os.path.exists(json_file):
        # return a consistent (records, offset) pair in every case
        return json_info_list, offset

    try:
        with open(json_file, "r") as f:
            f.seek(offset)
            for line in f:
                if line[-1] != "\n":
                    # Incomplete line
                    break
                json_info = json.loads(line)
                json_info_list.append(json_info)
                offset += len(line)
    except BaseException as e:
        logging.error(str(e))

    return json_info_list, offset
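A sketch of tailing a growing result log incrementally with the returned offset (file name illustrative; assumes the consistent tuple return above):

offset = 0
records = []
while True:
    new_records, offset = parse_multiple_json("result_log.json", offset)
    if not new_records:
        break   # a live tailer would typically sleep and retry here
    records.extend(new_records)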
Convert the unicode element of the content to str recursively.
def unicode2str(content): """Convert the unicode element of the content to str recursively.""" if isinstance(content, dict): result = {} for key in content.keys(): result[unicode2str(key)] = unicode2str(content[key]) return result elif isinstance(content, list): return [unicode2str(element) for element in content] elif isinstance(content, int) or isinstance(content, float): return content else: return content.encode("utf-8")
Computes the loss of the network.
def loss(self, xs, ys): """Computes the loss of the network.""" return float( self.sess.run( self.cross_entropy, feed_dict={ self.x: xs, self.y_: ys }))
Computes the gradients of the network.
def grad(self, xs, ys): """Computes the gradients of the network.""" return self.sess.run( self.cross_entropy_grads, feed_dict={ self.x: xs, self.y_: ys })
Creates the queue and preprocessing operations for the dataset. Args: data_path: Filename for cifar10 data. size: The number of images in the dataset. dataset: The dataset we are using. Returns: queue: A Tensorflow queue for extracting the images and labels.
def build_data(data_path, size, dataset): """Creates the queue and preprocessing operations for the dataset. Args: data_path: Filename for cifar10 data. size: The number of images in the dataset. dataset: The dataset we are using. Returns: queue: A Tensorflow queue for extracting the images and labels. """ image_size = 32 if dataset == "cifar10": label_bytes = 1 label_offset = 0 elif dataset == "cifar100": label_bytes = 1 label_offset = 1 depth = 3 image_bytes = image_size * image_size * depth record_bytes = label_bytes + label_offset + image_bytes def load_transform(value): # Convert these examples to dense labels and processed images. record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes]) label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32) # Convert from string to [depth * height * width] to # [depth, height, width]. depth_major = tf.reshape( tf.slice(record, [label_bytes], [image_bytes]), [depth, image_size, image_size]) # Convert from [depth, height, width] to [height, width, depth]. image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32) return (image, label) # Read examples from files in the filename queue. data_files = tf.gfile.Glob(data_path) data = tf.contrib.data.FixedLengthRecordDataset(data_files, record_bytes=record_bytes) data = data.map(load_transform) data = data.batch(size) iterator = data.make_one_shot_iterator() return iterator.get_next()
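The record size arithmetic, worked out explicitly (the values follow directly from the constants above):

# CIFAR-10: one label byte, no offset, then a depth-major 3x32x32 image
label_bytes, label_offset, depth, image_size = 1, 0, 3, 32
image_bytes = image_size * image_size * depth            # 3072
assert label_bytes + label_offset + image_bytes == 3073  # bytes per record

# CIFAR-100 stores a coarse-label byte before the fine label, hence offset 1
assert 1 + 1 + image_bytes == 3074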
Create or update a Ray cluster.
def create_or_update(cluster_config_file, min_workers, max_workers, no_restart, restart_only, yes, cluster_name): """Create or update a Ray cluster.""" if restart_only or no_restart: assert restart_only != no_restart, "Cannot set both 'restart_only' " \ "and 'no_restart' at the same time!" create_or_update_cluster(cluster_config_file, min_workers, max_workers, no_restart, restart_only, yes, cluster_name)
Build CIFAR image and labels.

Args:
    data: A tuple (images, labels) of numpy arrays holding the raw dataset.
    batch_size: Input batch size.
    dataset: Either "cifar10" or "cifar100".
    train: True if we are training and false if we are testing.

Returns:
    images: Batches of images of size [batch_size, image_size, image_size, 3].
    labels: Batches of labels of size [batch_size, num_classes].
def build_input(data, batch_size, dataset, train):
    """Build CIFAR image and labels.

    Args:
        data: A tuple (images, labels) of numpy arrays holding the raw dataset.
        batch_size: Input batch size.
        dataset: Either "cifar10" or "cifar100".
        train: True if we are training and false if we are testing.

    Returns:
        images: Batches of images of size
            [batch_size, image_size, image_size, 3].
        labels: Batches of labels of size [batch_size, num_classes].
    """
    image_size = 32
    depth = 3
    num_classes = 10 if dataset == "cifar10" else 100
    images, labels = data
    num_samples = images.shape[0] - images.shape[0] % batch_size
    dataset = tf.contrib.data.Dataset.from_tensor_slices(
        (images[:num_samples], labels[:num_samples]))

    def map_train(image, label):
        image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4,
                                                       image_size + 4)
        image = tf.random_crop(image, [image_size, image_size, 3])
        image = tf.image.random_flip_left_right(image)
        image = tf.image.per_image_standardization(image)
        return (image, label)

    def map_test(image, label):
        image = tf.image.resize_image_with_crop_or_pad(image, image_size,
                                                       image_size)
        image = tf.image.per_image_standardization(image)
        return (image, label)

    dataset = dataset.map(map_train if train else map_test)
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat()
    if train:
        dataset = dataset.shuffle(buffer_size=16 * batch_size)

    images, labels = dataset.make_one_shot_iterator().get_next()
    images = tf.reshape(images, [batch_size, image_size, image_size, depth])
    labels = tf.reshape(labels, [batch_size, 1])
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    labels = tf.sparse_to_dense(
        tf.concat([indices, labels], 1), [batch_size, num_classes], 1.0, 0.0)

    assert len(images.get_shape()) == 4
    assert images.get_shape()[0] == batch_size
    assert images.get_shape()[-1] == 3
    assert len(labels.get_shape()) == 2
    assert labels.get_shape()[0] == batch_size
    assert labels.get_shape()[1] == num_classes

    if not train:
        tf.summary.image("images", images)
    return images, labels
Tear down the Ray cluster.
def teardown(cluster_config_file, yes, workers_only, cluster_name): """Tear down the Ray cluster.""" teardown_cluster(cluster_config_file, yes, workers_only, cluster_name)
Kills a random Ray node. For testing purposes only.
def kill_random_node(cluster_config_file, yes, cluster_name): """Kills a random Ray node. For testing purposes only.""" click.echo("Killed node with IP " + kill_node(cluster_config_file, yes, cluster_name))
Uploads and runs a script on the specified cluster. The script is automatically synced to the following location: os.path.join("~", os.path.basename(script))
def submit(cluster_config_file, docker, screen, tmux, stop, start, cluster_name, port_forward, script, script_args): """Uploads and runs a script on the specified cluster. The script is automatically synced to the following location: os.path.join("~", os.path.basename(script)) """ assert not (screen and tmux), "Can specify only one of `screen` or `tmux`." if start: create_or_update_cluster(cluster_config_file, None, None, False, False, True, cluster_name) target = os.path.join("~", os.path.basename(script)) rsync(cluster_config_file, script, target, cluster_name, down=False) cmd = " ".join(["python", target] + list(script_args)) exec_cluster(cluster_config_file, cmd, docker, screen, tmux, stop, False, cluster_name, port_forward)
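A hedged sketch of a direct call, roughly what the `ray submit` CLI does under the hood (the script name and arguments are illustrative):

submit("cluster.yaml", docker=False, screen=False, tmux=False, stop=False,
       start=True, cluster_name=None, port_forward=None,
       script="train.py", script_args=["--epochs", "10"])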
Build a whole graph for the model.
def build_graph(self): """Build a whole graph for the model.""" self.global_step = tf.Variable(0, trainable=False) self._build_model() if self.mode == "train": self._build_train_op() else: # Additional initialization for the test network. self.variables = ray.experimental.tf_utils.TensorFlowVariables( self.cost) self.summaries = tf.summary.merge_all()
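The TensorFlowVariables helper built in eval mode is what allows Ray workers to ship weights around; a minimal sketch, assuming two constructed replicas model_a and model_b of this class:

# get_weights() returns a dict mapping variable names to numpy arrays,
# which set_weights() applies to another replica's graph.
weights = model_a.variables.get_weights()
model_b.variables.set_weights(weights)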
Build the core model within the graph.
def _build_model(self): """Build the core model within the graph.""" with tf.variable_scope("init"): x = self._conv("init_conv", self._images, 3, 3, 16, self._stride_arr(1)) strides = [1, 2, 2] activate_before_residual = [True, False, False] if self.hps.use_bottleneck: res_func = self._bottleneck_residual filters = [16, 64, 128, 256] else: res_func = self._residual filters = [16, 16, 32, 64] with tf.variable_scope("unit_1_0"): x = res_func(x, filters[0], filters[1], self._stride_arr( strides[0]), activate_before_residual[0]) for i in range(1, self.hps.num_residual_units): with tf.variable_scope("unit_1_%d" % i): x = res_func(x, filters[1], filters[1], self._stride_arr(1), False) with tf.variable_scope("unit_2_0"): x = res_func(x, filters[1], filters[2], self._stride_arr( strides[1]), activate_before_residual[1]) for i in range(1, self.hps.num_residual_units): with tf.variable_scope("unit_2_%d" % i): x = res_func(x, filters[2], filters[2], self._stride_arr(1), False) with tf.variable_scope("unit_3_0"): x = res_func(x, filters[2], filters[3], self._stride_arr( strides[2]), activate_before_residual[2]) for i in range(1, self.hps.num_residual_units): with tf.variable_scope("unit_3_%d" % i): x = res_func(x, filters[3], filters[3], self._stride_arr(1), False) with tf.variable_scope("unit_last"): x = self._batch_norm("final_bn", x) x = self._relu(x, self.hps.relu_leakiness) x = self._global_avg_pool(x) with tf.variable_scope("logit"): logits = self._fully_connected(x, self.hps.num_classes) self.predictions = tf.nn.softmax(logits) with tf.variable_scope("costs"): xent = tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=self.labels) self.cost = tf.reduce_mean(xent, name="xent") self.cost += self._decay() if self.mode == "eval": tf.summary.scalar("cost", self.cost)
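For reference, the non-bottleneck variant follows the standard 6n + 2 ResNet depth bookkeeping (the hyperparameter value below is illustrative):

# 1 init conv + 3 stages * num_residual_units * 2 convs each + 1 FC layer.
num_residual_units = 5  # illustrative value of self.hps.num_residual_units
depth = 6 * num_residual_units + 2  # -> 32, i.e. a ResNet-32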
Build training specific ops for the graph.
def _build_train_op(self):
    """Build training specific ops for the graph."""
    num_gpus = self.hps.num_gpus if self.hps.num_gpus != 0 else 1
    # The learning rate schedule is dependent on the number of gpus.
    boundaries = [int(20000 * i / np.sqrt(num_gpus)) for i in range(2, 5)]
    values = [0.1, 0.01, 0.001, 0.0001]
    self.lrn_rate = tf.train.piecewise_constant(self.global_step, boundaries,
                                                values)
    # Summary tags may not contain spaces, so use an underscore.
    tf.summary.scalar("learning_rate", self.lrn_rate)

    if self.hps.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
    elif self.hps.optimizer == "mom":
        optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)
    else:
        raise ValueError("Unsupported optimizer: %s" % self.hps.optimizer)

    apply_op = optimizer.minimize(self.cost, global_step=self.global_step)
    train_ops = [apply_op] + self._extra_train_ops
    self.train_op = tf.group(*train_ops)
    self.variables = ray.experimental.tf_utils.TensorFlowVariables(
        self.train_op)
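Worked out for a single GPU (num_gpus = 1), the schedule evaluates to:

# boundaries = [40000, 60000, 80000], so:
#   step  <  40000         -> lr = 0.1
#   40000 <= step < 60000  -> lr = 0.01
#   60000 <= step < 80000  -> lr = 0.001
#   step  >= 80000         -> lr = 0.0001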
Batch normalization.
def _batch_norm(self, name, x):
    """Batch normalization."""
    with tf.variable_scope(name):
        params_shape = [x.get_shape()[-1]]

        beta = tf.get_variable(
            "beta",
            params_shape,
            tf.float32,
            initializer=tf.constant_initializer(0.0, tf.float32))
        gamma = tf.get_variable(
            "gamma",
            params_shape,
            tf.float32,
            initializer=tf.constant_initializer(1.0, tf.float32))

        if self.mode == "train":
            mean, variance = tf.nn.moments(x, [0, 1, 2], name="moments")

            moving_mean = tf.get_variable(
                "moving_mean",
                params_shape,
                tf.float32,
                initializer=tf.constant_initializer(0.0, tf.float32),
                trainable=False)
            moving_variance = tf.get_variable(
                "moving_variance",
                params_shape,
                tf.float32,
                initializer=tf.constant_initializer(1.0, tf.float32),
                trainable=False)

            self._extra_train_ops.append(
                moving_averages.assign_moving_average(
                    moving_mean, mean, 0.9))
            self._extra_train_ops.append(
                moving_averages.assign_moving_average(
                    moving_variance, variance, 0.9))
        else:
            mean = tf.get_variable(
                "moving_mean",
                params_shape,
                tf.float32,
                initializer=tf.constant_initializer(0.0, tf.float32),
                trainable=False)
            variance = tf.get_variable(
                "moving_variance",
                params_shape,
                tf.float32,
                initializer=tf.constant_initializer(1.0, tf.float32),
                trainable=False)
            tf.summary.histogram(mean.op.name, mean)
            tf.summary.histogram(variance.op.name, variance)
        # The epsilon used to be 1e-5. Maybe 0.001 solves the NaN problem
        # in deeper nets.
        y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
        y.set_shape(x.get_shape())
        return y
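For reference, the transformation computed by tf.nn.batch_normalization above is:

# y = gamma * (x - mean) / sqrt(variance + epsilon) + beta, with
# epsilon = 0.001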
L2 weight decay loss.
def _decay(self):
    """L2 weight decay loss."""
    costs = []
    for var in tf.trainable_variables():
        # Only penalize the convolution/fully-connected kernels (named
        # "DW"), not biases or batch-norm parameters.
        if "DW" in var.op.name:
            costs.append(tf.nn.l2_loss(var))

    return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))
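Since tf.nn.l2_loss(w) computes sum(w ** 2) / 2, the returned penalty works out to:

# decay = weight_decay_rate * sum over kernels W_i of ||W_i||^2 / 2,
# taken over every trainable variable whose name contains "DW".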
Convolution.
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
        # He-style initialization scaled by the fan-out
        # (filter_size^2 * out_filters), as in the original ResNet paper.
        n = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            "DW", [filter_size, filter_size, in_filters, out_filters],
            tf.float32,
            initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / n)))
        return tf.nn.conv2d(x, kernel, strides, padding="SAME")
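A minimal call sketch from inside the model class (the layer name, filter counts, and stride are illustrative):

# 3x3 convolution from 16 to 32 channels with stride 2, NHWC layout.
y = self._conv("conv_example", x, filter_size=3, in_filters=16,
               out_filters=32, strides=[1, 2, 2, 1])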