repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
|
MXNetGraph.get_outputs
|
def get_outputs(sym, params, in_shape, in_label):
    """ Infer output shapes and return dictionary of output name to shape

    :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on
    :param dict of (str, nd.NDArray) params: model parameters (names may overlap sym inputs)
    :param list of tuple(int, ...) in_shape: list of all input shapes
    :param in_label: name of label typically used in loss that may be left in graph. This name is
        removed from list of inputs required by symbol
    :return: dictionary of output name to shape
    :rtype: dict of (str, tuple(int, ...))
    """
    # Hoist list_inputs(): the original called it twice (once in the data-name
    # filter, once in the params filter).
    sym_inputs = sym.list_inputs()
    # remove any input listed in params from sym.list_inputs() and bind them to the input shapes provided
    # by user. Also remove in_label, which is the name of the label symbol that may have been used
    # as the label for loss during training.
    data_names = [n for n in sym_inputs if n not in params and n != in_label]
    inputs = {n: tuple(s) for n, s in zip(data_names, in_shape)}
    # Add params and their shape to list of inputs
    inputs.update({n: v.shape for n, v in params.items() if n in sym_inputs})
    # Provide input data as well as input params to infer_shape()
    _, out_shapes, _ = sym.infer_shape(**inputs)
    out_names = []
    for name in sym.list_outputs():
        # MXNet appends '_output' to output node names; strip it to recover
        # the underlying node name.
        if name.endswith('_output'):
            out_names.append(name[:-len('_output')])
        else:
            logging.info("output '%s' does not end with '_output'", name)
            out_names.append(name)
    assert len(out_shapes) == len(out_names)
    # bind output shapes with output names
    return dict(zip(out_names, out_shapes))
|
python
|
def get_outputs(sym, params, in_shape, in_label):
    """ Infer the shape of every graph output.

    Binds the user-supplied ``in_shape`` list to the symbol's data inputs
    (inputs that are neither parameters nor the training label node), adds
    parameter shapes, runs shape inference, and maps each output name
    (with MXNet's ``'_output'`` suffix stripped) to its inferred shape.

    :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on
    :param dict of (str, nd.NDArray) params: model parameters
    :param list of tuple(int, ...) in_shape: list of all input shapes
    :param in_label: label-node name to exclude from the symbol's inputs
    :return: dictionary of output name to shape
    :rtype: dict of (str, tuple(int, ...))
    """
    # Data inputs = symbol inputs minus parameters minus the label node.
    graph_input_names = [
        name for name in sym.list_inputs()
        if name not in params and name != in_label
    ]
    shape_by_input = dict(zip(graph_input_names, (tuple(s) for s in in_shape)))
    # Parameters contribute their stored shapes.
    for pname, pvalue in params.items():
        if pname in sym.list_inputs():
            shape_by_input[pname] = pvalue.shape
    _, inferred_shapes, _ = sym.infer_shape(**shape_by_input)
    suffix = '_output'
    trimmed_names = []
    for out_name in sym.list_outputs():
        if out_name.endswith(suffix):
            trimmed_names.append(out_name[:-len(suffix)])
        else:
            logging.info("output '%s' does not end with '_output'", out_name)
            trimmed_names.append(out_name)
    assert len(inferred_shapes) == len(trimmed_names)
    return {n: s for n, s in zip(trimmed_names, inferred_shapes)}
|
[
"def",
"get_outputs",
"(",
"sym",
",",
"params",
",",
"in_shape",
",",
"in_label",
")",
":",
"# remove any input listed in params from sym.list_inputs() and bind them to the input shapes provided",
"# by user. Also remove in_label, which is the name of the label symbol that may have been used",
"# as the label for loss during training.",
"inputs",
"=",
"{",
"n",
":",
"tuple",
"(",
"s",
")",
"for",
"n",
",",
"s",
"in",
"zip",
"(",
"[",
"n",
"for",
"n",
"in",
"sym",
".",
"list_inputs",
"(",
")",
"if",
"n",
"not",
"in",
"params",
"and",
"n",
"!=",
"in_label",
"]",
",",
"in_shape",
")",
"}",
"# Add params and their shape to list of inputs",
"inputs",
".",
"update",
"(",
"{",
"n",
":",
"v",
".",
"shape",
"for",
"n",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
"if",
"n",
"in",
"sym",
".",
"list_inputs",
"(",
")",
"}",
")",
"# Provide input data as well as input params to infer_shape()",
"_",
",",
"out_shapes",
",",
"_",
"=",
"sym",
".",
"infer_shape",
"(",
"*",
"*",
"inputs",
")",
"out_names",
"=",
"list",
"(",
")",
"for",
"name",
"in",
"sym",
".",
"list_outputs",
"(",
")",
":",
"if",
"name",
".",
"endswith",
"(",
"'_output'",
")",
":",
"out_names",
".",
"append",
"(",
"name",
"[",
":",
"-",
"len",
"(",
"'_output'",
")",
"]",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"output '%s' does not end with '_output'\"",
",",
"name",
")",
"out_names",
".",
"append",
"(",
"name",
")",
"assert",
"len",
"(",
"out_shapes",
")",
"==",
"len",
"(",
"out_names",
")",
"# bind output shapes with output names",
"graph_outputs",
"=",
"{",
"n",
":",
"s",
"for",
"n",
",",
"s",
"in",
"zip",
"(",
"out_names",
",",
"out_shapes",
")",
"}",
"return",
"graph_outputs"
] |
Infer output shapes and return dictionary of output name to shape
:param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on
:param dic of (str, nd.NDArray) params:
:param list of tuple(int, ...) in_shape: list of all input shapes
:param in_label: name of label typically used in loss that may be left in graph. This name is
removed from list of inputs required by symbol
:return: dictionary of output name to shape
:rtype: dict of (str, tuple(int, ...))
|
[
"Infer",
"output",
"shapes",
"and",
"return",
"dictionary",
"of",
"output",
"name",
"to",
"shape"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L123-L156
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
|
MXNetGraph.convert_weights_to_numpy
|
def convert_weights_to_numpy(weights_dict):
    """Convert MXNet NDArray weights to numpy arrays.

    Strips the ``'arg:'`` / ``'aux:'`` prefixes MXNet uses for saved
    checkpoint parameter names.

    :param dict weights_dict: mapping of (possibly prefixed) name to NDArray
    :return: mapping of cleaned name to numpy array
    :rtype: dict
    """
    # Dict comprehension instead of dict([...]) — no intermediate list
    # (flake8-comprehensions C404).
    return {k.replace("arg:", "").replace("aux:", ""): v.asnumpy()
            for k, v in weights_dict.items()}
|
python
|
def convert_weights_to_numpy(weights_dict):
    """Convert weights to numpy"""
    # Strip MXNet's checkpoint name prefixes and materialize each value.
    converted = {}
    for raw_name, ndarray_value in weights_dict.items():
        clean_name = raw_name.replace("arg:", "").replace("aux:", "")
        converted[clean_name] = ndarray_value.asnumpy()
    return converted
|
[
"def",
"convert_weights_to_numpy",
"(",
"weights_dict",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"k",
".",
"replace",
"(",
"\"arg:\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"aux:\"",
",",
"\"\"",
")",
",",
"v",
".",
"asnumpy",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"weights_dict",
".",
"items",
"(",
")",
"]",
")"
] |
Convert weights to numpy
|
[
"Convert",
"weights",
"to",
"numpy"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L159-L162
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
|
MXNetGraph.create_onnx_graph_proto
|
def create_onnx_graph_proto(self, sym, params, in_shape, in_type, verbose=False):
    """Convert MXNet graph to ONNX graph

    Parameters
    ----------
    sym : :class:`~mxnet.symbol.Symbol`
        MXNet symbol object
    params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
    in_shape : List of tuple
        Input shape of the model e.g [(1,3,224,224)]
    in_type : data type
        Input data type e.g. np.float32
    verbose : Boolean
        If true will print logs of the model conversion

    Returns
    -------
    graph : GraphProto
        ONNX graph

    Raises
    ------
    ImportError
        If onnx is not installed.
    ValueError
        If a converter returns an object of unexpected type.
    """
    try:
        from onnx import (checker, helper, NodeProto, ValueInfoProto, TensorProto)
        from onnx.helper import make_tensor_value_info
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")

    # When MXNet model is saved to json file , MXNet adds a node for label.
    # The name of this node is, name of the last node + "_label" ( i.e if last node
    # name is "Softmax", this node will have a name "Softmax_label". Also, the new node
    # will always be second last node in the json graph.
    # Deriving the output_label name.
    output_label = sym.get_internals()[len(sym.get_internals()) - 1].name + "_label"

    weights = MXNetGraph.convert_weights_to_numpy(params)
    mx_graph = json.loads(sym.tojson())["nodes"]

    initializer = []
    all_processed_nodes = []
    onnx_processed_nodes = []
    onnx_processed_inputs = []
    onnx_processed_outputs = []
    # index_lookup[i] = index (into the flat list of emitted ONNX nodes) of
    # the last node produced for MXNet node i.
    index_lookup = []

    # Determine output shape
    graph_outputs = MXNetGraph.get_outputs(sym, params, in_shape, output_label)

    # in_shape entries are consumed positionally, one per non-param input node,
    # so in_shape order must match the symbol's input order.
    graph_input_idx = 0
    for idx, node in enumerate(mx_graph):
        op = node["op"]
        name = node["name"]
        if verbose:
            logging.info("Converting idx: %d, op: %s, name: %s", idx, op, name)

        # A node is an input node if its op_name is "null" and is not
        # in params dict
        if op == "null" and name not in params:
            # Handling graph input

            # Skipping output_label node, as this node is not part of graph
            # Refer "output_label" assignment above for more details.
            if name == output_label:
                continue
            converted = MXNetGraph.convert_layer(
                node,
                is_input=True,
                mx_graph=mx_graph,
                weights=weights,
                in_shape=in_shape[graph_input_idx],
                in_type=in_type,
                proc_nodes=all_processed_nodes,
                initializer=initializer,
                index_lookup=index_lookup)
            graph_input_idx += 1
        else:
            # Handling graph layers
            converted = MXNetGraph.convert_layer(
                node,
                is_input=False,
                mx_graph=mx_graph,
                weights=weights,
                in_shape=in_shape,
                in_type=in_type,
                proc_nodes=all_processed_nodes,
                initializer=initializer,
                index_lookup=index_lookup,
                idx=idx
            )

        if isinstance(converted, list):
            # Iterate for all converted nodes
            for converted_node in converted:
                # If converted node is ValueInfoProto, add it in inputs
                if isinstance(converted_node, ValueInfoProto):
                    onnx_processed_inputs.append(converted_node)
                # If converted node is NodeProto, add it in processed nodes list
                elif isinstance(converted_node, NodeProto):
                    onnx_processed_nodes.append(converted_node)
                    # some operators have multiple outputs,
                    # therefore, check all output node names
                    node_names = list(converted_node.output)
                    for nodename in node_names:
                        if nodename in graph_outputs:
                            onnx_processed_outputs.append(
                                make_tensor_value_info(
                                    name=nodename,
                                    elem_type=in_type,
                                    shape=graph_outputs[nodename]
                                )
                            )
                            if verbose:
                                logging.info("Output node is: %s", nodename)
                elif isinstance(converted_node, TensorProto):
                    raise ValueError("Did not expect TensorProto")
                else:
                    # Fix: report the type of the object that failed the
                    # isinstance checks (converted_node), not the raw MXNet
                    # json node, which would make the error misleading.
                    raise ValueError("node is of an unrecognized type: %s" % type(converted_node))

                all_processed_nodes.append(converted_node)

            if idx > 0:
                # Handling extra node added to the graph if the MXNet model was
                # saved to json file,
                # refer "output_label" initialization above for more details.
                # if extra node was added then prev_index to the last node is adjusted.
                if idx == (len(mx_graph) - 1) and \
                        mx_graph[len(mx_graph)-2]["name"] == output_label:
                    prev_index = index_lookup[idx - 2]
                else:
                    prev_index = index_lookup[idx - 1]

                index_lookup.append(prev_index+len(converted))
            else:
                index_lookup.append(len(converted) - 1)
        else:
            # Converters are expected to return lists; anything else is
            # logged and skipped (best-effort, preserved from original).
            logging.info("Operator converter function should always return a list")

    graph = helper.make_graph(
        onnx_processed_nodes,
        "mxnet_converted_model",
        onnx_processed_inputs,
        onnx_processed_outputs
    )

    graph.initializer.extend(initializer)

    checker.check_graph(graph)
    return graph
|
python
|
def create_onnx_graph_proto(self, sym, params, in_shape, in_type, verbose=False):
    """Convert MXNet graph to ONNX graph

    Parameters
    ----------
    sym : :class:`~mxnet.symbol.Symbol`
        MXNet symbol object
    params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
    in_shape : List of tuple
        Input shape of the model e.g [(1,3,224,224)]
    in_type : data type
        Input data type e.g. np.float32
    verbose : Boolean
        If true will print logs of the model conversion

    Returns
    -------
    graph : GraphProto
        ONNX graph
    """
    try:
        from onnx import (checker, helper, NodeProto, ValueInfoProto, TensorProto)
        from onnx.helper import make_tensor_value_info
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")

    # When MXNet model is saved to json file , MXNet adds a node for label.
    # The name of this node is, name of the last node + "_label" ( i.e if last node
    # name is "Softmax", this node will have a name "Softmax_label". Also, the new node
    # will always be second last node in the json graph.
    # Deriving the output_label name.
    output_label = sym.get_internals()[len(sym.get_internals()) - 1].name + "_label"

    weights = MXNetGraph.convert_weights_to_numpy(params)
    mx_graph = json.loads(sym.tojson())["nodes"]

    initializer = []
    all_processed_nodes = []
    onnx_processed_nodes = []
    onnx_processed_inputs = []
    onnx_processed_outputs = []
    # index_lookup tracks, per MXNet node index, the index of the last
    # emitted ONNX node for it (used by converters to resolve inputs).
    index_lookup = []

    # Determine output shape
    graph_outputs = MXNetGraph.get_outputs(sym, params, in_shape, output_label)

    # NOTE(review): in_shape entries are consumed positionally per input
    # node — presumably in_shape order must match symbol input order.
    graph_input_idx = 0
    for idx, node in enumerate(mx_graph):
        op = node["op"]
        name = node["name"]
        if verbose:
            logging.info("Converting idx: %d, op: %s, name: %s", idx, op, name)

        # A node is an input node if its op_name is "null" and is not
        # in params dict
        if op == "null" and name not in params:
            # Handling graph input

            # Skipping output_label node, as this node is not part of graph
            # Refer "output_label" assignment above for more details.
            if name == output_label:
                continue
            converted = MXNetGraph.convert_layer(
                node,
                is_input=True,
                mx_graph=mx_graph,
                weights=weights,
                in_shape=in_shape[graph_input_idx],
                in_type=in_type,
                proc_nodes=all_processed_nodes,
                initializer=initializer,
                index_lookup=index_lookup)
            graph_input_idx += 1
        else:
            # Handling graph layers
            converted = MXNetGraph.convert_layer(
                node,
                is_input=False,
                mx_graph=mx_graph,
                weights=weights,
                in_shape=in_shape,
                in_type=in_type,
                proc_nodes=all_processed_nodes,
                initializer=initializer,
                index_lookup=index_lookup,
                idx=idx
            )

        if isinstance(converted, list):
            # Iterate for all converted nodes
            for converted_node in converted:
                # If converted node is ValueInfoProto, add it in inputs
                if isinstance(converted_node, ValueInfoProto):
                    onnx_processed_inputs.append(converted_node)
                # If converted node is NodeProto, add it in processed nodes list
                elif isinstance(converted_node, NodeProto):
                    onnx_processed_nodes.append(converted_node)
                    # some operators have multiple outputs,
                    # therefore, check all output node names
                    node_names = list(converted_node.output)
                    for nodename in node_names:
                        if nodename in graph_outputs:
                            onnx_processed_outputs.append(
                                make_tensor_value_info(
                                    name=nodename,
                                    elem_type=in_type,
                                    shape=graph_outputs[nodename]
                                )
                            )
                            if verbose:
                                logging.info("Output node is: %s", nodename)
                elif isinstance(converted_node, TensorProto):
                    raise ValueError("Did not expect TensorProto")
                else:
                    # NOTE(review): the message formats type(node) (the raw
                    # MXNet json node) although the checks above are on
                    # converted_node — likely meant type(converted_node).
                    raise ValueError("node is of an unrecognized type: %s" % type(node))

                all_processed_nodes.append(converted_node)

            if idx > 0:
                # Handling extra node added to the graph if the MXNet model was
                # saved to json file,
                # refer "output_label" initialization above for more details.
                # if extra node was added then prev_index to the last node is adjusted.
                if idx == (len(mx_graph) - 1) and \
                        mx_graph[len(mx_graph)-2]["name"] == output_label:
                    prev_index = index_lookup[idx - 2]
                else:
                    prev_index = index_lookup[idx - 1]

                index_lookup.append(prev_index+len(converted))
            else:
                index_lookup.append(len(converted) - 1)
        else:
            # NOTE(review): a non-list return is only logged and the node is
            # silently skipped — deliberate best-effort, preserved as-is.
            logging.info("Operator converter function should always return a list")

    graph = helper.make_graph(
        onnx_processed_nodes,
        "mxnet_converted_model",
        onnx_processed_inputs,
        onnx_processed_outputs
    )

    graph.initializer.extend(initializer)

    checker.check_graph(graph)
    return graph
|
[
"def",
"create_onnx_graph_proto",
"(",
"self",
",",
"sym",
",",
"params",
",",
"in_shape",
",",
"in_type",
",",
"verbose",
"=",
"False",
")",
":",
"try",
":",
"from",
"onnx",
"import",
"(",
"checker",
",",
"helper",
",",
"NodeProto",
",",
"ValueInfoProto",
",",
"TensorProto",
")",
"from",
"onnx",
".",
"helper",
"import",
"make_tensor_value_info",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"# When MXNet model is saved to json file , MXNet adds a node for label.",
"# The name of this node is, name of the last node + \"_label\" ( i.e if last node",
"# name is \"Softmax\", this node will have a name \"Softmax_label\". Also, the new node",
"# will always be second last node in the json graph.",
"# Deriving the output_label name.",
"output_label",
"=",
"sym",
".",
"get_internals",
"(",
")",
"[",
"len",
"(",
"sym",
".",
"get_internals",
"(",
")",
")",
"-",
"1",
"]",
".",
"name",
"+",
"\"_label\"",
"weights",
"=",
"MXNetGraph",
".",
"convert_weights_to_numpy",
"(",
"params",
")",
"mx_graph",
"=",
"json",
".",
"loads",
"(",
"sym",
".",
"tojson",
"(",
")",
")",
"[",
"\"nodes\"",
"]",
"initializer",
"=",
"[",
"]",
"all_processed_nodes",
"=",
"[",
"]",
"onnx_processed_nodes",
"=",
"[",
"]",
"onnx_processed_inputs",
"=",
"[",
"]",
"onnx_processed_outputs",
"=",
"[",
"]",
"index_lookup",
"=",
"[",
"]",
"# Determine output shape",
"graph_outputs",
"=",
"MXNetGraph",
".",
"get_outputs",
"(",
"sym",
",",
"params",
",",
"in_shape",
",",
"output_label",
")",
"graph_input_idx",
"=",
"0",
"for",
"idx",
",",
"node",
"in",
"enumerate",
"(",
"mx_graph",
")",
":",
"op",
"=",
"node",
"[",
"\"op\"",
"]",
"name",
"=",
"node",
"[",
"\"name\"",
"]",
"if",
"verbose",
":",
"logging",
".",
"info",
"(",
"\"Converting idx: %d, op: %s, name: %s\"",
",",
"idx",
",",
"op",
",",
"name",
")",
"# A node is an input node if its op_name is \"null\" and is not",
"# in params dict",
"if",
"op",
"==",
"\"null\"",
"and",
"name",
"not",
"in",
"params",
":",
"# Handling graph input",
"# Skipping output_label node, as this node is not part of graph",
"# Refer \"output_label\" assignment above for more details.",
"if",
"name",
"==",
"output_label",
":",
"continue",
"converted",
"=",
"MXNetGraph",
".",
"convert_layer",
"(",
"node",
",",
"is_input",
"=",
"True",
",",
"mx_graph",
"=",
"mx_graph",
",",
"weights",
"=",
"weights",
",",
"in_shape",
"=",
"in_shape",
"[",
"graph_input_idx",
"]",
",",
"in_type",
"=",
"in_type",
",",
"proc_nodes",
"=",
"all_processed_nodes",
",",
"initializer",
"=",
"initializer",
",",
"index_lookup",
"=",
"index_lookup",
")",
"graph_input_idx",
"+=",
"1",
"else",
":",
"# Handling graph layers",
"converted",
"=",
"MXNetGraph",
".",
"convert_layer",
"(",
"node",
",",
"is_input",
"=",
"False",
",",
"mx_graph",
"=",
"mx_graph",
",",
"weights",
"=",
"weights",
",",
"in_shape",
"=",
"in_shape",
",",
"in_type",
"=",
"in_type",
",",
"proc_nodes",
"=",
"all_processed_nodes",
",",
"initializer",
"=",
"initializer",
",",
"index_lookup",
"=",
"index_lookup",
",",
"idx",
"=",
"idx",
")",
"if",
"isinstance",
"(",
"converted",
",",
"list",
")",
":",
"# Iterate for all converted nodes",
"for",
"converted_node",
"in",
"converted",
":",
"# If converted node is ValueInfoProto, add it in inputs",
"if",
"isinstance",
"(",
"converted_node",
",",
"ValueInfoProto",
")",
":",
"onnx_processed_inputs",
".",
"append",
"(",
"converted_node",
")",
"# If converted node is NodeProto, add it in processed nodes list",
"elif",
"isinstance",
"(",
"converted_node",
",",
"NodeProto",
")",
":",
"onnx_processed_nodes",
".",
"append",
"(",
"converted_node",
")",
"# some operators have multiple outputs,",
"# therefore, check all output node names",
"node_names",
"=",
"list",
"(",
"converted_node",
".",
"output",
")",
"for",
"nodename",
"in",
"node_names",
":",
"if",
"nodename",
"in",
"graph_outputs",
":",
"onnx_processed_outputs",
".",
"append",
"(",
"make_tensor_value_info",
"(",
"name",
"=",
"nodename",
",",
"elem_type",
"=",
"in_type",
",",
"shape",
"=",
"graph_outputs",
"[",
"nodename",
"]",
")",
")",
"if",
"verbose",
":",
"logging",
".",
"info",
"(",
"\"Output node is: %s\"",
",",
"nodename",
")",
"elif",
"isinstance",
"(",
"converted_node",
",",
"TensorProto",
")",
":",
"raise",
"ValueError",
"(",
"\"Did not expect TensorProto\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"node is of an unrecognized type: %s\"",
"%",
"type",
"(",
"node",
")",
")",
"all_processed_nodes",
".",
"append",
"(",
"converted_node",
")",
"if",
"idx",
">",
"0",
":",
"# Handling extra node added to the graph if the MXNet model was",
"# saved to json file,",
"# refer \"output_label\" initialization above for more details.",
"# if extra node was added then prev_index to the last node is adjusted.",
"if",
"idx",
"==",
"(",
"len",
"(",
"mx_graph",
")",
"-",
"1",
")",
"and",
"mx_graph",
"[",
"len",
"(",
"mx_graph",
")",
"-",
"2",
"]",
"[",
"\"name\"",
"]",
"==",
"output_label",
":",
"prev_index",
"=",
"index_lookup",
"[",
"idx",
"-",
"2",
"]",
"else",
":",
"prev_index",
"=",
"index_lookup",
"[",
"idx",
"-",
"1",
"]",
"index_lookup",
".",
"append",
"(",
"prev_index",
"+",
"len",
"(",
"converted",
")",
")",
"else",
":",
"index_lookup",
".",
"append",
"(",
"len",
"(",
"converted",
")",
"-",
"1",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"Operator converter function should always return a list\"",
")",
"graph",
"=",
"helper",
".",
"make_graph",
"(",
"onnx_processed_nodes",
",",
"\"mxnet_converted_model\"",
",",
"onnx_processed_inputs",
",",
"onnx_processed_outputs",
")",
"graph",
".",
"initializer",
".",
"extend",
"(",
"initializer",
")",
"checker",
".",
"check_graph",
"(",
"graph",
")",
"return",
"graph"
] |
Convert MXNet graph to ONNX graph
Parameters
----------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
in_shape : List of tuple
Input shape of the model e.g [(1,3,224,224)]
in_type : data type
Input data type e.g. np.float32
verbose : Boolean
If true will print logs of the model conversion
Returns
-------
graph : GraphProto
ONNX graph
|
[
"Convert",
"MXNet",
"graph",
"to",
"ONNX",
"graph"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L164-L313
|
train
|
apache/incubator-mxnet
|
example/ssd/train/train_net.py
|
get_lr_scheduler
|
def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio,
                     num_example, batch_size, begin_epoch):
    """
    Compute the effective starting learning rate and an optional scheduler.

    Refactor points already passed (epoch <= begin_epoch) are applied to the
    learning rate up front; the remaining ones become a MultiFactorScheduler.

    Parameters:
    ---------
    learning_rate : float
        original learning rate
    lr_refactor_step : comma separated str
        epochs to change learning rate
    lr_refactor_ratio : float
        lr *= ratio at certain steps
    num_example : int
        number of training images, used to estimate the iterations given epochs
    batch_size : int
        training batch size
    begin_epoch : int
        starting epoch
    Returns:
    ---------
    (learning_rate, mx.lr_scheduler) as tuple
    """
    assert lr_refactor_ratio > 0
    refactor_epochs = [int(tok) for tok in lr_refactor_step.split(',') if tok.strip()]
    # A ratio >= 1 means no decay at all: keep the original rate, no scheduler.
    if lr_refactor_ratio >= 1:
        return (learning_rate, None)
    # Fast-forward the rate over refactor points we have already passed.
    lr = learning_rate
    for epoch_mark in refactor_epochs:
        if epoch_mark <= begin_epoch:
            lr *= lr_refactor_ratio
    if lr != learning_rate:
        logging.getLogger().info("Adjusted learning rate to {} for epoch {}".format(lr, begin_epoch))
    # Remaining refactor points, expressed in iterations from begin_epoch.
    epoch_size = num_example // batch_size
    steps = [epoch_size * (epoch_mark - begin_epoch)
             for epoch_mark in refactor_epochs if epoch_mark > begin_epoch]
    if not steps:
        return (lr, None)
    return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_refactor_ratio))
|
python
|
def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio,
                     num_example, batch_size, begin_epoch):
    """
    Build the (learning_rate, scheduler) pair for resumed or fresh training.

    Parameters:
    ---------
    learning_rate : float
        original learning rate
    lr_refactor_step : comma separated str
        epochs to change learning rate
    lr_refactor_ratio : float
        lr *= ratio at certain steps
    num_example : int
        number of training images, used to estimate the iterations given epochs
    batch_size : int
        training batch size
    begin_epoch : int
        starting epoch
    Returns:
    ---------
    (learning_rate, mx.lr_scheduler) as tuple
    """
    assert lr_refactor_ratio > 0
    milestones = [int(part) for part in lr_refactor_step.split(',') if part.strip()]
    if lr_refactor_ratio >= 1:
        # No decay configured.
        return (learning_rate, None)
    # Apply the ratio once per milestone that begin_epoch has already reached.
    lr = learning_rate
    for milestone in milestones:
        lr = lr * lr_refactor_ratio if begin_epoch >= milestone else lr
    # Milestones still ahead of us, converted to iteration counts.
    epoch_size = num_example // batch_size
    remaining = [m for m in milestones if m > begin_epoch]
    if lr != learning_rate:
        logging.getLogger().info("Adjusted learning rate to {} for epoch {}".format(lr, begin_epoch))
    steps = [epoch_size * (m - begin_epoch) for m in remaining]
    if steps:
        return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_refactor_ratio))
    return (lr, None)
|
[
"def",
"get_lr_scheduler",
"(",
"learning_rate",
",",
"lr_refactor_step",
",",
"lr_refactor_ratio",
",",
"num_example",
",",
"batch_size",
",",
"begin_epoch",
")",
":",
"assert",
"lr_refactor_ratio",
">",
"0",
"iter_refactor",
"=",
"[",
"int",
"(",
"r",
")",
"for",
"r",
"in",
"lr_refactor_step",
".",
"split",
"(",
"','",
")",
"if",
"r",
".",
"strip",
"(",
")",
"]",
"if",
"lr_refactor_ratio",
">=",
"1",
":",
"return",
"(",
"learning_rate",
",",
"None",
")",
"else",
":",
"lr",
"=",
"learning_rate",
"epoch_size",
"=",
"num_example",
"//",
"batch_size",
"for",
"s",
"in",
"iter_refactor",
":",
"if",
"begin_epoch",
">=",
"s",
":",
"lr",
"*=",
"lr_refactor_ratio",
"if",
"lr",
"!=",
"learning_rate",
":",
"logging",
".",
"getLogger",
"(",
")",
".",
"info",
"(",
"\"Adjusted learning rate to {} for epoch {}\"",
".",
"format",
"(",
"lr",
",",
"begin_epoch",
")",
")",
"steps",
"=",
"[",
"epoch_size",
"*",
"(",
"x",
"-",
"begin_epoch",
")",
"for",
"x",
"in",
"iter_refactor",
"if",
"x",
">",
"begin_epoch",
"]",
"if",
"not",
"steps",
":",
"return",
"(",
"lr",
",",
"None",
")",
"lr_scheduler",
"=",
"mx",
".",
"lr_scheduler",
".",
"MultiFactorScheduler",
"(",
"step",
"=",
"steps",
",",
"factor",
"=",
"lr_refactor_ratio",
")",
"return",
"(",
"lr",
",",
"lr_scheduler",
")"
] |
Compute learning rate and refactor scheduler
Parameters:
---------
learning_rate : float
original learning rate
lr_refactor_step : comma separated str
epochs to change learning rate
lr_refactor_ratio : float
lr *= ratio at certain steps
num_example : int
number of training images, used to estimate the iterations given epochs
batch_size : int
training batch size
begin_epoch : int
starting epoch
Returns:
---------
(learning_rate, mx.lr_scheduler) as tuple
|
[
"Compute",
"learning",
"rate",
"and",
"refactor",
"scheduler"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/train_net.py#L48-L88
|
train
|
apache/incubator-mxnet
|
example/ssd/train/train_net.py
|
train_net
|
def train_net(net, train_path, num_classes, batch_size,
              data_shape, mean_pixels, resume, finetune, pretrained, epoch,
              prefix, ctx, begin_epoch, end_epoch, frequent, learning_rate,
              momentum, weight_decay, lr_refactor_step, lr_refactor_ratio,
              freeze_layer_pattern='',
              num_example=10000, label_pad_width=350,
              nms_thresh=0.45, force_nms=False, ovp_thresh=0.5,
              use_difficult=False, class_names=None,
              voc07_metric=False, nms_topk=400, force_suppress=False,
              train_list="", val_path="", val_list="", iter_monitor=0,
              monitor_pattern=".*", log_file=None, kv_store=None):
    """
    Wrapper for training phase.

    Parameters:
    ----------
    net : str
        symbol name for the network structure
    train_path : str
        record file path for training
    num_classes : int
        number of object classes, not including background
    batch_size : int
        training batch-size
    data_shape : int or tuple
        width/height as integer or (3, height, width) tuple
    mean_pixels : tuple of floats
        mean pixel values for red, green and blue
    resume : int
        resume from previous checkpoint if > 0
    finetune : int
        fine-tune from previous checkpoint if > 0
    pretrained : str
        prefix of pretrained model, including path
    epoch : int
        load epoch of either resume/finetune/pretrained model
    prefix : str
        prefix for saving checkpoints
    ctx : [mx.cpu()] or [mx.gpu(x)]
        list of mxnet contexts
    begin_epoch : int
        starting epoch for training, should be 0 if not otherwise specified
    end_epoch : int
        end epoch of training
    frequent : int
        frequency to print out training status
    learning_rate : float
        training learning rate
    momentum : float
        training momentum
    weight_decay : float
        training weight decay param
    lr_refactor_ratio : float
        multiplier for reducing learning rate
    lr_refactor_step : comma separated integers
        at which epoch to rescale learning rate, e.g. '30, 60, 90'
    freeze_layer_pattern : str
        regex pattern for layers need to be fixed
    num_example : int
        number of training images
    label_pad_width : int
        force padding training and validation labels to sync their label widths
    nms_thresh : float
        non-maximum suppression threshold for validation
    force_nms : boolean
        suppress overlapped objects from different classes
    ovp_thresh : float
        overlap threshold passed to the validation mAP metric
    use_difficult : boolean
        passed to the validation metric (count 'difficult' objects)
    class_names : list of str or None
        class names passed to the validation metric
    voc07_metric : boolean
        use VOC07MApMetric instead of MApMetric for validation
    nms_topk : int
        passed to get_symbol_train (top-k kept after NMS)
    force_suppress : boolean
        passed to get_symbol_train
    train_list : str
        list file path for training, this will replace the embedded labels in record
    val_path : str
        record file path for validation
    val_list : str
        list file path for validation, this will replace the embedded labels in record
    iter_monitor : int
        monitor internal stats in networks if > 0, specified by monitor_pattern
    monitor_pattern : str
        regex pattern for monitoring network stats
    log_file : str
        log to file if enabled
    kv_store : str or None
        kvstore type string; a kvstore is created only when given
    """
    # NOTE(review): force_nms is accepted but not referenced in this body —
    # possibly consumed elsewhere or vestigial; confirm before removing.
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if log_file:
        fh = logging.FileHandler(log_file)
        logger.addHandler(fh)

    # check args: normalize an int data_shape to (3, H, W)
    if isinstance(data_shape, int):
        data_shape = (3, data_shape, data_shape)
    assert len(data_shape) == 3 and data_shape[0] == 3
    # Checkpoint prefix encodes network name and input size.
    prefix += '_' + net + '_' + str(data_shape[1])

    if isinstance(mean_pixels, (int, float)):
        mean_pixels = [mean_pixels, mean_pixels, mean_pixels]
    assert len(mean_pixels) == 3, "must provide all RGB mean values"

    train_iter = DetRecordIter(train_path, batch_size, data_shape, mean_pixels=mean_pixels,
        label_pad_width=label_pad_width, path_imglist=train_list, **cfg.train)

    if val_path:
        val_iter = DetRecordIter(val_path, batch_size, data_shape, mean_pixels=mean_pixels,
            label_pad_width=label_pad_width, path_imglist=val_list, **cfg.valid)
    else:
        val_iter = None

    # load symbol (note: `net` is rebound from name string to symbol here)
    net = get_symbol_train(net, data_shape[1], num_classes=num_classes,
        nms_thresh=nms_thresh, force_suppress=force_suppress, nms_topk=nms_topk)

    # define layers with fixed weight/bias
    if freeze_layer_pattern.strip():
        re_prog = re.compile(freeze_layer_pattern)
        fixed_param_names = [name for name in net.list_arguments() if re_prog.match(name)]
    else:
        fixed_param_names = None

    # load pretrained or resume from previous state
    ctx_str = '('+ ','.join([str(c) for c in ctx]) + ')'
    if resume > 0:
        logger.info("Resume training with {} from epoch {}"
            .format(ctx_str, resume))
        _, args, auxs = mx.model.load_checkpoint(prefix, resume)
        begin_epoch = resume
    elif finetune > 0:
        logger.info("Start finetuning with {} from epoch {}"
            .format(ctx_str, finetune))
        _, args, auxs = mx.model.load_checkpoint(prefix, finetune)
        begin_epoch = finetune
        # the prediction convolution layers name starts with relu, so it's fine
        # NOTE(review): this overrides any freeze_layer_pattern result above.
        fixed_param_names = [name for name in net.list_arguments() \
            if name.startswith('conv')]
    elif pretrained:
        logger.info("Start training with {} from pretrained model {}"
            .format(ctx_str, pretrained))
        _, args, auxs = mx.model.load_checkpoint(pretrained, epoch)
        args = convert_pretrained(pretrained, args)
    else:
        logger.info("Experimental: start training from scratch with {}"
            .format(ctx_str))
        args = None
        auxs = None
        fixed_param_names = None

    # helper information
    if fixed_param_names:
        logger.info("Freezed parameters: [" + ','.join(fixed_param_names) + ']')

    # init training module
    mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
                        fixed_param_names=fixed_param_names)

    # fit parameters
    batch_end_callback = mx.callback.Speedometer(train_iter.batch_size, frequent=frequent)
    epoch_end_callback = mx.callback.do_checkpoint(prefix)
    learning_rate, lr_scheduler = get_lr_scheduler(learning_rate, lr_refactor_step,
        lr_refactor_ratio, num_example, batch_size, begin_epoch)
    # rescale_grad averages gradients across devices.
    optimizer_params={'learning_rate':learning_rate,
                      'momentum':momentum,
                      'wd':weight_decay,
                      'lr_scheduler':lr_scheduler,
                      'clip_gradient':None,
                      'rescale_grad': 1.0 / len(ctx) if len(ctx) > 0 else 1.0 }
    monitor = mx.mon.Monitor(iter_monitor, pattern=monitor_pattern) if iter_monitor > 0 else None

    # run fit net, every n epochs we run evaluation network to get mAP
    if voc07_metric:
        valid_metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names, pred_idx=3)
    else:
        valid_metric = MApMetric(ovp_thresh, use_difficult, class_names, pred_idx=3)

    # create kvstore when there are gpus
    kv = mx.kvstore.create(kv_store) if kv_store else None

    mod.fit(train_iter,
            val_iter,
            eval_metric=MultiBoxMetric(),
            validation_metric=valid_metric,
            batch_end_callback=batch_end_callback,
            epoch_end_callback=epoch_end_callback,
            optimizer='sgd',
            optimizer_params=optimizer_params,
            begin_epoch=begin_epoch,
            num_epoch=end_epoch,
            initializer=mx.init.Xavier(),
            arg_params=args,
            aux_params=auxs,
            allow_missing=True,
            monitor=monitor,
            kvstore=kv)
|
python
|
def train_net(net, train_path, num_classes, batch_size,
data_shape, mean_pixels, resume, finetune, pretrained, epoch,
prefix, ctx, begin_epoch, end_epoch, frequent, learning_rate,
momentum, weight_decay, lr_refactor_step, lr_refactor_ratio,
freeze_layer_pattern='',
num_example=10000, label_pad_width=350,
nms_thresh=0.45, force_nms=False, ovp_thresh=0.5,
use_difficult=False, class_names=None,
voc07_metric=False, nms_topk=400, force_suppress=False,
train_list="", val_path="", val_list="", iter_monitor=0,
monitor_pattern=".*", log_file=None, kv_store=None):
"""
Wrapper for training phase.
Parameters:
----------
net : str
symbol name for the network structure
train_path : str
record file path for training
num_classes : int
number of object classes, not including background
batch_size : int
training batch-size
data_shape : int or tuple
width/height as integer or (3, height, width) tuple
mean_pixels : tuple of floats
mean pixel values for red, green and blue
resume : int
resume from previous checkpoint if > 0
finetune : int
fine-tune from previous checkpoint if > 0
pretrained : str
prefix of pretrained model, including path
epoch : int
load epoch of either resume/finetune/pretrained model
prefix : str
prefix for saving checkpoints
ctx : [mx.cpu()] or [mx.gpu(x)]
list of mxnet contexts
begin_epoch : int
starting epoch for training, should be 0 if not otherwise specified
end_epoch : int
end epoch of training
frequent : int
frequency to print out training status
learning_rate : float
training learning rate
momentum : float
trainig momentum
weight_decay : float
training weight decay param
lr_refactor_ratio : float
multiplier for reducing learning rate
lr_refactor_step : comma separated integers
at which epoch to rescale learning rate, e.g. '30, 60, 90'
freeze_layer_pattern : str
regex pattern for layers need to be fixed
num_example : int
number of training images
label_pad_width : int
force padding training and validation labels to sync their label widths
nms_thresh : float
non-maximum suppression threshold for validation
force_nms : boolean
suppress overlaped objects from different classes
train_list : str
list file path for training, this will replace the embeded labels in record
val_path : str
record file path for validation
val_list : str
list file path for validation, this will replace the embeded labels in record
iter_monitor : int
monitor internal stats in networks if > 0, specified by monitor_pattern
monitor_pattern : str
regex pattern for monitoring network stats
log_file : str
log to file if enabled
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if log_file:
fh = logging.FileHandler(log_file)
logger.addHandler(fh)
# check args
if isinstance(data_shape, int):
data_shape = (3, data_shape, data_shape)
assert len(data_shape) == 3 and data_shape[0] == 3
prefix += '_' + net + '_' + str(data_shape[1])
if isinstance(mean_pixels, (int, float)):
mean_pixels = [mean_pixels, mean_pixels, mean_pixels]
assert len(mean_pixels) == 3, "must provide all RGB mean values"
train_iter = DetRecordIter(train_path, batch_size, data_shape, mean_pixels=mean_pixels,
label_pad_width=label_pad_width, path_imglist=train_list, **cfg.train)
if val_path:
val_iter = DetRecordIter(val_path, batch_size, data_shape, mean_pixels=mean_pixels,
label_pad_width=label_pad_width, path_imglist=val_list, **cfg.valid)
else:
val_iter = None
# load symbol
net = get_symbol_train(net, data_shape[1], num_classes=num_classes,
nms_thresh=nms_thresh, force_suppress=force_suppress, nms_topk=nms_topk)
# define layers with fixed weight/bias
if freeze_layer_pattern.strip():
re_prog = re.compile(freeze_layer_pattern)
fixed_param_names = [name for name in net.list_arguments() if re_prog.match(name)]
else:
fixed_param_names = None
# load pretrained or resume from previous state
ctx_str = '('+ ','.join([str(c) for c in ctx]) + ')'
if resume > 0:
logger.info("Resume training with {} from epoch {}"
.format(ctx_str, resume))
_, args, auxs = mx.model.load_checkpoint(prefix, resume)
begin_epoch = resume
elif finetune > 0:
logger.info("Start finetuning with {} from epoch {}"
.format(ctx_str, finetune))
_, args, auxs = mx.model.load_checkpoint(prefix, finetune)
begin_epoch = finetune
# the prediction convolution layers name starts with relu, so it's fine
fixed_param_names = [name for name in net.list_arguments() \
if name.startswith('conv')]
elif pretrained:
logger.info("Start training with {} from pretrained model {}"
.format(ctx_str, pretrained))
_, args, auxs = mx.model.load_checkpoint(pretrained, epoch)
args = convert_pretrained(pretrained, args)
else:
logger.info("Experimental: start training from scratch with {}"
.format(ctx_str))
args = None
auxs = None
fixed_param_names = None
# helper information
if fixed_param_names:
logger.info("Freezed parameters: [" + ','.join(fixed_param_names) + ']')
# init training module
mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=fixed_param_names)
# fit parameters
batch_end_callback = mx.callback.Speedometer(train_iter.batch_size, frequent=frequent)
epoch_end_callback = mx.callback.do_checkpoint(prefix)
learning_rate, lr_scheduler = get_lr_scheduler(learning_rate, lr_refactor_step,
lr_refactor_ratio, num_example, batch_size, begin_epoch)
optimizer_params={'learning_rate':learning_rate,
'momentum':momentum,
'wd':weight_decay,
'lr_scheduler':lr_scheduler,
'clip_gradient':None,
'rescale_grad': 1.0 / len(ctx) if len(ctx) > 0 else 1.0 }
monitor = mx.mon.Monitor(iter_monitor, pattern=monitor_pattern) if iter_monitor > 0 else None
# run fit net, every n epochs we run evaluation network to get mAP
if voc07_metric:
valid_metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names, pred_idx=3)
else:
valid_metric = MApMetric(ovp_thresh, use_difficult, class_names, pred_idx=3)
# create kvstore when there are gpus
kv = mx.kvstore.create(kv_store) if kv_store else None
mod.fit(train_iter,
val_iter,
eval_metric=MultiBoxMetric(),
validation_metric=valid_metric,
batch_end_callback=batch_end_callback,
epoch_end_callback=epoch_end_callback,
optimizer='sgd',
optimizer_params=optimizer_params,
begin_epoch=begin_epoch,
num_epoch=end_epoch,
initializer=mx.init.Xavier(),
arg_params=args,
aux_params=auxs,
allow_missing=True,
monitor=monitor,
kvstore=kv)
|
[
"def",
"train_net",
"(",
"net",
",",
"train_path",
",",
"num_classes",
",",
"batch_size",
",",
"data_shape",
",",
"mean_pixels",
",",
"resume",
",",
"finetune",
",",
"pretrained",
",",
"epoch",
",",
"prefix",
",",
"ctx",
",",
"begin_epoch",
",",
"end_epoch",
",",
"frequent",
",",
"learning_rate",
",",
"momentum",
",",
"weight_decay",
",",
"lr_refactor_step",
",",
"lr_refactor_ratio",
",",
"freeze_layer_pattern",
"=",
"''",
",",
"num_example",
"=",
"10000",
",",
"label_pad_width",
"=",
"350",
",",
"nms_thresh",
"=",
"0.45",
",",
"force_nms",
"=",
"False",
",",
"ovp_thresh",
"=",
"0.5",
",",
"use_difficult",
"=",
"False",
",",
"class_names",
"=",
"None",
",",
"voc07_metric",
"=",
"False",
",",
"nms_topk",
"=",
"400",
",",
"force_suppress",
"=",
"False",
",",
"train_list",
"=",
"\"\"",
",",
"val_path",
"=",
"\"\"",
",",
"val_list",
"=",
"\"\"",
",",
"iter_monitor",
"=",
"0",
",",
"monitor_pattern",
"=",
"\".*\"",
",",
"log_file",
"=",
"None",
",",
"kv_store",
"=",
"None",
")",
":",
"# set up logger",
"logging",
".",
"basicConfig",
"(",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"if",
"log_file",
":",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"log_file",
")",
"logger",
".",
"addHandler",
"(",
"fh",
")",
"# check args",
"if",
"isinstance",
"(",
"data_shape",
",",
"int",
")",
":",
"data_shape",
"=",
"(",
"3",
",",
"data_shape",
",",
"data_shape",
")",
"assert",
"len",
"(",
"data_shape",
")",
"==",
"3",
"and",
"data_shape",
"[",
"0",
"]",
"==",
"3",
"prefix",
"+=",
"'_'",
"+",
"net",
"+",
"'_'",
"+",
"str",
"(",
"data_shape",
"[",
"1",
"]",
")",
"if",
"isinstance",
"(",
"mean_pixels",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"mean_pixels",
"=",
"[",
"mean_pixels",
",",
"mean_pixels",
",",
"mean_pixels",
"]",
"assert",
"len",
"(",
"mean_pixels",
")",
"==",
"3",
",",
"\"must provide all RGB mean values\"",
"train_iter",
"=",
"DetRecordIter",
"(",
"train_path",
",",
"batch_size",
",",
"data_shape",
",",
"mean_pixels",
"=",
"mean_pixels",
",",
"label_pad_width",
"=",
"label_pad_width",
",",
"path_imglist",
"=",
"train_list",
",",
"*",
"*",
"cfg",
".",
"train",
")",
"if",
"val_path",
":",
"val_iter",
"=",
"DetRecordIter",
"(",
"val_path",
",",
"batch_size",
",",
"data_shape",
",",
"mean_pixels",
"=",
"mean_pixels",
",",
"label_pad_width",
"=",
"label_pad_width",
",",
"path_imglist",
"=",
"val_list",
",",
"*",
"*",
"cfg",
".",
"valid",
")",
"else",
":",
"val_iter",
"=",
"None",
"# load symbol",
"net",
"=",
"get_symbol_train",
"(",
"net",
",",
"data_shape",
"[",
"1",
"]",
",",
"num_classes",
"=",
"num_classes",
",",
"nms_thresh",
"=",
"nms_thresh",
",",
"force_suppress",
"=",
"force_suppress",
",",
"nms_topk",
"=",
"nms_topk",
")",
"# define layers with fixed weight/bias",
"if",
"freeze_layer_pattern",
".",
"strip",
"(",
")",
":",
"re_prog",
"=",
"re",
".",
"compile",
"(",
"freeze_layer_pattern",
")",
"fixed_param_names",
"=",
"[",
"name",
"for",
"name",
"in",
"net",
".",
"list_arguments",
"(",
")",
"if",
"re_prog",
".",
"match",
"(",
"name",
")",
"]",
"else",
":",
"fixed_param_names",
"=",
"None",
"# load pretrained or resume from previous state",
"ctx_str",
"=",
"'('",
"+",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"ctx",
"]",
")",
"+",
"')'",
"if",
"resume",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"Resume training with {} from epoch {}\"",
".",
"format",
"(",
"ctx_str",
",",
"resume",
")",
")",
"_",
",",
"args",
",",
"auxs",
"=",
"mx",
".",
"model",
".",
"load_checkpoint",
"(",
"prefix",
",",
"resume",
")",
"begin_epoch",
"=",
"resume",
"elif",
"finetune",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"Start finetuning with {} from epoch {}\"",
".",
"format",
"(",
"ctx_str",
",",
"finetune",
")",
")",
"_",
",",
"args",
",",
"auxs",
"=",
"mx",
".",
"model",
".",
"load_checkpoint",
"(",
"prefix",
",",
"finetune",
")",
"begin_epoch",
"=",
"finetune",
"# the prediction convolution layers name starts with relu, so it's fine",
"fixed_param_names",
"=",
"[",
"name",
"for",
"name",
"in",
"net",
".",
"list_arguments",
"(",
")",
"if",
"name",
".",
"startswith",
"(",
"'conv'",
")",
"]",
"elif",
"pretrained",
":",
"logger",
".",
"info",
"(",
"\"Start training with {} from pretrained model {}\"",
".",
"format",
"(",
"ctx_str",
",",
"pretrained",
")",
")",
"_",
",",
"args",
",",
"auxs",
"=",
"mx",
".",
"model",
".",
"load_checkpoint",
"(",
"pretrained",
",",
"epoch",
")",
"args",
"=",
"convert_pretrained",
"(",
"pretrained",
",",
"args",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Experimental: start training from scratch with {}\"",
".",
"format",
"(",
"ctx_str",
")",
")",
"args",
"=",
"None",
"auxs",
"=",
"None",
"fixed_param_names",
"=",
"None",
"# helper information",
"if",
"fixed_param_names",
":",
"logger",
".",
"info",
"(",
"\"Freezed parameters: [\"",
"+",
"','",
".",
"join",
"(",
"fixed_param_names",
")",
"+",
"']'",
")",
"# init training module",
"mod",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"net",
",",
"label_names",
"=",
"(",
"'label'",
",",
")",
",",
"logger",
"=",
"logger",
",",
"context",
"=",
"ctx",
",",
"fixed_param_names",
"=",
"fixed_param_names",
")",
"# fit parameters",
"batch_end_callback",
"=",
"mx",
".",
"callback",
".",
"Speedometer",
"(",
"train_iter",
".",
"batch_size",
",",
"frequent",
"=",
"frequent",
")",
"epoch_end_callback",
"=",
"mx",
".",
"callback",
".",
"do_checkpoint",
"(",
"prefix",
")",
"learning_rate",
",",
"lr_scheduler",
"=",
"get_lr_scheduler",
"(",
"learning_rate",
",",
"lr_refactor_step",
",",
"lr_refactor_ratio",
",",
"num_example",
",",
"batch_size",
",",
"begin_epoch",
")",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"learning_rate",
",",
"'momentum'",
":",
"momentum",
",",
"'wd'",
":",
"weight_decay",
",",
"'lr_scheduler'",
":",
"lr_scheduler",
",",
"'clip_gradient'",
":",
"None",
",",
"'rescale_grad'",
":",
"1.0",
"/",
"len",
"(",
"ctx",
")",
"if",
"len",
"(",
"ctx",
")",
">",
"0",
"else",
"1.0",
"}",
"monitor",
"=",
"mx",
".",
"mon",
".",
"Monitor",
"(",
"iter_monitor",
",",
"pattern",
"=",
"monitor_pattern",
")",
"if",
"iter_monitor",
">",
"0",
"else",
"None",
"# run fit net, every n epochs we run evaluation network to get mAP",
"if",
"voc07_metric",
":",
"valid_metric",
"=",
"VOC07MApMetric",
"(",
"ovp_thresh",
",",
"use_difficult",
",",
"class_names",
",",
"pred_idx",
"=",
"3",
")",
"else",
":",
"valid_metric",
"=",
"MApMetric",
"(",
"ovp_thresh",
",",
"use_difficult",
",",
"class_names",
",",
"pred_idx",
"=",
"3",
")",
"# create kvstore when there are gpus",
"kv",
"=",
"mx",
".",
"kvstore",
".",
"create",
"(",
"kv_store",
")",
"if",
"kv_store",
"else",
"None",
"mod",
".",
"fit",
"(",
"train_iter",
",",
"val_iter",
",",
"eval_metric",
"=",
"MultiBoxMetric",
"(",
")",
",",
"validation_metric",
"=",
"valid_metric",
",",
"batch_end_callback",
"=",
"batch_end_callback",
",",
"epoch_end_callback",
"=",
"epoch_end_callback",
",",
"optimizer",
"=",
"'sgd'",
",",
"optimizer_params",
"=",
"optimizer_params",
",",
"begin_epoch",
"=",
"begin_epoch",
",",
"num_epoch",
"=",
"end_epoch",
",",
"initializer",
"=",
"mx",
".",
"init",
".",
"Xavier",
"(",
")",
",",
"arg_params",
"=",
"args",
",",
"aux_params",
"=",
"auxs",
",",
"allow_missing",
"=",
"True",
",",
"monitor",
"=",
"monitor",
",",
"kvstore",
"=",
"kv",
")"
] |
Wrapper for training phase.
Parameters:
----------
net : str
symbol name for the network structure
train_path : str
record file path for training
num_classes : int
number of object classes, not including background
batch_size : int
training batch-size
data_shape : int or tuple
width/height as integer or (3, height, width) tuple
mean_pixels : tuple of floats
mean pixel values for red, green and blue
resume : int
resume from previous checkpoint if > 0
finetune : int
fine-tune from previous checkpoint if > 0
pretrained : str
prefix of pretrained model, including path
epoch : int
load epoch of either resume/finetune/pretrained model
prefix : str
prefix for saving checkpoints
ctx : [mx.cpu()] or [mx.gpu(x)]
list of mxnet contexts
begin_epoch : int
starting epoch for training, should be 0 if not otherwise specified
end_epoch : int
end epoch of training
frequent : int
frequency to print out training status
learning_rate : float
training learning rate
momentum : float
trainig momentum
weight_decay : float
training weight decay param
lr_refactor_ratio : float
multiplier for reducing learning rate
lr_refactor_step : comma separated integers
at which epoch to rescale learning rate, e.g. '30, 60, 90'
freeze_layer_pattern : str
regex pattern for layers need to be fixed
num_example : int
number of training images
label_pad_width : int
force padding training and validation labels to sync their label widths
nms_thresh : float
non-maximum suppression threshold for validation
force_nms : boolean
suppress overlaped objects from different classes
train_list : str
list file path for training, this will replace the embeded labels in record
val_path : str
record file path for validation
val_list : str
list file path for validation, this will replace the embeded labels in record
iter_monitor : int
monitor internal stats in networks if > 0, specified by monitor_pattern
monitor_pattern : str
regex pattern for monitoring network stats
log_file : str
log to file if enabled
|
[
"Wrapper",
"for",
"training",
"phase",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/train_net.py#L90-L279
|
train
|
slundberg/shap
|
shap/datasets.py
|
imagenet50
|
def imagenet50(display=False, resolution=224):
""" This is a set of 50 images representative of ImageNet images.
This dataset was collected by randomly finding a working ImageNet link and then pasting the
original ImageNet image into Google image search restricted to images licensed for reuse. A
similar image (now with rights to reuse) was downloaded as a rough replacment for the original
ImageNet image. The point is to have a random sample of ImageNet for use as a background
distribution for explaining models trained on ImageNet data.
Note that because the images are only rough replacements the labels might no longer be correct.
"""
prefix = github_data_url + "imagenet50_"
X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32)
y = np.loadtxt(cache(prefix + "labels.csv"))
return X, y
|
python
|
def imagenet50(display=False, resolution=224):
""" This is a set of 50 images representative of ImageNet images.
This dataset was collected by randomly finding a working ImageNet link and then pasting the
original ImageNet image into Google image search restricted to images licensed for reuse. A
similar image (now with rights to reuse) was downloaded as a rough replacment for the original
ImageNet image. The point is to have a random sample of ImageNet for use as a background
distribution for explaining models trained on ImageNet data.
Note that because the images are only rough replacements the labels might no longer be correct.
"""
prefix = github_data_url + "imagenet50_"
X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32)
y = np.loadtxt(cache(prefix + "labels.csv"))
return X, y
|
[
"def",
"imagenet50",
"(",
"display",
"=",
"False",
",",
"resolution",
"=",
"224",
")",
":",
"prefix",
"=",
"github_data_url",
"+",
"\"imagenet50_\"",
"X",
"=",
"np",
".",
"load",
"(",
"cache",
"(",
"prefix",
"+",
"\"%sx%s.npy\"",
"%",
"(",
"resolution",
",",
"resolution",
")",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"y",
"=",
"np",
".",
"loadtxt",
"(",
"cache",
"(",
"prefix",
"+",
"\"labels.csv\"",
")",
")",
"return",
"X",
",",
"y"
] |
This is a set of 50 images representative of ImageNet images.
This dataset was collected by randomly finding a working ImageNet link and then pasting the
original ImageNet image into Google image search restricted to images licensed for reuse. A
similar image (now with rights to reuse) was downloaded as a rough replacment for the original
ImageNet image. The point is to have a random sample of ImageNet for use as a background
distribution for explaining models trained on ImageNet data.
Note that because the images are only rough replacements the labels might no longer be correct.
|
[
"This",
"is",
"a",
"set",
"of",
"50",
"images",
"representative",
"of",
"ImageNet",
"images",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L13-L28
|
train
|
slundberg/shap
|
shap/datasets.py
|
boston
|
def boston(display=False):
""" Return the boston housing data in a nice package. """
d = sklearn.datasets.load_boston()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target
|
python
|
def boston(display=False):
""" Return the boston housing data in a nice package. """
d = sklearn.datasets.load_boston()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target
|
[
"def",
"boston",
"(",
"display",
"=",
"False",
")",
":",
"d",
"=",
"sklearn",
".",
"datasets",
".",
"load_boston",
"(",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"d",
".",
"data",
",",
"columns",
"=",
"d",
".",
"feature_names",
")",
"# pylint: disable=E1101",
"return",
"df",
",",
"d",
".",
"target"
] |
Return the boston housing data in a nice package.
|
[
"Return",
"the",
"boston",
"housing",
"data",
"in",
"a",
"nice",
"package",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L30-L35
|
train
|
slundberg/shap
|
shap/datasets.py
|
imdb
|
def imdb(display=False):
""" Return the clssic IMDB sentiment analysis training data in a nice package.
Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015
"""
with open(cache(github_data_url + "imdb_train.txt")) as f:
data = f.readlines()
y = np.ones(25000, dtype=np.bool)
y[:12500] = 0
return data, y
|
python
|
def imdb(display=False):
""" Return the clssic IMDB sentiment analysis training data in a nice package.
Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015
"""
with open(cache(github_data_url + "imdb_train.txt")) as f:
data = f.readlines()
y = np.ones(25000, dtype=np.bool)
y[:12500] = 0
return data, y
|
[
"def",
"imdb",
"(",
"display",
"=",
"False",
")",
":",
"with",
"open",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"imdb_train.txt\"",
")",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"readlines",
"(",
")",
"y",
"=",
"np",
".",
"ones",
"(",
"25000",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"y",
"[",
":",
"12500",
"]",
"=",
"0",
"return",
"data",
",",
"y"
] |
Return the clssic IMDB sentiment analysis training data in a nice package.
Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015
|
[
"Return",
"the",
"clssic",
"IMDB",
"sentiment",
"analysis",
"training",
"data",
"in",
"a",
"nice",
"package",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L37-L48
|
train
|
slundberg/shap
|
shap/datasets.py
|
communitiesandcrime
|
def communitiesandcrime(display=False):
""" Predict total number of non-violent crimes per 100K popuation.
This dataset is from the classic UCI Machine Learning repository:
https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized
"""
raw_data = pd.read_csv(
cache(github_data_url + "CommViolPredUnnormalizedData.txt"),
na_values="?"
)
# find the indices where the total violent crimes are known
valid_inds = np.where(np.invert(np.isnan(raw_data.iloc[:,-2])))[0]
y = np.array(raw_data.iloc[valid_inds,-2], dtype=np.float)
# extract the predictive features and remove columns with missing values
X = raw_data.iloc[valid_inds,5:-18]
valid_cols = np.where(np.isnan(X.values).sum(0) == 0)[0]
X = X.iloc[:,valid_cols]
return X, y
|
python
|
def communitiesandcrime(display=False):
""" Predict total number of non-violent crimes per 100K popuation.
This dataset is from the classic UCI Machine Learning repository:
https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized
"""
raw_data = pd.read_csv(
cache(github_data_url + "CommViolPredUnnormalizedData.txt"),
na_values="?"
)
# find the indices where the total violent crimes are known
valid_inds = np.where(np.invert(np.isnan(raw_data.iloc[:,-2])))[0]
y = np.array(raw_data.iloc[valid_inds,-2], dtype=np.float)
# extract the predictive features and remove columns with missing values
X = raw_data.iloc[valid_inds,5:-18]
valid_cols = np.where(np.isnan(X.values).sum(0) == 0)[0]
X = X.iloc[:,valid_cols]
return X, y
|
[
"def",
"communitiesandcrime",
"(",
"display",
"=",
"False",
")",
":",
"raw_data",
"=",
"pd",
".",
"read_csv",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"CommViolPredUnnormalizedData.txt\"",
")",
",",
"na_values",
"=",
"\"?\"",
")",
"# find the indices where the total violent crimes are known",
"valid_inds",
"=",
"np",
".",
"where",
"(",
"np",
".",
"invert",
"(",
"np",
".",
"isnan",
"(",
"raw_data",
".",
"iloc",
"[",
":",
",",
"-",
"2",
"]",
")",
")",
")",
"[",
"0",
"]",
"y",
"=",
"np",
".",
"array",
"(",
"raw_data",
".",
"iloc",
"[",
"valid_inds",
",",
"-",
"2",
"]",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"# extract the predictive features and remove columns with missing values",
"X",
"=",
"raw_data",
".",
"iloc",
"[",
"valid_inds",
",",
"5",
":",
"-",
"18",
"]",
"valid_cols",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"X",
".",
"values",
")",
".",
"sum",
"(",
"0",
")",
"==",
"0",
")",
"[",
"0",
"]",
"X",
"=",
"X",
".",
"iloc",
"[",
":",
",",
"valid_cols",
"]",
"return",
"X",
",",
"y"
] |
Predict total number of non-violent crimes per 100K popuation.
This dataset is from the classic UCI Machine Learning repository:
https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized
|
[
"Predict",
"total",
"number",
"of",
"non",
"-",
"violent",
"crimes",
"per",
"100K",
"popuation",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L50-L71
|
train
|
slundberg/shap
|
shap/datasets.py
|
diabetes
|
def diabetes(display=False):
""" Return the diabetes data in a nice package. """
d = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target
|
python
|
def diabetes(display=False):
""" Return the diabetes data in a nice package. """
d = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target
|
[
"def",
"diabetes",
"(",
"display",
"=",
"False",
")",
":",
"d",
"=",
"sklearn",
".",
"datasets",
".",
"load_diabetes",
"(",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"d",
".",
"data",
",",
"columns",
"=",
"d",
".",
"feature_names",
")",
"# pylint: disable=E1101",
"return",
"df",
",",
"d",
".",
"target"
] |
Return the diabetes data in a nice package.
|
[
"Return",
"the",
"diabetes",
"data",
"in",
"a",
"nice",
"package",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L73-L78
|
train
|
slundberg/shap
|
shap/datasets.py
|
iris
|
def iris(display=False):
""" Return the classic iris data in a nice package. """
d = sklearn.datasets.load_iris()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
if display:
return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101
else:
return df, d.target
|
python
|
def iris(display=False):
""" Return the classic iris data in a nice package. """
d = sklearn.datasets.load_iris()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
if display:
return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101
else:
return df, d.target
|
[
"def",
"iris",
"(",
"display",
"=",
"False",
")",
":",
"d",
"=",
"sklearn",
".",
"datasets",
".",
"load_iris",
"(",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"d",
".",
"data",
",",
"columns",
"=",
"d",
".",
"feature_names",
")",
"# pylint: disable=E1101",
"if",
"display",
":",
"return",
"df",
",",
"[",
"d",
".",
"target_names",
"[",
"v",
"]",
"for",
"v",
"in",
"d",
".",
"target",
"]",
"# pylint: disable=E1101",
"else",
":",
"return",
"df",
",",
"d",
".",
"target"
] |
Return the classic iris data in a nice package.
|
[
"Return",
"the",
"classic",
"iris",
"data",
"in",
"a",
"nice",
"package",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L81-L89
|
train
|
slundberg/shap
|
shap/datasets.py
|
adult
|
def adult(display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_data = pd.read_csv(
cache(github_data_url + "adult.data"),
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
data["Target"] = data["Target"] == " >50K"
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
data[k] = np.array([rcode[v.strip()] for v in data[k]])
else:
data[k] = data[k].cat.codes
if display:
return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values
else:
return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
|
python
|
def adult(display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_data = pd.read_csv(
cache(github_data_url + "adult.data"),
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
data["Target"] = data["Target"] == " >50K"
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
data[k] = np.array([rcode[v.strip()] for v in data[k]])
else:
data[k] = data[k].cat.codes
if display:
return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values
else:
return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
|
[
"def",
"adult",
"(",
"display",
"=",
"False",
")",
":",
"dtypes",
"=",
"[",
"(",
"\"Age\"",
",",
"\"float32\"",
")",
",",
"(",
"\"Workclass\"",
",",
"\"category\"",
")",
",",
"(",
"\"fnlwgt\"",
",",
"\"float32\"",
")",
",",
"(",
"\"Education\"",
",",
"\"category\"",
")",
",",
"(",
"\"Education-Num\"",
",",
"\"float32\"",
")",
",",
"(",
"\"Marital Status\"",
",",
"\"category\"",
")",
",",
"(",
"\"Occupation\"",
",",
"\"category\"",
")",
",",
"(",
"\"Relationship\"",
",",
"\"category\"",
")",
",",
"(",
"\"Race\"",
",",
"\"category\"",
")",
",",
"(",
"\"Sex\"",
",",
"\"category\"",
")",
",",
"(",
"\"Capital Gain\"",
",",
"\"float32\"",
")",
",",
"(",
"\"Capital Loss\"",
",",
"\"float32\"",
")",
",",
"(",
"\"Hours per week\"",
",",
"\"float32\"",
")",
",",
"(",
"\"Country\"",
",",
"\"category\"",
")",
",",
"(",
"\"Target\"",
",",
"\"category\"",
")",
"]",
"raw_data",
"=",
"pd",
".",
"read_csv",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"adult.data\"",
")",
",",
"names",
"=",
"[",
"d",
"[",
"0",
"]",
"for",
"d",
"in",
"dtypes",
"]",
",",
"na_values",
"=",
"\"?\"",
",",
"dtype",
"=",
"dict",
"(",
"dtypes",
")",
")",
"data",
"=",
"raw_data",
".",
"drop",
"(",
"[",
"\"Education\"",
"]",
",",
"axis",
"=",
"1",
")",
"# redundant with Education-Num",
"filt_dtypes",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"not",
"(",
"x",
"[",
"0",
"]",
"in",
"[",
"\"Target\"",
",",
"\"Education\"",
"]",
")",
",",
"dtypes",
")",
")",
"data",
"[",
"\"Target\"",
"]",
"=",
"data",
"[",
"\"Target\"",
"]",
"==",
"\" >50K\"",
"rcode",
"=",
"{",
"\"Not-in-family\"",
":",
"0",
",",
"\"Unmarried\"",
":",
"1",
",",
"\"Other-relative\"",
":",
"2",
",",
"\"Own-child\"",
":",
"3",
",",
"\"Husband\"",
":",
"4",
",",
"\"Wife\"",
":",
"5",
"}",
"for",
"k",
",",
"dtype",
"in",
"filt_dtypes",
":",
"if",
"dtype",
"==",
"\"category\"",
":",
"if",
"k",
"==",
"\"Relationship\"",
":",
"data",
"[",
"k",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"rcode",
"[",
"v",
".",
"strip",
"(",
")",
"]",
"for",
"v",
"in",
"data",
"[",
"k",
"]",
"]",
")",
"else",
":",
"data",
"[",
"k",
"]",
"=",
"data",
"[",
"k",
"]",
".",
"cat",
".",
"codes",
"if",
"display",
":",
"return",
"raw_data",
".",
"drop",
"(",
"[",
"\"Education\"",
",",
"\"Target\"",
",",
"\"fnlwgt\"",
"]",
",",
"axis",
"=",
"1",
")",
",",
"data",
"[",
"\"Target\"",
"]",
".",
"values",
"else",
":",
"return",
"data",
".",
"drop",
"(",
"[",
"\"Target\"",
",",
"\"fnlwgt\"",
"]",
",",
"axis",
"=",
"1",
")",
",",
"data",
"[",
"\"Target\"",
"]",
".",
"values"
] |
Return the Adult census data in a nice package.
|
[
"Return",
"the",
"Adult",
"census",
"data",
"in",
"a",
"nice",
"package",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L92-L128
|
train
|
slundberg/shap
|
shap/datasets.py
|
nhanesi
|
def nhanesi(display=False):
""" A nicely packaged version of NHANES I data with surivival times as labels.
"""
X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv"))
y = pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"]
if display:
X_display = X.copy()
X_display["Sex"] = ["Male" if v == 1 else "Female" for v in X["Sex"]]
return X_display, np.array(y)
else:
return X, np.array(y)
|
python
|
def nhanesi(display=False):
""" A nicely packaged version of NHANES I data with surivival times as labels.
"""
X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv"))
y = pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"]
if display:
X_display = X.copy()
X_display["Sex"] = ["Male" if v == 1 else "Female" for v in X["Sex"]]
return X_display, np.array(y)
else:
return X, np.array(y)
|
[
"def",
"nhanesi",
"(",
"display",
"=",
"False",
")",
":",
"X",
"=",
"pd",
".",
"read_csv",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"NHANESI_subset_X.csv\"",
")",
")",
"y",
"=",
"pd",
".",
"read_csv",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"NHANESI_subset_y.csv\"",
")",
")",
"[",
"\"y\"",
"]",
"if",
"display",
":",
"X_display",
"=",
"X",
".",
"copy",
"(",
")",
"X_display",
"[",
"\"Sex\"",
"]",
"=",
"[",
"\"Male\"",
"if",
"v",
"==",
"1",
"else",
"\"Female\"",
"for",
"v",
"in",
"X",
"[",
"\"Sex\"",
"]",
"]",
"return",
"X_display",
",",
"np",
".",
"array",
"(",
"y",
")",
"else",
":",
"return",
"X",
",",
"np",
".",
"array",
"(",
"y",
")"
] |
A nicely packaged version of NHANES I data with surivival times as labels.
|
[
"A",
"nicely",
"packaged",
"version",
"of",
"NHANES",
"I",
"data",
"with",
"surivival",
"times",
"as",
"labels",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L131-L141
|
train
|
slundberg/shap
|
shap/datasets.py
|
cric
|
def cric(display=False):
""" A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
"""
X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv"))
y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv"))
if display:
X_display = X.copy()
return X_display, y
else:
return X, y
|
python
|
def cric(display=False):
""" A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
"""
X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv"))
y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv"))
if display:
X_display = X.copy()
return X_display, y
else:
return X, y
|
[
"def",
"cric",
"(",
"display",
"=",
"False",
")",
":",
"X",
"=",
"pd",
".",
"read_csv",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"CRIC_time_4yearESRD_X.csv\"",
")",
")",
"y",
"=",
"np",
".",
"loadtxt",
"(",
"cache",
"(",
"github_data_url",
"+",
"\"CRIC_time_4yearESRD_y.csv\"",
")",
")",
"if",
"display",
":",
"X_display",
"=",
"X",
".",
"copy",
"(",
")",
"return",
"X_display",
",",
"y",
"else",
":",
"return",
"X",
",",
"y"
] |
A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
|
[
"A",
"nicely",
"packaged",
"version",
"of",
"CRIC",
"data",
"with",
"progression",
"to",
"ESRD",
"within",
"4",
"years",
"as",
"the",
"label",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L143-L152
|
train
|
slundberg/shap
|
shap/datasets.py
|
corrgroups60
|
def corrgroups60(display=False):
""" Correlated Groups 60
A simulated dataset with tight correlations among distinct groups of features.
"""
# set a constant seed
old_seed = np.random.seed()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficent from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
# build a correlation matrix with groups of 3 tightly correlated features
C = np.eye(M)
for i in range(0,30,3):
C[i,i+1] = C[i+1,i] = 0.99
C[i,i+2] = C[i+2,i] = 0.99
C[i+1,i+2] = C[i+2,i+1] = 0.99
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X_centered = X_start - X_start.mean(0)
Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
X_white = np.matmul(X_centered, W.T)
assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6 # ensure this decorrelates the data
# create the final data
X_final = np.matmul(X_white, np.linalg.cholesky(C).T)
X = X_final
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random seed
np.random.seed(old_seed)
return pd.DataFrame(X), y
|
python
|
def corrgroups60(display=False):
""" Correlated Groups 60
A simulated dataset with tight correlations among distinct groups of features.
"""
# set a constant seed
old_seed = np.random.seed()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficent from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
# build a correlation matrix with groups of 3 tightly correlated features
C = np.eye(M)
for i in range(0,30,3):
C[i,i+1] = C[i+1,i] = 0.99
C[i,i+2] = C[i+2,i] = 0.99
C[i+1,i+2] = C[i+2,i+1] = 0.99
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X_centered = X_start - X_start.mean(0)
Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
X_white = np.matmul(X_centered, W.T)
assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6 # ensure this decorrelates the data
# create the final data
X_final = np.matmul(X_white, np.linalg.cholesky(C).T)
X = X_final
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random seed
np.random.seed(old_seed)
return pd.DataFrame(X), y
|
[
"def",
"corrgroups60",
"(",
"display",
"=",
"False",
")",
":",
"# set a constant seed",
"old_seed",
"=",
"np",
".",
"random",
".",
"seed",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"0",
")",
"# generate dataset with known correlation",
"N",
"=",
"1000",
"M",
"=",
"60",
"# set one coefficent from each group of 3 to 1",
"beta",
"=",
"np",
".",
"zeros",
"(",
"M",
")",
"beta",
"[",
"0",
":",
"30",
":",
"3",
"]",
"=",
"1",
"# build a correlation matrix with groups of 3 tightly correlated features",
"C",
"=",
"np",
".",
"eye",
"(",
"M",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"30",
",",
"3",
")",
":",
"C",
"[",
"i",
",",
"i",
"+",
"1",
"]",
"=",
"C",
"[",
"i",
"+",
"1",
",",
"i",
"]",
"=",
"0.99",
"C",
"[",
"i",
",",
"i",
"+",
"2",
"]",
"=",
"C",
"[",
"i",
"+",
"2",
",",
"i",
"]",
"=",
"0.99",
"C",
"[",
"i",
"+",
"1",
",",
"i",
"+",
"2",
"]",
"=",
"C",
"[",
"i",
"+",
"2",
",",
"i",
"+",
"1",
"]",
"=",
"0.99",
"f",
"=",
"lambda",
"X",
":",
"np",
".",
"matmul",
"(",
"X",
",",
"beta",
")",
"# Make sure the sample correlation is a perfect match",
"X_start",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"N",
",",
"M",
")",
"X_centered",
"=",
"X_start",
"-",
"X_start",
".",
"mean",
"(",
"0",
")",
"Sigma",
"=",
"np",
".",
"matmul",
"(",
"X_centered",
".",
"T",
",",
"X_centered",
")",
"/",
"X_centered",
".",
"shape",
"[",
"0",
"]",
"W",
"=",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"Sigma",
")",
")",
".",
"T",
"X_white",
"=",
"np",
".",
"matmul",
"(",
"X_centered",
",",
"W",
".",
"T",
")",
"assert",
"np",
".",
"linalg",
".",
"norm",
"(",
"np",
".",
"corrcoef",
"(",
"np",
".",
"matmul",
"(",
"X_centered",
",",
"W",
".",
"T",
")",
".",
"T",
")",
"-",
"np",
".",
"eye",
"(",
"M",
")",
")",
"<",
"1e-6",
"# ensure this decorrelates the data",
"# create the final data",
"X_final",
"=",
"np",
".",
"matmul",
"(",
"X_white",
",",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"C",
")",
".",
"T",
")",
"X",
"=",
"X_final",
"y",
"=",
"f",
"(",
"X",
")",
"+",
"np",
".",
"random",
".",
"randn",
"(",
"N",
")",
"*",
"1e-2",
"# restore the previous numpy random seed",
"np",
".",
"random",
".",
"seed",
"(",
"old_seed",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"X",
")",
",",
"y"
] |
Correlated Groups 60
A simulated dataset with tight correlations among distinct groups of features.
|
[
"Correlated",
"Groups",
"60",
"A",
"simulated",
"dataset",
"with",
"tight",
"correlations",
"among",
"distinct",
"groups",
"of",
"features",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L155-L197
|
train
|
slundberg/shap
|
shap/datasets.py
|
independentlinear60
|
def independentlinear60(display=False):
""" A simulated dataset with tight correlations among distinct groups of features.
"""
# set a constant seed
old_seed = np.random.seed()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficent from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random seed
np.random.seed(old_seed)
return pd.DataFrame(X), y
|
python
|
def independentlinear60(display=False):
""" A simulated dataset with tight correlations among distinct groups of features.
"""
# set a constant seed
old_seed = np.random.seed()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficent from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random seed
np.random.seed(old_seed)
return pd.DataFrame(X), y
|
[
"def",
"independentlinear60",
"(",
"display",
"=",
"False",
")",
":",
"# set a constant seed",
"old_seed",
"=",
"np",
".",
"random",
".",
"seed",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"0",
")",
"# generate dataset with known correlation",
"N",
"=",
"1000",
"M",
"=",
"60",
"# set one coefficent from each group of 3 to 1",
"beta",
"=",
"np",
".",
"zeros",
"(",
"M",
")",
"beta",
"[",
"0",
":",
"30",
":",
"3",
"]",
"=",
"1",
"f",
"=",
"lambda",
"X",
":",
"np",
".",
"matmul",
"(",
"X",
",",
"beta",
")",
"# Make sure the sample correlation is a perfect match",
"X_start",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"N",
",",
"M",
")",
"X",
"=",
"X_start",
"-",
"X_start",
".",
"mean",
"(",
"0",
")",
"y",
"=",
"f",
"(",
"X",
")",
"+",
"np",
".",
"random",
".",
"randn",
"(",
"N",
")",
"*",
"1e-2",
"# restore the previous numpy random seed",
"np",
".",
"random",
".",
"seed",
"(",
"old_seed",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"X",
")",
",",
"y"
] |
A simulated dataset with tight correlations among distinct groups of features.
|
[
"A",
"simulated",
"dataset",
"with",
"tight",
"correlations",
"among",
"distinct",
"groups",
"of",
"features",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L200-L225
|
train
|
slundberg/shap
|
shap/datasets.py
|
rank
|
def rank():
""" Ranking datasets from lightgbm repository.
"""
rank_data_url = 'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/'
x_train, y_train = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.train'))
x_test, y_test = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.test'))
q_train = np.loadtxt(cache(rank_data_url + 'rank.train.query'))
q_test = np.loadtxt(cache(rank_data_url + 'rank.test.query'))
return x_train, y_train, x_test, y_test, q_train, q_test
|
python
|
def rank():
""" Ranking datasets from lightgbm repository.
"""
rank_data_url = 'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/'
x_train, y_train = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.train'))
x_test, y_test = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.test'))
q_train = np.loadtxt(cache(rank_data_url + 'rank.train.query'))
q_test = np.loadtxt(cache(rank_data_url + 'rank.test.query'))
return x_train, y_train, x_test, y_test, q_train, q_test
|
[
"def",
"rank",
"(",
")",
":",
"rank_data_url",
"=",
"'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/'",
"x_train",
",",
"y_train",
"=",
"sklearn",
".",
"datasets",
".",
"load_svmlight_file",
"(",
"cache",
"(",
"rank_data_url",
"+",
"'rank.train'",
")",
")",
"x_test",
",",
"y_test",
"=",
"sklearn",
".",
"datasets",
".",
"load_svmlight_file",
"(",
"cache",
"(",
"rank_data_url",
"+",
"'rank.test'",
")",
")",
"q_train",
"=",
"np",
".",
"loadtxt",
"(",
"cache",
"(",
"rank_data_url",
"+",
"'rank.train.query'",
")",
")",
"q_test",
"=",
"np",
".",
"loadtxt",
"(",
"cache",
"(",
"rank_data_url",
"+",
"'rank.test.query'",
")",
")",
"return",
"x_train",
",",
"y_train",
",",
"x_test",
",",
"y_test",
",",
"q_train",
",",
"q_test"
] |
Ranking datasets from lightgbm repository.
|
[
"Ranking",
"datasets",
"from",
"lightgbm",
"repository",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L234-L242
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
batch_remove_retrain
|
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
""" An approximation of holdout that only retraines the model once.
This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
efficient that the holdout method because it masks the most important features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the holdout metric.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nmask top features for each explanation
X_train_tmp = X_train.copy()
X_train_mean = X_train.mean(0)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
for i in range(len(y_train)):
if nmask_train[i] > 0:
ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
X_test_tmp = X_test.copy()
for i in range(len(y_test)):
if nmask_test[i] > 0:
ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
# train the model with all the given features masked
model_masked = model_generator()
model_masked.fit(X_train_tmp, y_train)
yp_test_masked = model_masked.predict(X_test_tmp)
return metric(y_test, yp_test_masked)
|
python
|
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
""" An approximation of holdout that only retraines the model once.
This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
efficient that the holdout method because it masks the most important features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the holdout metric.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nmask top features for each explanation
X_train_tmp = X_train.copy()
X_train_mean = X_train.mean(0)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
for i in range(len(y_train)):
if nmask_train[i] > 0:
ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
X_test_tmp = X_test.copy()
for i in range(len(y_test)):
if nmask_test[i] > 0:
ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
# train the model with all the given features masked
model_masked = model_generator()
model_masked.fit(X_train_tmp, y_train)
yp_test_masked = model_masked.predict(X_test_tmp)
return metric(y_test, yp_test_masked)
|
[
"def",
"batch_remove_retrain",
"(",
"nmask_train",
",",
"nmask_test",
",",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_train",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
")",
":",
"warnings",
".",
"warn",
"(",
"\"The retrain based measures can incorrectly evaluate models in some cases!\"",
")",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"# how many features to mask",
"assert",
"X_train",
".",
"shape",
"[",
"1",
"]",
"==",
"X_test",
".",
"shape",
"[",
"1",
"]",
"# mask nmask top features for each explanation",
"X_train_tmp",
"=",
"X_train",
".",
"copy",
"(",
")",
"X_train_mean",
"=",
"X_train",
".",
"mean",
"(",
"0",
")",
"tie_breaking_noise",
"=",
"const_rand",
"(",
"X_train",
".",
"shape",
"[",
"1",
"]",
")",
"*",
"1e-6",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"y_train",
")",
")",
":",
"if",
"nmask_train",
"[",
"i",
"]",
">",
"0",
":",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"-",
"attr_train",
"[",
"i",
",",
":",
"]",
"+",
"tie_breaking_noise",
")",
"X_train_tmp",
"[",
"i",
",",
"ordering",
"[",
":",
"nmask_train",
"[",
"i",
"]",
"]",
"]",
"=",
"X_train_mean",
"[",
"ordering",
"[",
":",
"nmask_train",
"[",
"i",
"]",
"]",
"]",
"X_test_tmp",
"=",
"X_test",
".",
"copy",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"y_test",
")",
")",
":",
"if",
"nmask_test",
"[",
"i",
"]",
">",
"0",
":",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"-",
"attr_test",
"[",
"i",
",",
":",
"]",
"+",
"tie_breaking_noise",
")",
"X_test_tmp",
"[",
"i",
",",
"ordering",
"[",
":",
"nmask_test",
"[",
"i",
"]",
"]",
"]",
"=",
"X_train_mean",
"[",
"ordering",
"[",
":",
"nmask_test",
"[",
"i",
"]",
"]",
"]",
"# train the model with all the given features masked",
"model_masked",
"=",
"model_generator",
"(",
")",
"model_masked",
".",
"fit",
"(",
"X_train_tmp",
",",
"y_train",
")",
"yp_test_masked",
"=",
"model_masked",
".",
"predict",
"(",
"X_test_tmp",
")",
"return",
"metric",
"(",
"y_test",
",",
"yp_test_masked",
")"
] |
An approximation of holdout that only retraines the model once.
This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
efficient that the holdout method because it masks the most important features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the holdout metric.
|
[
"An",
"approximation",
"of",
"holdout",
"that",
"only",
"retraines",
"the",
"model",
"once",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L158-L193
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
keep_retrain
|
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is retrained for each test sample with the non-important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if only those features had existed. To determine this we can mask the other features
across the entire training and test datasets, then retrain the model. If we apply compare the
output of this retrained model to the original model we can see the effect produced by only
knowning the important features. Since for individualized explanation methods each test sample
has a different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are retained.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
# see if we match the last cached call
global _keep_cache
args = (X_train, y_train, X_test, y_test, model_generator, metric)
cache_match = False
if "args" in _keep_cache:
if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
cache_match = True
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# this is the model we will retrain many times
model_masked = model_generator()
# keep nkeep top features and re-train the model for each test explanation
X_train_tmp = np.zeros(X_train.shape)
X_test_tmp = np.zeros(X_test.shape)
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
last_nkeep = _keep_cache.get("nkeep", None)
last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
if cache_match and last_nkeep[i] == nkeep[i]:
yp_masked_test[i] = last_yp_masked_test[i]
elif nkeep[i] == attr_test.shape[1]:
yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
else:
# mask out the most important features for this test instance
X_train_tmp[:] = X_train
X_test_tmp[:] = X_test
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
# retrain the model and make a prediction
model_masked.fit(X_train_tmp, y_train)
yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
# save our results so the next call to us can be faster when there is redundancy
_keep_cache["nkeep"] = nkeep
_keep_cache["yp_masked_test"] = yp_masked_test
_keep_cache["attr_test"] = attr_test
_keep_cache["args"] = args
return metric(y_test, yp_masked_test)
|
python
|
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is retrained for each test sample with the non-important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if only those features had existed. To determine this we can mask the other features
across the entire training and test datasets, then retrain the model. If we apply compare the
output of this retrained model to the original model we can see the effect produced by only
knowning the important features. Since for individualized explanation methods each test sample
has a different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are retained.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
# see if we match the last cached call
global _keep_cache
args = (X_train, y_train, X_test, y_test, model_generator, metric)
cache_match = False
if "args" in _keep_cache:
if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
cache_match = True
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# this is the model we will retrain many times
model_masked = model_generator()
# keep nkeep top features and re-train the model for each test explanation
X_train_tmp = np.zeros(X_train.shape)
X_test_tmp = np.zeros(X_test.shape)
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
last_nkeep = _keep_cache.get("nkeep", None)
last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
if cache_match and last_nkeep[i] == nkeep[i]:
yp_masked_test[i] = last_yp_masked_test[i]
elif nkeep[i] == attr_test.shape[1]:
yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
else:
# mask out the most important features for this test instance
X_train_tmp[:] = X_train
X_test_tmp[:] = X_test
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
# retrain the model and make a prediction
model_masked.fit(X_train_tmp, y_train)
yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
# save our results so the next call to us can be faster when there is redundancy
_keep_cache["nkeep"] = nkeep
_keep_cache["yp_masked_test"] = yp_masked_test
_keep_cache["attr_test"] = attr_test
_keep_cache["args"] = args
return metric(y_test, yp_masked_test)
|
[
"def",
"keep_retrain",
"(",
"nkeep",
",",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
",",
"trained_model",
",",
"random_state",
")",
":",
"warnings",
".",
"warn",
"(",
"\"The retrain based measures can incorrectly evaluate models in some cases!\"",
")",
"# see if we match the last cached call",
"global",
"_keep_cache",
"args",
"=",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"model_generator",
",",
"metric",
")",
"cache_match",
"=",
"False",
"if",
"\"args\"",
"in",
"_keep_cache",
":",
"if",
"all",
"(",
"a",
"is",
"b",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"_keep_cache",
"[",
"\"args\"",
"]",
",",
"args",
")",
")",
"and",
"np",
".",
"all",
"(",
"_keep_cache",
"[",
"\"attr_test\"",
"]",
"==",
"attr_test",
")",
":",
"cache_match",
"=",
"True",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"# how many features to mask",
"assert",
"X_train",
".",
"shape",
"[",
"1",
"]",
"==",
"X_test",
".",
"shape",
"[",
"1",
"]",
"# this is the model we will retrain many times",
"model_masked",
"=",
"model_generator",
"(",
")",
"# keep nkeep top features and re-train the model for each test explanation",
"X_train_tmp",
"=",
"np",
".",
"zeros",
"(",
"X_train",
".",
"shape",
")",
"X_test_tmp",
"=",
"np",
".",
"zeros",
"(",
"X_test",
".",
"shape",
")",
"yp_masked_test",
"=",
"np",
".",
"zeros",
"(",
"y_test",
".",
"shape",
")",
"tie_breaking_noise",
"=",
"const_rand",
"(",
"X_train",
".",
"shape",
"[",
"1",
"]",
")",
"*",
"1e-6",
"last_nkeep",
"=",
"_keep_cache",
".",
"get",
"(",
"\"nkeep\"",
",",
"None",
")",
"last_yp_masked_test",
"=",
"_keep_cache",
".",
"get",
"(",
"\"yp_masked_test\"",
",",
"None",
")",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"len",
"(",
"y_test",
")",
")",
",",
"\"Retraining for the 'keep' metric\"",
")",
":",
"if",
"cache_match",
"and",
"last_nkeep",
"[",
"i",
"]",
"==",
"nkeep",
"[",
"i",
"]",
":",
"yp_masked_test",
"[",
"i",
"]",
"=",
"last_yp_masked_test",
"[",
"i",
"]",
"elif",
"nkeep",
"[",
"i",
"]",
"==",
"attr_test",
".",
"shape",
"[",
"1",
"]",
":",
"yp_masked_test",
"[",
"i",
"]",
"=",
"trained_model",
".",
"predict",
"(",
"X_test",
"[",
"i",
":",
"i",
"+",
"1",
"]",
")",
"[",
"0",
"]",
"else",
":",
"# mask out the most important features for this test instance",
"X_train_tmp",
"[",
":",
"]",
"=",
"X_train",
"X_test_tmp",
"[",
":",
"]",
"=",
"X_test",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"-",
"attr_test",
"[",
"i",
",",
":",
"]",
"+",
"tie_breaking_noise",
")",
"X_train_tmp",
"[",
":",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
"=",
"X_train",
"[",
":",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
".",
"mean",
"(",
")",
"X_test_tmp",
"[",
"i",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
"=",
"X_train",
"[",
":",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
".",
"mean",
"(",
")",
"# retrain the model and make a prediction",
"model_masked",
".",
"fit",
"(",
"X_train_tmp",
",",
"y_train",
")",
"yp_masked_test",
"[",
"i",
"]",
"=",
"model_masked",
".",
"predict",
"(",
"X_test_tmp",
"[",
"i",
":",
"i",
"+",
"1",
"]",
")",
"[",
"0",
"]",
"# save our results so the next call to us can be faster when there is redundancy",
"_keep_cache",
"[",
"\"nkeep\"",
"]",
"=",
"nkeep",
"_keep_cache",
"[",
"\"yp_masked_test\"",
"]",
"=",
"yp_masked_test",
"_keep_cache",
"[",
"\"attr_test\"",
"]",
"=",
"attr_test",
"_keep_cache",
"[",
"\"args\"",
"]",
"=",
"args",
"return",
"metric",
"(",
"y_test",
",",
"yp_masked_test",
")"
] |
The model is retrained for each test sample with the non-important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if only those features had existed. To determine this we can mask the other features
across the entire training and test datasets, then retrain the model. If we apply compare the
output of this retrained model to the original model we can see the effect produced by only
knowning the important features. Since for individualized explanation methods each test sample
has a different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are retained.
|
[
"The",
"model",
"is",
"retrained",
"for",
"each",
"test",
"sample",
"with",
"the",
"non",
"-",
"important",
"features",
"set",
"to",
"a",
"constant",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L196-L258
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
keep_mask
|
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to their mean.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
|
python
|
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to their mean.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
|
[
"def",
"keep_mask",
"(",
"nkeep",
",",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
",",
"trained_model",
",",
"random_state",
")",
":",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"# how many features to mask",
"assert",
"X_train",
".",
"shape",
"[",
"1",
"]",
"==",
"X_test",
".",
"shape",
"[",
"1",
"]",
"# keep nkeep top features for each test explanation",
"X_test_tmp",
"=",
"X_test",
".",
"copy",
"(",
")",
"yp_masked_test",
"=",
"np",
".",
"zeros",
"(",
"y_test",
".",
"shape",
")",
"tie_breaking_noise",
"=",
"const_rand",
"(",
"X_train",
".",
"shape",
"[",
"1",
"]",
",",
"random_state",
")",
"*",
"1e-6",
"mean_vals",
"=",
"X_train",
".",
"mean",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"y_test",
")",
")",
":",
"if",
"nkeep",
"[",
"i",
"]",
"<",
"X_test",
".",
"shape",
"[",
"1",
"]",
":",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"-",
"attr_test",
"[",
"i",
",",
":",
"]",
"+",
"tie_breaking_noise",
")",
"X_test_tmp",
"[",
"i",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
"=",
"mean_vals",
"[",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
"yp_masked_test",
"=",
"trained_model",
".",
"predict",
"(",
"X_test_tmp",
")",
"return",
"metric",
"(",
"y_test",
",",
"yp_masked_test",
")"
] |
The model is revaluated for each test sample with the non-important features set to their mean.
|
[
"The",
"model",
"is",
"revaluated",
"for",
"each",
"test",
"sample",
"with",
"the",
"non",
"-",
"important",
"features",
"set",
"to",
"their",
"mean",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L260-L281
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
keep_impute
|
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
C = np.cov(X_train.T)
C += np.eye(C.shape[0]) * 1e-6
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
observe_inds = ordering[:nkeep[i]]
impute_inds = ordering[nkeep[i]:]
# impute missing data assuming it follows a multivariate normal distribution
Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
Cio = C[impute_inds,:][:,observe_inds]
impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
X_test_tmp[i, impute_inds] = impute
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
|
python
|
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features for each test explanation
C = np.cov(X_train.T)
C += np.eye(C.shape[0]) * 1e-6
X_test_tmp = X_test.copy()
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
mean_vals = X_train.mean(0)
for i in range(len(y_test)):
if nkeep[i] < X_test.shape[1]:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
observe_inds = ordering[:nkeep[i]]
impute_inds = ordering[nkeep[i]:]
# impute missing data assuming it follows a multivariate normal distribution
Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
Cio = C[impute_inds,:][:,observe_inds]
impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
X_test_tmp[i, impute_inds] = impute
yp_masked_test = trained_model.predict(X_test_tmp)
return metric(y_test, yp_masked_test)
|
[
"def",
"keep_impute",
"(",
"nkeep",
",",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
",",
"trained_model",
",",
"random_state",
")",
":",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"# how many features to mask",
"assert",
"X_train",
".",
"shape",
"[",
"1",
"]",
"==",
"X_test",
".",
"shape",
"[",
"1",
"]",
"# keep nkeep top features for each test explanation",
"C",
"=",
"np",
".",
"cov",
"(",
"X_train",
".",
"T",
")",
"C",
"+=",
"np",
".",
"eye",
"(",
"C",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"1e-6",
"X_test_tmp",
"=",
"X_test",
".",
"copy",
"(",
")",
"yp_masked_test",
"=",
"np",
".",
"zeros",
"(",
"y_test",
".",
"shape",
")",
"tie_breaking_noise",
"=",
"const_rand",
"(",
"X_train",
".",
"shape",
"[",
"1",
"]",
",",
"random_state",
")",
"*",
"1e-6",
"mean_vals",
"=",
"X_train",
".",
"mean",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"y_test",
")",
")",
":",
"if",
"nkeep",
"[",
"i",
"]",
"<",
"X_test",
".",
"shape",
"[",
"1",
"]",
":",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"-",
"attr_test",
"[",
"i",
",",
":",
"]",
"+",
"tie_breaking_noise",
")",
"observe_inds",
"=",
"ordering",
"[",
":",
"nkeep",
"[",
"i",
"]",
"]",
"impute_inds",
"=",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"# impute missing data assuming it follows a multivariate normal distribution",
"Coo_inv",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"C",
"[",
"observe_inds",
",",
":",
"]",
"[",
":",
",",
"observe_inds",
"]",
")",
"Cio",
"=",
"C",
"[",
"impute_inds",
",",
":",
"]",
"[",
":",
",",
"observe_inds",
"]",
"impute",
"=",
"mean_vals",
"[",
"impute_inds",
"]",
"+",
"Cio",
"@",
"Coo_inv",
"@",
"(",
"X_test",
"[",
"i",
",",
"observe_inds",
"]",
"-",
"mean_vals",
"[",
"observe_inds",
"]",
")",
"X_test_tmp",
"[",
"i",
",",
"impute_inds",
"]",
"=",
"impute",
"yp_masked_test",
"=",
"trained_model",
".",
"predict",
"(",
"X_test_tmp",
")",
"return",
"metric",
"(",
"y_test",
",",
"yp_masked_test",
")"
] |
The model is revaluated for each test sample with the non-important features set to an imputed value.
Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
be significantly bigger than X_train.shape[1].
|
[
"The",
"model",
"is",
"revaluated",
"for",
"each",
"test",
"sample",
"with",
"the",
"non",
"-",
"important",
"features",
"set",
"to",
"an",
"imputed",
"value",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L283-L318
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
keep_resample
|
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to resample background values.
""" # why broken? overwriting?
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# how many samples to take
nsamples = 100
# keep nkeep top features for each test explanation
N,M = X_test.shape
X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
tie_breaking_noise = const_rand(M) * 1e-6
inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
for i in range(N):
if nkeep[i] < M:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
return metric(y_test, yp_masked_test)
|
python
|
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is revaluated for each test sample with the non-important features set to resample background values.
""" # why broken? overwriting?
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# how many samples to take
nsamples = 100
# keep nkeep top features for each test explanation
N,M = X_test.shape
X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
tie_breaking_noise = const_rand(M) * 1e-6
inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
for i in range(N):
if nkeep[i] < M:
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]]
yp_masked_test = trained_model.predict(X_test_tmp)
yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
return metric(y_test, yp_masked_test)
|
[
"def",
"keep_resample",
"(",
"nkeep",
",",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
",",
"trained_model",
",",
"random_state",
")",
":",
"# why broken? overwriting?",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"# how many features to mask",
"assert",
"X_train",
".",
"shape",
"[",
"1",
"]",
"==",
"X_test",
".",
"shape",
"[",
"1",
"]",
"# how many samples to take",
"nsamples",
"=",
"100",
"# keep nkeep top features for each test explanation",
"N",
",",
"M",
"=",
"X_test",
".",
"shape",
"X_test_tmp",
"=",
"np",
".",
"tile",
"(",
"X_test",
",",
"[",
"1",
",",
"nsamples",
"]",
")",
".",
"reshape",
"(",
"nsamples",
"*",
"N",
",",
"M",
")",
"tie_breaking_noise",
"=",
"const_rand",
"(",
"M",
")",
"*",
"1e-6",
"inds",
"=",
"sklearn",
".",
"utils",
".",
"resample",
"(",
"np",
".",
"arange",
"(",
"N",
")",
",",
"n_samples",
"=",
"nsamples",
",",
"random_state",
"=",
"random_state",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"if",
"nkeep",
"[",
"i",
"]",
"<",
"M",
":",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"-",
"attr_test",
"[",
"i",
",",
":",
"]",
"+",
"tie_breaking_noise",
")",
"X_test_tmp",
"[",
"i",
"*",
"nsamples",
":",
"(",
"i",
"+",
"1",
")",
"*",
"nsamples",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
"=",
"X_train",
"[",
"inds",
",",
":",
"]",
"[",
":",
",",
"ordering",
"[",
"nkeep",
"[",
"i",
"]",
":",
"]",
"]",
"yp_masked_test",
"=",
"trained_model",
".",
"predict",
"(",
"X_test_tmp",
")",
"yp_masked_test",
"=",
"np",
".",
"reshape",
"(",
"yp_masked_test",
",",
"(",
"N",
",",
"nsamples",
")",
")",
".",
"mean",
"(",
"1",
")",
"# take the mean output over all samples",
"return",
"metric",
"(",
"y_test",
",",
"yp_masked_test",
")"
] |
The model is revaluated for each test sample with the non-important features set to resample background values.
|
[
"The",
"model",
"is",
"revaluated",
"for",
"each",
"test",
"sample",
"with",
"the",
"non",
"-",
"important",
"features",
"set",
"to",
"resample",
"background",
"values",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L320-L345
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
local_accuracy
|
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
""" The how well do the features plus a constant base rate sum up to the model output.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features and re-train the model for each test explanation
yp_test = trained_model.predict(X_test)
return metric(yp_test, strip_list(attr_test).sum(1))
|
python
|
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
""" The how well do the features plus a constant base rate sum up to the model output.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features and re-train the model for each test explanation
yp_test = trained_model.predict(X_test)
return metric(yp_test, strip_list(attr_test).sum(1))
|
[
"def",
"local_accuracy",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
",",
"trained_model",
")",
":",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"# how many features to mask",
"assert",
"X_train",
".",
"shape",
"[",
"1",
"]",
"==",
"X_test",
".",
"shape",
"[",
"1",
"]",
"# keep nkeep top features and re-train the model for each test explanation",
"yp_test",
"=",
"trained_model",
".",
"predict",
"(",
"X_test",
")",
"return",
"metric",
"(",
"yp_test",
",",
"strip_list",
"(",
"attr_test",
")",
".",
"sum",
"(",
"1",
")",
")"
] |
The how well do the features plus a constant base rate sum up to the model output.
|
[
"The",
"how",
"well",
"do",
"the",
"features",
"plus",
"a",
"constant",
"base",
"rate",
"sum",
"up",
"to",
"the",
"model",
"output",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L384-L396
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
const_rand
|
def const_rand(size, seed=23980):
""" Generate a random array with a fixed seed.
"""
old_seed = np.random.seed()
np.random.seed(seed)
out = np.random.rand(size)
np.random.seed(old_seed)
return out
|
python
|
def const_rand(size, seed=23980):
""" Generate a random array with a fixed seed.
"""
old_seed = np.random.seed()
np.random.seed(seed)
out = np.random.rand(size)
np.random.seed(old_seed)
return out
|
[
"def",
"const_rand",
"(",
"size",
",",
"seed",
"=",
"23980",
")",
":",
"old_seed",
"=",
"np",
".",
"random",
".",
"seed",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"out",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"size",
")",
"np",
".",
"random",
".",
"seed",
"(",
"old_seed",
")",
"return",
"out"
] |
Generate a random array with a fixed seed.
|
[
"Generate",
"a",
"random",
"array",
"with",
"a",
"fixed",
"seed",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L401-L408
|
train
|
slundberg/shap
|
shap/benchmark/measures.py
|
const_shuffle
|
def const_shuffle(arr, seed=23980):
""" Shuffle an array in-place with a fixed seed.
"""
old_seed = np.random.seed()
np.random.seed(seed)
np.random.shuffle(arr)
np.random.seed(old_seed)
|
python
|
def const_shuffle(arr, seed=23980):
""" Shuffle an array in-place with a fixed seed.
"""
old_seed = np.random.seed()
np.random.seed(seed)
np.random.shuffle(arr)
np.random.seed(old_seed)
|
[
"def",
"const_shuffle",
"(",
"arr",
",",
"seed",
"=",
"23980",
")",
":",
"old_seed",
"=",
"np",
".",
"random",
".",
"seed",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"arr",
")",
"np",
".",
"random",
".",
"seed",
"(",
"old_seed",
")"
] |
Shuffle an array in-place with a fixed seed.
|
[
"Shuffle",
"an",
"array",
"in",
"-",
"place",
"with",
"a",
"fixed",
"seed",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L410-L416
|
train
|
slundberg/shap
|
shap/explainers/mimic.py
|
MimicExplainer.shap_values
|
def shap_values(self, X, **kwargs):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For a models with a single output this returns a matrix of SHAP values
(# samples x # features + 1). The last column is the base value of the model, which is
the expected value of the model applied to the background dataset. This causes each row to
sum to the model output for that sample. For models with vector outputs this returns a list
of such matrices, one for each output.
"""
phi = None
if self.mimic_model_type == "xgboost":
if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
X = xgboost.DMatrix(X)
phi = self.trees.predict(X, pred_contribs=True)
if phi is not None:
if len(phi.shape) == 3:
return [phi[:, i, :] for i in range(phi.shape[1])]
else:
return phi
|
python
|
def shap_values(self, X, **kwargs):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For a models with a single output this returns a matrix of SHAP values
(# samples x # features + 1). The last column is the base value of the model, which is
the expected value of the model applied to the background dataset. This causes each row to
sum to the model output for that sample. For models with vector outputs this returns a list
of such matrices, one for each output.
"""
phi = None
if self.mimic_model_type == "xgboost":
if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
X = xgboost.DMatrix(X)
phi = self.trees.predict(X, pred_contribs=True)
if phi is not None:
if len(phi.shape) == 3:
return [phi[:, i, :] for i in range(phi.shape[1])]
else:
return phi
|
[
"def",
"shap_values",
"(",
"self",
",",
"X",
",",
"*",
"*",
"kwargs",
")",
":",
"phi",
"=",
"None",
"if",
"self",
".",
"mimic_model_type",
"==",
"\"xgboost\"",
":",
"if",
"not",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"xgboost.core.DMatrix'>\"",
")",
":",
"X",
"=",
"xgboost",
".",
"DMatrix",
"(",
"X",
")",
"phi",
"=",
"self",
".",
"trees",
".",
"predict",
"(",
"X",
",",
"pred_contribs",
"=",
"True",
")",
"if",
"phi",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"phi",
".",
"shape",
")",
"==",
"3",
":",
"return",
"[",
"phi",
"[",
":",
",",
"i",
",",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"else",
":",
"return",
"phi"
] |
Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For a models with a single output this returns a matrix of SHAP values
(# samples x # features + 1). The last column is the base value of the model, which is
the expected value of the model applied to the background dataset. This causes each row to
sum to the model output for that sample. For models with vector outputs this returns a list
of such matrices, one for each output.
|
[
"Estimate",
"the",
"SHAP",
"values",
"for",
"a",
"set",
"of",
"samples",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/mimic.py#L75-L102
|
train
|
slundberg/shap
|
shap/plots/image.py
|
image_plot
|
def image_plot(shap_values, x, labels=None, show=True, width=20, aspect=0.2, hspace=0.2, labelpad=None):
""" Plots SHAP values for image inputs.
"""
multi_output = True
if type(shap_values) != list:
multi_output = False
shap_values = [shap_values]
# make sure labels
if labels is not None:
assert labels.shape[0] == shap_values[0].shape[0], "Labels must have same row count as shap_values arrays!"
if multi_output:
assert labels.shape[1] == len(shap_values), "Labels must have a column for each output in shap_values!"
else:
assert len(labels.shape) == 1, "Labels must be a vector for single output shap_values."
label_kwargs = {} if labelpad is None else {'pad': labelpad}
# plot our explanations
fig_size = np.array([3 * (len(shap_values) + 1), 2.5 * (x.shape[0] + 1)])
if fig_size[0] > width:
fig_size *= width / fig_size[0]
fig, axes = pl.subplots(nrows=x.shape[0], ncols=len(shap_values) + 1, figsize=fig_size)
if len(axes.shape) == 1:
axes = axes.reshape(1,axes.size)
for row in range(x.shape[0]):
x_curr = x[row].copy()
# make sure
if len(x_curr.shape) == 3 and x_curr.shape[2] == 1:
x_curr = x_curr.reshape(x_curr.shape[:2])
if x_curr.max() > 1:
x_curr /= 255.
# get a grayscale version of the image
if len(x_curr.shape) == 3 and x_curr.shape[2] == 3:
x_curr_gray = (0.2989 * x_curr[:,:,0] + 0.5870 * x_curr[:,:,1] + 0.1140 * x_curr[:,:,2]) # rgb to gray
else:
x_curr_gray = x_curr
axes[row,0].imshow(x_curr, cmap=pl.get_cmap('gray'))
axes[row,0].axis('off')
if len(shap_values[0][row].shape) == 2:
abs_vals = np.stack([np.abs(shap_values[i]) for i in range(len(shap_values))], 0).flatten()
else:
abs_vals = np.stack([np.abs(shap_values[i].sum(-1)) for i in range(len(shap_values))], 0).flatten()
max_val = np.nanpercentile(abs_vals, 99.9)
for i in range(len(shap_values)):
if labels is not None:
axes[row,i+1].set_title(labels[row,i], **label_kwargs)
sv = shap_values[i][row] if len(shap_values[i][row].shape) == 2 else shap_values[i][row].sum(-1)
axes[row,i+1].imshow(x_curr_gray, cmap=pl.get_cmap('gray'), alpha=0.15, extent=(-1, sv.shape[0], sv.shape[1], -1))
im = axes[row,i+1].imshow(sv, cmap=colors.red_transparent_blue, vmin=-max_val, vmax=max_val)
axes[row,i+1].axis('off')
if hspace == 'auto':
fig.tight_layout()
else:
fig.subplots_adjust(hspace=hspace)
cb = fig.colorbar(im, ax=np.ravel(axes).tolist(), label="SHAP value", orientation="horizontal", aspect=fig_size[0]/aspect)
cb.outline.set_visible(False)
if show:
pl.show()
|
python
|
def image_plot(shap_values, x, labels=None, show=True, width=20, aspect=0.2, hspace=0.2, labelpad=None):
""" Plots SHAP values for image inputs.
"""
multi_output = True
if type(shap_values) != list:
multi_output = False
shap_values = [shap_values]
# make sure labels
if labels is not None:
assert labels.shape[0] == shap_values[0].shape[0], "Labels must have same row count as shap_values arrays!"
if multi_output:
assert labels.shape[1] == len(shap_values), "Labels must have a column for each output in shap_values!"
else:
assert len(labels.shape) == 1, "Labels must be a vector for single output shap_values."
label_kwargs = {} if labelpad is None else {'pad': labelpad}
# plot our explanations
fig_size = np.array([3 * (len(shap_values) + 1), 2.5 * (x.shape[0] + 1)])
if fig_size[0] > width:
fig_size *= width / fig_size[0]
fig, axes = pl.subplots(nrows=x.shape[0], ncols=len(shap_values) + 1, figsize=fig_size)
if len(axes.shape) == 1:
axes = axes.reshape(1,axes.size)
for row in range(x.shape[0]):
x_curr = x[row].copy()
# make sure
if len(x_curr.shape) == 3 and x_curr.shape[2] == 1:
x_curr = x_curr.reshape(x_curr.shape[:2])
if x_curr.max() > 1:
x_curr /= 255.
# get a grayscale version of the image
if len(x_curr.shape) == 3 and x_curr.shape[2] == 3:
x_curr_gray = (0.2989 * x_curr[:,:,0] + 0.5870 * x_curr[:,:,1] + 0.1140 * x_curr[:,:,2]) # rgb to gray
else:
x_curr_gray = x_curr
axes[row,0].imshow(x_curr, cmap=pl.get_cmap('gray'))
axes[row,0].axis('off')
if len(shap_values[0][row].shape) == 2:
abs_vals = np.stack([np.abs(shap_values[i]) for i in range(len(shap_values))], 0).flatten()
else:
abs_vals = np.stack([np.abs(shap_values[i].sum(-1)) for i in range(len(shap_values))], 0).flatten()
max_val = np.nanpercentile(abs_vals, 99.9)
for i in range(len(shap_values)):
if labels is not None:
axes[row,i+1].set_title(labels[row,i], **label_kwargs)
sv = shap_values[i][row] if len(shap_values[i][row].shape) == 2 else shap_values[i][row].sum(-1)
axes[row,i+1].imshow(x_curr_gray, cmap=pl.get_cmap('gray'), alpha=0.15, extent=(-1, sv.shape[0], sv.shape[1], -1))
im = axes[row,i+1].imshow(sv, cmap=colors.red_transparent_blue, vmin=-max_val, vmax=max_val)
axes[row,i+1].axis('off')
if hspace == 'auto':
fig.tight_layout()
else:
fig.subplots_adjust(hspace=hspace)
cb = fig.colorbar(im, ax=np.ravel(axes).tolist(), label="SHAP value", orientation="horizontal", aspect=fig_size[0]/aspect)
cb.outline.set_visible(False)
if show:
pl.show()
|
[
"def",
"image_plot",
"(",
"shap_values",
",",
"x",
",",
"labels",
"=",
"None",
",",
"show",
"=",
"True",
",",
"width",
"=",
"20",
",",
"aspect",
"=",
"0.2",
",",
"hspace",
"=",
"0.2",
",",
"labelpad",
"=",
"None",
")",
":",
"multi_output",
"=",
"True",
"if",
"type",
"(",
"shap_values",
")",
"!=",
"list",
":",
"multi_output",
"=",
"False",
"shap_values",
"=",
"[",
"shap_values",
"]",
"# make sure labels",
"if",
"labels",
"is",
"not",
"None",
":",
"assert",
"labels",
".",
"shape",
"[",
"0",
"]",
"==",
"shap_values",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"\"Labels must have same row count as shap_values arrays!\"",
"if",
"multi_output",
":",
"assert",
"labels",
".",
"shape",
"[",
"1",
"]",
"==",
"len",
"(",
"shap_values",
")",
",",
"\"Labels must have a column for each output in shap_values!\"",
"else",
":",
"assert",
"len",
"(",
"labels",
".",
"shape",
")",
"==",
"1",
",",
"\"Labels must be a vector for single output shap_values.\"",
"label_kwargs",
"=",
"{",
"}",
"if",
"labelpad",
"is",
"None",
"else",
"{",
"'pad'",
":",
"labelpad",
"}",
"# plot our explanations",
"fig_size",
"=",
"np",
".",
"array",
"(",
"[",
"3",
"*",
"(",
"len",
"(",
"shap_values",
")",
"+",
"1",
")",
",",
"2.5",
"*",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"]",
")",
"if",
"fig_size",
"[",
"0",
"]",
">",
"width",
":",
"fig_size",
"*=",
"width",
"/",
"fig_size",
"[",
"0",
"]",
"fig",
",",
"axes",
"=",
"pl",
".",
"subplots",
"(",
"nrows",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"ncols",
"=",
"len",
"(",
"shap_values",
")",
"+",
"1",
",",
"figsize",
"=",
"fig_size",
")",
"if",
"len",
"(",
"axes",
".",
"shape",
")",
"==",
"1",
":",
"axes",
"=",
"axes",
".",
"reshape",
"(",
"1",
",",
"axes",
".",
"size",
")",
"for",
"row",
"in",
"range",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
")",
":",
"x_curr",
"=",
"x",
"[",
"row",
"]",
".",
"copy",
"(",
")",
"# make sure",
"if",
"len",
"(",
"x_curr",
".",
"shape",
")",
"==",
"3",
"and",
"x_curr",
".",
"shape",
"[",
"2",
"]",
"==",
"1",
":",
"x_curr",
"=",
"x_curr",
".",
"reshape",
"(",
"x_curr",
".",
"shape",
"[",
":",
"2",
"]",
")",
"if",
"x_curr",
".",
"max",
"(",
")",
">",
"1",
":",
"x_curr",
"/=",
"255.",
"# get a grayscale version of the image",
"if",
"len",
"(",
"x_curr",
".",
"shape",
")",
"==",
"3",
"and",
"x_curr",
".",
"shape",
"[",
"2",
"]",
"==",
"3",
":",
"x_curr_gray",
"=",
"(",
"0.2989",
"*",
"x_curr",
"[",
":",
",",
":",
",",
"0",
"]",
"+",
"0.5870",
"*",
"x_curr",
"[",
":",
",",
":",
",",
"1",
"]",
"+",
"0.1140",
"*",
"x_curr",
"[",
":",
",",
":",
",",
"2",
"]",
")",
"# rgb to gray",
"else",
":",
"x_curr_gray",
"=",
"x_curr",
"axes",
"[",
"row",
",",
"0",
"]",
".",
"imshow",
"(",
"x_curr",
",",
"cmap",
"=",
"pl",
".",
"get_cmap",
"(",
"'gray'",
")",
")",
"axes",
"[",
"row",
",",
"0",
"]",
".",
"axis",
"(",
"'off'",
")",
"if",
"len",
"(",
"shap_values",
"[",
"0",
"]",
"[",
"row",
"]",
".",
"shape",
")",
"==",
"2",
":",
"abs_vals",
"=",
"np",
".",
"stack",
"(",
"[",
"np",
".",
"abs",
"(",
"shap_values",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"shap_values",
")",
")",
"]",
",",
"0",
")",
".",
"flatten",
"(",
")",
"else",
":",
"abs_vals",
"=",
"np",
".",
"stack",
"(",
"[",
"np",
".",
"abs",
"(",
"shap_values",
"[",
"i",
"]",
".",
"sum",
"(",
"-",
"1",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"shap_values",
")",
")",
"]",
",",
"0",
")",
".",
"flatten",
"(",
")",
"max_val",
"=",
"np",
".",
"nanpercentile",
"(",
"abs_vals",
",",
"99.9",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"shap_values",
")",
")",
":",
"if",
"labels",
"is",
"not",
"None",
":",
"axes",
"[",
"row",
",",
"i",
"+",
"1",
"]",
".",
"set_title",
"(",
"labels",
"[",
"row",
",",
"i",
"]",
",",
"*",
"*",
"label_kwargs",
")",
"sv",
"=",
"shap_values",
"[",
"i",
"]",
"[",
"row",
"]",
"if",
"len",
"(",
"shap_values",
"[",
"i",
"]",
"[",
"row",
"]",
".",
"shape",
")",
"==",
"2",
"else",
"shap_values",
"[",
"i",
"]",
"[",
"row",
"]",
".",
"sum",
"(",
"-",
"1",
")",
"axes",
"[",
"row",
",",
"i",
"+",
"1",
"]",
".",
"imshow",
"(",
"x_curr_gray",
",",
"cmap",
"=",
"pl",
".",
"get_cmap",
"(",
"'gray'",
")",
",",
"alpha",
"=",
"0.15",
",",
"extent",
"=",
"(",
"-",
"1",
",",
"sv",
".",
"shape",
"[",
"0",
"]",
",",
"sv",
".",
"shape",
"[",
"1",
"]",
",",
"-",
"1",
")",
")",
"im",
"=",
"axes",
"[",
"row",
",",
"i",
"+",
"1",
"]",
".",
"imshow",
"(",
"sv",
",",
"cmap",
"=",
"colors",
".",
"red_transparent_blue",
",",
"vmin",
"=",
"-",
"max_val",
",",
"vmax",
"=",
"max_val",
")",
"axes",
"[",
"row",
",",
"i",
"+",
"1",
"]",
".",
"axis",
"(",
"'off'",
")",
"if",
"hspace",
"==",
"'auto'",
":",
"fig",
".",
"tight_layout",
"(",
")",
"else",
":",
"fig",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"hspace",
")",
"cb",
"=",
"fig",
".",
"colorbar",
"(",
"im",
",",
"ax",
"=",
"np",
".",
"ravel",
"(",
"axes",
")",
".",
"tolist",
"(",
")",
",",
"label",
"=",
"\"SHAP value\"",
",",
"orientation",
"=",
"\"horizontal\"",
",",
"aspect",
"=",
"fig_size",
"[",
"0",
"]",
"/",
"aspect",
")",
"cb",
".",
"outline",
".",
"set_visible",
"(",
"False",
")",
"if",
"show",
":",
"pl",
".",
"show",
"(",
")"
] |
Plots SHAP values for image inputs.
|
[
"Plots",
"SHAP",
"values",
"for",
"image",
"inputs",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/image.py#L10-L72
|
train
|
slundberg/shap
|
shap/common.py
|
hclust_ordering
|
def hclust_ordering(X, metric="sqeuclidean"):
""" A leaf ordering is under-defined, this picks the ordering that keeps nearby samples similar.
"""
# compute a hierarchical clustering
D = sp.spatial.distance.pdist(X, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
# merge clusters, rotating them to make the end points match as best we can
sets = [[i] for i in range(X.shape[0])]
for i in range(cluster_matrix.shape[0]):
s1 = sets[int(cluster_matrix[i,0])]
s2 = sets[int(cluster_matrix[i,1])]
# compute distances between the end points of the lists
d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0]
d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0]
d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0]
d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0]
# concatenete the lists in the way the minimizes the difference between
# the samples at the junction
best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r)
if best == d_s1_s2:
sets.append(s1 + s2)
elif best == d_s2_s1:
sets.append(s2 + s1)
elif best == d_s1r_s2:
sets.append(list(reversed(s1)) + s2)
else:
sets.append(s1 + list(reversed(s2)))
return sets[-1]
|
python
|
def hclust_ordering(X, metric="sqeuclidean"):
""" A leaf ordering is under-defined, this picks the ordering that keeps nearby samples similar.
"""
# compute a hierarchical clustering
D = sp.spatial.distance.pdist(X, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
# merge clusters, rotating them to make the end points match as best we can
sets = [[i] for i in range(X.shape[0])]
for i in range(cluster_matrix.shape[0]):
s1 = sets[int(cluster_matrix[i,0])]
s2 = sets[int(cluster_matrix[i,1])]
# compute distances between the end points of the lists
d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0]
d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0]
d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0]
d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0]
# concatenete the lists in the way the minimizes the difference between
# the samples at the junction
best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r)
if best == d_s1_s2:
sets.append(s1 + s2)
elif best == d_s2_s1:
sets.append(s2 + s1)
elif best == d_s1r_s2:
sets.append(list(reversed(s1)) + s2)
else:
sets.append(s1 + list(reversed(s2)))
return sets[-1]
|
[
"def",
"hclust_ordering",
"(",
"X",
",",
"metric",
"=",
"\"sqeuclidean\"",
")",
":",
"# compute a hierarchical clustering",
"D",
"=",
"sp",
".",
"spatial",
".",
"distance",
".",
"pdist",
"(",
"X",
",",
"metric",
")",
"cluster_matrix",
"=",
"sp",
".",
"cluster",
".",
"hierarchy",
".",
"complete",
"(",
"D",
")",
"# merge clusters, rotating them to make the end points match as best we can",
"sets",
"=",
"[",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"cluster_matrix",
".",
"shape",
"[",
"0",
"]",
")",
":",
"s1",
"=",
"sets",
"[",
"int",
"(",
"cluster_matrix",
"[",
"i",
",",
"0",
"]",
")",
"]",
"s2",
"=",
"sets",
"[",
"int",
"(",
"cluster_matrix",
"[",
"i",
",",
"1",
"]",
")",
"]",
"# compute distances between the end points of the lists",
"d_s1_s2",
"=",
"pdist",
"(",
"np",
".",
"vstack",
"(",
"[",
"X",
"[",
"s1",
"[",
"-",
"1",
"]",
",",
":",
"]",
",",
"X",
"[",
"s2",
"[",
"0",
"]",
",",
":",
"]",
"]",
")",
",",
"metric",
")",
"[",
"0",
"]",
"d_s2_s1",
"=",
"pdist",
"(",
"np",
".",
"vstack",
"(",
"[",
"X",
"[",
"s1",
"[",
"0",
"]",
",",
":",
"]",
",",
"X",
"[",
"s2",
"[",
"-",
"1",
"]",
",",
":",
"]",
"]",
")",
",",
"metric",
")",
"[",
"0",
"]",
"d_s1r_s2",
"=",
"pdist",
"(",
"np",
".",
"vstack",
"(",
"[",
"X",
"[",
"s1",
"[",
"0",
"]",
",",
":",
"]",
",",
"X",
"[",
"s2",
"[",
"0",
"]",
",",
":",
"]",
"]",
")",
",",
"metric",
")",
"[",
"0",
"]",
"d_s1_s2r",
"=",
"pdist",
"(",
"np",
".",
"vstack",
"(",
"[",
"X",
"[",
"s1",
"[",
"-",
"1",
"]",
",",
":",
"]",
",",
"X",
"[",
"s2",
"[",
"-",
"1",
"]",
",",
":",
"]",
"]",
")",
",",
"metric",
")",
"[",
"0",
"]",
"# concatenete the lists in the way the minimizes the difference between",
"# the samples at the junction",
"best",
"=",
"min",
"(",
"d_s1_s2",
",",
"d_s2_s1",
",",
"d_s1r_s2",
",",
"d_s1_s2r",
")",
"if",
"best",
"==",
"d_s1_s2",
":",
"sets",
".",
"append",
"(",
"s1",
"+",
"s2",
")",
"elif",
"best",
"==",
"d_s2_s1",
":",
"sets",
".",
"append",
"(",
"s2",
"+",
"s1",
")",
"elif",
"best",
"==",
"d_s1r_s2",
":",
"sets",
".",
"append",
"(",
"list",
"(",
"reversed",
"(",
"s1",
")",
")",
"+",
"s2",
")",
"else",
":",
"sets",
".",
"append",
"(",
"s1",
"+",
"list",
"(",
"reversed",
"(",
"s2",
")",
")",
")",
"return",
"sets",
"[",
"-",
"1",
"]"
] |
A leaf ordering is under-defined, this picks the ordering that keeps nearby samples similar.
|
[
"A",
"leaf",
"ordering",
"is",
"under",
"-",
"defined",
"this",
"picks",
"the",
"ordering",
"that",
"keeps",
"nearby",
"samples",
"similar",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/common.py#L215-L247
|
train
|
slundberg/shap
|
shap/common.py
|
approximate_interactions
|
def approximate_interactions(index, shap_values, X, feature_names=None):
""" Order other features by how much interaction they seem to have with the feature at the given index.
This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
index values for SHAP see the interaction_contribs option implemented in XGBoost.
"""
# convert from DataFrames if we got any
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = X.columns
X = X.values
index = convert_name(index, shap_values, feature_names)
if X.shape[0] > 10000:
a = np.arange(X.shape[0])
np.random.shuffle(a)
inds = a[:10000]
else:
inds = np.arange(X.shape[0])
x = X[inds, index]
srt = np.argsort(x)
shap_ref = shap_values[inds, index]
shap_ref = shap_ref[srt]
inc = max(min(int(len(x) / 10.0), 50), 1)
interactions = []
for i in range(X.shape[1]):
val_other = X[inds, i][srt].astype(np.float)
v = 0.0
if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
for j in range(0, len(x), inc):
if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
val_v = v
val_other = np.isnan(X[inds, i][srt].astype(np.float))
v = 0.0
if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
for j in range(0, len(x), inc):
if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
nan_v = v
interactions.append(max(val_v, nan_v))
return np.argsort(-np.abs(interactions))
|
python
|
def approximate_interactions(index, shap_values, X, feature_names=None):
""" Order other features by how much interaction they seem to have with the feature at the given index.
This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
index values for SHAP see the interaction_contribs option implemented in XGBoost.
"""
# convert from DataFrames if we got any
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = X.columns
X = X.values
index = convert_name(index, shap_values, feature_names)
if X.shape[0] > 10000:
a = np.arange(X.shape[0])
np.random.shuffle(a)
inds = a[:10000]
else:
inds = np.arange(X.shape[0])
x = X[inds, index]
srt = np.argsort(x)
shap_ref = shap_values[inds, index]
shap_ref = shap_ref[srt]
inc = max(min(int(len(x) / 10.0), 50), 1)
interactions = []
for i in range(X.shape[1]):
val_other = X[inds, i][srt].astype(np.float)
v = 0.0
if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
for j in range(0, len(x), inc):
if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
val_v = v
val_other = np.isnan(X[inds, i][srt].astype(np.float))
v = 0.0
if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
for j in range(0, len(x), inc):
if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
nan_v = v
interactions.append(max(val_v, nan_v))
return np.argsort(-np.abs(interactions))
|
[
"def",
"approximate_interactions",
"(",
"index",
",",
"shap_values",
",",
"X",
",",
"feature_names",
"=",
"None",
")",
":",
"# convert from DataFrames if we got any",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"X",
".",
"columns",
"X",
"=",
"X",
".",
"values",
"index",
"=",
"convert_name",
"(",
"index",
",",
"shap_values",
",",
"feature_names",
")",
"if",
"X",
".",
"shape",
"[",
"0",
"]",
">",
"10000",
":",
"a",
"=",
"np",
".",
"arange",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"a",
")",
"inds",
"=",
"a",
"[",
":",
"10000",
"]",
"else",
":",
"inds",
"=",
"np",
".",
"arange",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"x",
"=",
"X",
"[",
"inds",
",",
"index",
"]",
"srt",
"=",
"np",
".",
"argsort",
"(",
"x",
")",
"shap_ref",
"=",
"shap_values",
"[",
"inds",
",",
"index",
"]",
"shap_ref",
"=",
"shap_ref",
"[",
"srt",
"]",
"inc",
"=",
"max",
"(",
"min",
"(",
"int",
"(",
"len",
"(",
"x",
")",
"/",
"10.0",
")",
",",
"50",
")",
",",
"1",
")",
"interactions",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
":",
"val_other",
"=",
"X",
"[",
"inds",
",",
"i",
"]",
"[",
"srt",
"]",
".",
"astype",
"(",
"np",
".",
"float",
")",
"v",
"=",
"0.0",
"if",
"not",
"(",
"i",
"==",
"index",
"or",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"val_other",
")",
")",
"<",
"1e-8",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"x",
")",
",",
"inc",
")",
":",
"if",
"np",
".",
"std",
"(",
"val_other",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
")",
">",
"0",
"and",
"np",
".",
"std",
"(",
"shap_ref",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
")",
">",
"0",
":",
"v",
"+=",
"abs",
"(",
"np",
".",
"corrcoef",
"(",
"shap_ref",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
",",
"val_other",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
")",
"[",
"0",
",",
"1",
"]",
")",
"val_v",
"=",
"v",
"val_other",
"=",
"np",
".",
"isnan",
"(",
"X",
"[",
"inds",
",",
"i",
"]",
"[",
"srt",
"]",
".",
"astype",
"(",
"np",
".",
"float",
")",
")",
"v",
"=",
"0.0",
"if",
"not",
"(",
"i",
"==",
"index",
"or",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"val_other",
")",
")",
"<",
"1e-8",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"x",
")",
",",
"inc",
")",
":",
"if",
"np",
".",
"std",
"(",
"val_other",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
")",
">",
"0",
"and",
"np",
".",
"std",
"(",
"shap_ref",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
")",
">",
"0",
":",
"v",
"+=",
"abs",
"(",
"np",
".",
"corrcoef",
"(",
"shap_ref",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
",",
"val_other",
"[",
"j",
":",
"j",
"+",
"inc",
"]",
")",
"[",
"0",
",",
"1",
"]",
")",
"nan_v",
"=",
"v",
"interactions",
".",
"append",
"(",
"max",
"(",
"val_v",
",",
"nan_v",
")",
")",
"return",
"np",
".",
"argsort",
"(",
"-",
"np",
".",
"abs",
"(",
"interactions",
")",
")"
] |
Order other features by how much interaction they seem to have with the feature at the given index.
This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
index values for SHAP see the interaction_contribs option implemented in XGBoost.
|
[
"Order",
"other",
"features",
"by",
"how",
"much",
"interaction",
"they",
"seem",
"to",
"have",
"with",
"the",
"feature",
"at",
"the",
"given",
"index",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/common.py#L271-L318
|
train
|
slundberg/shap
|
shap/benchmark/plots.py
|
_human_score_map
|
def _human_score_map(human_consensus, methods_attrs):
""" Converts human agreement differences to numerical scores for coloring.
"""
v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0)
return v
|
python
|
def _human_score_map(human_consensus, methods_attrs):
""" Converts human agreement differences to numerical scores for coloring.
"""
v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0)
return v
|
[
"def",
"_human_score_map",
"(",
"human_consensus",
",",
"methods_attrs",
")",
":",
"v",
"=",
"1",
"-",
"min",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"methods_attrs",
"-",
"human_consensus",
")",
")",
"/",
"(",
"np",
".",
"abs",
"(",
"human_consensus",
")",
".",
"sum",
"(",
")",
"+",
"1",
")",
",",
"1.0",
")",
"return",
"v"
] |
Converts human agreement differences to numerical scores for coloring.
|
[
"Converts",
"human",
"agreement",
"differences",
"to",
"numerical",
"scores",
"for",
"coloring",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/plots.py#L370-L375
|
train
|
slundberg/shap
|
shap/plots/force_matplotlib.py
|
draw_bars
|
def draw_bars(out_value, features, feature_type, width_separators, width_bar):
"""Draw the bars and separators."""
rectangle_list = []
separator_list = []
pre_val = out_value
for index, features in zip(range(len(features)), features):
if feature_type == 'positive':
left_bound = float(features[0])
right_bound = pre_val
pre_val = left_bound
separator_indent = np.abs(width_separators)
separator_pos = left_bound
colors = ['#FF0D57', '#FFC3D5']
else:
left_bound = pre_val
right_bound = float(features[0])
pre_val = right_bound
separator_indent = - np.abs(width_separators)
separator_pos = right_bound
colors = ['#1E88E5', '#D1E6FA']
# Create rectangle
if index == 0:
if feature_type == 'positive':
points_rectangle = [[left_bound, 0],
[right_bound, 0],
[right_bound, width_bar],
[left_bound, width_bar],
[left_bound + separator_indent, (width_bar / 2)]
]
else:
points_rectangle = [[right_bound, 0],
[left_bound, 0],
[left_bound, width_bar],
[right_bound, width_bar],
[right_bound + separator_indent, (width_bar / 2)]
]
else:
points_rectangle = [[left_bound, 0],
[right_bound, 0],
[right_bound + separator_indent * 0.90, (width_bar / 2)],
[right_bound, width_bar],
[left_bound, width_bar],
[left_bound + separator_indent * 0.90, (width_bar / 2)]]
line = plt.Polygon(points_rectangle, closed=True, fill=True,
facecolor=colors[0], linewidth=0)
rectangle_list += [line]
# Create seperator
points_separator = [[separator_pos, 0],
[separator_pos + separator_indent, (width_bar / 2)],
[separator_pos, width_bar]]
line = plt.Polygon(points_separator, closed=None, fill=None,
edgecolor=colors[1], lw=3)
separator_list += [line]
return rectangle_list, separator_list
|
python
|
def draw_bars(out_value, features, feature_type, width_separators, width_bar):
"""Draw the bars and separators."""
rectangle_list = []
separator_list = []
pre_val = out_value
for index, features in zip(range(len(features)), features):
if feature_type == 'positive':
left_bound = float(features[0])
right_bound = pre_val
pre_val = left_bound
separator_indent = np.abs(width_separators)
separator_pos = left_bound
colors = ['#FF0D57', '#FFC3D5']
else:
left_bound = pre_val
right_bound = float(features[0])
pre_val = right_bound
separator_indent = - np.abs(width_separators)
separator_pos = right_bound
colors = ['#1E88E5', '#D1E6FA']
# Create rectangle
if index == 0:
if feature_type == 'positive':
points_rectangle = [[left_bound, 0],
[right_bound, 0],
[right_bound, width_bar],
[left_bound, width_bar],
[left_bound + separator_indent, (width_bar / 2)]
]
else:
points_rectangle = [[right_bound, 0],
[left_bound, 0],
[left_bound, width_bar],
[right_bound, width_bar],
[right_bound + separator_indent, (width_bar / 2)]
]
else:
points_rectangle = [[left_bound, 0],
[right_bound, 0],
[right_bound + separator_indent * 0.90, (width_bar / 2)],
[right_bound, width_bar],
[left_bound, width_bar],
[left_bound + separator_indent * 0.90, (width_bar / 2)]]
line = plt.Polygon(points_rectangle, closed=True, fill=True,
facecolor=colors[0], linewidth=0)
rectangle_list += [line]
# Create seperator
points_separator = [[separator_pos, 0],
[separator_pos + separator_indent, (width_bar / 2)],
[separator_pos, width_bar]]
line = plt.Polygon(points_separator, closed=None, fill=None,
edgecolor=colors[1], lw=3)
separator_list += [line]
return rectangle_list, separator_list
|
[
"def",
"draw_bars",
"(",
"out_value",
",",
"features",
",",
"feature_type",
",",
"width_separators",
",",
"width_bar",
")",
":",
"rectangle_list",
"=",
"[",
"]",
"separator_list",
"=",
"[",
"]",
"pre_val",
"=",
"out_value",
"for",
"index",
",",
"features",
"in",
"zip",
"(",
"range",
"(",
"len",
"(",
"features",
")",
")",
",",
"features",
")",
":",
"if",
"feature_type",
"==",
"'positive'",
":",
"left_bound",
"=",
"float",
"(",
"features",
"[",
"0",
"]",
")",
"right_bound",
"=",
"pre_val",
"pre_val",
"=",
"left_bound",
"separator_indent",
"=",
"np",
".",
"abs",
"(",
"width_separators",
")",
"separator_pos",
"=",
"left_bound",
"colors",
"=",
"[",
"'#FF0D57'",
",",
"'#FFC3D5'",
"]",
"else",
":",
"left_bound",
"=",
"pre_val",
"right_bound",
"=",
"float",
"(",
"features",
"[",
"0",
"]",
")",
"pre_val",
"=",
"right_bound",
"separator_indent",
"=",
"-",
"np",
".",
"abs",
"(",
"width_separators",
")",
"separator_pos",
"=",
"right_bound",
"colors",
"=",
"[",
"'#1E88E5'",
",",
"'#D1E6FA'",
"]",
"# Create rectangle",
"if",
"index",
"==",
"0",
":",
"if",
"feature_type",
"==",
"'positive'",
":",
"points_rectangle",
"=",
"[",
"[",
"left_bound",
",",
"0",
"]",
",",
"[",
"right_bound",
",",
"0",
"]",
",",
"[",
"right_bound",
",",
"width_bar",
"]",
",",
"[",
"left_bound",
",",
"width_bar",
"]",
",",
"[",
"left_bound",
"+",
"separator_indent",
",",
"(",
"width_bar",
"/",
"2",
")",
"]",
"]",
"else",
":",
"points_rectangle",
"=",
"[",
"[",
"right_bound",
",",
"0",
"]",
",",
"[",
"left_bound",
",",
"0",
"]",
",",
"[",
"left_bound",
",",
"width_bar",
"]",
",",
"[",
"right_bound",
",",
"width_bar",
"]",
",",
"[",
"right_bound",
"+",
"separator_indent",
",",
"(",
"width_bar",
"/",
"2",
")",
"]",
"]",
"else",
":",
"points_rectangle",
"=",
"[",
"[",
"left_bound",
",",
"0",
"]",
",",
"[",
"right_bound",
",",
"0",
"]",
",",
"[",
"right_bound",
"+",
"separator_indent",
"*",
"0.90",
",",
"(",
"width_bar",
"/",
"2",
")",
"]",
",",
"[",
"right_bound",
",",
"width_bar",
"]",
",",
"[",
"left_bound",
",",
"width_bar",
"]",
",",
"[",
"left_bound",
"+",
"separator_indent",
"*",
"0.90",
",",
"(",
"width_bar",
"/",
"2",
")",
"]",
"]",
"line",
"=",
"plt",
".",
"Polygon",
"(",
"points_rectangle",
",",
"closed",
"=",
"True",
",",
"fill",
"=",
"True",
",",
"facecolor",
"=",
"colors",
"[",
"0",
"]",
",",
"linewidth",
"=",
"0",
")",
"rectangle_list",
"+=",
"[",
"line",
"]",
"# Create seperator",
"points_separator",
"=",
"[",
"[",
"separator_pos",
",",
"0",
"]",
",",
"[",
"separator_pos",
"+",
"separator_indent",
",",
"(",
"width_bar",
"/",
"2",
")",
"]",
",",
"[",
"separator_pos",
",",
"width_bar",
"]",
"]",
"line",
"=",
"plt",
".",
"Polygon",
"(",
"points_separator",
",",
"closed",
"=",
"None",
",",
"fill",
"=",
"None",
",",
"edgecolor",
"=",
"colors",
"[",
"1",
"]",
",",
"lw",
"=",
"3",
")",
"separator_list",
"+=",
"[",
"line",
"]",
"return",
"rectangle_list",
",",
"separator_list"
] |
Draw the bars and separators.
|
[
"Draw",
"the",
"bars",
"and",
"separators",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force_matplotlib.py#L15-L77
|
train
|
slundberg/shap
|
shap/plots/force_matplotlib.py
|
format_data
|
def format_data(data):
"""Format data."""
# Format negative features
neg_features = np.array([[data['features'][x]['effect'],
data['features'][x]['value'],
data['featureNames'][x]]
for x in data['features'].keys() if data['features'][x]['effect'] < 0])
neg_features = np.array(sorted(neg_features, key=lambda x: float(x[0]), reverse=False))
# Format postive features
pos_features = np.array([[data['features'][x]['effect'],
data['features'][x]['value'],
data['featureNames'][x]]
for x in data['features'].keys() if data['features'][x]['effect'] >= 0])
pos_features = np.array(sorted(pos_features, key=lambda x: float(x[0]), reverse=True))
# Define link function
if data['link'] == 'identity':
convert_func = lambda x: x
elif data['link'] == 'logit':
convert_func = lambda x: 1 / (1 + np.exp(-x))
else:
assert False, "ERROR: Unrecognized link function: " + str(data['link'])
# Convert negative feature values to plot values
neg_val = data['outValue']
for i in neg_features:
val = float(i[0])
neg_val = neg_val + np.abs(val)
i[0] = convert_func(neg_val)
if len(neg_features) > 0:
total_neg = np.max(neg_features[:, 0].astype(float)) - \
np.min(neg_features[:, 0].astype(float))
else:
total_neg = 0
# Convert positive feature values to plot values
pos_val = data['outValue']
for i in pos_features:
val = float(i[0])
pos_val = pos_val - np.abs(val)
i[0] = convert_func(pos_val)
if len(pos_features) > 0:
total_pos = np.max(pos_features[:, 0].astype(float)) - \
np.min(pos_features[:, 0].astype(float))
else:
total_pos = 0
# Convert output value and base value
data['outValue'] = convert_func(data['outValue'])
data['baseValue'] = convert_func(data['baseValue'])
return neg_features, total_neg, pos_features, total_pos
|
python
|
def format_data(data):
"""Format data."""
# Format negative features
neg_features = np.array([[data['features'][x]['effect'],
data['features'][x]['value'],
data['featureNames'][x]]
for x in data['features'].keys() if data['features'][x]['effect'] < 0])
neg_features = np.array(sorted(neg_features, key=lambda x: float(x[0]), reverse=False))
# Format postive features
pos_features = np.array([[data['features'][x]['effect'],
data['features'][x]['value'],
data['featureNames'][x]]
for x in data['features'].keys() if data['features'][x]['effect'] >= 0])
pos_features = np.array(sorted(pos_features, key=lambda x: float(x[0]), reverse=True))
# Define link function
if data['link'] == 'identity':
convert_func = lambda x: x
elif data['link'] == 'logit':
convert_func = lambda x: 1 / (1 + np.exp(-x))
else:
assert False, "ERROR: Unrecognized link function: " + str(data['link'])
# Convert negative feature values to plot values
neg_val = data['outValue']
for i in neg_features:
val = float(i[0])
neg_val = neg_val + np.abs(val)
i[0] = convert_func(neg_val)
if len(neg_features) > 0:
total_neg = np.max(neg_features[:, 0].astype(float)) - \
np.min(neg_features[:, 0].astype(float))
else:
total_neg = 0
# Convert positive feature values to plot values
pos_val = data['outValue']
for i in pos_features:
val = float(i[0])
pos_val = pos_val - np.abs(val)
i[0] = convert_func(pos_val)
if len(pos_features) > 0:
total_pos = np.max(pos_features[:, 0].astype(float)) - \
np.min(pos_features[:, 0].astype(float))
else:
total_pos = 0
# Convert output value and base value
data['outValue'] = convert_func(data['outValue'])
data['baseValue'] = convert_func(data['baseValue'])
return neg_features, total_neg, pos_features, total_pos
|
[
"def",
"format_data",
"(",
"data",
")",
":",
"# Format negative features",
"neg_features",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"data",
"[",
"'features'",
"]",
"[",
"x",
"]",
"[",
"'effect'",
"]",
",",
"data",
"[",
"'features'",
"]",
"[",
"x",
"]",
"[",
"'value'",
"]",
",",
"data",
"[",
"'featureNames'",
"]",
"[",
"x",
"]",
"]",
"for",
"x",
"in",
"data",
"[",
"'features'",
"]",
".",
"keys",
"(",
")",
"if",
"data",
"[",
"'features'",
"]",
"[",
"x",
"]",
"[",
"'effect'",
"]",
"<",
"0",
"]",
")",
"neg_features",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"neg_features",
",",
"key",
"=",
"lambda",
"x",
":",
"float",
"(",
"x",
"[",
"0",
"]",
")",
",",
"reverse",
"=",
"False",
")",
")",
"# Format postive features",
"pos_features",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"data",
"[",
"'features'",
"]",
"[",
"x",
"]",
"[",
"'effect'",
"]",
",",
"data",
"[",
"'features'",
"]",
"[",
"x",
"]",
"[",
"'value'",
"]",
",",
"data",
"[",
"'featureNames'",
"]",
"[",
"x",
"]",
"]",
"for",
"x",
"in",
"data",
"[",
"'features'",
"]",
".",
"keys",
"(",
")",
"if",
"data",
"[",
"'features'",
"]",
"[",
"x",
"]",
"[",
"'effect'",
"]",
">=",
"0",
"]",
")",
"pos_features",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"pos_features",
",",
"key",
"=",
"lambda",
"x",
":",
"float",
"(",
"x",
"[",
"0",
"]",
")",
",",
"reverse",
"=",
"True",
")",
")",
"# Define link function",
"if",
"data",
"[",
"'link'",
"]",
"==",
"'identity'",
":",
"convert_func",
"=",
"lambda",
"x",
":",
"x",
"elif",
"data",
"[",
"'link'",
"]",
"==",
"'logit'",
":",
"convert_func",
"=",
"lambda",
"x",
":",
"1",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"-",
"x",
")",
")",
"else",
":",
"assert",
"False",
",",
"\"ERROR: Unrecognized link function: \"",
"+",
"str",
"(",
"data",
"[",
"'link'",
"]",
")",
"# Convert negative feature values to plot values",
"neg_val",
"=",
"data",
"[",
"'outValue'",
"]",
"for",
"i",
"in",
"neg_features",
":",
"val",
"=",
"float",
"(",
"i",
"[",
"0",
"]",
")",
"neg_val",
"=",
"neg_val",
"+",
"np",
".",
"abs",
"(",
"val",
")",
"i",
"[",
"0",
"]",
"=",
"convert_func",
"(",
"neg_val",
")",
"if",
"len",
"(",
"neg_features",
")",
">",
"0",
":",
"total_neg",
"=",
"np",
".",
"max",
"(",
"neg_features",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"float",
")",
")",
"-",
"np",
".",
"min",
"(",
"neg_features",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"float",
")",
")",
"else",
":",
"total_neg",
"=",
"0",
"# Convert positive feature values to plot values",
"pos_val",
"=",
"data",
"[",
"'outValue'",
"]",
"for",
"i",
"in",
"pos_features",
":",
"val",
"=",
"float",
"(",
"i",
"[",
"0",
"]",
")",
"pos_val",
"=",
"pos_val",
"-",
"np",
".",
"abs",
"(",
"val",
")",
"i",
"[",
"0",
"]",
"=",
"convert_func",
"(",
"pos_val",
")",
"if",
"len",
"(",
"pos_features",
")",
">",
"0",
":",
"total_pos",
"=",
"np",
".",
"max",
"(",
"pos_features",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"float",
")",
")",
"-",
"np",
".",
"min",
"(",
"pos_features",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"float",
")",
")",
"else",
":",
"total_pos",
"=",
"0",
"# Convert output value and base value",
"data",
"[",
"'outValue'",
"]",
"=",
"convert_func",
"(",
"data",
"[",
"'outValue'",
"]",
")",
"data",
"[",
"'baseValue'",
"]",
"=",
"convert_func",
"(",
"data",
"[",
"'baseValue'",
"]",
")",
"return",
"neg_features",
",",
"total_neg",
",",
"pos_features",
",",
"total_pos"
] |
Format data.
|
[
"Format",
"data",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force_matplotlib.py#L199-L253
|
train
|
slundberg/shap
|
shap/plots/force_matplotlib.py
|
draw_additive_plot
|
def draw_additive_plot(data, figsize, show, text_rotation=0):
"""Draw additive plot."""
# Turn off interactive plot
if show == False:
plt.ioff()
# Format data
neg_features, total_neg, pos_features, total_pos = format_data(data)
# Compute overall metrics
base_value = data['baseValue']
out_value = data['outValue']
offset_text = (np.abs(total_neg) + np.abs(total_pos)) * 0.04
# Define plots
fig, ax = plt.subplots(figsize=figsize)
# Compute axis limit
update_axis_limits(ax, total_pos, pos_features, total_neg,
neg_features, base_value)
# Define width of bar
width_bar = 0.1
width_separators = (ax.get_xlim()[1] - ax.get_xlim()[0]) / 200
# Create bar for negative shap values
rectangle_list, separator_list = draw_bars(out_value, neg_features, 'negative',
width_separators, width_bar)
for i in rectangle_list:
ax.add_patch(i)
for i in separator_list:
ax.add_patch(i)
# Create bar for positive shap values
rectangle_list, separator_list = draw_bars(out_value, pos_features, 'positive',
width_separators, width_bar)
for i in rectangle_list:
ax.add_patch(i)
for i in separator_list:
ax.add_patch(i)
# Add labels
total_effect = np.abs(total_neg) + total_pos
fig, ax = draw_labels(fig, ax, out_value, neg_features, 'negative',
offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation)
fig, ax = draw_labels(fig, ax, out_value, pos_features, 'positive',
offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation)
# higher lower legend
draw_higher_lower_element(out_value, offset_text)
# Add label for base value
draw_base_element(base_value, ax)
# Add output label
out_names = data['outNames'][0]
draw_output_element(out_names, out_value, ax)
if show:
plt.show()
else:
return plt.gcf()
|
python
|
def draw_additive_plot(data, figsize, show, text_rotation=0):
"""Draw additive plot."""
# Turn off interactive plot
if show == False:
plt.ioff()
# Format data
neg_features, total_neg, pos_features, total_pos = format_data(data)
# Compute overall metrics
base_value = data['baseValue']
out_value = data['outValue']
offset_text = (np.abs(total_neg) + np.abs(total_pos)) * 0.04
# Define plots
fig, ax = plt.subplots(figsize=figsize)
# Compute axis limit
update_axis_limits(ax, total_pos, pos_features, total_neg,
neg_features, base_value)
# Define width of bar
width_bar = 0.1
width_separators = (ax.get_xlim()[1] - ax.get_xlim()[0]) / 200
# Create bar for negative shap values
rectangle_list, separator_list = draw_bars(out_value, neg_features, 'negative',
width_separators, width_bar)
for i in rectangle_list:
ax.add_patch(i)
for i in separator_list:
ax.add_patch(i)
# Create bar for positive shap values
rectangle_list, separator_list = draw_bars(out_value, pos_features, 'positive',
width_separators, width_bar)
for i in rectangle_list:
ax.add_patch(i)
for i in separator_list:
ax.add_patch(i)
# Add labels
total_effect = np.abs(total_neg) + total_pos
fig, ax = draw_labels(fig, ax, out_value, neg_features, 'negative',
offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation)
fig, ax = draw_labels(fig, ax, out_value, pos_features, 'positive',
offset_text, total_effect, min_perc=0.05, text_rotation=text_rotation)
# higher lower legend
draw_higher_lower_element(out_value, offset_text)
# Add label for base value
draw_base_element(base_value, ax)
# Add output label
out_names = data['outNames'][0]
draw_output_element(out_names, out_value, ax)
if show:
plt.show()
else:
return plt.gcf()
|
[
"def",
"draw_additive_plot",
"(",
"data",
",",
"figsize",
",",
"show",
",",
"text_rotation",
"=",
"0",
")",
":",
"# Turn off interactive plot",
"if",
"show",
"==",
"False",
":",
"plt",
".",
"ioff",
"(",
")",
"# Format data",
"neg_features",
",",
"total_neg",
",",
"pos_features",
",",
"total_pos",
"=",
"format_data",
"(",
"data",
")",
"# Compute overall metrics",
"base_value",
"=",
"data",
"[",
"'baseValue'",
"]",
"out_value",
"=",
"data",
"[",
"'outValue'",
"]",
"offset_text",
"=",
"(",
"np",
".",
"abs",
"(",
"total_neg",
")",
"+",
"np",
".",
"abs",
"(",
"total_pos",
")",
")",
"*",
"0.04",
"# Define plots",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"figsize",
")",
"# Compute axis limit",
"update_axis_limits",
"(",
"ax",
",",
"total_pos",
",",
"pos_features",
",",
"total_neg",
",",
"neg_features",
",",
"base_value",
")",
"# Define width of bar",
"width_bar",
"=",
"0.1",
"width_separators",
"=",
"(",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"1",
"]",
"-",
"ax",
".",
"get_xlim",
"(",
")",
"[",
"0",
"]",
")",
"/",
"200",
"# Create bar for negative shap values",
"rectangle_list",
",",
"separator_list",
"=",
"draw_bars",
"(",
"out_value",
",",
"neg_features",
",",
"'negative'",
",",
"width_separators",
",",
"width_bar",
")",
"for",
"i",
"in",
"rectangle_list",
":",
"ax",
".",
"add_patch",
"(",
"i",
")",
"for",
"i",
"in",
"separator_list",
":",
"ax",
".",
"add_patch",
"(",
"i",
")",
"# Create bar for positive shap values",
"rectangle_list",
",",
"separator_list",
"=",
"draw_bars",
"(",
"out_value",
",",
"pos_features",
",",
"'positive'",
",",
"width_separators",
",",
"width_bar",
")",
"for",
"i",
"in",
"rectangle_list",
":",
"ax",
".",
"add_patch",
"(",
"i",
")",
"for",
"i",
"in",
"separator_list",
":",
"ax",
".",
"add_patch",
"(",
"i",
")",
"# Add labels",
"total_effect",
"=",
"np",
".",
"abs",
"(",
"total_neg",
")",
"+",
"total_pos",
"fig",
",",
"ax",
"=",
"draw_labels",
"(",
"fig",
",",
"ax",
",",
"out_value",
",",
"neg_features",
",",
"'negative'",
",",
"offset_text",
",",
"total_effect",
",",
"min_perc",
"=",
"0.05",
",",
"text_rotation",
"=",
"text_rotation",
")",
"fig",
",",
"ax",
"=",
"draw_labels",
"(",
"fig",
",",
"ax",
",",
"out_value",
",",
"pos_features",
",",
"'positive'",
",",
"offset_text",
",",
"total_effect",
",",
"min_perc",
"=",
"0.05",
",",
"text_rotation",
"=",
"text_rotation",
")",
"# higher lower legend",
"draw_higher_lower_element",
"(",
"out_value",
",",
"offset_text",
")",
"# Add label for base value",
"draw_base_element",
"(",
"base_value",
",",
"ax",
")",
"# Add output label",
"out_names",
"=",
"data",
"[",
"'outNames'",
"]",
"[",
"0",
"]",
"draw_output_element",
"(",
"out_names",
",",
"out_value",
",",
"ax",
")",
"if",
"show",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"return",
"plt",
".",
"gcf",
"(",
")"
] |
Draw additive plot.
|
[
"Draw",
"additive",
"plot",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force_matplotlib.py#L333-L397
|
train
|
slundberg/shap
|
setup.py
|
try_run_setup
|
def try_run_setup(**kwargs):
""" Fails gracefully when various install steps don't work.
"""
try:
run_setup(**kwargs)
except Exception as e:
print(str(e))
if "xgboost" in str(e).lower():
kwargs["test_xgboost"] = False
print("Couldn't install XGBoost for testing!")
try_run_setup(**kwargs)
elif "lightgbm" in str(e).lower():
kwargs["test_lightgbm"] = False
print("Couldn't install LightGBM for testing!")
try_run_setup(**kwargs)
elif kwargs["with_binary"]:
kwargs["with_binary"] = False
print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
try_run_setup(**kwargs)
else:
print("ERROR: Failed to build!")
|
python
|
def try_run_setup(**kwargs):
""" Fails gracefully when various install steps don't work.
"""
try:
run_setup(**kwargs)
except Exception as e:
print(str(e))
if "xgboost" in str(e).lower():
kwargs["test_xgboost"] = False
print("Couldn't install XGBoost for testing!")
try_run_setup(**kwargs)
elif "lightgbm" in str(e).lower():
kwargs["test_lightgbm"] = False
print("Couldn't install LightGBM for testing!")
try_run_setup(**kwargs)
elif kwargs["with_binary"]:
kwargs["with_binary"] = False
print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
try_run_setup(**kwargs)
else:
print("ERROR: Failed to build!")
|
[
"def",
"try_run_setup",
"(",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"run_setup",
"(",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"if",
"\"xgboost\"",
"in",
"str",
"(",
"e",
")",
".",
"lower",
"(",
")",
":",
"kwargs",
"[",
"\"test_xgboost\"",
"]",
"=",
"False",
"print",
"(",
"\"Couldn't install XGBoost for testing!\"",
")",
"try_run_setup",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"\"lightgbm\"",
"in",
"str",
"(",
"e",
")",
".",
"lower",
"(",
")",
":",
"kwargs",
"[",
"\"test_lightgbm\"",
"]",
"=",
"False",
"print",
"(",
"\"Couldn't install LightGBM for testing!\"",
")",
"try_run_setup",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"kwargs",
"[",
"\"with_binary\"",
"]",
":",
"kwargs",
"[",
"\"with_binary\"",
"]",
"=",
"False",
"print",
"(",
"\"WARNING: The C extension could not be compiled, sklearn tree models not supported.\"",
")",
"try_run_setup",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"print",
"(",
"\"ERROR: Failed to build!\"",
")"
] |
Fails gracefully when various install steps don't work.
|
[
"Fails",
"gracefully",
"when",
"various",
"install",
"steps",
"don",
"t",
"work",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/setup.py#L101-L122
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_pytorch.py
|
deeplift_grad
|
def deeplift_grad(module, grad_input, grad_output):
"""The backward hook which computes the deeplift
gradient for an nn.Module
"""
# first, get the module type
module_type = module.__class__.__name__
# first, check the module is supported
if module_type in op_handler:
if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
return op_handler[module_type](module, grad_input, grad_output)
else:
print('Warning: unrecognized nn.Module: {}'.format(module_type))
return grad_input
|
python
|
def deeplift_grad(module, grad_input, grad_output):
"""The backward hook which computes the deeplift
gradient for an nn.Module
"""
# first, get the module type
module_type = module.__class__.__name__
# first, check the module is supported
if module_type in op_handler:
if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
return op_handler[module_type](module, grad_input, grad_output)
else:
print('Warning: unrecognized nn.Module: {}'.format(module_type))
return grad_input
|
[
"def",
"deeplift_grad",
"(",
"module",
",",
"grad_input",
",",
"grad_output",
")",
":",
"# first, get the module type",
"module_type",
"=",
"module",
".",
"__class__",
".",
"__name__",
"# first, check the module is supported",
"if",
"module_type",
"in",
"op_handler",
":",
"if",
"op_handler",
"[",
"module_type",
"]",
".",
"__name__",
"not",
"in",
"[",
"'passthrough'",
",",
"'linear_1d'",
"]",
":",
"return",
"op_handler",
"[",
"module_type",
"]",
"(",
"module",
",",
"grad_input",
",",
"grad_output",
")",
"else",
":",
"print",
"(",
"'Warning: unrecognized nn.Module: {}'",
".",
"format",
"(",
"module_type",
")",
")",
"return",
"grad_input"
] |
The backward hook which computes the deeplift
gradient for an nn.Module
|
[
"The",
"backward",
"hook",
"which",
"computes",
"the",
"deeplift",
"gradient",
"for",
"an",
"nn",
".",
"Module"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L194-L206
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_pytorch.py
|
add_interim_values
|
def add_interim_values(module, input, output):
"""The forward hook used to save interim tensors, detached
from the graph. Used to calculate the multipliers
"""
try:
del module.x
except AttributeError:
pass
try:
del module.y
except AttributeError:
pass
module_type = module.__class__.__name__
if module_type in op_handler:
func_name = op_handler[module_type].__name__
# First, check for cases where we don't need to save the x and y tensors
if func_name == 'passthrough':
pass
else:
# check only the 0th input varies
for i in range(len(input)):
if i != 0 and type(output) is tuple:
assert input[i] == output[i], "Only the 0th input may vary!"
# if a new method is added, it must be added here too. This ensures tensors
# are only saved if necessary
if func_name in ['maxpool', 'nonlinear_1d']:
# only save tensors if necessary
if type(input) is tuple:
setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
else:
setattr(module, 'x', torch.nn.Parameter(input.detach()))
if type(output) is tuple:
setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
else:
setattr(module, 'y', torch.nn.Parameter(output.detach()))
if module_type in failure_case_modules:
input[0].register_hook(deeplift_tensor_grad)
|
python
|
def add_interim_values(module, input, output):
"""The forward hook used to save interim tensors, detached
from the graph. Used to calculate the multipliers
"""
try:
del module.x
except AttributeError:
pass
try:
del module.y
except AttributeError:
pass
module_type = module.__class__.__name__
if module_type in op_handler:
func_name = op_handler[module_type].__name__
# First, check for cases where we don't need to save the x and y tensors
if func_name == 'passthrough':
pass
else:
# check only the 0th input varies
for i in range(len(input)):
if i != 0 and type(output) is tuple:
assert input[i] == output[i], "Only the 0th input may vary!"
# if a new method is added, it must be added here too. This ensures tensors
# are only saved if necessary
if func_name in ['maxpool', 'nonlinear_1d']:
# only save tensors if necessary
if type(input) is tuple:
setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
else:
setattr(module, 'x', torch.nn.Parameter(input.detach()))
if type(output) is tuple:
setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
else:
setattr(module, 'y', torch.nn.Parameter(output.detach()))
if module_type in failure_case_modules:
input[0].register_hook(deeplift_tensor_grad)
|
[
"def",
"add_interim_values",
"(",
"module",
",",
"input",
",",
"output",
")",
":",
"try",
":",
"del",
"module",
".",
"x",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"del",
"module",
".",
"y",
"except",
"AttributeError",
":",
"pass",
"module_type",
"=",
"module",
".",
"__class__",
".",
"__name__",
"if",
"module_type",
"in",
"op_handler",
":",
"func_name",
"=",
"op_handler",
"[",
"module_type",
"]",
".",
"__name__",
"# First, check for cases where we don't need to save the x and y tensors",
"if",
"func_name",
"==",
"'passthrough'",
":",
"pass",
"else",
":",
"# check only the 0th input varies",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"input",
")",
")",
":",
"if",
"i",
"!=",
"0",
"and",
"type",
"(",
"output",
")",
"is",
"tuple",
":",
"assert",
"input",
"[",
"i",
"]",
"==",
"output",
"[",
"i",
"]",
",",
"\"Only the 0th input may vary!\"",
"# if a new method is added, it must be added here too. This ensures tensors",
"# are only saved if necessary",
"if",
"func_name",
"in",
"[",
"'maxpool'",
",",
"'nonlinear_1d'",
"]",
":",
"# only save tensors if necessary",
"if",
"type",
"(",
"input",
")",
"is",
"tuple",
":",
"setattr",
"(",
"module",
",",
"'x'",
",",
"torch",
".",
"nn",
".",
"Parameter",
"(",
"input",
"[",
"0",
"]",
".",
"detach",
"(",
")",
")",
")",
"else",
":",
"setattr",
"(",
"module",
",",
"'x'",
",",
"torch",
".",
"nn",
".",
"Parameter",
"(",
"input",
".",
"detach",
"(",
")",
")",
")",
"if",
"type",
"(",
"output",
")",
"is",
"tuple",
":",
"setattr",
"(",
"module",
",",
"'y'",
",",
"torch",
".",
"nn",
".",
"Parameter",
"(",
"output",
"[",
"0",
"]",
".",
"detach",
"(",
")",
")",
")",
"else",
":",
"setattr",
"(",
"module",
",",
"'y'",
",",
"torch",
".",
"nn",
".",
"Parameter",
"(",
"output",
".",
"detach",
"(",
")",
")",
")",
"if",
"module_type",
"in",
"failure_case_modules",
":",
"input",
"[",
"0",
"]",
".",
"register_hook",
"(",
"deeplift_tensor_grad",
")"
] |
The forward hook used to save interim tensors, detached
from the graph. Used to calculate the multipliers
|
[
"The",
"forward",
"hook",
"used",
"to",
"save",
"interim",
"tensors",
"detached",
"from",
"the",
"graph",
".",
"Used",
"to",
"calculate",
"the",
"multipliers"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L209-L245
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_pytorch.py
|
get_target_input
|
def get_target_input(module, input, output):
"""A forward hook which saves the tensor - attached to its graph.
Used if we want to explain the interim outputs of a model
"""
try:
del module.target_input
except AttributeError:
pass
setattr(module, 'target_input', input)
|
python
|
def get_target_input(module, input, output):
"""A forward hook which saves the tensor - attached to its graph.
Used if we want to explain the interim outputs of a model
"""
try:
del module.target_input
except AttributeError:
pass
setattr(module, 'target_input', input)
|
[
"def",
"get_target_input",
"(",
"module",
",",
"input",
",",
"output",
")",
":",
"try",
":",
"del",
"module",
".",
"target_input",
"except",
"AttributeError",
":",
"pass",
"setattr",
"(",
"module",
",",
"'target_input'",
",",
"input",
")"
] |
A forward hook which saves the tensor - attached to its graph.
Used if we want to explain the interim outputs of a model
|
[
"A",
"forward",
"hook",
"which",
"saves",
"the",
"tensor",
"-",
"attached",
"to",
"its",
"graph",
".",
"Used",
"if",
"we",
"want",
"to",
"explain",
"the",
"interim",
"outputs",
"of",
"a",
"model"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L248-L256
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_pytorch.py
|
PyTorchDeepExplainer.add_handles
|
def add_handles(self, model, forward_handle, backward_handle):
"""
Add handles to all non-container layers in the model.
Recursively for non-container layers
"""
handles_list = []
for child in model.children():
if 'nn.modules.container' in str(type(child)):
handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
else:
handles_list.append(child.register_forward_hook(forward_handle))
handles_list.append(child.register_backward_hook(backward_handle))
return handles_list
|
python
|
def add_handles(self, model, forward_handle, backward_handle):
"""
Add handles to all non-container layers in the model.
Recursively for non-container layers
"""
handles_list = []
for child in model.children():
if 'nn.modules.container' in str(type(child)):
handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
else:
handles_list.append(child.register_forward_hook(forward_handle))
handles_list.append(child.register_backward_hook(backward_handle))
return handles_list
|
[
"def",
"add_handles",
"(",
"self",
",",
"model",
",",
"forward_handle",
",",
"backward_handle",
")",
":",
"handles_list",
"=",
"[",
"]",
"for",
"child",
"in",
"model",
".",
"children",
"(",
")",
":",
"if",
"'nn.modules.container'",
"in",
"str",
"(",
"type",
"(",
"child",
")",
")",
":",
"handles_list",
".",
"extend",
"(",
"self",
".",
"add_handles",
"(",
"child",
",",
"forward_handle",
",",
"backward_handle",
")",
")",
"else",
":",
"handles_list",
".",
"append",
"(",
"child",
".",
"register_forward_hook",
"(",
"forward_handle",
")",
")",
"handles_list",
".",
"append",
"(",
"child",
".",
"register_backward_hook",
"(",
"backward_handle",
")",
")",
"return",
"handles_list"
] |
Add handles to all non-container layers in the model.
Recursively for non-container layers
|
[
"Add",
"handles",
"to",
"all",
"non",
"-",
"container",
"layers",
"in",
"the",
"model",
".",
"Recursively",
"for",
"non",
"-",
"container",
"layers"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L64-L76
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_pytorch.py
|
PyTorchDeepExplainer.remove_attributes
|
def remove_attributes(self, model):
"""
Removes the x and y attributes which were added by the forward handles
Recursively searches for non-container layers
"""
for child in model.children():
if 'nn.modules.container' in str(type(child)):
self.remove_attributes(child)
else:
try:
del child.x
except AttributeError:
pass
try:
del child.y
except AttributeError:
pass
|
python
|
def remove_attributes(self, model):
"""
Removes the x and y attributes which were added by the forward handles
Recursively searches for non-container layers
"""
for child in model.children():
if 'nn.modules.container' in str(type(child)):
self.remove_attributes(child)
else:
try:
del child.x
except AttributeError:
pass
try:
del child.y
except AttributeError:
pass
|
[
"def",
"remove_attributes",
"(",
"self",
",",
"model",
")",
":",
"for",
"child",
"in",
"model",
".",
"children",
"(",
")",
":",
"if",
"'nn.modules.container'",
"in",
"str",
"(",
"type",
"(",
"child",
")",
")",
":",
"self",
".",
"remove_attributes",
"(",
"child",
")",
"else",
":",
"try",
":",
"del",
"child",
".",
"x",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"del",
"child",
".",
"y",
"except",
"AttributeError",
":",
"pass"
] |
Removes the x and y attributes which were added by the forward handles
Recursively searches for non-container layers
|
[
"Removes",
"the",
"x",
"and",
"y",
"attributes",
"which",
"were",
"added",
"by",
"the",
"forward",
"handles",
"Recursively",
"searches",
"for",
"non",
"-",
"container",
"layers"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L78-L94
|
train
|
slundberg/shap
|
shap/explainers/tree.py
|
get_xgboost_json
|
def get_xgboost_json(model):
""" This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
"""
fnames = model.feature_names
model.feature_names = None
json_trees = model.get_dump(with_stats=True, dump_format="json")
model.feature_names = fnames
# this fixes a bug where XGBoost can return invalid JSON
json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees]
json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees]
return json_trees
|
python
|
def get_xgboost_json(model):
""" This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
"""
fnames = model.feature_names
model.feature_names = None
json_trees = model.get_dump(with_stats=True, dump_format="json")
model.feature_names = fnames
# this fixes a bug where XGBoost can return invalid JSON
json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees]
json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees]
return json_trees
|
[
"def",
"get_xgboost_json",
"(",
"model",
")",
":",
"fnames",
"=",
"model",
".",
"feature_names",
"model",
".",
"feature_names",
"=",
"None",
"json_trees",
"=",
"model",
".",
"get_dump",
"(",
"with_stats",
"=",
"True",
",",
"dump_format",
"=",
"\"json\"",
")",
"model",
".",
"feature_names",
"=",
"fnames",
"# this fixes a bug where XGBoost can return invalid JSON",
"json_trees",
"=",
"[",
"t",
".",
"replace",
"(",
"\": inf,\"",
",",
"\": 1000000000000.0,\"",
")",
"for",
"t",
"in",
"json_trees",
"]",
"json_trees",
"=",
"[",
"t",
".",
"replace",
"(",
"\": -inf,\"",
",",
"\": -1000000000000.0,\"",
")",
"for",
"t",
"in",
"json_trees",
"]",
"return",
"json_trees"
] |
This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
|
[
"This",
"gets",
"a",
"JSON",
"dump",
"of",
"an",
"XGBoost",
"model",
"while",
"ensuring",
"the",
"features",
"names",
"are",
"their",
"indexes",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L907-L919
|
train
|
slundberg/shap
|
shap/explainers/tree.py
|
TreeExplainer.__dynamic_expected_value
|
def __dynamic_expected_value(self, y):
""" This computes the expected value conditioned on the given label value.
"""
return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0)
|
python
|
def __dynamic_expected_value(self, y):
""" This computes the expected value conditioned on the given label value.
"""
return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0)
|
[
"def",
"__dynamic_expected_value",
"(",
"self",
",",
"y",
")",
":",
"return",
"self",
".",
"model",
".",
"predict",
"(",
"self",
".",
"data",
",",
"np",
".",
"ones",
"(",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"y",
",",
"output",
"=",
"self",
".",
"model_output",
")",
".",
"mean",
"(",
"0",
")"
] |
This computes the expected value conditioned on the given label value.
|
[
"This",
"computes",
"the",
"expected",
"value",
"conditioned",
"on",
"the",
"given",
"label",
"value",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L125-L129
|
train
|
slundberg/shap
|
shap/explainers/tree.py
|
TreeExplainer.shap_values
|
def shap_values(self, X, y=None, tree_limit=None, approximate=False):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions.
tree_limit : None (default) or int
Limit the number of trees used by the model. By default None means no use the limit of the
original model, and -1 means no limit.
approximate : bool
Run fast, but only roughly approximate the Tree SHAP values. This runs a method
previously proposed by Saabas which only considers a single feature ordering. Take care
since this does not have the consistency guarantees of Shapley values and places too
much weight on lower splits in the tree.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored in the expected_value
attribute of the explainer when it is constant). For models with vector outputs this returns
a list of such matrices, one for each output.
"""
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit
# shortcut using the C++ version of Tree SHAP in XGBoost, LightGBM, and CatBoost
if self.feature_dependence == "tree_path_dependent" and self.model.model_type != "internal" and self.data is None:
phi = None
if self.model.model_type == "xgboost":
assert_import("xgboost")
if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
X = xgboost.DMatrix(X)
if tree_limit == -1:
tree_limit = 0
phi = self.model.original_model.predict(
X, ntree_limit=tree_limit, pred_contribs=True,
approx_contribs=approximate, validate_features=False
)
elif self.model.model_type == "lightgbm":
assert not approximate, "approximate=True is not supported for LightGBM models!"
phi = self.model.original_model.predict(X, num_iteration=tree_limit, pred_contrib=True)
if phi.shape[1] != X.shape[1] + 1:
phi = phi.reshape(X.shape[0], phi.shape[1]//(X.shape[1]+1), X.shape[1]+1)
elif self.model.model_type == "catboost": # thanks to the CatBoost team for implementing this...
assert not approximate, "approximate=True is not supported for CatBoost models!"
assert tree_limit == -1, "tree_limit is not yet supported for CatBoost models!"
if type(X) != catboost.Pool:
X = catboost.Pool(X)
phi = self.model.original_model.get_feature_importance(data=X, fstr_type='ShapValues')
# note we pull off the last column and keep it as our expected_value
if phi is not None:
if len(phi.shape) == 3:
self.expected_value = [phi[0, i, -1] for i in range(phi.shape[1])]
return [phi[:, i, :-1] for i in range(phi.shape[1])]
else:
self.expected_value = phi[0, -1]
return phi[:, :-1]
# convert dataframes
orig_X = X
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.model.dtype:
X = X.astype(self.model.dtype)
X_missing = np.isnan(X, dtype=np.bool)
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
tree_limit = self.model.values.shape[0]
if self.model_output == "logloss":
assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
transform = self.model.get_transform(self.model_output)
if self.feature_dependence == "tree_path_dependent":
assert self.model.fully_defined_weighting, "The background dataset you provided does not cover all the leaves in the model, " \
"so TreeExplainer cannot run with the feature_dependence=\"tree_path_dependent\" option! " \
"Try providing a larger background dataset, or using feature_dependence=\"independent\"."
# run the core algorithm using the C extension
assert_import("cext")
phi = np.zeros((X.shape[0], X.shape[1]+1, self.model.n_outputs))
if not approximate:
_cext.dense_tree_shap(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
self.model.base_offset, phi, feature_dependence_codes[self.feature_dependence],
output_transform_codes[transform], False
)
else:
_cext.dense_tree_saabas(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values,
self.model.max_depth, tree_limit, self.model.base_offset, output_transform_codes[transform],
X, X_missing, y, phi
)
# note we pull off the last column and keep it as our expected_value
if self.model.n_outputs == 1:
if self.model_output != "logloss":
self.expected_value = phi[0, -1, 0]
if flat_output:
return phi[0, :-1, 0]
else:
return phi[:, :-1, 0]
else:
if self.model_output != "logloss":
self.expected_value = [phi[0, -1, i] for i in range(phi.shape[2])]
if flat_output:
return [phi[0, :-1, i] for i in range(self.model.n_outputs)]
else:
return [phi[:, :-1, i] for i in range(self.model.n_outputs)]
|
python
|
def shap_values(self, X, y=None, tree_limit=None, approximate=False):
    """ Estimate the SHAP values for a set of samples.

    Parameters
    ----------
    X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
        A matrix of samples (# samples x # features) on which to explain the model's output.
    y : numpy.array
        An array of label values for each sample. Used when explaining loss functions.
    tree_limit : None (default) or int
        Limit the number of trees used by the model. By default None means to use the limit of the
        original model, and -1 means no limit.
    approximate : bool
        Run fast, but only roughly approximate the Tree SHAP values. This runs a method
        previously proposed by Saabas which only considers a single feature ordering. Take care
        since this does not have the consistency guarantees of Shapley values and places too
        much weight on lower splits in the tree.

    Returns
    -------
    For models with a single output this returns a matrix of SHAP values
    (# samples x # features). Each row sums to the difference between the model output for that
    sample and the expected value of the model output (which is stored in the expected_value
    attribute of the explainer when it is constant). For models with vector outputs this returns
    a list of such matrices, one for each output.
    """

    # see if we have a default tree_limit in place.
    if tree_limit is None:
        tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit

    # shortcut using the C++ version of Tree SHAP in XGBoost, LightGBM, and CatBoost
    if self.feature_dependence == "tree_path_dependent" and self.model.model_type != "internal" and self.data is None:
        phi = None
        if self.model.model_type == "xgboost":
            assert_import("xgboost")
            if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
                X = xgboost.DMatrix(X)
            if tree_limit == -1:
                tree_limit = 0  # 0 means "use all trees" in the XGBoost API
            phi = self.model.original_model.predict(
                X, ntree_limit=tree_limit, pred_contribs=True,
                approx_contribs=approximate, validate_features=False
            )
        elif self.model.model_type == "lightgbm":
            assert not approximate, "approximate=True is not supported for LightGBM models!"
            phi = self.model.original_model.predict(X, num_iteration=tree_limit, pred_contrib=True)
            # LightGBM flattens multi-output contributions; restore (samples, outputs, features+1)
            if phi.shape[1] != X.shape[1] + 1:
                phi = phi.reshape(X.shape[0], phi.shape[1]//(X.shape[1]+1), X.shape[1]+1)
        elif self.model.model_type == "catboost": # thanks to the CatBoost team for implementing this...
            assert not approximate, "approximate=True is not supported for CatBoost models!"
            assert tree_limit == -1, "tree_limit is not yet supported for CatBoost models!"
            if type(X) != catboost.Pool:
                X = catboost.Pool(X)
            phi = self.model.original_model.get_feature_importance(data=X, fstr_type='ShapValues')

        # note we pull off the last column and keep it as our expected_value
        if phi is not None:
            if len(phi.shape) == 3:
                self.expected_value = [phi[0, i, -1] for i in range(phi.shape[1])]
                return [phi[:, i, :-1] for i in range(phi.shape[1])]
            else:
                self.expected_value = phi[0, -1]
                return phi[:, :-1]

    # convert dataframes
    if str(type(X)).endswith("pandas.core.series.Series'>"):
        X = X.values
    elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
        X = X.values

    flat_output = False
    if len(X.shape) == 1:
        flat_output = True
        X = X.reshape(1, X.shape[0])
    if X.dtype != self.model.dtype:
        X = X.astype(self.model.dtype)
    # the np.bool alias was removed in NumPy 1.24; the builtin bool is the correct dtype here
    X_missing = np.isnan(X, dtype=bool)
    assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
    assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"

    # clamp the tree limit to the number of trees we actually have
    if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
        tree_limit = self.model.values.shape[0]

    if self.model_output == "logloss":
        assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
        assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
    transform = self.model.get_transform(self.model_output)

    if self.feature_dependence == "tree_path_dependent":
        assert self.model.fully_defined_weighting, "The background dataset you provided does not cover all the leaves in the model, " \
                                                   "so TreeExplainer cannot run with the feature_dependence=\"tree_path_dependent\" option! " \
                                                   "Try providing a larger background dataset, or using feature_dependence=\"independent\"."

    # run the core algorithm using the C extension
    assert_import("cext")
    # phi holds one extra "feature" column per sample which accumulates the expected value
    phi = np.zeros((X.shape[0], X.shape[1]+1, self.model.n_outputs))
    if not approximate:
        _cext.dense_tree_shap(
            self.model.children_left, self.model.children_right, self.model.children_default,
            self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
            self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
            self.model.base_offset, phi, feature_dependence_codes[self.feature_dependence],
            output_transform_codes[transform], False
        )
    else:
        _cext.dense_tree_saabas(
            self.model.children_left, self.model.children_right, self.model.children_default,
            self.model.features, self.model.thresholds, self.model.values,
            self.model.max_depth, tree_limit, self.model.base_offset, output_transform_codes[transform],
            X, X_missing, y, phi
        )

    # note we pull off the last column and keep it as our expected_value
    if self.model.n_outputs == 1:
        if self.model_output != "logloss":
            self.expected_value = phi[0, -1, 0]
        if flat_output:
            return phi[0, :-1, 0]
        else:
            return phi[:, :-1, 0]
    else:
        if self.model_output != "logloss":
            self.expected_value = [phi[0, -1, i] for i in range(phi.shape[2])]
        if flat_output:
            return [phi[0, :-1, i] for i in range(self.model.n_outputs)]
        else:
            return [phi[:, :-1, i] for i in range(self.model.n_outputs)]
|
[
"def",
"shap_values",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"tree_limit",
"=",
"None",
",",
"approximate",
"=",
"False",
")",
":",
"# see if we have a default tree_limit in place.",
"if",
"tree_limit",
"is",
"None",
":",
"tree_limit",
"=",
"-",
"1",
"if",
"self",
".",
"model",
".",
"tree_limit",
"is",
"None",
"else",
"self",
".",
"model",
".",
"tree_limit",
"# shortcut using the C++ version of Tree SHAP in XGBoost, LightGBM, and CatBoost",
"if",
"self",
".",
"feature_dependence",
"==",
"\"tree_path_dependent\"",
"and",
"self",
".",
"model",
".",
"model_type",
"!=",
"\"internal\"",
"and",
"self",
".",
"data",
"is",
"None",
":",
"phi",
"=",
"None",
"if",
"self",
".",
"model",
".",
"model_type",
"==",
"\"xgboost\"",
":",
"assert_import",
"(",
"\"xgboost\"",
")",
"if",
"not",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"xgboost.core.DMatrix'>\"",
")",
":",
"X",
"=",
"xgboost",
".",
"DMatrix",
"(",
"X",
")",
"if",
"tree_limit",
"==",
"-",
"1",
":",
"tree_limit",
"=",
"0",
"phi",
"=",
"self",
".",
"model",
".",
"original_model",
".",
"predict",
"(",
"X",
",",
"ntree_limit",
"=",
"tree_limit",
",",
"pred_contribs",
"=",
"True",
",",
"approx_contribs",
"=",
"approximate",
",",
"validate_features",
"=",
"False",
")",
"elif",
"self",
".",
"model",
".",
"model_type",
"==",
"\"lightgbm\"",
":",
"assert",
"not",
"approximate",
",",
"\"approximate=True is not supported for LightGBM models!\"",
"phi",
"=",
"self",
".",
"model",
".",
"original_model",
".",
"predict",
"(",
"X",
",",
"num_iteration",
"=",
"tree_limit",
",",
"pred_contrib",
"=",
"True",
")",
"if",
"phi",
".",
"shape",
"[",
"1",
"]",
"!=",
"X",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
":",
"phi",
"=",
"phi",
".",
"reshape",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"phi",
".",
"shape",
"[",
"1",
"]",
"//",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
",",
"X",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
"elif",
"self",
".",
"model",
".",
"model_type",
"==",
"\"catboost\"",
":",
"# thanks to the CatBoost team for implementing this...",
"assert",
"not",
"approximate",
",",
"\"approximate=True is not supported for CatBoost models!\"",
"assert",
"tree_limit",
"==",
"-",
"1",
",",
"\"tree_limit is not yet supported for CatBoost models!\"",
"if",
"type",
"(",
"X",
")",
"!=",
"catboost",
".",
"Pool",
":",
"X",
"=",
"catboost",
".",
"Pool",
"(",
"X",
")",
"phi",
"=",
"self",
".",
"model",
".",
"original_model",
".",
"get_feature_importance",
"(",
"data",
"=",
"X",
",",
"fstr_type",
"=",
"'ShapValues'",
")",
"# note we pull off the last column and keep it as our expected_value",
"if",
"phi",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"phi",
".",
"shape",
")",
"==",
"3",
":",
"self",
".",
"expected_value",
"=",
"[",
"phi",
"[",
"0",
",",
"i",
",",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"return",
"[",
"phi",
"[",
":",
",",
"i",
",",
":",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"else",
":",
"self",
".",
"expected_value",
"=",
"phi",
"[",
"0",
",",
"-",
"1",
"]",
"return",
"phi",
"[",
":",
",",
":",
"-",
"1",
"]",
"# convert dataframes",
"orig_X",
"=",
"X",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.series.Series'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.frame.DataFrame'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"flat_output",
"=",
"False",
"if",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
":",
"flat_output",
"=",
"True",
"X",
"=",
"X",
".",
"reshape",
"(",
"1",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"X",
".",
"dtype",
"!=",
"self",
".",
"model",
".",
"dtype",
":",
"X",
"=",
"X",
".",
"astype",
"(",
"self",
".",
"model",
".",
"dtype",
")",
"X_missing",
"=",
"np",
".",
"isnan",
"(",
"X",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"assert",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'numpy.ndarray'>\"",
")",
",",
"\"Unknown instance type: \"",
"+",
"str",
"(",
"type",
"(",
"X",
")",
")",
"assert",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
",",
"\"Passed input data matrix X must have 1 or 2 dimensions!\"",
"if",
"tree_limit",
"<",
"0",
"or",
"tree_limit",
">",
"self",
".",
"model",
".",
"values",
".",
"shape",
"[",
"0",
"]",
":",
"tree_limit",
"=",
"self",
".",
"model",
".",
"values",
".",
"shape",
"[",
"0",
"]",
"if",
"self",
".",
"model_output",
"==",
"\"logloss\"",
":",
"assert",
"y",
"is",
"not",
"None",
",",
"\"Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!\"",
"assert",
"X",
".",
"shape",
"[",
"0",
"]",
"==",
"len",
"(",
"y",
")",
",",
"\"The number of labels (%d) does not match the number of samples to explain (%d)!\"",
"%",
"(",
"len",
"(",
"y",
")",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"transform",
"=",
"self",
".",
"model",
".",
"get_transform",
"(",
"self",
".",
"model_output",
")",
"if",
"self",
".",
"feature_dependence",
"==",
"\"tree_path_dependent\"",
":",
"assert",
"self",
".",
"model",
".",
"fully_defined_weighting",
",",
"\"The background dataset you provided does not cover all the leaves in the model, \"",
"\"so TreeExplainer cannot run with the feature_dependence=\\\"tree_path_dependent\\\" option! \"",
"\"Try providing a larger background dataset, or using feature_dependence=\\\"independent\\\".\"",
"# run the core algorithm using the C extension",
"assert_import",
"(",
"\"cext\"",
")",
"phi",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"X",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
",",
"self",
".",
"model",
".",
"n_outputs",
")",
")",
"if",
"not",
"approximate",
":",
"_cext",
".",
"dense_tree_shap",
"(",
"self",
".",
"model",
".",
"children_left",
",",
"self",
".",
"model",
".",
"children_right",
",",
"self",
".",
"model",
".",
"children_default",
",",
"self",
".",
"model",
".",
"features",
",",
"self",
".",
"model",
".",
"thresholds",
",",
"self",
".",
"model",
".",
"values",
",",
"self",
".",
"model",
".",
"node_sample_weight",
",",
"self",
".",
"model",
".",
"max_depth",
",",
"X",
",",
"X_missing",
",",
"y",
",",
"self",
".",
"data",
",",
"self",
".",
"data_missing",
",",
"tree_limit",
",",
"self",
".",
"model",
".",
"base_offset",
",",
"phi",
",",
"feature_dependence_codes",
"[",
"self",
".",
"feature_dependence",
"]",
",",
"output_transform_codes",
"[",
"transform",
"]",
",",
"False",
")",
"else",
":",
"_cext",
".",
"dense_tree_saabas",
"(",
"self",
".",
"model",
".",
"children_left",
",",
"self",
".",
"model",
".",
"children_right",
",",
"self",
".",
"model",
".",
"children_default",
",",
"self",
".",
"model",
".",
"features",
",",
"self",
".",
"model",
".",
"thresholds",
",",
"self",
".",
"model",
".",
"values",
",",
"self",
".",
"model",
".",
"max_depth",
",",
"tree_limit",
",",
"self",
".",
"model",
".",
"base_offset",
",",
"output_transform_codes",
"[",
"transform",
"]",
",",
"X",
",",
"X_missing",
",",
"y",
",",
"phi",
")",
"# note we pull off the last column and keep it as our expected_value",
"if",
"self",
".",
"model",
".",
"n_outputs",
"==",
"1",
":",
"if",
"self",
".",
"model_output",
"!=",
"\"logloss\"",
":",
"self",
".",
"expected_value",
"=",
"phi",
"[",
"0",
",",
"-",
"1",
",",
"0",
"]",
"if",
"flat_output",
":",
"return",
"phi",
"[",
"0",
",",
":",
"-",
"1",
",",
"0",
"]",
"else",
":",
"return",
"phi",
"[",
":",
",",
":",
"-",
"1",
",",
"0",
"]",
"else",
":",
"if",
"self",
".",
"model_output",
"!=",
"\"logloss\"",
":",
"self",
".",
"expected_value",
"=",
"[",
"phi",
"[",
"0",
",",
"-",
"1",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"2",
"]",
")",
"]",
"if",
"flat_output",
":",
"return",
"[",
"phi",
"[",
"0",
",",
":",
"-",
"1",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"model",
".",
"n_outputs",
")",
"]",
"else",
":",
"return",
"[",
"phi",
"[",
":",
",",
":",
"-",
"1",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"model",
".",
"n_outputs",
")",
"]"
] |
Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions.
tree_limit : None (default) or int
Limit the number of trees used by the model. By default None means to use the limit of the
original model, and -1 means no limit.
approximate : bool
Run fast, but only roughly approximate the Tree SHAP values. This runs a method
previously proposed by Saabas which only considers a single feature ordering. Take care
since this does not have the consistency guarantees of Shapley values and places too
much weight on lower splits in the tree.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored in the expected_value
attribute of the explainer when it is constant). For models with vector outputs this returns
a list of such matrices, one for each output.
|
[
"Estimate",
"the",
"SHAP",
"values",
"for",
"a",
"set",
"of",
"samples",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L131-L263
|
train
|
slundberg/shap
|
shap/explainers/tree.py
|
TreeExplainer.shap_interaction_values
|
def shap_interaction_values(self, X, y=None, tree_limit=None):
    """ Estimate the SHAP interaction values for a set of samples.

    Parameters
    ----------
    X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
        A matrix of samples (# samples x # features) on which to explain the model's output.
    y : numpy.array
        An array of label values for each sample. Used when explaining loss functions (not yet supported).
    tree_limit : None (default) or int
        Limit the number of trees used by the model. By default None means to use the limit of the
        original model, and -1 means no limit.

    Returns
    -------
    For models with a single output this returns a tensor of SHAP values
    (# samples x # features x # features). The matrix (# features x # features) for each sample sums
    to the difference between the model output for that sample and the expected value of the model output
    (which is stored in the expected_value attribute of the explainer). Each row of this matrix sums to the
    SHAP value for that feature for that sample. The diagonal entries of the matrix represent the
    "main effect" of that feature on the prediction and the symmetric off-diagonal entries represent the
    interaction effects between all pairs of features for that sample. For models with vector outputs
    this returns a list of tensors, one for each output.
    """

    assert self.model_output == "margin", "Only model_output = \"margin\" is supported for SHAP interaction values right now!"
    assert self.feature_dependence == "tree_path_dependent", "Only feature_dependence = \"tree_path_dependent\" is supported for SHAP interaction values right now!"
    transform = "identity"

    # see if we have a default tree_limit in place.
    if tree_limit is None:
        tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit

    # shortcut using the C++ version of Tree SHAP in XGBoost
    if self.model.model_type == "xgboost":
        assert_import("xgboost")
        if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
            X = xgboost.DMatrix(X)
        if tree_limit == -1:
            tree_limit = 0  # 0 means "use all trees" in the XGBoost API
        phi = self.model.original_model.predict(X, ntree_limit=tree_limit, pred_interactions=True)

        # note we pull off the last column and keep it as our expected_value
        if len(phi.shape) == 4:
            self.expected_value = [phi[0, i, -1, -1] for i in range(phi.shape[1])]
            return [phi[:, i, :-1, :-1] for i in range(phi.shape[1])]
        else:
            self.expected_value = phi[0, -1, -1]
            return phi[:, :-1, :-1]

    # convert dataframes
    if str(type(X)).endswith("pandas.core.series.Series'>"):
        X = X.values
    elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
        X = X.values

    flat_output = False
    if len(X.shape) == 1:
        flat_output = True
        X = X.reshape(1, X.shape[0])
    if X.dtype != self.model.dtype:
        X = X.astype(self.model.dtype)
    # the np.bool alias was removed in NumPy 1.24; the builtin bool is the correct dtype here
    X_missing = np.isnan(X, dtype=bool)
    assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
    assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"

    # clamp the tree limit to the number of trees we actually have
    if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
        tree_limit = self.model.values.shape[0]

    # run the core algorithm using the C extension
    assert_import("cext")
    # phi holds one extra row/column per sample which accumulates the expected value
    phi = np.zeros((X.shape[0], X.shape[1]+1, X.shape[1]+1, self.model.n_outputs))
    _cext.dense_tree_shap(
        self.model.children_left, self.model.children_right, self.model.children_default,
        self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
        self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
        self.model.base_offset, phi, feature_dependence_codes[self.feature_dependence],
        output_transform_codes[transform], True
    )

    # note we pull off the last column and keep it as our expected_value
    if self.model.n_outputs == 1:
        self.expected_value = phi[0, -1, -1, 0]
        if flat_output:
            return phi[0, :-1, :-1, 0]
        else:
            return phi[:, :-1, :-1, 0]
    else:
        self.expected_value = [phi[0, -1, -1, i] for i in range(phi.shape[3])]
        if flat_output:
            return [phi[0, :-1, :-1, i] for i in range(self.model.n_outputs)]
        else:
            return [phi[:, :-1, :-1, i] for i in range(self.model.n_outputs)]
|
python
|
def shap_interaction_values(self, X, y=None, tree_limit=None):
    """ Estimate the SHAP interaction values for a set of samples.

    Parameters
    ----------
    X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
        A matrix of samples (# samples x # features) on which to explain the model's output.
    y : numpy.array
        An array of label values for each sample. Used when explaining loss functions (not yet supported).
    tree_limit : None (default) or int
        Limit the number of trees used by the model. By default None means to use the limit of the
        original model, and -1 means no limit.

    Returns
    -------
    For models with a single output this returns a tensor of SHAP values
    (# samples x # features x # features). The matrix (# features x # features) for each sample sums
    to the difference between the model output for that sample and the expected value of the model output
    (which is stored in the expected_value attribute of the explainer). Each row of this matrix sums to the
    SHAP value for that feature for that sample. The diagonal entries of the matrix represent the
    "main effect" of that feature on the prediction and the symmetric off-diagonal entries represent the
    interaction effects between all pairs of features for that sample. For models with vector outputs
    this returns a list of tensors, one for each output.
    """

    assert self.model_output == "margin", "Only model_output = \"margin\" is supported for SHAP interaction values right now!"
    assert self.feature_dependence == "tree_path_dependent", "Only feature_dependence = \"tree_path_dependent\" is supported for SHAP interaction values right now!"
    transform = "identity"

    # see if we have a default tree_limit in place.
    if tree_limit is None:
        tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit

    # shortcut using the C++ version of Tree SHAP in XGBoost
    if self.model.model_type == "xgboost":
        assert_import("xgboost")
        if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
            X = xgboost.DMatrix(X)
        if tree_limit == -1:
            tree_limit = 0  # 0 means "use all trees" in the XGBoost API
        phi = self.model.original_model.predict(X, ntree_limit=tree_limit, pred_interactions=True)

        # note we pull off the last column and keep it as our expected_value
        if len(phi.shape) == 4:
            self.expected_value = [phi[0, i, -1, -1] for i in range(phi.shape[1])]
            return [phi[:, i, :-1, :-1] for i in range(phi.shape[1])]
        else:
            self.expected_value = phi[0, -1, -1]
            return phi[:, :-1, :-1]

    # convert dataframes
    if str(type(X)).endswith("pandas.core.series.Series'>"):
        X = X.values
    elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
        X = X.values

    flat_output = False
    if len(X.shape) == 1:
        flat_output = True
        X = X.reshape(1, X.shape[0])
    if X.dtype != self.model.dtype:
        X = X.astype(self.model.dtype)
    # the np.bool alias was removed in NumPy 1.24; the builtin bool is the correct dtype here
    X_missing = np.isnan(X, dtype=bool)
    assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
    assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"

    # clamp the tree limit to the number of trees we actually have
    if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
        tree_limit = self.model.values.shape[0]

    # run the core algorithm using the C extension
    assert_import("cext")
    # phi holds one extra row/column per sample which accumulates the expected value
    phi = np.zeros((X.shape[0], X.shape[1]+1, X.shape[1]+1, self.model.n_outputs))
    _cext.dense_tree_shap(
        self.model.children_left, self.model.children_right, self.model.children_default,
        self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
        self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
        self.model.base_offset, phi, feature_dependence_codes[self.feature_dependence],
        output_transform_codes[transform], True
    )

    # note we pull off the last column and keep it as our expected_value
    if self.model.n_outputs == 1:
        self.expected_value = phi[0, -1, -1, 0]
        if flat_output:
            return phi[0, :-1, :-1, 0]
        else:
            return phi[:, :-1, :-1, 0]
    else:
        self.expected_value = [phi[0, -1, -1, i] for i in range(phi.shape[3])]
        if flat_output:
            return [phi[0, :-1, :-1, i] for i in range(self.model.n_outputs)]
        else:
            return [phi[:, :-1, :-1, i] for i in range(self.model.n_outputs)]
|
[
"def",
"shap_interaction_values",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"tree_limit",
"=",
"None",
")",
":",
"assert",
"self",
".",
"model_output",
"==",
"\"margin\"",
",",
"\"Only model_output = \\\"margin\\\" is supported for SHAP interaction values right now!\"",
"assert",
"self",
".",
"feature_dependence",
"==",
"\"tree_path_dependent\"",
",",
"\"Only feature_dependence = \\\"tree_path_dependent\\\" is supported for SHAP interaction values right now!\"",
"transform",
"=",
"\"identity\"",
"# see if we have a default tree_limit in place.",
"if",
"tree_limit",
"is",
"None",
":",
"tree_limit",
"=",
"-",
"1",
"if",
"self",
".",
"model",
".",
"tree_limit",
"is",
"None",
"else",
"self",
".",
"model",
".",
"tree_limit",
"# shortcut using the C++ version of Tree SHAP in XGBoost",
"if",
"self",
".",
"model",
".",
"model_type",
"==",
"\"xgboost\"",
":",
"assert_import",
"(",
"\"xgboost\"",
")",
"if",
"not",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"xgboost.core.DMatrix'>\"",
")",
":",
"X",
"=",
"xgboost",
".",
"DMatrix",
"(",
"X",
")",
"if",
"tree_limit",
"==",
"-",
"1",
":",
"tree_limit",
"=",
"0",
"phi",
"=",
"self",
".",
"model",
".",
"original_model",
".",
"predict",
"(",
"X",
",",
"ntree_limit",
"=",
"tree_limit",
",",
"pred_interactions",
"=",
"True",
")",
"# note we pull off the last column and keep it as our expected_value",
"if",
"len",
"(",
"phi",
".",
"shape",
")",
"==",
"4",
":",
"self",
".",
"expected_value",
"=",
"[",
"phi",
"[",
"0",
",",
"i",
",",
"-",
"1",
",",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"return",
"[",
"phi",
"[",
":",
",",
"i",
",",
":",
"-",
"1",
",",
":",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"else",
":",
"self",
".",
"expected_value",
"=",
"phi",
"[",
"0",
",",
"-",
"1",
",",
"-",
"1",
"]",
"return",
"phi",
"[",
":",
",",
":",
"-",
"1",
",",
":",
"-",
"1",
"]",
"# convert dataframes",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.series.Series'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.frame.DataFrame'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"flat_output",
"=",
"False",
"if",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
":",
"flat_output",
"=",
"True",
"X",
"=",
"X",
".",
"reshape",
"(",
"1",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"X",
".",
"dtype",
"!=",
"self",
".",
"model",
".",
"dtype",
":",
"X",
"=",
"X",
".",
"astype",
"(",
"self",
".",
"model",
".",
"dtype",
")",
"X_missing",
"=",
"np",
".",
"isnan",
"(",
"X",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"assert",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'numpy.ndarray'>\"",
")",
",",
"\"Unknown instance type: \"",
"+",
"str",
"(",
"type",
"(",
"X",
")",
")",
"assert",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
",",
"\"Passed input data matrix X must have 1 or 2 dimensions!\"",
"if",
"tree_limit",
"<",
"0",
"or",
"tree_limit",
">",
"self",
".",
"model",
".",
"values",
".",
"shape",
"[",
"0",
"]",
":",
"tree_limit",
"=",
"self",
".",
"model",
".",
"values",
".",
"shape",
"[",
"0",
"]",
"# run the core algorithm using the C extension",
"assert_import",
"(",
"\"cext\"",
")",
"phi",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"X",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
",",
"X",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
",",
"self",
".",
"model",
".",
"n_outputs",
")",
")",
"_cext",
".",
"dense_tree_shap",
"(",
"self",
".",
"model",
".",
"children_left",
",",
"self",
".",
"model",
".",
"children_right",
",",
"self",
".",
"model",
".",
"children_default",
",",
"self",
".",
"model",
".",
"features",
",",
"self",
".",
"model",
".",
"thresholds",
",",
"self",
".",
"model",
".",
"values",
",",
"self",
".",
"model",
".",
"node_sample_weight",
",",
"self",
".",
"model",
".",
"max_depth",
",",
"X",
",",
"X_missing",
",",
"y",
",",
"self",
".",
"data",
",",
"self",
".",
"data_missing",
",",
"tree_limit",
",",
"self",
".",
"model",
".",
"base_offset",
",",
"phi",
",",
"feature_dependence_codes",
"[",
"self",
".",
"feature_dependence",
"]",
",",
"output_transform_codes",
"[",
"transform",
"]",
",",
"True",
")",
"# note we pull off the last column and keep it as our expected_value",
"if",
"self",
".",
"model",
".",
"n_outputs",
"==",
"1",
":",
"self",
".",
"expected_value",
"=",
"phi",
"[",
"0",
",",
"-",
"1",
",",
"-",
"1",
",",
"0",
"]",
"if",
"flat_output",
":",
"return",
"phi",
"[",
"0",
",",
":",
"-",
"1",
",",
":",
"-",
"1",
",",
"0",
"]",
"else",
":",
"return",
"phi",
"[",
":",
",",
":",
"-",
"1",
",",
":",
"-",
"1",
",",
"0",
"]",
"else",
":",
"self",
".",
"expected_value",
"=",
"[",
"phi",
"[",
"0",
",",
"-",
"1",
",",
"-",
"1",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"phi",
".",
"shape",
"[",
"3",
"]",
")",
"]",
"if",
"flat_output",
":",
"return",
"[",
"phi",
"[",
"0",
",",
":",
"-",
"1",
",",
":",
"-",
"1",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"model",
".",
"n_outputs",
")",
"]",
"else",
":",
"return",
"[",
"phi",
"[",
":",
",",
":",
"-",
"1",
",",
":",
"-",
"1",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"model",
".",
"n_outputs",
")",
"]"
] |
Estimate the SHAP interaction values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions (not yet supported).
tree_limit : None (default) or int
Limit the number of trees used by the model. By default None means to use the limit of the
original model, and -1 means no limit.
Returns
-------
For models with a single output this returns a tensor of SHAP values
(# samples x # features x # features). The matrix (# features x # features) for each sample sums
to the difference between the model output for that sample and the expected value of the model output
(which is stored in the expected_value attribute of the explainer). Each row of this matrix sums to the
SHAP value for that feature for that sample. The diagonal entries of the matrix represent the
"main effect" of that feature on the prediction and the symmetric off-diagonal entries represent the
interaction effects between all pairs of features for that sample. For models with vector outputs
this returns a list of tensors, one for each output.
|
[
"Estimate",
"the",
"SHAP",
"interaction",
"values",
"for",
"a",
"set",
"of",
"samples",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L265-L358
|
train
|
slundberg/shap
|
shap/explainers/tree.py
|
TreeEnsemble.get_transform
|
def get_transform(self, model_output):
    """ A consistent interface to make predictions from this model.

    Parameters
    ----------
    model_output : str
        One of "margin", "probability", or "logloss".

    Returns
    -------
    str
        The name of the output transform to apply ("identity", "logistic",
        "squared_loss", or "logistic_nlogloss").

    Raises
    ------
    Exception
        If the requested model_output is unsupported for this model's
        tree_output/objective, or is not recognized at all.
    """
    if model_output == "margin":
        transform = "identity"
    elif model_output == "probability":
        if self.tree_output == "log_odds":
            transform = "logistic"
        elif self.tree_output == "probability":
            transform = "identity"
        else:
            raise Exception("model_output = \"probability\" is not yet supported when model.tree_output = \"" + self.tree_output + "\"!")
    elif model_output == "logloss":
        if self.objective == "squared_error":
            transform = "squared_loss"
        elif self.objective == "binary_crossentropy":
            transform = "logistic_nlogloss"
        else:
            raise Exception("model_output = \"logloss\" is not yet supported when model.objective = \"" + self.objective + "\"!")
    else:
        # previously an unrecognized model_output fell through every branch and
        # surfaced as a confusing UnboundLocalError at the return; fail fast instead
        raise Exception("Unrecognized model_output: \"" + str(model_output) + "\"!")
    return transform
|
python
|
def get_transform(self, model_output):
""" A consistent interface to make predictions from this model.
"""
if model_output == "margin":
transform = "identity"
elif model_output == "probability":
if self.tree_output == "log_odds":
transform = "logistic"
elif self.tree_output == "probability":
transform = "identity"
else:
raise Exception("model_output = \"probability\" is not yet supported when model.tree_output = \"" + self.tree_output + "\"!")
elif model_output == "logloss":
if self.objective == "squared_error":
transform = "squared_loss"
elif self.objective == "binary_crossentropy":
transform = "logistic_nlogloss"
else:
raise Exception("model_output = \"logloss\" is not yet supported when model.objective = \"" + self.objective + "\"!")
return transform
|
[
"def",
"get_transform",
"(",
"self",
",",
"model_output",
")",
":",
"if",
"model_output",
"==",
"\"margin\"",
":",
"transform",
"=",
"\"identity\"",
"elif",
"model_output",
"==",
"\"probability\"",
":",
"if",
"self",
".",
"tree_output",
"==",
"\"log_odds\"",
":",
"transform",
"=",
"\"logistic\"",
"elif",
"self",
".",
"tree_output",
"==",
"\"probability\"",
":",
"transform",
"=",
"\"identity\"",
"else",
":",
"raise",
"Exception",
"(",
"\"model_output = \\\"probability\\\" is not yet supported when model.tree_output = \\\"\"",
"+",
"self",
".",
"tree_output",
"+",
"\"\\\"!\"",
")",
"elif",
"model_output",
"==",
"\"logloss\"",
":",
"if",
"self",
".",
"objective",
"==",
"\"squared_error\"",
":",
"transform",
"=",
"\"squared_loss\"",
"elif",
"self",
".",
"objective",
"==",
"\"binary_crossentropy\"",
":",
"transform",
"=",
"\"logistic_nlogloss\"",
"else",
":",
"raise",
"Exception",
"(",
"\"model_output = \\\"logloss\\\" is not yet supported when model.objective = \\\"\"",
"+",
"self",
".",
"objective",
"+",
"\"\\\"!\"",
")",
"return",
"transform"
] |
A consistent interface to make predictions from this model.
|
[
"A",
"consistent",
"interface",
"to",
"make",
"predictions",
"from",
"this",
"model",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L633-L653
|
train
|
slundberg/shap
|
shap/explainers/tree.py
|
TreeEnsemble.predict
|
def predict(self, X, y=None, output="margin", tree_limit=None):
""" A consistent interface to make predictions from this model.
Parameters
----------
tree_limit : None (default) or int
Limit the number of trees used by the model. By default None means no use the limit of the
original model, and -1 means no limit.
"""
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.tree_limit is None else self.tree_limit
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.dtype:
X = X.astype(self.dtype)
X_missing = np.isnan(X, dtype=np.bool)
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.values.shape[0]:
tree_limit = self.values.shape[0]
if output == "logloss":
assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
transform = self.get_transform(output)
if True or self.model_type == "internal":
output = np.zeros((X.shape[0], self.n_outputs))
assert_import("cext")
_cext.dense_tree_predict(
self.children_left, self.children_right, self.children_default,
self.features, self.thresholds, self.values,
self.max_depth, tree_limit, self.base_offset, output_transform_codes[transform],
X, X_missing, y, output
)
elif self.model_type == "xgboost":
assert_import("xgboost")
output = self.original_model.predict(X, output_margin=True, tree_limit=tree_limit)
# drop dimensions we don't need
if flat_output:
if self.n_outputs == 1:
return output.flatten()[0]
else:
return output.reshape(-1, self.n_outputs)
else:
if self.n_outputs == 1:
return output.flatten()
else:
return output
|
python
|
def predict(self, X, y=None, output="margin", tree_limit=None):
""" A consistent interface to make predictions from this model.
Parameters
----------
tree_limit : None (default) or int
Limit the number of trees used by the model. By default None means no use the limit of the
original model, and -1 means no limit.
"""
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.tree_limit is None else self.tree_limit
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.dtype:
X = X.astype(self.dtype)
X_missing = np.isnan(X, dtype=np.bool)
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.values.shape[0]:
tree_limit = self.values.shape[0]
if output == "logloss":
assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
transform = self.get_transform(output)
if True or self.model_type == "internal":
output = np.zeros((X.shape[0], self.n_outputs))
assert_import("cext")
_cext.dense_tree_predict(
self.children_left, self.children_right, self.children_default,
self.features, self.thresholds, self.values,
self.max_depth, tree_limit, self.base_offset, output_transform_codes[transform],
X, X_missing, y, output
)
elif self.model_type == "xgboost":
assert_import("xgboost")
output = self.original_model.predict(X, output_margin=True, tree_limit=tree_limit)
# drop dimensions we don't need
if flat_output:
if self.n_outputs == 1:
return output.flatten()[0]
else:
return output.reshape(-1, self.n_outputs)
else:
if self.n_outputs == 1:
return output.flatten()
else:
return output
|
[
"def",
"predict",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"output",
"=",
"\"margin\"",
",",
"tree_limit",
"=",
"None",
")",
":",
"# see if we have a default tree_limit in place.",
"if",
"tree_limit",
"is",
"None",
":",
"tree_limit",
"=",
"-",
"1",
"if",
"self",
".",
"tree_limit",
"is",
"None",
"else",
"self",
".",
"tree_limit",
"# convert dataframes",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.series.Series'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.frame.DataFrame'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"flat_output",
"=",
"False",
"if",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
":",
"flat_output",
"=",
"True",
"X",
"=",
"X",
".",
"reshape",
"(",
"1",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"X",
".",
"dtype",
"!=",
"self",
".",
"dtype",
":",
"X",
"=",
"X",
".",
"astype",
"(",
"self",
".",
"dtype",
")",
"X_missing",
"=",
"np",
".",
"isnan",
"(",
"X",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"assert",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'numpy.ndarray'>\"",
")",
",",
"\"Unknown instance type: \"",
"+",
"str",
"(",
"type",
"(",
"X",
")",
")",
"assert",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
",",
"\"Passed input data matrix X must have 1 or 2 dimensions!\"",
"if",
"tree_limit",
"<",
"0",
"or",
"tree_limit",
">",
"self",
".",
"values",
".",
"shape",
"[",
"0",
"]",
":",
"tree_limit",
"=",
"self",
".",
"values",
".",
"shape",
"[",
"0",
"]",
"if",
"output",
"==",
"\"logloss\"",
":",
"assert",
"y",
"is",
"not",
"None",
",",
"\"Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!\"",
"assert",
"X",
".",
"shape",
"[",
"0",
"]",
"==",
"len",
"(",
"y",
")",
",",
"\"The number of labels (%d) does not match the number of samples to explain (%d)!\"",
"%",
"(",
"len",
"(",
"y",
")",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"transform",
"=",
"self",
".",
"get_transform",
"(",
"output",
")",
"if",
"True",
"or",
"self",
".",
"model_type",
"==",
"\"internal\"",
":",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"n_outputs",
")",
")",
"assert_import",
"(",
"\"cext\"",
")",
"_cext",
".",
"dense_tree_predict",
"(",
"self",
".",
"children_left",
",",
"self",
".",
"children_right",
",",
"self",
".",
"children_default",
",",
"self",
".",
"features",
",",
"self",
".",
"thresholds",
",",
"self",
".",
"values",
",",
"self",
".",
"max_depth",
",",
"tree_limit",
",",
"self",
".",
"base_offset",
",",
"output_transform_codes",
"[",
"transform",
"]",
",",
"X",
",",
"X_missing",
",",
"y",
",",
"output",
")",
"elif",
"self",
".",
"model_type",
"==",
"\"xgboost\"",
":",
"assert_import",
"(",
"\"xgboost\"",
")",
"output",
"=",
"self",
".",
"original_model",
".",
"predict",
"(",
"X",
",",
"output_margin",
"=",
"True",
",",
"tree_limit",
"=",
"tree_limit",
")",
"# drop dimensions we don't need",
"if",
"flat_output",
":",
"if",
"self",
".",
"n_outputs",
"==",
"1",
":",
"return",
"output",
".",
"flatten",
"(",
")",
"[",
"0",
"]",
"else",
":",
"return",
"output",
".",
"reshape",
"(",
"-",
"1",
",",
"self",
".",
"n_outputs",
")",
"else",
":",
"if",
"self",
".",
"n_outputs",
"==",
"1",
":",
"return",
"output",
".",
"flatten",
"(",
")",
"else",
":",
"return",
"output"
] |
A consistent interface to make predictions from this model.
Parameters
----------
tree_limit : None (default) or int
Limit the number of trees used by the model. By default None means no use the limit of the
original model, and -1 means no limit.
|
[
"A",
"consistent",
"interface",
"to",
"make",
"predictions",
"from",
"this",
"model",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/tree.py#L655-L716
|
train
|
slundberg/shap
|
shap/explainers/gradient.py
|
GradientExplainer.shap_values
|
def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None):
""" Return the values for the model applied to X.
Parameters
----------
X : list,
if framework == 'tensorflow': numpy.array, or pandas.DataFrame
if framework == 'pytorch': torch.tensor
A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to
explain the model's output.
ranked_outputs : None or int
If ranked_outputs is None then we explain all the outputs in a multi-output model. If
ranked_outputs is a positive integer then we only explain that many of the top model
outputs (where "top" is determined by output_rank_order). Note that this causes a pair
of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of
the output ranks, and indexes is a matrix that tells for each sample which output indexes
were choses as "top".
output_rank_order : "max", "min", "max_abs", or "custom"
How to order the model outputs when using ranked_outputs, either by maximum, minimum, or
maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes.
rseed : None or int
Seeding the randomness in shap value computation (background example choice,
interpolation between current and background example, smoothing).
Returns
-------
For a models with a single output this returns a tensor of SHAP values with the same shape
as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of
which are the same shape as X. If ranked_outputs is None then this list of tensors matches
the number of model outputs. If ranked_outputs is a positive integer a pair is returned
(shap_values, indexes), where shap_values is a list of tensors with a length of
ranked_outputs, and indexes is a matrix that tells for each sample which output indexes
were chosen as "top".
"""
return self.explainer.shap_values(X, nsamples, ranked_outputs, output_rank_order, rseed)
|
python
|
def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None):
""" Return the values for the model applied to X.
Parameters
----------
X : list,
if framework == 'tensorflow': numpy.array, or pandas.DataFrame
if framework == 'pytorch': torch.tensor
A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to
explain the model's output.
ranked_outputs : None or int
If ranked_outputs is None then we explain all the outputs in a multi-output model. If
ranked_outputs is a positive integer then we only explain that many of the top model
outputs (where "top" is determined by output_rank_order). Note that this causes a pair
of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of
the output ranks, and indexes is a matrix that tells for each sample which output indexes
were choses as "top".
output_rank_order : "max", "min", "max_abs", or "custom"
How to order the model outputs when using ranked_outputs, either by maximum, minimum, or
maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes.
rseed : None or int
Seeding the randomness in shap value computation (background example choice,
interpolation between current and background example, smoothing).
Returns
-------
For a models with a single output this returns a tensor of SHAP values with the same shape
as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of
which are the same shape as X. If ranked_outputs is None then this list of tensors matches
the number of model outputs. If ranked_outputs is a positive integer a pair is returned
(shap_values, indexes), where shap_values is a list of tensors with a length of
ranked_outputs, and indexes is a matrix that tells for each sample which output indexes
were chosen as "top".
"""
return self.explainer.shap_values(X, nsamples, ranked_outputs, output_rank_order, rseed)
|
[
"def",
"shap_values",
"(",
"self",
",",
"X",
",",
"nsamples",
"=",
"200",
",",
"ranked_outputs",
"=",
"None",
",",
"output_rank_order",
"=",
"\"max\"",
",",
"rseed",
"=",
"None",
")",
":",
"return",
"self",
".",
"explainer",
".",
"shap_values",
"(",
"X",
",",
"nsamples",
",",
"ranked_outputs",
",",
"output_rank_order",
",",
"rseed",
")"
] |
Return the values for the model applied to X.
Parameters
----------
X : list,
if framework == 'tensorflow': numpy.array, or pandas.DataFrame
if framework == 'pytorch': torch.tensor
A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to
explain the model's output.
ranked_outputs : None or int
If ranked_outputs is None then we explain all the outputs in a multi-output model. If
ranked_outputs is a positive integer then we only explain that many of the top model
outputs (where "top" is determined by output_rank_order). Note that this causes a pair
of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of
the output ranks, and indexes is a matrix that tells for each sample which output indexes
were choses as "top".
output_rank_order : "max", "min", "max_abs", or "custom"
How to order the model outputs when using ranked_outputs, either by maximum, minimum, or
maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes.
rseed : None or int
Seeding the randomness in shap value computation (background example choice,
interpolation between current and background example, smoothing).
Returns
-------
For a models with a single output this returns a tensor of SHAP values with the same shape
as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of
which are the same shape as X. If ranked_outputs is None then this list of tensors matches
the number of model outputs. If ranked_outputs is a positive integer a pair is returned
(shap_values, indexes), where shap_values is a list of tensors with a length of
ranked_outputs, and indexes is a matrix that tells for each sample which output indexes
were chosen as "top".
|
[
"Return",
"the",
"values",
"for",
"the",
"model",
"applied",
"to",
"X",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/gradient.py#L75-L112
|
train
|
slundberg/shap
|
shap/plots/force.py
|
force_plot
|
def force_plot(base_value, shap_values, features=None, feature_names=None, out_names=None, link="identity",
plot_cmap="RdBu", matplotlib=False, show=True, figsize=(20,3), ordering_keys=None, ordering_keys_time_format=None,
text_rotation=0):
""" Visualize the given SHAP values with an additive force layout.
Parameters
----------
base_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
features : numpy.array
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
out_names : str
The name of the outout of the model (plural to support multi-output plotting in the future).
link : "identity" or "logit"
The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
into probabilities.
matplotlib : bool
Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
can be helpful in scenarios where rendering Javascript/HTML is inconvenient.
"""
# auto unwrap the base_value
if type(base_value) == np.ndarray and len(base_value) == 1:
base_value = base_value[0]
if (type(base_value) == np.ndarray or type(base_value) == list):
if type(shap_values) != list or len(shap_values) != len(base_value):
raise Exception("In v0.20 force_plot now requires the base value as the first parameter! " \
"Try shap.force_plot(explainer.expected_value, shap_values) or " \
"for multi-output models try " \
"shap.force_plot(explainer.expected_value[0], shap_values[0]).")
assert not type(shap_values) == list, "The shap_values arg looks looks multi output, try shap_values[i]."
link = convert_to_link(link)
if type(shap_values) != np.ndarray:
return visualize(shap_values)
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = list(features.columns)
features = features.values
elif str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = list(features.index)
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif features is not None and len(features.shape) == 1 and feature_names is None:
feature_names = features
features = None
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, (1, len(shap_values)))
if out_names is None:
out_names = ["output value"]
elif type(out_names) == str:
out_names = [out_names]
if shap_values.shape[0] == 1:
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if features is None:
features = ["" for _ in range(len(feature_names))]
if type(features) == np.ndarray:
features = features.flatten()
# check that the shape of the shap_values and features match
if len(features) != shap_values.shape[1]:
msg = "Length of features is not equal to the length of shap_values!"
if len(features) == shap_values.shape[1] - 1:
msg += " You might be using an old format shap_values array with the base value " \
"as the last column. In this case just pass the array without the last column."
raise Exception(msg)
instance = Instance(np.zeros((1, len(feature_names))), features)
e = AdditiveExplanation(
base_value,
np.sum(shap_values[0, :]) + base_value,
shap_values[0, :],
None,
instance,
link,
Model(None, out_names),
DenseData(np.zeros((1, len(feature_names))), list(feature_names))
)
return visualize(e, plot_cmap, matplotlib, figsize=figsize, show=show, text_rotation=text_rotation)
else:
if matplotlib:
raise Exception("matplotlib = True is not yet supported for force plots with multiple samples!")
if shap_values.shape[0] > 3000:
warnings.warn("shap.force_plot is slow for many thousands of rows, try subsampling your data.")
exps = []
for i in range(shap_values.shape[0]):
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if features is None:
display_features = ["" for i in range(len(feature_names))]
else:
display_features = features[i, :]
instance = Instance(np.ones((1, len(feature_names))), display_features)
e = AdditiveExplanation(
base_value,
np.sum(shap_values[i, :]) + base_value,
shap_values[i, :],
None,
instance,
link,
Model(None, out_names),
DenseData(np.ones((1, len(feature_names))), list(feature_names))
)
exps.append(e)
return visualize(
exps,
plot_cmap=plot_cmap,
ordering_keys=ordering_keys,
ordering_keys_time_format=ordering_keys_time_format,
text_rotation=text_rotation
)
|
python
|
def force_plot(base_value, shap_values, features=None, feature_names=None, out_names=None, link="identity",
plot_cmap="RdBu", matplotlib=False, show=True, figsize=(20,3), ordering_keys=None, ordering_keys_time_format=None,
text_rotation=0):
""" Visualize the given SHAP values with an additive force layout.
Parameters
----------
base_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
features : numpy.array
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
out_names : str
The name of the outout of the model (plural to support multi-output plotting in the future).
link : "identity" or "logit"
The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
into probabilities.
matplotlib : bool
Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
can be helpful in scenarios where rendering Javascript/HTML is inconvenient.
"""
# auto unwrap the base_value
if type(base_value) == np.ndarray and len(base_value) == 1:
base_value = base_value[0]
if (type(base_value) == np.ndarray or type(base_value) == list):
if type(shap_values) != list or len(shap_values) != len(base_value):
raise Exception("In v0.20 force_plot now requires the base value as the first parameter! " \
"Try shap.force_plot(explainer.expected_value, shap_values) or " \
"for multi-output models try " \
"shap.force_plot(explainer.expected_value[0], shap_values[0]).")
assert not type(shap_values) == list, "The shap_values arg looks looks multi output, try shap_values[i]."
link = convert_to_link(link)
if type(shap_values) != np.ndarray:
return visualize(shap_values)
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = list(features.columns)
features = features.values
elif str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = list(features.index)
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif features is not None and len(features.shape) == 1 and feature_names is None:
feature_names = features
features = None
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, (1, len(shap_values)))
if out_names is None:
out_names = ["output value"]
elif type(out_names) == str:
out_names = [out_names]
if shap_values.shape[0] == 1:
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if features is None:
features = ["" for _ in range(len(feature_names))]
if type(features) == np.ndarray:
features = features.flatten()
# check that the shape of the shap_values and features match
if len(features) != shap_values.shape[1]:
msg = "Length of features is not equal to the length of shap_values!"
if len(features) == shap_values.shape[1] - 1:
msg += " You might be using an old format shap_values array with the base value " \
"as the last column. In this case just pass the array without the last column."
raise Exception(msg)
instance = Instance(np.zeros((1, len(feature_names))), features)
e = AdditiveExplanation(
base_value,
np.sum(shap_values[0, :]) + base_value,
shap_values[0, :],
None,
instance,
link,
Model(None, out_names),
DenseData(np.zeros((1, len(feature_names))), list(feature_names))
)
return visualize(e, plot_cmap, matplotlib, figsize=figsize, show=show, text_rotation=text_rotation)
else:
if matplotlib:
raise Exception("matplotlib = True is not yet supported for force plots with multiple samples!")
if shap_values.shape[0] > 3000:
warnings.warn("shap.force_plot is slow for many thousands of rows, try subsampling your data.")
exps = []
for i in range(shap_values.shape[0]):
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if features is None:
display_features = ["" for i in range(len(feature_names))]
else:
display_features = features[i, :]
instance = Instance(np.ones((1, len(feature_names))), display_features)
e = AdditiveExplanation(
base_value,
np.sum(shap_values[i, :]) + base_value,
shap_values[i, :],
None,
instance,
link,
Model(None, out_names),
DenseData(np.ones((1, len(feature_names))), list(feature_names))
)
exps.append(e)
return visualize(
exps,
plot_cmap=plot_cmap,
ordering_keys=ordering_keys,
ordering_keys_time_format=ordering_keys_time_format,
text_rotation=text_rotation
)
|
[
"def",
"force_plot",
"(",
"base_value",
",",
"shap_values",
",",
"features",
"=",
"None",
",",
"feature_names",
"=",
"None",
",",
"out_names",
"=",
"None",
",",
"link",
"=",
"\"identity\"",
",",
"plot_cmap",
"=",
"\"RdBu\"",
",",
"matplotlib",
"=",
"False",
",",
"show",
"=",
"True",
",",
"figsize",
"=",
"(",
"20",
",",
"3",
")",
",",
"ordering_keys",
"=",
"None",
",",
"ordering_keys_time_format",
"=",
"None",
",",
"text_rotation",
"=",
"0",
")",
":",
"# auto unwrap the base_value",
"if",
"type",
"(",
"base_value",
")",
"==",
"np",
".",
"ndarray",
"and",
"len",
"(",
"base_value",
")",
"==",
"1",
":",
"base_value",
"=",
"base_value",
"[",
"0",
"]",
"if",
"(",
"type",
"(",
"base_value",
")",
"==",
"np",
".",
"ndarray",
"or",
"type",
"(",
"base_value",
")",
"==",
"list",
")",
":",
"if",
"type",
"(",
"shap_values",
")",
"!=",
"list",
"or",
"len",
"(",
"shap_values",
")",
"!=",
"len",
"(",
"base_value",
")",
":",
"raise",
"Exception",
"(",
"\"In v0.20 force_plot now requires the base value as the first parameter! \"",
"\"Try shap.force_plot(explainer.expected_value, shap_values) or \"",
"\"for multi-output models try \"",
"\"shap.force_plot(explainer.expected_value[0], shap_values[0]).\"",
")",
"assert",
"not",
"type",
"(",
"shap_values",
")",
"==",
"list",
",",
"\"The shap_values arg looks looks multi output, try shap_values[i].\"",
"link",
"=",
"convert_to_link",
"(",
"link",
")",
"if",
"type",
"(",
"shap_values",
")",
"!=",
"np",
".",
"ndarray",
":",
"return",
"visualize",
"(",
"shap_values",
")",
"# convert from a DataFrame or other types",
"if",
"str",
"(",
"type",
"(",
"features",
")",
")",
"==",
"\"<class 'pandas.core.frame.DataFrame'>\"",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"list",
"(",
"features",
".",
"columns",
")",
"features",
"=",
"features",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"features",
")",
")",
"==",
"\"<class 'pandas.core.series.Series'>\"",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"list",
"(",
"features",
".",
"index",
")",
"features",
"=",
"features",
".",
"values",
"elif",
"isinstance",
"(",
"features",
",",
"list",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"features",
"features",
"=",
"None",
"elif",
"features",
"is",
"not",
"None",
"and",
"len",
"(",
"features",
".",
"shape",
")",
"==",
"1",
"and",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"features",
"features",
"=",
"None",
"if",
"len",
"(",
"shap_values",
".",
"shape",
")",
"==",
"1",
":",
"shap_values",
"=",
"np",
".",
"reshape",
"(",
"shap_values",
",",
"(",
"1",
",",
"len",
"(",
"shap_values",
")",
")",
")",
"if",
"out_names",
"is",
"None",
":",
"out_names",
"=",
"[",
"\"output value\"",
"]",
"elif",
"type",
"(",
"out_names",
")",
"==",
"str",
":",
"out_names",
"=",
"[",
"out_names",
"]",
"if",
"shap_values",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"labels",
"[",
"'FEATURE'",
"]",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"[",
"\"\"",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"feature_names",
")",
")",
"]",
"if",
"type",
"(",
"features",
")",
"==",
"np",
".",
"ndarray",
":",
"features",
"=",
"features",
".",
"flatten",
"(",
")",
"# check that the shape of the shap_values and features match",
"if",
"len",
"(",
"features",
")",
"!=",
"shap_values",
".",
"shape",
"[",
"1",
"]",
":",
"msg",
"=",
"\"Length of features is not equal to the length of shap_values!\"",
"if",
"len",
"(",
"features",
")",
"==",
"shap_values",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
":",
"msg",
"+=",
"\" You might be using an old format shap_values array with the base value \"",
"\"as the last column. In this case just pass the array without the last column.\"",
"raise",
"Exception",
"(",
"msg",
")",
"instance",
"=",
"Instance",
"(",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"features",
")",
"e",
"=",
"AdditiveExplanation",
"(",
"base_value",
",",
"np",
".",
"sum",
"(",
"shap_values",
"[",
"0",
",",
":",
"]",
")",
"+",
"base_value",
",",
"shap_values",
"[",
"0",
",",
":",
"]",
",",
"None",
",",
"instance",
",",
"link",
",",
"Model",
"(",
"None",
",",
"out_names",
")",
",",
"DenseData",
"(",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"list",
"(",
"feature_names",
")",
")",
")",
"return",
"visualize",
"(",
"e",
",",
"plot_cmap",
",",
"matplotlib",
",",
"figsize",
"=",
"figsize",
",",
"show",
"=",
"show",
",",
"text_rotation",
"=",
"text_rotation",
")",
"else",
":",
"if",
"matplotlib",
":",
"raise",
"Exception",
"(",
"\"matplotlib = True is not yet supported for force plots with multiple samples!\"",
")",
"if",
"shap_values",
".",
"shape",
"[",
"0",
"]",
">",
"3000",
":",
"warnings",
".",
"warn",
"(",
"\"shap.force_plot is slow for many thousands of rows, try subsampling your data.\"",
")",
"exps",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"labels",
"[",
"'FEATURE'",
"]",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"if",
"features",
"is",
"None",
":",
"display_features",
"=",
"[",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"feature_names",
")",
")",
"]",
"else",
":",
"display_features",
"=",
"features",
"[",
"i",
",",
":",
"]",
"instance",
"=",
"Instance",
"(",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"display_features",
")",
"e",
"=",
"AdditiveExplanation",
"(",
"base_value",
",",
"np",
".",
"sum",
"(",
"shap_values",
"[",
"i",
",",
":",
"]",
")",
"+",
"base_value",
",",
"shap_values",
"[",
"i",
",",
":",
"]",
",",
"None",
",",
"instance",
",",
"link",
",",
"Model",
"(",
"None",
",",
"out_names",
")",
",",
"DenseData",
"(",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"list",
"(",
"feature_names",
")",
")",
")",
"exps",
".",
"append",
"(",
"e",
")",
"return",
"visualize",
"(",
"exps",
",",
"plot_cmap",
"=",
"plot_cmap",
",",
"ordering_keys",
"=",
"ordering_keys",
",",
"ordering_keys_time_format",
"=",
"ordering_keys_time_format",
",",
"text_rotation",
"=",
"text_rotation",
")"
] |
Visualize the given SHAP values with an additive force layout.
Parameters
----------
base_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
features : numpy.array
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
out_names : str
The name of the outout of the model (plural to support multi-output plotting in the future).
link : "identity" or "logit"
The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
into probabilities.
matplotlib : bool
Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
can be helpful in scenarios where rendering Javascript/HTML is inconvenient.
|
[
"Visualize",
"the",
"given",
"SHAP",
"values",
"with",
"an",
"additive",
"force",
"layout",
".",
"Parameters",
"----------",
"base_value",
":",
"float",
"This",
"is",
"the",
"reference",
"value",
"that",
"the",
"feature",
"contributions",
"start",
"from",
".",
"For",
"SHAP",
"values",
"it",
"should",
"be",
"the",
"value",
"of",
"explainer",
".",
"expected_value",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force.py#L27-L171
|
train
|
slundberg/shap
|
shap/plots/force.py
|
save_html
|
def save_html(out_file, plot_html):
""" Save html plots to an output file.
"""
internal_open = False
if type(out_file) == str:
out_file = open(out_file, "w")
internal_open = True
out_file.write("<html><head><script>\n")
# dump the js code
bundle_path = os.path.join(os.path.split(__file__)[0], "resources", "bundle.js")
with io.open(bundle_path, encoding="utf-8") as f:
bundle_data = f.read()
out_file.write(bundle_data)
out_file.write("</script></head><body>\n")
out_file.write(plot_html.data)
out_file.write("</body></html>\n")
if internal_open:
out_file.close()
|
python
|
def save_html(out_file, plot_html):
""" Save html plots to an output file.
"""
internal_open = False
if type(out_file) == str:
out_file = open(out_file, "w")
internal_open = True
out_file.write("<html><head><script>\n")
# dump the js code
bundle_path = os.path.join(os.path.split(__file__)[0], "resources", "bundle.js")
with io.open(bundle_path, encoding="utf-8") as f:
bundle_data = f.read()
out_file.write(bundle_data)
out_file.write("</script></head><body>\n")
out_file.write(plot_html.data)
out_file.write("</body></html>\n")
if internal_open:
out_file.close()
|
[
"def",
"save_html",
"(",
"out_file",
",",
"plot_html",
")",
":",
"internal_open",
"=",
"False",
"if",
"type",
"(",
"out_file",
")",
"==",
"str",
":",
"out_file",
"=",
"open",
"(",
"out_file",
",",
"\"w\"",
")",
"internal_open",
"=",
"True",
"out_file",
".",
"write",
"(",
"\"<html><head><script>\\n\"",
")",
"# dump the js code",
"bundle_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"split",
"(",
"__file__",
")",
"[",
"0",
"]",
",",
"\"resources\"",
",",
"\"bundle.js\"",
")",
"with",
"io",
".",
"open",
"(",
"bundle_path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"bundle_data",
"=",
"f",
".",
"read",
"(",
")",
"out_file",
".",
"write",
"(",
"bundle_data",
")",
"out_file",
".",
"write",
"(",
"\"</script></head><body>\\n\"",
")",
"out_file",
".",
"write",
"(",
"plot_html",
".",
"data",
")",
"out_file",
".",
"write",
"(",
"\"</body></html>\\n\"",
")",
"if",
"internal_open",
":",
"out_file",
".",
"close",
"(",
")"
] |
Save html plots to an output file.
|
[
"Save",
"html",
"plots",
"to",
"an",
"output",
"file",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/force.py#L217-L239
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_tf.py
|
tensors_blocked_by_false
|
def tensors_blocked_by_false(ops):
""" Follows a set of ops assuming their value is False and find blocked Switch paths.
This is used to prune away parts of the model graph that are only used during the training
phase (like dropout, batch norm, etc.).
"""
blocked = []
def recurse(op):
if op.type == "Switch":
blocked.append(op.outputs[1]) # the true path is blocked since we assume the ops we trace are False
else:
for out in op.outputs:
for c in out.consumers():
recurse(c)
for op in ops:
recurse(op)
return blocked
|
python
|
def tensors_blocked_by_false(ops):
""" Follows a set of ops assuming their value is False and find blocked Switch paths.
This is used to prune away parts of the model graph that are only used during the training
phase (like dropout, batch norm, etc.).
"""
blocked = []
def recurse(op):
if op.type == "Switch":
blocked.append(op.outputs[1]) # the true path is blocked since we assume the ops we trace are False
else:
for out in op.outputs:
for c in out.consumers():
recurse(c)
for op in ops:
recurse(op)
return blocked
|
[
"def",
"tensors_blocked_by_false",
"(",
"ops",
")",
":",
"blocked",
"=",
"[",
"]",
"def",
"recurse",
"(",
"op",
")",
":",
"if",
"op",
".",
"type",
"==",
"\"Switch\"",
":",
"blocked",
".",
"append",
"(",
"op",
".",
"outputs",
"[",
"1",
"]",
")",
"# the true path is blocked since we assume the ops we trace are False",
"else",
":",
"for",
"out",
"in",
"op",
".",
"outputs",
":",
"for",
"c",
"in",
"out",
".",
"consumers",
"(",
")",
":",
"recurse",
"(",
"c",
")",
"for",
"op",
"in",
"ops",
":",
"recurse",
"(",
"op",
")",
"return",
"blocked"
] |
Follows a set of ops assuming their value is False and find blocked Switch paths.
This is used to prune away parts of the model graph that are only used during the training
phase (like dropout, batch norm, etc.).
|
[
"Follows",
"a",
"set",
"of",
"ops",
"assuming",
"their",
"value",
"is",
"False",
"and",
"find",
"blocked",
"Switch",
"paths",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L290-L307
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_tf.py
|
softmax
|
def softmax(explainer, op, *grads):
""" Just decompose softmax into its components and recurse, we can handle all of them :)
We assume the 'axis' is the last dimension because the TF codebase swaps the 'axis' to
the last dimension before the softmax op if 'axis' is not already the last dimension.
We also don't subtract the max before tf.exp for numerical stability since that might
mess up the attributions and it seems like TensorFlow doesn't define softmax that way
(according to the docs)
"""
in0 = op.inputs[0]
in0_max = tf.reduce_max(in0, axis=-1, keepdims=True, name="in0_max")
in0_centered = in0 - in0_max
evals = tf.exp(in0_centered, name="custom_exp")
rsum = tf.reduce_sum(evals, axis=-1, keepdims=True)
div = evals / rsum
explainer.between_ops.extend([evals.op, rsum.op, div.op, in0_centered.op]) # mark these as in-between the inputs and outputs
out = tf.gradients(div, in0_centered, grad_ys=grads[0])[0]
del explainer.between_ops[-4:]
# rescale to account for our shift by in0_max (which we did for numerical stability)
xin0,rin0 = tf.split(in0, 2)
xin0_centered,rin0_centered = tf.split(in0_centered, 2)
delta_in0 = xin0 - rin0
dup0 = [2] + [1 for i in delta_in0.shape[1:]]
return tf.where(
tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
out,
out * tf.tile((xin0_centered - rin0_centered) / delta_in0, dup0)
)
|
python
|
def softmax(explainer, op, *grads):
""" Just decompose softmax into its components and recurse, we can handle all of them :)
We assume the 'axis' is the last dimension because the TF codebase swaps the 'axis' to
the last dimension before the softmax op if 'axis' is not already the last dimension.
We also don't subtract the max before tf.exp for numerical stability since that might
mess up the attributions and it seems like TensorFlow doesn't define softmax that way
(according to the docs)
"""
in0 = op.inputs[0]
in0_max = tf.reduce_max(in0, axis=-1, keepdims=True, name="in0_max")
in0_centered = in0 - in0_max
evals = tf.exp(in0_centered, name="custom_exp")
rsum = tf.reduce_sum(evals, axis=-1, keepdims=True)
div = evals / rsum
explainer.between_ops.extend([evals.op, rsum.op, div.op, in0_centered.op]) # mark these as in-between the inputs and outputs
out = tf.gradients(div, in0_centered, grad_ys=grads[0])[0]
del explainer.between_ops[-4:]
# rescale to account for our shift by in0_max (which we did for numerical stability)
xin0,rin0 = tf.split(in0, 2)
xin0_centered,rin0_centered = tf.split(in0_centered, 2)
delta_in0 = xin0 - rin0
dup0 = [2] + [1 for i in delta_in0.shape[1:]]
return tf.where(
tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
out,
out * tf.tile((xin0_centered - rin0_centered) / delta_in0, dup0)
)
|
[
"def",
"softmax",
"(",
"explainer",
",",
"op",
",",
"*",
"grads",
")",
":",
"in0",
"=",
"op",
".",
"inputs",
"[",
"0",
"]",
"in0_max",
"=",
"tf",
".",
"reduce_max",
"(",
"in0",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
",",
"name",
"=",
"\"in0_max\"",
")",
"in0_centered",
"=",
"in0",
"-",
"in0_max",
"evals",
"=",
"tf",
".",
"exp",
"(",
"in0_centered",
",",
"name",
"=",
"\"custom_exp\"",
")",
"rsum",
"=",
"tf",
".",
"reduce_sum",
"(",
"evals",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"div",
"=",
"evals",
"/",
"rsum",
"explainer",
".",
"between_ops",
".",
"extend",
"(",
"[",
"evals",
".",
"op",
",",
"rsum",
".",
"op",
",",
"div",
".",
"op",
",",
"in0_centered",
".",
"op",
"]",
")",
"# mark these as in-between the inputs and outputs",
"out",
"=",
"tf",
".",
"gradients",
"(",
"div",
",",
"in0_centered",
",",
"grad_ys",
"=",
"grads",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"del",
"explainer",
".",
"between_ops",
"[",
"-",
"4",
":",
"]",
"# rescale to account for our shift by in0_max (which we did for numerical stability)",
"xin0",
",",
"rin0",
"=",
"tf",
".",
"split",
"(",
"in0",
",",
"2",
")",
"xin0_centered",
",",
"rin0_centered",
"=",
"tf",
".",
"split",
"(",
"in0_centered",
",",
"2",
")",
"delta_in0",
"=",
"xin0",
"-",
"rin0",
"dup0",
"=",
"[",
"2",
"]",
"+",
"[",
"1",
"for",
"i",
"in",
"delta_in0",
".",
"shape",
"[",
"1",
":",
"]",
"]",
"return",
"tf",
".",
"where",
"(",
"tf",
".",
"tile",
"(",
"tf",
".",
"abs",
"(",
"delta_in0",
")",
",",
"dup0",
")",
"<",
"1e-6",
",",
"out",
",",
"out",
"*",
"tf",
".",
"tile",
"(",
"(",
"xin0_centered",
"-",
"rin0_centered",
")",
"/",
"delta_in0",
",",
"dup0",
")",
")"
] |
Just decompose softmax into its components and recurse, we can handle all of them :)
We assume the 'axis' is the last dimension because the TF codebase swaps the 'axis' to
the last dimension before the softmax op if 'axis' is not already the last dimension.
We also don't subtract the max before tf.exp for numerical stability since that might
mess up the attributions and it seems like TensorFlow doesn't define softmax that way
(according to the docs)
|
[
"Just",
"decompose",
"softmax",
"into",
"its",
"components",
"and",
"recurse",
"we",
"can",
"handle",
"all",
"of",
"them",
":",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L335-L363
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_tf.py
|
TFDeepExplainer._variable_inputs
|
def _variable_inputs(self, op):
""" Return which inputs of this operation are variable (i.e. depend on the model inputs).
"""
if op.name not in self._vinputs:
self._vinputs[op.name] = np.array([t.op in self.between_ops or t in self.model_inputs for t in op.inputs])
return self._vinputs[op.name]
|
python
|
def _variable_inputs(self, op):
""" Return which inputs of this operation are variable (i.e. depend on the model inputs).
"""
if op.name not in self._vinputs:
self._vinputs[op.name] = np.array([t.op in self.between_ops or t in self.model_inputs for t in op.inputs])
return self._vinputs[op.name]
|
[
"def",
"_variable_inputs",
"(",
"self",
",",
"op",
")",
":",
"if",
"op",
".",
"name",
"not",
"in",
"self",
".",
"_vinputs",
":",
"self",
".",
"_vinputs",
"[",
"op",
".",
"name",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"t",
".",
"op",
"in",
"self",
".",
"between_ops",
"or",
"t",
"in",
"self",
".",
"model_inputs",
"for",
"t",
"in",
"op",
".",
"inputs",
"]",
")",
"return",
"self",
".",
"_vinputs",
"[",
"op",
".",
"name",
"]"
] |
Return which inputs of this operation are variable (i.e. depend on the model inputs).
|
[
"Return",
"which",
"inputs",
"of",
"this",
"operation",
"are",
"variable",
"(",
"i",
".",
"e",
".",
"depend",
"on",
"the",
"model",
"inputs",
")",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L171-L176
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_tf.py
|
TFDeepExplainer.phi_symbolic
|
def phi_symbolic(self, i):
""" Get the SHAP value computation graph for a given model output.
"""
if self.phi_symbolics[i] is None:
# replace the gradients for all the non-linear activations
# we do this by hacking our way into the registry (TODO: find a public API for this if it exists)
reg = tf_ops._gradient_registry._registry
for n in op_handlers:
if n in reg:
self.orig_grads[n] = reg[n]["type"]
if op_handlers[n] is not passthrough:
reg[n]["type"] = self.custom_grad
elif n in self.used_types:
raise Exception(n + " was used in the model but is not in the gradient registry!")
# In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped
# unfortunately that includes the index of embedding layers so we disable that check here
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
tf_gradients_impl._IsBackpropagatable = lambda tensor: True
# define the computation graph for the attribution values using custom a gradient-like computation
try:
out = self.model_output[:,i] if self.multi_output else self.model_output
self.phi_symbolics[i] = tf.gradients(out, self.model_inputs)
finally:
# reinstate the backpropagatable check
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable
# restore the original gradient definitions
for n in op_handlers:
if n in reg:
reg[n]["type"] = self.orig_grads[n]
return self.phi_symbolics[i]
|
python
|
def phi_symbolic(self, i):
""" Get the SHAP value computation graph for a given model output.
"""
if self.phi_symbolics[i] is None:
# replace the gradients for all the non-linear activations
# we do this by hacking our way into the registry (TODO: find a public API for this if it exists)
reg = tf_ops._gradient_registry._registry
for n in op_handlers:
if n in reg:
self.orig_grads[n] = reg[n]["type"]
if op_handlers[n] is not passthrough:
reg[n]["type"] = self.custom_grad
elif n in self.used_types:
raise Exception(n + " was used in the model but is not in the gradient registry!")
# In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped
# unfortunately that includes the index of embedding layers so we disable that check here
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
tf_gradients_impl._IsBackpropagatable = lambda tensor: True
# define the computation graph for the attribution values using custom a gradient-like computation
try:
out = self.model_output[:,i] if self.multi_output else self.model_output
self.phi_symbolics[i] = tf.gradients(out, self.model_inputs)
finally:
# reinstate the backpropagatable check
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable
# restore the original gradient definitions
for n in op_handlers:
if n in reg:
reg[n]["type"] = self.orig_grads[n]
return self.phi_symbolics[i]
|
[
"def",
"phi_symbolic",
"(",
"self",
",",
"i",
")",
":",
"if",
"self",
".",
"phi_symbolics",
"[",
"i",
"]",
"is",
"None",
":",
"# replace the gradients for all the non-linear activations",
"# we do this by hacking our way into the registry (TODO: find a public API for this if it exists)",
"reg",
"=",
"tf_ops",
".",
"_gradient_registry",
".",
"_registry",
"for",
"n",
"in",
"op_handlers",
":",
"if",
"n",
"in",
"reg",
":",
"self",
".",
"orig_grads",
"[",
"n",
"]",
"=",
"reg",
"[",
"n",
"]",
"[",
"\"type\"",
"]",
"if",
"op_handlers",
"[",
"n",
"]",
"is",
"not",
"passthrough",
":",
"reg",
"[",
"n",
"]",
"[",
"\"type\"",
"]",
"=",
"self",
".",
"custom_grad",
"elif",
"n",
"in",
"self",
".",
"used_types",
":",
"raise",
"Exception",
"(",
"n",
"+",
"\" was used in the model but is not in the gradient registry!\"",
")",
"# In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped",
"# unfortunately that includes the index of embedding layers so we disable that check here",
"if",
"hasattr",
"(",
"tf_gradients_impl",
",",
"\"_IsBackpropagatable\"",
")",
":",
"orig_IsBackpropagatable",
"=",
"tf_gradients_impl",
".",
"_IsBackpropagatable",
"tf_gradients_impl",
".",
"_IsBackpropagatable",
"=",
"lambda",
"tensor",
":",
"True",
"# define the computation graph for the attribution values using custom a gradient-like computation",
"try",
":",
"out",
"=",
"self",
".",
"model_output",
"[",
":",
",",
"i",
"]",
"if",
"self",
".",
"multi_output",
"else",
"self",
".",
"model_output",
"self",
".",
"phi_symbolics",
"[",
"i",
"]",
"=",
"tf",
".",
"gradients",
"(",
"out",
",",
"self",
".",
"model_inputs",
")",
"finally",
":",
"# reinstate the backpropagatable check",
"if",
"hasattr",
"(",
"tf_gradients_impl",
",",
"\"_IsBackpropagatable\"",
")",
":",
"tf_gradients_impl",
".",
"_IsBackpropagatable",
"=",
"orig_IsBackpropagatable",
"# restore the original gradient definitions",
"for",
"n",
"in",
"op_handlers",
":",
"if",
"n",
"in",
"reg",
":",
"reg",
"[",
"n",
"]",
"[",
"\"type\"",
"]",
"=",
"self",
".",
"orig_grads",
"[",
"n",
"]",
"return",
"self",
".",
"phi_symbolics",
"[",
"i",
"]"
] |
Get the SHAP value computation graph for a given model output.
|
[
"Get",
"the",
"SHAP",
"value",
"computation",
"graph",
"for",
"a",
"given",
"model",
"output",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L178-L214
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_tf.py
|
TFDeepExplainer.run
|
def run(self, out, model_inputs, X):
""" Runs the model while also setting the learning phase flags to False.
"""
feed_dict = dict(zip(model_inputs, X))
for t in self.learning_phase_flags:
feed_dict[t] = False
return self.session.run(out, feed_dict)
|
python
|
def run(self, out, model_inputs, X):
""" Runs the model while also setting the learning phase flags to False.
"""
feed_dict = dict(zip(model_inputs, X))
for t in self.learning_phase_flags:
feed_dict[t] = False
return self.session.run(out, feed_dict)
|
[
"def",
"run",
"(",
"self",
",",
"out",
",",
"model_inputs",
",",
"X",
")",
":",
"feed_dict",
"=",
"dict",
"(",
"zip",
"(",
"model_inputs",
",",
"X",
")",
")",
"for",
"t",
"in",
"self",
".",
"learning_phase_flags",
":",
"feed_dict",
"[",
"t",
"]",
"=",
"False",
"return",
"self",
".",
"session",
".",
"run",
"(",
"out",
",",
"feed_dict",
")"
] |
Runs the model while also setting the learning phase flags to False.
|
[
"Runs",
"the",
"model",
"while",
"also",
"setting",
"the",
"learning",
"phase",
"flags",
"to",
"False",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L276-L282
|
train
|
slundberg/shap
|
shap/explainers/deep/deep_tf.py
|
TFDeepExplainer.custom_grad
|
def custom_grad(self, op, *grads):
""" Passes a gradient op creation request to the correct handler.
"""
return op_handlers[op.type](self, op, *grads)
|
python
|
def custom_grad(self, op, *grads):
""" Passes a gradient op creation request to the correct handler.
"""
return op_handlers[op.type](self, op, *grads)
|
[
"def",
"custom_grad",
"(",
"self",
",",
"op",
",",
"*",
"grads",
")",
":",
"return",
"op_handlers",
"[",
"op",
".",
"type",
"]",
"(",
"self",
",",
"op",
",",
"*",
"grads",
")"
] |
Passes a gradient op creation request to the correct handler.
|
[
"Passes",
"a",
"gradient",
"op",
"creation",
"request",
"to",
"the",
"correct",
"handler",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_tf.py#L284-L287
|
train
|
slundberg/shap
|
shap/benchmark/experiments.py
|
run_remote_experiments
|
def run_remote_experiments(experiments, thread_hosts, rate_limit=10):
""" Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
rate_limit : int
How many ssh connections we make per minute to each host (to avoid throttling issues).
"""
global ssh_conn_per_min_limit
ssh_conn_per_min_limit = rate_limit
# first we kill any remaining workers from previous runs
# note we don't check_call because pkill kills our ssh call as well
thread_hosts = copy.copy(thread_hosts)
random.shuffle(thread_hosts)
for host in set(thread_hosts):
hostname,_ = host.split(":")
try:
subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
except subprocess.TimeoutExpired:
print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
return
experiments = copy.copy(list(experiments))
random.shuffle(experiments) # this way all the hard experiments don't get put on one machine
global nexperiments, total_sent, total_done, total_failed, host_records
nexperiments = len(experiments)
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
q = Queue()
for host in thread_hosts:
worker = Thread(target=__thread_worker, args=(q, host))
worker.setDaemon(True)
worker.start()
for experiment in experiments:
q.put(experiment)
q.join()
|
python
|
def run_remote_experiments(experiments, thread_hosts, rate_limit=10):
""" Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
rate_limit : int
How many ssh connections we make per minute to each host (to avoid throttling issues).
"""
global ssh_conn_per_min_limit
ssh_conn_per_min_limit = rate_limit
# first we kill any remaining workers from previous runs
# note we don't check_call because pkill kills our ssh call as well
thread_hosts = copy.copy(thread_hosts)
random.shuffle(thread_hosts)
for host in set(thread_hosts):
hostname,_ = host.split(":")
try:
subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
except subprocess.TimeoutExpired:
print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
return
experiments = copy.copy(list(experiments))
random.shuffle(experiments) # this way all the hard experiments don't get put on one machine
global nexperiments, total_sent, total_done, total_failed, host_records
nexperiments = len(experiments)
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
q = Queue()
for host in thread_hosts:
worker = Thread(target=__thread_worker, args=(q, host))
worker.setDaemon(True)
worker.start()
for experiment in experiments:
q.put(experiment)
q.join()
|
[
"def",
"run_remote_experiments",
"(",
"experiments",
",",
"thread_hosts",
",",
"rate_limit",
"=",
"10",
")",
":",
"global",
"ssh_conn_per_min_limit",
"ssh_conn_per_min_limit",
"=",
"rate_limit",
"# first we kill any remaining workers from previous runs",
"# note we don't check_call because pkill kills our ssh call as well",
"thread_hosts",
"=",
"copy",
".",
"copy",
"(",
"thread_hosts",
")",
"random",
".",
"shuffle",
"(",
"thread_hosts",
")",
"for",
"host",
"in",
"set",
"(",
"thread_hosts",
")",
":",
"hostname",
",",
"_",
"=",
"host",
".",
"split",
"(",
"\":\"",
")",
"try",
":",
"subprocess",
".",
"run",
"(",
"[",
"\"ssh\"",
",",
"hostname",
",",
"\"pkill -f shap.benchmark.run_experiment\"",
"]",
",",
"timeout",
"=",
"15",
")",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"print",
"(",
"\"Failed to connect to\"",
",",
"hostname",
",",
"\"after 15 seconds! Exiting.\"",
")",
"return",
"experiments",
"=",
"copy",
".",
"copy",
"(",
"list",
"(",
"experiments",
")",
")",
"random",
".",
"shuffle",
"(",
"experiments",
")",
"# this way all the hard experiments don't get put on one machine",
"global",
"nexperiments",
",",
"total_sent",
",",
"total_done",
",",
"total_failed",
",",
"host_records",
"nexperiments",
"=",
"len",
"(",
"experiments",
")",
"total_sent",
"=",
"0",
"total_done",
"=",
"0",
"total_failed",
"=",
"0",
"host_records",
"=",
"{",
"}",
"q",
"=",
"Queue",
"(",
")",
"for",
"host",
"in",
"thread_hosts",
":",
"worker",
"=",
"Thread",
"(",
"target",
"=",
"__thread_worker",
",",
"args",
"=",
"(",
"q",
",",
"host",
")",
")",
"worker",
".",
"setDaemon",
"(",
"True",
")",
"worker",
".",
"start",
"(",
")",
"for",
"experiment",
"in",
"experiments",
":",
"q",
".",
"put",
"(",
"experiment",
")",
"q",
".",
"join",
"(",
")"
] |
Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
rate_limit : int
How many ssh connections we make per minute to each host (to avoid throttling issues).
|
[
"Use",
"ssh",
"to",
"run",
"the",
"experiments",
"on",
"remote",
"machines",
"in",
"parallel",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/experiments.py#L322-L372
|
train
|
slundberg/shap
|
shap/plots/monitoring.py
|
monitoring_plot
|
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show()
|
python
|
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show()
|
[
"def",
"monitoring_plot",
"(",
"ind",
",",
"shap_values",
",",
"features",
",",
"feature_names",
"=",
"None",
")",
":",
"if",
"str",
"(",
"type",
"(",
"features",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"features",
".",
"columns",
"features",
"=",
"features",
".",
"values",
"pl",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"3",
")",
")",
"ys",
"=",
"shap_values",
"[",
":",
",",
"ind",
"]",
"xs",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"ys",
")",
")",
"#np.linspace(0, 12*2, len(ys))",
"pvals",
"=",
"[",
"]",
"inc",
"=",
"50",
"for",
"i",
"in",
"range",
"(",
"inc",
",",
"len",
"(",
"ys",
")",
"-",
"inc",
",",
"inc",
")",
":",
"#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative=\"two-sided\")",
"stat",
",",
"pval",
"=",
"scipy",
".",
"stats",
".",
"ttest_ind",
"(",
"ys",
"[",
":",
"i",
"]",
",",
"ys",
"[",
"i",
":",
"]",
")",
"pvals",
".",
"append",
"(",
"pval",
")",
"min_pval",
"=",
"np",
".",
"min",
"(",
"pvals",
")",
"min_pval_ind",
"=",
"np",
".",
"argmin",
"(",
"pvals",
")",
"*",
"inc",
"+",
"inc",
"if",
"min_pval",
"<",
"0.05",
"/",
"shap_values",
".",
"shape",
"[",
"1",
"]",
":",
"pl",
".",
"axvline",
"(",
"min_pval_ind",
",",
"linestyle",
"=",
"\"dashed\"",
",",
"color",
"=",
"\"#666666\"",
",",
"alpha",
"=",
"0.2",
")",
"pl",
".",
"scatter",
"(",
"xs",
",",
"ys",
",",
"s",
"=",
"10",
",",
"c",
"=",
"features",
"[",
":",
",",
"ind",
"]",
",",
"cmap",
"=",
"colors",
".",
"red_blue",
")",
"pl",
".",
"xlabel",
"(",
"\"Sample index\"",
")",
"pl",
".",
"ylabel",
"(",
"truncate_text",
"(",
"feature_names",
"[",
"ind",
"]",
",",
"30",
")",
"+",
"\"\\nSHAP value\"",
",",
"size",
"=",
"13",
")",
"pl",
".",
"gca",
"(",
")",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"pl",
".",
"gca",
"(",
")",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'left'",
")",
"pl",
".",
"gca",
"(",
")",
".",
"spines",
"[",
"'right'",
"]",
".",
"set_visible",
"(",
"False",
")",
"pl",
".",
"gca",
"(",
")",
".",
"spines",
"[",
"'top'",
"]",
".",
"set_visible",
"(",
"False",
")",
"cb",
"=",
"pl",
".",
"colorbar",
"(",
")",
"cb",
".",
"outline",
".",
"set_visible",
"(",
"False",
")",
"bbox",
"=",
"cb",
".",
"ax",
".",
"get_window_extent",
"(",
")",
".",
"transformed",
"(",
"pl",
".",
"gcf",
"(",
")",
".",
"dpi_scale_trans",
".",
"inverted",
"(",
")",
")",
"cb",
".",
"ax",
".",
"set_aspect",
"(",
"(",
"bbox",
".",
"height",
"-",
"0.7",
")",
"*",
"20",
")",
"cb",
".",
"set_label",
"(",
"truncate_text",
"(",
"feature_names",
"[",
"ind",
"]",
",",
"30",
")",
",",
"size",
"=",
"13",
")",
"pl",
".",
"show",
"(",
")"
] |
Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
|
[
"Create",
"a",
"SHAP",
"monitoring",
"plot",
".",
"(",
"Note",
"this",
"function",
"is",
"preliminary",
"and",
"subject",
"to",
"change!!",
")",
"A",
"SHAP",
"monitoring",
"plot",
"is",
"meant",
"to",
"display",
"the",
"behavior",
"of",
"a",
"model",
"over",
"time",
".",
"Often",
"the",
"shap_values",
"given",
"to",
"this",
"plot",
"explain",
"the",
"loss",
"of",
"a",
"model",
"so",
"changes",
"in",
"a",
"feature",
"s",
"impact",
"on",
"the",
"model",
"s",
"loss",
"over",
"time",
"can",
"help",
"in",
"monitoring",
"the",
"model",
"s",
"performance",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/monitoring.py#L20-L78
|
train
|
slundberg/shap
|
shap/explainers/kernel.py
|
kmeans
|
def kmeans(X, k, round_values=True):
""" Summarize a dataset with k mean samples weighted by the number of data points they
each represent.
Parameters
----------
X : numpy.array or pandas.DataFrame
Matrix of data samples to summarize (# samples x # features)
k : int
Number of means to use for approximation.
round_values : bool
For all i, round the ith dimension of each mean sample to match the nearest value
from X[:,i]. This ensures discrete features always get a valid value.
Returns
-------
DenseData object.
"""
group_names = [str(i) for i in range(X.shape[1])]
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
group_names = X.columns
X = X.values
kmeans = KMeans(n_clusters=k, random_state=0).fit(X)
if round_values:
for i in range(k):
for j in range(X.shape[1]):
ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j]))
kmeans.cluster_centers_[i,j] = X[ind,j]
return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))
|
python
|
def kmeans(X, k, round_values=True):
""" Summarize a dataset with k mean samples weighted by the number of data points they
each represent.
Parameters
----------
X : numpy.array or pandas.DataFrame
Matrix of data samples to summarize (# samples x # features)
k : int
Number of means to use for approximation.
round_values : bool
For all i, round the ith dimension of each mean sample to match the nearest value
from X[:,i]. This ensures discrete features always get a valid value.
Returns
-------
DenseData object.
"""
group_names = [str(i) for i in range(X.shape[1])]
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
group_names = X.columns
X = X.values
kmeans = KMeans(n_clusters=k, random_state=0).fit(X)
if round_values:
for i in range(k):
for j in range(X.shape[1]):
ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j]))
kmeans.cluster_centers_[i,j] = X[ind,j]
return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))
|
[
"def",
"kmeans",
"(",
"X",
",",
"k",
",",
"round_values",
"=",
"True",
")",
":",
"group_names",
"=",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"group_names",
"=",
"X",
".",
"columns",
"X",
"=",
"X",
".",
"values",
"kmeans",
"=",
"KMeans",
"(",
"n_clusters",
"=",
"k",
",",
"random_state",
"=",
"0",
")",
".",
"fit",
"(",
"X",
")",
"if",
"round_values",
":",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"for",
"j",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
":",
"ind",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"X",
"[",
":",
",",
"j",
"]",
"-",
"kmeans",
".",
"cluster_centers_",
"[",
"i",
",",
"j",
"]",
")",
")",
"kmeans",
".",
"cluster_centers_",
"[",
"i",
",",
"j",
"]",
"=",
"X",
"[",
"ind",
",",
"j",
"]",
"return",
"DenseData",
"(",
"kmeans",
".",
"cluster_centers_",
",",
"group_names",
",",
"None",
",",
"1.0",
"*",
"np",
".",
"bincount",
"(",
"kmeans",
".",
"labels_",
")",
")"
] |
Summarize a dataset with k mean samples weighted by the number of data points they
each represent.
Parameters
----------
X : numpy.array or pandas.DataFrame
Matrix of data samples to summarize (# samples x # features)
k : int
Number of means to use for approximation.
round_values : bool
For all i, round the ith dimension of each mean sample to match the nearest value
from X[:,i]. This ensures discrete features always get a valid value.
Returns
-------
DenseData object.
|
[
"Summarize",
"a",
"dataset",
"with",
"k",
"mean",
"samples",
"weighted",
"by",
"the",
"number",
"of",
"data",
"points",
"they",
"each",
"represent",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/kernel.py#L18-L50
|
train
|
slundberg/shap
|
shap/explainers/kernel.py
|
KernelExplainer.shap_values
|
def shap_values(self, X, **kwargs):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
A matrix of samples (# samples x # features) on which to explain the model's output.
nsamples : "auto" or int
Number of times to re-evaluate the model when explaining each prediction. More samples
lead to lower variance estimates of the SHAP values. The "auto" setting uses
`nsamples = 2 * X.shape[1] + 2048`.
l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float
The l1 regularization to use for feature selection (the estimation procedure is based on
a debiased lasso). The auto option currently uses "aic" when less that 20% of the possible sample
space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto" WILL CHANGE
in a future version to be based on num_features instead of AIC.
The "aic" and "bic" options use the AIC and BIC rules for regularization.
Using "num_features(int)" selects a fix number of top features. Passing a float directly sets the
"alpha" parameter of the sklearn.linear_model.Lasso model used for feature selection.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer). For models with vector outputs this returns a list
of such matrices, one for each output.
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if self.keep_index:
index_value = X.index.values
index_name = X.index.name
column_name = list(X.columns)
X = X.values
x_type = str(type(X))
arr_type = "'numpy.ndarray'>"
# if sparse, convert to lil for performance
if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):
X = X.tolil()
assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), "Unknown instance type: " + x_type
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
# single instance
if len(X.shape) == 1:
data = X.reshape((1, X.shape[0]))
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_name, index_value)
explanation = self.explain(data, **kwargs)
# vector-output
s = explanation.shape
if len(s) == 2:
outs = [np.zeros(s[0]) for j in range(s[1])]
for j in range(s[1]):
outs[j] = explanation[:, j]
return outs
# single-output
else:
out = np.zeros(s[0])
out[:] = explanation
return out
# explain the whole dataset
elif len(X.shape) == 2:
explanations = []
for i in tqdm(range(X.shape[0]), disable=kwargs.get("silent", False)):
data = X[i:i + 1, :]
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)
explanations.append(self.explain(data, **kwargs))
# vector-output
s = explanations[0].shape
if len(s) == 2:
outs = [np.zeros((X.shape[0], s[0])) for j in range(s[1])]
for i in range(X.shape[0]):
for j in range(s[1]):
outs[j][i] = explanations[i][:, j]
return outs
# single-output
else:
out = np.zeros((X.shape[0], s[0]))
for i in range(X.shape[0]):
out[i] = explanations[i]
return out
|
python
|
def shap_values(self, X, **kwargs):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
A matrix of samples (# samples x # features) on which to explain the model's output.
nsamples : "auto" or int
Number of times to re-evaluate the model when explaining each prediction. More samples
lead to lower variance estimates of the SHAP values. The "auto" setting uses
`nsamples = 2 * X.shape[1] + 2048`.
l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float
The l1 regularization to use for feature selection (the estimation procedure is based on
a debiased lasso). The auto option currently uses "aic" when less that 20% of the possible sample
space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto" WILL CHANGE
in a future version to be based on num_features instead of AIC.
The "aic" and "bic" options use the AIC and BIC rules for regularization.
Using "num_features(int)" selects a fix number of top features. Passing a float directly sets the
"alpha" parameter of the sklearn.linear_model.Lasso model used for feature selection.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer). For models with vector outputs this returns a list
of such matrices, one for each output.
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if self.keep_index:
index_value = X.index.values
index_name = X.index.name
column_name = list(X.columns)
X = X.values
x_type = str(type(X))
arr_type = "'numpy.ndarray'>"
# if sparse, convert to lil for performance
if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):
X = X.tolil()
assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), "Unknown instance type: " + x_type
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
# single instance
if len(X.shape) == 1:
data = X.reshape((1, X.shape[0]))
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_name, index_value)
explanation = self.explain(data, **kwargs)
# vector-output
s = explanation.shape
if len(s) == 2:
outs = [np.zeros(s[0]) for j in range(s[1])]
for j in range(s[1]):
outs[j] = explanation[:, j]
return outs
# single-output
else:
out = np.zeros(s[0])
out[:] = explanation
return out
# explain the whole dataset
elif len(X.shape) == 2:
explanations = []
for i in tqdm(range(X.shape[0]), disable=kwargs.get("silent", False)):
data = X[i:i + 1, :]
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)
explanations.append(self.explain(data, **kwargs))
# vector-output
s = explanations[0].shape
if len(s) == 2:
outs = [np.zeros((X.shape[0], s[0])) for j in range(s[1])]
for i in range(X.shape[0]):
for j in range(s[1]):
outs[j][i] = explanations[i][:, j]
return outs
# single-output
else:
out = np.zeros((X.shape[0], s[0]))
for i in range(X.shape[0]):
out[i] = explanations[i]
return out
|
[
"def",
"shap_values",
"(",
"self",
",",
"X",
",",
"*",
"*",
"kwargs",
")",
":",
"# convert dataframes",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.series.Series'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"if",
"self",
".",
"keep_index",
":",
"index_value",
"=",
"X",
".",
"index",
".",
"values",
"index_name",
"=",
"X",
".",
"index",
".",
"name",
"column_name",
"=",
"list",
"(",
"X",
".",
"columns",
")",
"X",
"=",
"X",
".",
"values",
"x_type",
"=",
"str",
"(",
"type",
"(",
"X",
")",
")",
"arr_type",
"=",
"\"'numpy.ndarray'>\"",
"# if sparse, convert to lil for performance",
"if",
"sp",
".",
"sparse",
".",
"issparse",
"(",
"X",
")",
"and",
"not",
"sp",
".",
"sparse",
".",
"isspmatrix_lil",
"(",
"X",
")",
":",
"X",
"=",
"X",
".",
"tolil",
"(",
")",
"assert",
"x_type",
".",
"endswith",
"(",
"arr_type",
")",
"or",
"sp",
".",
"sparse",
".",
"isspmatrix_lil",
"(",
"X",
")",
",",
"\"Unknown instance type: \"",
"+",
"x_type",
"assert",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
"or",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
",",
"\"Instance must have 1 or 2 dimensions!\"",
"# single instance",
"if",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
":",
"data",
"=",
"X",
".",
"reshape",
"(",
"(",
"1",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
")",
"if",
"self",
".",
"keep_index",
":",
"data",
"=",
"convert_to_instance_with_index",
"(",
"data",
",",
"column_name",
",",
"index_name",
",",
"index_value",
")",
"explanation",
"=",
"self",
".",
"explain",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
"# vector-output",
"s",
"=",
"explanation",
".",
"shape",
"if",
"len",
"(",
"s",
")",
"==",
"2",
":",
"outs",
"=",
"[",
"np",
".",
"zeros",
"(",
"s",
"[",
"0",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"s",
"[",
"1",
"]",
")",
"]",
"for",
"j",
"in",
"range",
"(",
"s",
"[",
"1",
"]",
")",
":",
"outs",
"[",
"j",
"]",
"=",
"explanation",
"[",
":",
",",
"j",
"]",
"return",
"outs",
"# single-output",
"else",
":",
"out",
"=",
"np",
".",
"zeros",
"(",
"s",
"[",
"0",
"]",
")",
"out",
"[",
":",
"]",
"=",
"explanation",
"return",
"out",
"# explain the whole dataset",
"elif",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
":",
"explanations",
"=",
"[",
"]",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
",",
"disable",
"=",
"kwargs",
".",
"get",
"(",
"\"silent\"",
",",
"False",
")",
")",
":",
"data",
"=",
"X",
"[",
"i",
":",
"i",
"+",
"1",
",",
":",
"]",
"if",
"self",
".",
"keep_index",
":",
"data",
"=",
"convert_to_instance_with_index",
"(",
"data",
",",
"column_name",
",",
"index_value",
"[",
"i",
":",
"i",
"+",
"1",
"]",
",",
"index_name",
")",
"explanations",
".",
"append",
"(",
"self",
".",
"explain",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
")",
"# vector-output",
"s",
"=",
"explanations",
"[",
"0",
"]",
".",
"shape",
"if",
"len",
"(",
"s",
")",
"==",
"2",
":",
"outs",
"=",
"[",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"s",
"[",
"0",
"]",
")",
")",
"for",
"j",
"in",
"range",
"(",
"s",
"[",
"1",
"]",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"s",
"[",
"1",
"]",
")",
":",
"outs",
"[",
"j",
"]",
"[",
"i",
"]",
"=",
"explanations",
"[",
"i",
"]",
"[",
":",
",",
"j",
"]",
"return",
"outs",
"# single-output",
"else",
":",
"out",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"s",
"[",
"0",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
":",
"out",
"[",
"i",
"]",
"=",
"explanations",
"[",
"i",
"]",
"return",
"out"
] |
Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
A matrix of samples (# samples x # features) on which to explain the model's output.
nsamples : "auto" or int
Number of times to re-evaluate the model when explaining each prediction. More samples
lead to lower variance estimates of the SHAP values. The "auto" setting uses
`nsamples = 2 * X.shape[1] + 2048`.
l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float
The l1 regularization to use for feature selection (the estimation procedure is based on
a debiased lasso). The auto option currently uses "aic" when less that 20% of the possible sample
space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto" WILL CHANGE
in a future version to be based on num_features instead of AIC.
The "aic" and "bic" options use the AIC and BIC rules for regularization.
Using "num_features(int)" selects a fix number of top features. Passing a float directly sets the
"alpha" parameter of the sklearn.linear_model.Lasso model used for feature selection.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer). For models with vector outputs this returns a list
of such matrices, one for each output.
|
[
"Estimate",
"the",
"SHAP",
"values",
"for",
"a",
"set",
"of",
"samples",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/kernel.py#L132-L225
|
train
|
slundberg/shap
|
shap/plots/embedding.py
|
embedding_plot
|
def embedding_plot(ind, shap_values, feature_names=None, method="pca", alpha=1.0, show=True):
""" Use the SHAP values as an embedding which we project to 2D for visualization.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to use to color the embedding.
If this is a string it is either the name of the feature, or it can have the
form "rank(int)" to specify the feature with that rank (ordered by mean absolute
SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values,
which is the model's output (minus it's expected value).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
feature_names : None or list
The names of the features in the shap_values array.
method : "pca" or numpy.array
How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D
PCA projection of shap_values is used. If a numpy array then is should be
(# samples x 2) and represent the embedding of that values.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
"""
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
ind = convert_name(ind, shap_values, feature_names)
if ind == "sum()":
cvals = shap_values.sum(1)
fname = "sum(SHAP values)"
else:
cvals = shap_values[:,ind]
fname = feature_names[ind]
# see if we need to compute the embedding
if type(method) == str and method == "pca":
pca = sklearn.decomposition.PCA(2)
embedding_values = pca.fit_transform(shap_values)
elif hasattr(method, "shape") and method.shape[1] == 2:
embedding_values = method
else:
print("Unsupported embedding method:", method)
pl.scatter(
embedding_values[:,0], embedding_values[:,1], c=cvals,
cmap=colors.red_blue, alpha=alpha, linewidth=0
)
pl.axis("off")
#pl.title(feature_names[ind])
cb = pl.colorbar()
cb.set_label("SHAP value for\n"+fname, size=13)
cb.outline.set_visible(False)
pl.gcf().set_size_inches(7.5, 5)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 10)
cb.set_alpha(1)
if show:
pl.show()
|
python
|
def embedding_plot(ind, shap_values, feature_names=None, method="pca", alpha=1.0, show=True):
""" Use the SHAP values as an embedding which we project to 2D for visualization.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to use to color the embedding.
If this is a string it is either the name of the feature, or it can have the
form "rank(int)" to specify the feature with that rank (ordered by mean absolute
SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values,
which is the model's output (minus it's expected value).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
feature_names : None or list
The names of the features in the shap_values array.
method : "pca" or numpy.array
How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D
PCA projection of shap_values is used. If a numpy array then is should be
(# samples x 2) and represent the embedding of that values.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
"""
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
ind = convert_name(ind, shap_values, feature_names)
if ind == "sum()":
cvals = shap_values.sum(1)
fname = "sum(SHAP values)"
else:
cvals = shap_values[:,ind]
fname = feature_names[ind]
# see if we need to compute the embedding
if type(method) == str and method == "pca":
pca = sklearn.decomposition.PCA(2)
embedding_values = pca.fit_transform(shap_values)
elif hasattr(method, "shape") and method.shape[1] == 2:
embedding_values = method
else:
print("Unsupported embedding method:", method)
pl.scatter(
embedding_values[:,0], embedding_values[:,1], c=cvals,
cmap=colors.red_blue, alpha=alpha, linewidth=0
)
pl.axis("off")
#pl.title(feature_names[ind])
cb = pl.colorbar()
cb.set_label("SHAP value for\n"+fname, size=13)
cb.outline.set_visible(False)
pl.gcf().set_size_inches(7.5, 5)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 10)
cb.set_alpha(1)
if show:
pl.show()
|
[
"def",
"embedding_plot",
"(",
"ind",
",",
"shap_values",
",",
"feature_names",
"=",
"None",
",",
"method",
"=",
"\"pca\"",
",",
"alpha",
"=",
"1.0",
",",
"show",
"=",
"True",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"labels",
"[",
"'FEATURE'",
"]",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"ind",
"=",
"convert_name",
"(",
"ind",
",",
"shap_values",
",",
"feature_names",
")",
"if",
"ind",
"==",
"\"sum()\"",
":",
"cvals",
"=",
"shap_values",
".",
"sum",
"(",
"1",
")",
"fname",
"=",
"\"sum(SHAP values)\"",
"else",
":",
"cvals",
"=",
"shap_values",
"[",
":",
",",
"ind",
"]",
"fname",
"=",
"feature_names",
"[",
"ind",
"]",
"# see if we need to compute the embedding",
"if",
"type",
"(",
"method",
")",
"==",
"str",
"and",
"method",
"==",
"\"pca\"",
":",
"pca",
"=",
"sklearn",
".",
"decomposition",
".",
"PCA",
"(",
"2",
")",
"embedding_values",
"=",
"pca",
".",
"fit_transform",
"(",
"shap_values",
")",
"elif",
"hasattr",
"(",
"method",
",",
"\"shape\"",
")",
"and",
"method",
".",
"shape",
"[",
"1",
"]",
"==",
"2",
":",
"embedding_values",
"=",
"method",
"else",
":",
"print",
"(",
"\"Unsupported embedding method:\"",
",",
"method",
")",
"pl",
".",
"scatter",
"(",
"embedding_values",
"[",
":",
",",
"0",
"]",
",",
"embedding_values",
"[",
":",
",",
"1",
"]",
",",
"c",
"=",
"cvals",
",",
"cmap",
"=",
"colors",
".",
"red_blue",
",",
"alpha",
"=",
"alpha",
",",
"linewidth",
"=",
"0",
")",
"pl",
".",
"axis",
"(",
"\"off\"",
")",
"#pl.title(feature_names[ind])",
"cb",
"=",
"pl",
".",
"colorbar",
"(",
")",
"cb",
".",
"set_label",
"(",
"\"SHAP value for\\n\"",
"+",
"fname",
",",
"size",
"=",
"13",
")",
"cb",
".",
"outline",
".",
"set_visible",
"(",
"False",
")",
"pl",
".",
"gcf",
"(",
")",
".",
"set_size_inches",
"(",
"7.5",
",",
"5",
")",
"bbox",
"=",
"cb",
".",
"ax",
".",
"get_window_extent",
"(",
")",
".",
"transformed",
"(",
"pl",
".",
"gcf",
"(",
")",
".",
"dpi_scale_trans",
".",
"inverted",
"(",
")",
")",
"cb",
".",
"ax",
".",
"set_aspect",
"(",
"(",
"bbox",
".",
"height",
"-",
"0.7",
")",
"*",
"10",
")",
"cb",
".",
"set_alpha",
"(",
"1",
")",
"if",
"show",
":",
"pl",
".",
"show",
"(",
")"
] |
Use the SHAP values as an embedding which we project to 2D for visualization.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to use to color the embedding.
If this is a string it is either the name of the feature, or it can have the
form "rank(int)" to specify the feature with that rank (ordered by mean absolute
SHAP value over all the samples), or "sum()" to mean the sum of all the SHAP values,
which is the model's output (minus it's expected value).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
feature_names : None or list
The names of the features in the shap_values array.
method : "pca" or numpy.array
How to reduce the dimensions of the shap_values to 2D. If "pca" then the 2D
PCA projection of shap_values is used. If a numpy array then is should be
(# samples x 2) and represent the embedding of that values.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
|
[
"Use",
"the",
"SHAP",
"values",
"as",
"an",
"embedding",
"which",
"we",
"project",
"to",
"2D",
"for",
"visualization",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/embedding.py#L14-L78
|
train
|
slundberg/shap
|
shap/plots/dependence.py
|
dependence_plot
|
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
interaction_index="auto",
color="#1E88E5", axis_color="#333333", cmap=colors.red_blue,
dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, show=True):
""" Create a SHAP dependence plot, colored by an interaction feature.
Plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extenstion of the classical parital dependence plots. Vertical dispersion of the
data points represents interaction effects. Grey ticks along the y-axis are data
points where the feature's value was NaN.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to plot. If this is a string it is
either the name of the feature to plot, or it can have the form "rank(int)" to specify
the feature with that rank (ordered by mean absolute SHAP value over all the samples).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features).
feature_names : list
Names of the features (length # features).
display_features : numpy.array or pandas.DataFrame
Matrix of feature values for visual display (such as strings instead of coded values).
interaction_index : "auto", None, int, or string
The index of the feature used to color the plot. The name of a feature can also be passed
as a string. If "auto" then shap.common.approximate_interactions is used to pick what
seems to be the strongest interaction (note that to find to true stongest interaction you
need to compute the SHAP interaction values).
x_jitter : float (0 - 1)
Adds random jitter to feature values. May increase plot readability when feature
is discrete.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
xmin : float or string
Represents the lower bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
xmax : float or string
Represents the upper bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
"""
# convert from DataFrames if we got any
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
if str(type(display_features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = display_features.columns
display_features = display_features.values
elif display_features is None:
display_features = features
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
# allow vectors to be passed
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, len(shap_values), 1)
if len(features.shape) == 1:
features = np.reshape(features, len(features), 1)
ind = convert_name(ind, shap_values, feature_names)
# plotting SHAP interaction values
if len(shap_values.shape) == 3 and len(ind) == 2:
ind1 = convert_name(ind[0], shap_values, feature_names)
ind2 = convert_name(ind[1], shap_values, feature_names)
if ind1 == ind2:
proj_shap_values = shap_values[:, ind2, :]
else:
proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half
# TODO: remove recursion; generally the functions should be shorter for more maintainable code
dependence_plot(
ind1, proj_shap_values, features, feature_names=feature_names,
interaction_index=ind2, display_features=display_features, show=False,
xmin=xmin, xmax=xmax
)
if ind1 == ind2:
pl.ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])
else:
pl.ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))
if show:
pl.show()
return
assert shap_values.shape[0] == features.shape[0], \
"'shap_values' and 'features' values must have the same number of rows!"
assert shap_values.shape[1] == features.shape[1], \
"'shap_values' must have the same number of columns as 'features'!"
# get both the raw and display feature values
oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering
np.random.shuffle(oinds)
xv = features[oinds, ind].astype(np.float64)
xd = display_features[oinds, ind]
s = shap_values[oinds, ind]
if type(xd[0]) == str:
name_map = {}
for i in range(len(xv)):
name_map[xd[i]] = xv[i]
xnames = list(name_map.keys())
# allow a single feature name to be passed alone
if type(feature_names) == str:
feature_names = [feature_names]
name = feature_names[ind]
# guess what other feature as the stongest interaction with the plotted feature
if interaction_index == "auto":
interaction_index = approximate_interactions(ind, shap_values, features)[0]
interaction_index = convert_name(interaction_index, shap_values, feature_names)
categorical_interaction = False
# get both the raw and display color values
color_norm = None
if interaction_index is not None:
cv = features[:, interaction_index]
cd = display_features[:, interaction_index]
clow = np.nanpercentile(cv.astype(np.float), 5)
chigh = np.nanpercentile(cv.astype(np.float), 95)
if type(cd[0]) == str:
cname_map = {}
for i in range(len(cv)):
cname_map[cd[i]] = cv[i]
cnames = list(cname_map.keys())
categorical_interaction = True
elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
categorical_interaction = True
# discritize colors for categorical features
if categorical_interaction and clow != chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)
# optionally add jitter to feature values
if x_jitter > 0:
if x_jitter > 1: x_jitter = 1
xvals = xv.copy()
if isinstance(xvals[0], float):
xvals = xvals.astype(np.float)
xvals = xvals[~np.isnan(xvals)]
xvals = np.unique(xvals)
if len(xvals) >= 2:
smallest_diff = np.min(np.diff(np.sort(xvals)))
jitter_amount = x_jitter * smallest_diff
xv += (np.random.ranf(size = len(xv))*jitter_amount) - (jitter_amount/2)
# the actual scatter plot, TODO: adapt the dot_size to the number of data points?
xv_nan = np.isnan(xv)
xv_notnan = np.invert(xv_nan)
if interaction_index is not None:
# plot the nan values in the interaction feature as grey
cvals = features[oinds, interaction_index].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
cvals[cvals_imp > chigh] = chigh
cvals[cvals_imp < clow] = clow
p = pl.scatter(
xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],
cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,
norm=color_norm, rasterized=len(xv) > 500
)
p.set_array(cvals[xv_notnan])
else:
pl.scatter(xv, s, s=dot_size, linewidth=0, color=color,
alpha=alpha, rasterized=len(xv) > 500)
if interaction_index != ind and interaction_index is not None:
# draw the color bar
if type(cd[0]) == str:
tick_positions = [cname_map[n] for n in cnames]
if len(tick_positions) == 2:
tick_positions[0] -= 0.25
tick_positions[1] += 0.25
cb = pl.colorbar(ticks=tick_positions)
cb.set_ticklabels(cnames)
else:
cb = pl.colorbar()
cb.set_label(feature_names[interaction_index], size=13)
cb.ax.tick_params(labelsize=11)
if categorical_interaction:
cb.ax.tick_params(length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
# handles any setting of xmax and xmin
# note that we handle None,float, or "percentile(float)" formats
if xmin is not None or xmax is not None:
if type(xmin) == str and xmin.startswith("percentile"):
xmin = np.nanpercentile(xv, float(xmin[11:-1]))
if type(xmax) == str and xmax.startswith("percentile"):
xmax = np.nanpercentile(xv, float(xmax[11:-1]))
if xmin is None or xmin == np.nanmin(xv):
xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20
if xmax is None or xmax == np.nanmax(xv):
xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20
pl.xlim(xmin, xmax)
# plot any nan feature values as tick marks along the y-axis
xlim = pl.xlim()
if interaction_index is not None:
p = pl.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,
vmin=clow, vmax=chigh
)
p.set_array(cvals[xv_nan])
else:
pl.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, color=color, alpha=alpha
)
pl.xlim(*xlim)
# make the plot more readable
if interaction_index != ind:
pl.gcf().set_size_inches(7.5, 5)
else:
pl.gcf().set_size_inches(6, 5)
pl.xlabel(name, color=axis_color, fontsize=13)
pl.ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)
if title is not None:
pl.title(title, color=axis_color, fontsize=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
for spine in pl.gca().spines.values():
spine.set_edgecolor(axis_color)
if type(xd[0]) == str:
pl.xticks([name_map[n] for n in xnames], xnames, rotation='vertical', fontsize=11)
if show:
with warnings.catch_warnings(): # ignore expected matplotlib warnings
warnings.simplefilter("ignore", RuntimeWarning)
pl.show()
|
python
|
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
interaction_index="auto",
color="#1E88E5", axis_color="#333333", cmap=colors.red_blue,
dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, show=True):
""" Create a SHAP dependence plot, colored by an interaction feature.
Plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extenstion of the classical parital dependence plots. Vertical dispersion of the
data points represents interaction effects. Grey ticks along the y-axis are data
points where the feature's value was NaN.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to plot. If this is a string it is
either the name of the feature to plot, or it can have the form "rank(int)" to specify
the feature with that rank (ordered by mean absolute SHAP value over all the samples).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features).
feature_names : list
Names of the features (length # features).
display_features : numpy.array or pandas.DataFrame
Matrix of feature values for visual display (such as strings instead of coded values).
interaction_index : "auto", None, int, or string
The index of the feature used to color the plot. The name of a feature can also be passed
as a string. If "auto" then shap.common.approximate_interactions is used to pick what
seems to be the strongest interaction (note that to find to true stongest interaction you
need to compute the SHAP interaction values).
x_jitter : float (0 - 1)
Adds random jitter to feature values. May increase plot readability when feature
is discrete.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
xmin : float or string
Represents the lower bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
xmax : float or string
Represents the upper bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
"""
# convert from DataFrames if we got any
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
if str(type(display_features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = display_features.columns
display_features = display_features.values
elif display_features is None:
display_features = features
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
# allow vectors to be passed
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, len(shap_values), 1)
if len(features.shape) == 1:
features = np.reshape(features, len(features), 1)
ind = convert_name(ind, shap_values, feature_names)
# plotting SHAP interaction values
if len(shap_values.shape) == 3 and len(ind) == 2:
ind1 = convert_name(ind[0], shap_values, feature_names)
ind2 = convert_name(ind[1], shap_values, feature_names)
if ind1 == ind2:
proj_shap_values = shap_values[:, ind2, :]
else:
proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half
# TODO: remove recursion; generally the functions should be shorter for more maintainable code
dependence_plot(
ind1, proj_shap_values, features, feature_names=feature_names,
interaction_index=ind2, display_features=display_features, show=False,
xmin=xmin, xmax=xmax
)
if ind1 == ind2:
pl.ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])
else:
pl.ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))
if show:
pl.show()
return
assert shap_values.shape[0] == features.shape[0], \
"'shap_values' and 'features' values must have the same number of rows!"
assert shap_values.shape[1] == features.shape[1], \
"'shap_values' must have the same number of columns as 'features'!"
# get both the raw and display feature values
oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering
np.random.shuffle(oinds)
xv = features[oinds, ind].astype(np.float64)
xd = display_features[oinds, ind]
s = shap_values[oinds, ind]
if type(xd[0]) == str:
name_map = {}
for i in range(len(xv)):
name_map[xd[i]] = xv[i]
xnames = list(name_map.keys())
# allow a single feature name to be passed alone
if type(feature_names) == str:
feature_names = [feature_names]
name = feature_names[ind]
# guess what other feature as the stongest interaction with the plotted feature
if interaction_index == "auto":
interaction_index = approximate_interactions(ind, shap_values, features)[0]
interaction_index = convert_name(interaction_index, shap_values, feature_names)
categorical_interaction = False
# get both the raw and display color values
color_norm = None
if interaction_index is not None:
cv = features[:, interaction_index]
cd = display_features[:, interaction_index]
clow = np.nanpercentile(cv.astype(np.float), 5)
chigh = np.nanpercentile(cv.astype(np.float), 95)
if type(cd[0]) == str:
cname_map = {}
for i in range(len(cv)):
cname_map[cd[i]] = cv[i]
cnames = list(cname_map.keys())
categorical_interaction = True
elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
categorical_interaction = True
# discritize colors for categorical features
if categorical_interaction and clow != chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)
# optionally add jitter to feature values
if x_jitter > 0:
if x_jitter > 1: x_jitter = 1
xvals = xv.copy()
if isinstance(xvals[0], float):
xvals = xvals.astype(np.float)
xvals = xvals[~np.isnan(xvals)]
xvals = np.unique(xvals)
if len(xvals) >= 2:
smallest_diff = np.min(np.diff(np.sort(xvals)))
jitter_amount = x_jitter * smallest_diff
xv += (np.random.ranf(size = len(xv))*jitter_amount) - (jitter_amount/2)
# the actual scatter plot, TODO: adapt the dot_size to the number of data points?
xv_nan = np.isnan(xv)
xv_notnan = np.invert(xv_nan)
if interaction_index is not None:
# plot the nan values in the interaction feature as grey
cvals = features[oinds, interaction_index].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
cvals[cvals_imp > chigh] = chigh
cvals[cvals_imp < clow] = clow
p = pl.scatter(
xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],
cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,
norm=color_norm, rasterized=len(xv) > 500
)
p.set_array(cvals[xv_notnan])
else:
pl.scatter(xv, s, s=dot_size, linewidth=0, color=color,
alpha=alpha, rasterized=len(xv) > 500)
if interaction_index != ind and interaction_index is not None:
# draw the color bar
if type(cd[0]) == str:
tick_positions = [cname_map[n] for n in cnames]
if len(tick_positions) == 2:
tick_positions[0] -= 0.25
tick_positions[1] += 0.25
cb = pl.colorbar(ticks=tick_positions)
cb.set_ticklabels(cnames)
else:
cb = pl.colorbar()
cb.set_label(feature_names[interaction_index], size=13)
cb.ax.tick_params(labelsize=11)
if categorical_interaction:
cb.ax.tick_params(length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
# handles any setting of xmax and xmin
# note that we handle None,float, or "percentile(float)" formats
if xmin is not None or xmax is not None:
if type(xmin) == str and xmin.startswith("percentile"):
xmin = np.nanpercentile(xv, float(xmin[11:-1]))
if type(xmax) == str and xmax.startswith("percentile"):
xmax = np.nanpercentile(xv, float(xmax[11:-1]))
if xmin is None or xmin == np.nanmin(xv):
xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20
if xmax is None or xmax == np.nanmax(xv):
xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20
pl.xlim(xmin, xmax)
# plot any nan feature values as tick marks along the y-axis
xlim = pl.xlim()
if interaction_index is not None:
p = pl.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,
vmin=clow, vmax=chigh
)
p.set_array(cvals[xv_nan])
else:
pl.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, color=color, alpha=alpha
)
pl.xlim(*xlim)
# make the plot more readable
if interaction_index != ind:
pl.gcf().set_size_inches(7.5, 5)
else:
pl.gcf().set_size_inches(6, 5)
pl.xlabel(name, color=axis_color, fontsize=13)
pl.ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)
if title is not None:
pl.title(title, color=axis_color, fontsize=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
for spine in pl.gca().spines.values():
spine.set_edgecolor(axis_color)
if type(xd[0]) == str:
pl.xticks([name_map[n] for n in xnames], xnames, rotation='vertical', fontsize=11)
if show:
with warnings.catch_warnings(): # ignore expected matplotlib warnings
warnings.simplefilter("ignore", RuntimeWarning)
pl.show()
|
[
"def",
"dependence_plot",
"(",
"ind",
",",
"shap_values",
",",
"features",
",",
"feature_names",
"=",
"None",
",",
"display_features",
"=",
"None",
",",
"interaction_index",
"=",
"\"auto\"",
",",
"color",
"=",
"\"#1E88E5\"",
",",
"axis_color",
"=",
"\"#333333\"",
",",
"cmap",
"=",
"colors",
".",
"red_blue",
",",
"dot_size",
"=",
"16",
",",
"x_jitter",
"=",
"0",
",",
"alpha",
"=",
"1",
",",
"title",
"=",
"None",
",",
"xmin",
"=",
"None",
",",
"xmax",
"=",
"None",
",",
"show",
"=",
"True",
")",
":",
"# convert from DataFrames if we got any",
"if",
"str",
"(",
"type",
"(",
"features",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"features",
".",
"columns",
"features",
"=",
"features",
".",
"values",
"if",
"str",
"(",
"type",
"(",
"display_features",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"display_features",
".",
"columns",
"display_features",
"=",
"display_features",
".",
"values",
"elif",
"display_features",
"is",
"None",
":",
"display_features",
"=",
"features",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"labels",
"[",
"'FEATURE'",
"]",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"# allow vectors to be passed",
"if",
"len",
"(",
"shap_values",
".",
"shape",
")",
"==",
"1",
":",
"shap_values",
"=",
"np",
".",
"reshape",
"(",
"shap_values",
",",
"len",
"(",
"shap_values",
")",
",",
"1",
")",
"if",
"len",
"(",
"features",
".",
"shape",
")",
"==",
"1",
":",
"features",
"=",
"np",
".",
"reshape",
"(",
"features",
",",
"len",
"(",
"features",
")",
",",
"1",
")",
"ind",
"=",
"convert_name",
"(",
"ind",
",",
"shap_values",
",",
"feature_names",
")",
"# plotting SHAP interaction values",
"if",
"len",
"(",
"shap_values",
".",
"shape",
")",
"==",
"3",
"and",
"len",
"(",
"ind",
")",
"==",
"2",
":",
"ind1",
"=",
"convert_name",
"(",
"ind",
"[",
"0",
"]",
",",
"shap_values",
",",
"feature_names",
")",
"ind2",
"=",
"convert_name",
"(",
"ind",
"[",
"1",
"]",
",",
"shap_values",
",",
"feature_names",
")",
"if",
"ind1",
"==",
"ind2",
":",
"proj_shap_values",
"=",
"shap_values",
"[",
":",
",",
"ind2",
",",
":",
"]",
"else",
":",
"proj_shap_values",
"=",
"shap_values",
"[",
":",
",",
"ind2",
",",
":",
"]",
"*",
"2",
"# off-diag values are split in half",
"# TODO: remove recursion; generally the functions should be shorter for more maintainable code",
"dependence_plot",
"(",
"ind1",
",",
"proj_shap_values",
",",
"features",
",",
"feature_names",
"=",
"feature_names",
",",
"interaction_index",
"=",
"ind2",
",",
"display_features",
"=",
"display_features",
",",
"show",
"=",
"False",
",",
"xmin",
"=",
"xmin",
",",
"xmax",
"=",
"xmax",
")",
"if",
"ind1",
"==",
"ind2",
":",
"pl",
".",
"ylabel",
"(",
"labels",
"[",
"'MAIN_EFFECT'",
"]",
"%",
"feature_names",
"[",
"ind1",
"]",
")",
"else",
":",
"pl",
".",
"ylabel",
"(",
"labels",
"[",
"'INTERACTION_EFFECT'",
"]",
"%",
"(",
"feature_names",
"[",
"ind1",
"]",
",",
"feature_names",
"[",
"ind2",
"]",
")",
")",
"if",
"show",
":",
"pl",
".",
"show",
"(",
")",
"return",
"assert",
"shap_values",
".",
"shape",
"[",
"0",
"]",
"==",
"features",
".",
"shape",
"[",
"0",
"]",
",",
"\"'shap_values' and 'features' values must have the same number of rows!\"",
"assert",
"shap_values",
".",
"shape",
"[",
"1",
"]",
"==",
"features",
".",
"shape",
"[",
"1",
"]",
",",
"\"'shap_values' must have the same number of columns as 'features'!\"",
"# get both the raw and display feature values",
"oinds",
"=",
"np",
".",
"arange",
"(",
"shap_values",
".",
"shape",
"[",
"0",
"]",
")",
"# we randomize the ordering so plotting overlaps are not related to data ordering",
"np",
".",
"random",
".",
"shuffle",
"(",
"oinds",
")",
"xv",
"=",
"features",
"[",
"oinds",
",",
"ind",
"]",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"xd",
"=",
"display_features",
"[",
"oinds",
",",
"ind",
"]",
"s",
"=",
"shap_values",
"[",
"oinds",
",",
"ind",
"]",
"if",
"type",
"(",
"xd",
"[",
"0",
"]",
")",
"==",
"str",
":",
"name_map",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"xv",
")",
")",
":",
"name_map",
"[",
"xd",
"[",
"i",
"]",
"]",
"=",
"xv",
"[",
"i",
"]",
"xnames",
"=",
"list",
"(",
"name_map",
".",
"keys",
"(",
")",
")",
"# allow a single feature name to be passed alone",
"if",
"type",
"(",
"feature_names",
")",
"==",
"str",
":",
"feature_names",
"=",
"[",
"feature_names",
"]",
"name",
"=",
"feature_names",
"[",
"ind",
"]",
"# guess what other feature as the stongest interaction with the plotted feature",
"if",
"interaction_index",
"==",
"\"auto\"",
":",
"interaction_index",
"=",
"approximate_interactions",
"(",
"ind",
",",
"shap_values",
",",
"features",
")",
"[",
"0",
"]",
"interaction_index",
"=",
"convert_name",
"(",
"interaction_index",
",",
"shap_values",
",",
"feature_names",
")",
"categorical_interaction",
"=",
"False",
"# get both the raw and display color values",
"color_norm",
"=",
"None",
"if",
"interaction_index",
"is",
"not",
"None",
":",
"cv",
"=",
"features",
"[",
":",
",",
"interaction_index",
"]",
"cd",
"=",
"display_features",
"[",
":",
",",
"interaction_index",
"]",
"clow",
"=",
"np",
".",
"nanpercentile",
"(",
"cv",
".",
"astype",
"(",
"np",
".",
"float",
")",
",",
"5",
")",
"chigh",
"=",
"np",
".",
"nanpercentile",
"(",
"cv",
".",
"astype",
"(",
"np",
".",
"float",
")",
",",
"95",
")",
"if",
"type",
"(",
"cd",
"[",
"0",
"]",
")",
"==",
"str",
":",
"cname_map",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"cv",
")",
")",
":",
"cname_map",
"[",
"cd",
"[",
"i",
"]",
"]",
"=",
"cv",
"[",
"i",
"]",
"cnames",
"=",
"list",
"(",
"cname_map",
".",
"keys",
"(",
")",
")",
"categorical_interaction",
"=",
"True",
"elif",
"clow",
"%",
"1",
"==",
"0",
"and",
"chigh",
"%",
"1",
"==",
"0",
"and",
"chigh",
"-",
"clow",
"<",
"10",
":",
"categorical_interaction",
"=",
"True",
"# discritize colors for categorical features",
"if",
"categorical_interaction",
"and",
"clow",
"!=",
"chigh",
":",
"clow",
"=",
"np",
".",
"nanmin",
"(",
"cv",
".",
"astype",
"(",
"np",
".",
"float",
")",
")",
"chigh",
"=",
"np",
".",
"nanmax",
"(",
"cv",
".",
"astype",
"(",
"np",
".",
"float",
")",
")",
"bounds",
"=",
"np",
".",
"linspace",
"(",
"clow",
",",
"chigh",
",",
"int",
"(",
"chigh",
"-",
"clow",
"+",
"2",
")",
")",
"color_norm",
"=",
"matplotlib",
".",
"colors",
".",
"BoundaryNorm",
"(",
"bounds",
",",
"cmap",
".",
"N",
"-",
"1",
")",
"# optionally add jitter to feature values",
"if",
"x_jitter",
">",
"0",
":",
"if",
"x_jitter",
">",
"1",
":",
"x_jitter",
"=",
"1",
"xvals",
"=",
"xv",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"xvals",
"[",
"0",
"]",
",",
"float",
")",
":",
"xvals",
"=",
"xvals",
".",
"astype",
"(",
"np",
".",
"float",
")",
"xvals",
"=",
"xvals",
"[",
"~",
"np",
".",
"isnan",
"(",
"xvals",
")",
"]",
"xvals",
"=",
"np",
".",
"unique",
"(",
"xvals",
")",
"if",
"len",
"(",
"xvals",
")",
">=",
"2",
":",
"smallest_diff",
"=",
"np",
".",
"min",
"(",
"np",
".",
"diff",
"(",
"np",
".",
"sort",
"(",
"xvals",
")",
")",
")",
"jitter_amount",
"=",
"x_jitter",
"*",
"smallest_diff",
"xv",
"+=",
"(",
"np",
".",
"random",
".",
"ranf",
"(",
"size",
"=",
"len",
"(",
"xv",
")",
")",
"*",
"jitter_amount",
")",
"-",
"(",
"jitter_amount",
"/",
"2",
")",
"# the actual scatter plot, TODO: adapt the dot_size to the number of data points?",
"xv_nan",
"=",
"np",
".",
"isnan",
"(",
"xv",
")",
"xv_notnan",
"=",
"np",
".",
"invert",
"(",
"xv_nan",
")",
"if",
"interaction_index",
"is",
"not",
"None",
":",
"# plot the nan values in the interaction feature as grey",
"cvals",
"=",
"features",
"[",
"oinds",
",",
"interaction_index",
"]",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"cvals_imp",
"=",
"cvals",
".",
"copy",
"(",
")",
"cvals_imp",
"[",
"np",
".",
"isnan",
"(",
"cvals",
")",
"]",
"=",
"(",
"clow",
"+",
"chigh",
")",
"/",
"2.0",
"cvals",
"[",
"cvals_imp",
">",
"chigh",
"]",
"=",
"chigh",
"cvals",
"[",
"cvals_imp",
"<",
"clow",
"]",
"=",
"clow",
"p",
"=",
"pl",
".",
"scatter",
"(",
"xv",
"[",
"xv_notnan",
"]",
",",
"s",
"[",
"xv_notnan",
"]",
",",
"s",
"=",
"dot_size",
",",
"linewidth",
"=",
"0",
",",
"c",
"=",
"cvals",
"[",
"xv_notnan",
"]",
",",
"cmap",
"=",
"cmap",
",",
"alpha",
"=",
"alpha",
",",
"vmin",
"=",
"clow",
",",
"vmax",
"=",
"chigh",
",",
"norm",
"=",
"color_norm",
",",
"rasterized",
"=",
"len",
"(",
"xv",
")",
">",
"500",
")",
"p",
".",
"set_array",
"(",
"cvals",
"[",
"xv_notnan",
"]",
")",
"else",
":",
"pl",
".",
"scatter",
"(",
"xv",
",",
"s",
",",
"s",
"=",
"dot_size",
",",
"linewidth",
"=",
"0",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
"alpha",
",",
"rasterized",
"=",
"len",
"(",
"xv",
")",
">",
"500",
")",
"if",
"interaction_index",
"!=",
"ind",
"and",
"interaction_index",
"is",
"not",
"None",
":",
"# draw the color bar",
"if",
"type",
"(",
"cd",
"[",
"0",
"]",
")",
"==",
"str",
":",
"tick_positions",
"=",
"[",
"cname_map",
"[",
"n",
"]",
"for",
"n",
"in",
"cnames",
"]",
"if",
"len",
"(",
"tick_positions",
")",
"==",
"2",
":",
"tick_positions",
"[",
"0",
"]",
"-=",
"0.25",
"tick_positions",
"[",
"1",
"]",
"+=",
"0.25",
"cb",
"=",
"pl",
".",
"colorbar",
"(",
"ticks",
"=",
"tick_positions",
")",
"cb",
".",
"set_ticklabels",
"(",
"cnames",
")",
"else",
":",
"cb",
"=",
"pl",
".",
"colorbar",
"(",
")",
"cb",
".",
"set_label",
"(",
"feature_names",
"[",
"interaction_index",
"]",
",",
"size",
"=",
"13",
")",
"cb",
".",
"ax",
".",
"tick_params",
"(",
"labelsize",
"=",
"11",
")",
"if",
"categorical_interaction",
":",
"cb",
".",
"ax",
".",
"tick_params",
"(",
"length",
"=",
"0",
")",
"cb",
".",
"set_alpha",
"(",
"1",
")",
"cb",
".",
"outline",
".",
"set_visible",
"(",
"False",
")",
"bbox",
"=",
"cb",
".",
"ax",
".",
"get_window_extent",
"(",
")",
".",
"transformed",
"(",
"pl",
".",
"gcf",
"(",
")",
".",
"dpi_scale_trans",
".",
"inverted",
"(",
")",
")",
"cb",
".",
"ax",
".",
"set_aspect",
"(",
"(",
"bbox",
".",
"height",
"-",
"0.7",
")",
"*",
"20",
")",
"# handles any setting of xmax and xmin",
"# note that we handle None,float, or \"percentile(float)\" formats",
"if",
"xmin",
"is",
"not",
"None",
"or",
"xmax",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"xmin",
")",
"==",
"str",
"and",
"xmin",
".",
"startswith",
"(",
"\"percentile\"",
")",
":",
"xmin",
"=",
"np",
".",
"nanpercentile",
"(",
"xv",
",",
"float",
"(",
"xmin",
"[",
"11",
":",
"-",
"1",
"]",
")",
")",
"if",
"type",
"(",
"xmax",
")",
"==",
"str",
"and",
"xmax",
".",
"startswith",
"(",
"\"percentile\"",
")",
":",
"xmax",
"=",
"np",
".",
"nanpercentile",
"(",
"xv",
",",
"float",
"(",
"xmax",
"[",
"11",
":",
"-",
"1",
"]",
")",
")",
"if",
"xmin",
"is",
"None",
"or",
"xmin",
"==",
"np",
".",
"nanmin",
"(",
"xv",
")",
":",
"xmin",
"=",
"np",
".",
"nanmin",
"(",
"xv",
")",
"-",
"(",
"xmax",
"-",
"np",
".",
"nanmin",
"(",
"xv",
")",
")",
"/",
"20",
"if",
"xmax",
"is",
"None",
"or",
"xmax",
"==",
"np",
".",
"nanmax",
"(",
"xv",
")",
":",
"xmax",
"=",
"np",
".",
"nanmax",
"(",
"xv",
")",
"+",
"(",
"np",
".",
"nanmax",
"(",
"xv",
")",
"-",
"xmin",
")",
"/",
"20",
"pl",
".",
"xlim",
"(",
"xmin",
",",
"xmax",
")",
"# plot any nan feature values as tick marks along the y-axis",
"xlim",
"=",
"pl",
".",
"xlim",
"(",
")",
"if",
"interaction_index",
"is",
"not",
"None",
":",
"p",
"=",
"pl",
".",
"scatter",
"(",
"xlim",
"[",
"0",
"]",
"*",
"np",
".",
"ones",
"(",
"xv_nan",
".",
"sum",
"(",
")",
")",
",",
"s",
"[",
"xv_nan",
"]",
",",
"marker",
"=",
"1",
",",
"linewidth",
"=",
"2",
",",
"c",
"=",
"cvals_imp",
"[",
"xv_nan",
"]",
",",
"cmap",
"=",
"cmap",
",",
"alpha",
"=",
"alpha",
",",
"vmin",
"=",
"clow",
",",
"vmax",
"=",
"chigh",
")",
"p",
".",
"set_array",
"(",
"cvals",
"[",
"xv_nan",
"]",
")",
"else",
":",
"pl",
".",
"scatter",
"(",
"xlim",
"[",
"0",
"]",
"*",
"np",
".",
"ones",
"(",
"xv_nan",
".",
"sum",
"(",
")",
")",
",",
"s",
"[",
"xv_nan",
"]",
",",
"marker",
"=",
"1",
",",
"linewidth",
"=",
"2",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
"alpha",
")",
"pl",
".",
"xlim",
"(",
"*",
"xlim",
")",
"# make the plot more readable",
"if",
"interaction_index",
"!=",
"ind",
":",
"pl",
".",
"gcf",
"(",
")",
".",
"set_size_inches",
"(",
"7.5",
",",
"5",
")",
"else",
":",
"pl",
".",
"gcf",
"(",
")",
".",
"set_size_inches",
"(",
"6",
",",
"5",
")",
"pl",
".",
"xlabel",
"(",
"name",
",",
"color",
"=",
"axis_color",
",",
"fontsize",
"=",
"13",
")",
"pl",
".",
"ylabel",
"(",
"labels",
"[",
"'VALUE_FOR'",
"]",
"%",
"name",
",",
"color",
"=",
"axis_color",
",",
"fontsize",
"=",
"13",
")",
"if",
"title",
"is",
"not",
"None",
":",
"pl",
".",
"title",
"(",
"title",
",",
"color",
"=",
"axis_color",
",",
"fontsize",
"=",
"13",
")",
"pl",
".",
"gca",
"(",
")",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"pl",
".",
"gca",
"(",
")",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'left'",
")",
"pl",
".",
"gca",
"(",
")",
".",
"spines",
"[",
"'right'",
"]",
".",
"set_visible",
"(",
"False",
")",
"pl",
".",
"gca",
"(",
")",
".",
"spines",
"[",
"'top'",
"]",
".",
"set_visible",
"(",
"False",
")",
"pl",
".",
"gca",
"(",
")",
".",
"tick_params",
"(",
"color",
"=",
"axis_color",
",",
"labelcolor",
"=",
"axis_color",
",",
"labelsize",
"=",
"11",
")",
"for",
"spine",
"in",
"pl",
".",
"gca",
"(",
")",
".",
"spines",
".",
"values",
"(",
")",
":",
"spine",
".",
"set_edgecolor",
"(",
"axis_color",
")",
"if",
"type",
"(",
"xd",
"[",
"0",
"]",
")",
"==",
"str",
":",
"pl",
".",
"xticks",
"(",
"[",
"name_map",
"[",
"n",
"]",
"for",
"n",
"in",
"xnames",
"]",
",",
"xnames",
",",
"rotation",
"=",
"'vertical'",
",",
"fontsize",
"=",
"11",
")",
"if",
"show",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# ignore expected matplotlib warnings",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"RuntimeWarning",
")",
"pl",
".",
"show",
"(",
")"
] |
Create a SHAP dependence plot, colored by an interaction feature.
Plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extenstion of the classical parital dependence plots. Vertical dispersion of the
data points represents interaction effects. Grey ticks along the y-axis are data
points where the feature's value was NaN.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to plot. If this is a string it is
either the name of the feature to plot, or it can have the form "rank(int)" to specify
the feature with that rank (ordered by mean absolute SHAP value over all the samples).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features).
feature_names : list
Names of the features (length # features).
display_features : numpy.array or pandas.DataFrame
Matrix of feature values for visual display (such as strings instead of coded values).
interaction_index : "auto", None, int, or string
The index of the feature used to color the plot. The name of a feature can also be passed
as a string. If "auto" then shap.common.approximate_interactions is used to pick what
seems to be the strongest interaction (note that to find to true stongest interaction you
need to compute the SHAP interaction values).
x_jitter : float (0 - 1)
Adds random jitter to feature values. May increase plot readability when feature
is discrete.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
xmin : float or string
Represents the lower bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
xmax : float or string
Represents the upper bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
|
[
"Create",
"a",
"SHAP",
"dependence",
"plot",
"colored",
"by",
"an",
"interaction",
"feature",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/plots/dependence.py#L15-L275
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
runtime
|
def runtime(X, y, model_generator, method_name):
""" Runtime
transform = "negate"
sort_order = 1
"""
old_seed = np.random.seed()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
for i in range(1):
X_train, X_test, y_train, _ = train_test_split(__toarray(X), y, test_size=100, random_state=i)
# define the model we are going to explain
model = model_generator()
model.fit(X_train, y_train)
# evaluate each method
start = time.time()
explainer = getattr(methods, method_name)(model, X_train)
build_time = time.time() - start
start = time.time()
explainer(X_test)
explain_time = time.time() - start
# we always normalize the explain time as though we were explaining 1000 samples
# even if to reduce the runtime of the benchmark we do less (like just 100)
method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0])
np.random.seed(old_seed)
return None, np.mean(method_reps)
|
python
|
def runtime(X, y, model_generator, method_name):
""" Runtime
transform = "negate"
sort_order = 1
"""
old_seed = np.random.seed()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
for i in range(1):
X_train, X_test, y_train, _ = train_test_split(__toarray(X), y, test_size=100, random_state=i)
# define the model we are going to explain
model = model_generator()
model.fit(X_train, y_train)
# evaluate each method
start = time.time()
explainer = getattr(methods, method_name)(model, X_train)
build_time = time.time() - start
start = time.time()
explainer(X_test)
explain_time = time.time() - start
# we always normalize the explain time as though we were explaining 1000 samples
# even if to reduce the runtime of the benchmark we do less (like just 100)
method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0])
np.random.seed(old_seed)
return None, np.mean(method_reps)
|
[
"def",
"runtime",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"old_seed",
"=",
"np",
".",
"random",
".",
"seed",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"3293",
")",
"# average the method scores over several train/test splits",
"method_reps",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
")",
":",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"_",
"=",
"train_test_split",
"(",
"__toarray",
"(",
"X",
")",
",",
"y",
",",
"test_size",
"=",
"100",
",",
"random_state",
"=",
"i",
")",
"# define the model we are going to explain",
"model",
"=",
"model_generator",
"(",
")",
"model",
".",
"fit",
"(",
"X_train",
",",
"y_train",
")",
"# evaluate each method",
"start",
"=",
"time",
".",
"time",
"(",
")",
"explainer",
"=",
"getattr",
"(",
"methods",
",",
"method_name",
")",
"(",
"model",
",",
"X_train",
")",
"build_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"start",
"=",
"time",
".",
"time",
"(",
")",
"explainer",
"(",
"X_test",
")",
"explain_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"# we always normalize the explain time as though we were explaining 1000 samples",
"# even if to reduce the runtime of the benchmark we do less (like just 100)",
"method_reps",
".",
"append",
"(",
"build_time",
"+",
"explain_time",
"*",
"1000.0",
"/",
"X_test",
".",
"shape",
"[",
"0",
"]",
")",
"np",
".",
"random",
".",
"seed",
"(",
"old_seed",
")",
"return",
"None",
",",
"np",
".",
"mean",
"(",
"method_reps",
")"
] |
Runtime
transform = "negate"
sort_order = 1
|
[
"Runtime",
"transform",
"=",
"negate",
"sort_order",
"=",
"1"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L22-L54
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
local_accuracy
|
def local_accuracy(X, y, model_generator, method_name):
""" Local Accuracy
transform = "identity"
sort_order = 2
"""
def score_map(true, pred):
""" Converts local accuracy from % of standard deviation to numerical scores for coloring.
"""
v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
if v < 1e-6:
return 1.0
elif v < 0.01:
return 0.9
elif v < 0.05:
return 0.75
elif v < 0.1:
return 0.6
elif v < 0.2:
return 0.4
elif v < 0.3:
return 0.3
elif v < 0.5:
return 0.2
elif v < 0.7:
return 0.1
else:
return 0.0
def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):
return measures.local_accuracy(
X_train, y_train, X_test, y_test, attr_function(X_test),
model_generator, score_map, trained_model
)
return None, __score_method(X, y, None, model_generator, score_function, method_name)
|
python
|
def local_accuracy(X, y, model_generator, method_name):
""" Local Accuracy
transform = "identity"
sort_order = 2
"""
def score_map(true, pred):
""" Converts local accuracy from % of standard deviation to numerical scores for coloring.
"""
v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
if v < 1e-6:
return 1.0
elif v < 0.01:
return 0.9
elif v < 0.05:
return 0.75
elif v < 0.1:
return 0.6
elif v < 0.2:
return 0.4
elif v < 0.3:
return 0.3
elif v < 0.5:
return 0.2
elif v < 0.7:
return 0.1
else:
return 0.0
def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):
return measures.local_accuracy(
X_train, y_train, X_test, y_test, attr_function(X_test),
model_generator, score_map, trained_model
)
return None, __score_method(X, y, None, model_generator, score_function, method_name)
|
[
"def",
"local_accuracy",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"def",
"score_map",
"(",
"true",
",",
"pred",
")",
":",
"\"\"\" Converts local accuracy from % of standard deviation to numerical scores for coloring.\n \"\"\"",
"v",
"=",
"min",
"(",
"1.0",
",",
"np",
".",
"std",
"(",
"pred",
"-",
"true",
")",
"/",
"(",
"np",
".",
"std",
"(",
"true",
")",
"+",
"1e-8",
")",
")",
"if",
"v",
"<",
"1e-6",
":",
"return",
"1.0",
"elif",
"v",
"<",
"0.01",
":",
"return",
"0.9",
"elif",
"v",
"<",
"0.05",
":",
"return",
"0.75",
"elif",
"v",
"<",
"0.1",
":",
"return",
"0.6",
"elif",
"v",
"<",
"0.2",
":",
"return",
"0.4",
"elif",
"v",
"<",
"0.3",
":",
"return",
"0.3",
"elif",
"v",
"<",
"0.5",
":",
"return",
"0.2",
"elif",
"v",
"<",
"0.7",
":",
"return",
"0.1",
"else",
":",
"return",
"0.0",
"def",
"score_function",
"(",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"y_test",
",",
"attr_function",
",",
"trained_model",
",",
"random_state",
")",
":",
"return",
"measures",
".",
"local_accuracy",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_function",
"(",
"X_test",
")",
",",
"model_generator",
",",
"score_map",
",",
"trained_model",
")",
"return",
"None",
",",
"__score_method",
"(",
"X",
",",
"y",
",",
"None",
",",
"model_generator",
",",
"score_function",
",",
"method_name",
")"
] |
Local Accuracy
transform = "identity"
sort_order = 2
|
[
"Local",
"Accuracy",
"transform",
"=",
"identity",
"sort_order",
"=",
"2"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L56-L90
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_negative_mask
|
def keep_negative_mask(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (mask)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 5
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
python
|
def keep_negative_mask(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (mask)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 5
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
[
"def",
"keep_negative_mask",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_mask",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"-",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Keep Negative (mask)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 5
|
[
"Keep",
"Negative",
"(",
"mask",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"5"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L135-L142
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_absolute_mask__r2
|
def keep_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (mask)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 6
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
python
|
def keep_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (mask)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 6
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
[
"def",
"keep_absolute_mask__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_mask",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
")"
] |
Keep Absolute (mask)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 6
|
[
"Keep",
"Absolute",
"(",
"mask",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"R^2",
"transform",
"=",
"identity",
"sort_order",
"=",
"6"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L144-L151
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_positive_mask
|
def remove_positive_mask(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (mask)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
python
|
def remove_positive_mask(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (mask)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
[
"def",
"remove_positive_mask",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_mask",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Remove Positive (mask)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
|
[
"Remove",
"Positive",
"(",
"mask",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"7"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L162-L169
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_absolute_mask__r2
|
def remove_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
python
|
def remove_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
[
"def",
"remove_absolute_mask__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_mask",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
")"
] |
Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
|
[
"Remove",
"Absolute",
"(",
"mask",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"R^2",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"9"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L180-L187
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_negative_resample
|
def keep_negative_resample(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (resample)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 11
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
python
|
def keep_negative_resample(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (resample)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 11
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
[
"def",
"keep_negative_resample",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_resample",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"-",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Keep Negative (resample)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 11
|
[
"Keep",
"Negative",
"(",
"resample",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"11"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L207-L214
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_absolute_resample__r2
|
def keep_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 12
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
python
|
def keep_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 12
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
[
"def",
"keep_absolute_resample__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_resample",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
")"
] |
Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 12
|
[
"Keep",
"Absolute",
"(",
"resample",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"R^2",
"transform",
"=",
"identity",
"sort_order",
"=",
"12"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L216-L223
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_absolute_resample__roc_auc
|
def keep_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 12
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
python
|
def keep_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 12
"""
return __run_measure(measures.keep_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
[
"def",
"keep_absolute_resample__roc_auc",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_resample",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
")"
] |
Keep Absolute (resample)
xlabel = "Max fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 12
|
[
"Keep",
"Absolute",
"(",
"resample",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"ROC",
"AUC",
"transform",
"=",
"identity",
"sort_order",
"=",
"12"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L225-L232
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_positive_resample
|
def remove_positive_resample(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (resample)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 13
"""
return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
python
|
def remove_positive_resample(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (resample)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 13
"""
return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
[
"def",
"remove_positive_resample",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_resample",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Remove Positive (resample)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 13
|
[
"Remove",
"Positive",
"(",
"resample",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"13"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L234-L241
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_absolute_resample__r2
|
def remove_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (resample)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 15
"""
return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
python
|
def remove_absolute_resample__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (resample)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 15
"""
return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
[
"def",
"remove_absolute_resample__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_resample",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
")"
] |
Remove Absolute (resample)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 15
|
[
"Remove",
"Absolute",
"(",
"resample",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"R^2",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"15"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L252-L259
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_absolute_resample__roc_auc
|
def remove_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (resample)
xlabel = "Max fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 15
"""
return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
python
|
def remove_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (resample)
xlabel = "Max fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 15
"""
return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
[
"def",
"remove_absolute_resample__roc_auc",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_resample",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
")"
] |
Remove Absolute (resample)
xlabel = "Max fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 15
|
[
"Remove",
"Absolute",
"(",
"resample",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"ROC",
"AUC",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"15"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L261-L268
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_negative_impute
|
def keep_negative_impute(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17
"""
return __run_measure(measures.keep_impute, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
python
|
def keep_negative_impute(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17
"""
return __run_measure(measures.keep_impute, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
[
"def",
"keep_negative_impute",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_impute",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"-",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17
|
[
"Keep",
"Negative",
"(",
"impute",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"17"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L279-L286
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_absolute_impute__r2
|
def keep_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (impute)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 18
"""
return __run_measure(measures.keep_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
python
|
def keep_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (impute)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 18
"""
return __run_measure(measures.keep_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
[
"def",
"keep_absolute_impute__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_impute",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
")"
] |
Keep Absolute (impute)
xlabel = "Max fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 18
|
[
"Keep",
"Absolute",
"(",
"impute",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"R^2",
"transform",
"=",
"identity",
"sort_order",
"=",
"18"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L288-L295
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_absolute_impute__roc_auc
|
def keep_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (impute)
xlabel = "Max fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 19
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
python
|
def keep_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Absolute (impute)
xlabel = "Max fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 19
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
[
"def",
"keep_absolute_impute__roc_auc",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_mask",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
")"
] |
Keep Absolute (impute)
xlabel = "Max fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 19
|
[
"Keep",
"Absolute",
"(",
"impute",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"ROC",
"AUC",
"transform",
"=",
"identity",
"sort_order",
"=",
"19"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L297-L304
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_positive_impute
|
def remove_positive_impute(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (impute)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
"""
return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
python
|
def remove_positive_impute(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (impute)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
"""
return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
[
"def",
"remove_positive_impute",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_impute",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Remove Positive (impute)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
|
[
"Remove",
"Positive",
"(",
"impute",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"7"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L306-L313
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_absolute_impute__r2
|
def remove_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
python
|
def remove_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score)
|
[
"def",
"remove_absolute_impute__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_impute",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
")"
] |
Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
|
[
"Remove",
"Absolute",
"(",
"impute",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"R^2",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"9"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L324-L331
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_absolute_impute__roc_auc
|
def remove_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
python
|
def remove_absolute_impute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
|
[
"def",
"remove_absolute_impute__roc_auc",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_mask",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"0",
",",
"num_fcounts",
",",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
")"
] |
Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 9
|
[
"Remove",
"Absolute",
"(",
"impute",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"ROC",
"AUC",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"9"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L333-L340
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
keep_negative_retrain
|
def keep_negative_retrain(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (retrain)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
"""
return __run_measure(measures.keep_retrain, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
python
|
def keep_negative_retrain(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (retrain)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
"""
return __run_measure(measures.keep_retrain, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred)
|
[
"def",
"keep_negative_retrain",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"keep_retrain",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"-",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Keep Negative (retrain)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 7
|
[
"Keep",
"Negative",
"(",
"retrain",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"7"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L351-L358
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
remove_positive_retrain
|
def remove_positive_retrain(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (retrain)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 11
"""
return __run_measure(measures.remove_retrain, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
python
|
def remove_positive_retrain(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Positive (retrain)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 11
"""
return __run_measure(measures.remove_retrain, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
|
[
"def",
"remove_positive_retrain",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_measure",
"(",
"measures",
".",
"remove_retrain",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"1",
",",
"num_fcounts",
",",
"__mean_pred",
")"
] |
Remove Positive (retrain)
xlabel = "Max fraction of features removed"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 11
|
[
"Remove",
"Positive",
"(",
"retrain",
")",
"xlabel",
"=",
"Max",
"fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"Negative",
"mean",
"model",
"output",
"transform",
"=",
"negate",
"sort_order",
"=",
"11"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L360-L367
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
batch_remove_absolute_retrain__r2
|
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
|
python
|
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
|
[
"def",
"batch_remove_absolute_retrain__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_batch_abs_metric",
"(",
"measures",
".",
"batch_remove_retrain",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
",",
"num_fcounts",
")"
] |
Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 13
|
[
"Batch",
"Remove",
"Absolute",
"(",
"retrain",
")",
"xlabel",
"=",
"Fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"R^2",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"13"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L394-L401
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
batch_keep_absolute_retrain__r2
|
def batch_keep_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Keep Absolute (retrain)
xlabel = "Fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
|
python
|
def batch_keep_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Keep Absolute (retrain)
xlabel = "Fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
|
[
"def",
"batch_keep_absolute_retrain__r2",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_batch_abs_metric",
"(",
"measures",
".",
"batch_keep_retrain",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"sklearn",
".",
"metrics",
".",
"r2_score",
",",
"num_fcounts",
")"
] |
Batch Keep Absolute (retrain)
xlabel = "Fraction of features kept"
ylabel = "R^2"
transform = "identity"
sort_order = 13
|
[
"Batch",
"Keep",
"Absolute",
"(",
"retrain",
")",
"xlabel",
"=",
"Fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"R^2",
"transform",
"=",
"identity",
"sort_order",
"=",
"13"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L403-L410
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
batch_remove_absolute_retrain__roc_auc
|
def batch_remove_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
|
python
|
def batch_remove_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
|
[
"def",
"batch_remove_absolute_retrain__roc_auc",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_batch_abs_metric",
"(",
"measures",
".",
"batch_remove_retrain",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
",",
"num_fcounts",
")"
] |
Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 13
|
[
"Batch",
"Remove",
"Absolute",
"(",
"retrain",
")",
"xlabel",
"=",
"Fraction",
"of",
"features",
"removed",
"ylabel",
"=",
"1",
"-",
"ROC",
"AUC",
"transform",
"=",
"one_minus",
"sort_order",
"=",
"13"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L412-L419
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
batch_keep_absolute_retrain__roc_auc
|
def batch_keep_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Keep Absolute (retrain)
xlabel = "Fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
|
python
|
def batch_keep_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
""" Batch Keep Absolute (retrain)
xlabel = "Fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 13
"""
return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
|
[
"def",
"batch_keep_absolute_retrain__roc_auc",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"num_fcounts",
"=",
"11",
")",
":",
"return",
"__run_batch_abs_metric",
"(",
"measures",
".",
"batch_keep_retrain",
",",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
",",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
",",
"num_fcounts",
")"
] |
Batch Keep Absolute (retrain)
xlabel = "Fraction of features kept"
ylabel = "ROC AUC"
transform = "identity"
sort_order = 13
|
[
"Batch",
"Keep",
"Absolute",
"(",
"retrain",
")",
"xlabel",
"=",
"Fraction",
"of",
"features",
"kept",
"ylabel",
"=",
"ROC",
"AUC",
"transform",
"=",
"identity",
"sort_order",
"=",
"13"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L421-L428
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
__score_method
|
def __score_method(X, y, fcounts, model_generator, score_function, method_name, nreps=10, test_size=100, cache_dir="/tmp"):
""" Test an explanation method.
"""
old_seed = np.random.seed()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest()
for i in range(nreps):
X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i)
# define the model we are going to explain, caching so we onlu build it once
model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])+".pickle"
cache_file = os.path.join(cache_dir, model_id + ".pickle")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
model = pickle.load(f)
else:
model = model_generator()
model.fit(X_train, y_train)
with open(cache_file, "wb") as f:
pickle.dump(model, f)
attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash])
def score(attr_function):
def cached_attr_function(X_inner):
if attr_key not in _attribution_cache:
_attribution_cache[attr_key] = attr_function(X_inner)
return _attribution_cache[attr_key]
#cached_attr_function = lambda X: __check_cache(attr_function, X)
if fcounts is None:
return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i)
else:
scores = []
for f in fcounts:
scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i))
return np.array(scores)
# evaluate the method (only building the attribution function if we need to)
if attr_key not in _attribution_cache:
method_reps.append(score(getattr(methods, method_name)(model, X_train)))
else:
method_reps.append(score(None))
np.random.seed(old_seed)
return np.array(method_reps).mean(0)
|
python
|
def __score_method(X, y, fcounts, model_generator, score_function, method_name, nreps=10, test_size=100, cache_dir="/tmp"):
""" Test an explanation method.
"""
old_seed = np.random.seed()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest()
for i in range(nreps):
X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i)
# define the model we are going to explain, caching so we onlu build it once
model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])+".pickle"
cache_file = os.path.join(cache_dir, model_id + ".pickle")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
model = pickle.load(f)
else:
model = model_generator()
model.fit(X_train, y_train)
with open(cache_file, "wb") as f:
pickle.dump(model, f)
attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash])
def score(attr_function):
def cached_attr_function(X_inner):
if attr_key not in _attribution_cache:
_attribution_cache[attr_key] = attr_function(X_inner)
return _attribution_cache[attr_key]
#cached_attr_function = lambda X: __check_cache(attr_function, X)
if fcounts is None:
return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i)
else:
scores = []
for f in fcounts:
scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i))
return np.array(scores)
# evaluate the method (only building the attribution function if we need to)
if attr_key not in _attribution_cache:
method_reps.append(score(getattr(methods, method_name)(model, X_train)))
else:
method_reps.append(score(None))
np.random.seed(old_seed)
return np.array(method_reps).mean(0)
|
[
"def",
"__score_method",
"(",
"X",
",",
"y",
",",
"fcounts",
",",
"model_generator",
",",
"score_function",
",",
"method_name",
",",
"nreps",
"=",
"10",
",",
"test_size",
"=",
"100",
",",
"cache_dir",
"=",
"\"/tmp\"",
")",
":",
"old_seed",
"=",
"np",
".",
"random",
".",
"seed",
"(",
")",
"np",
".",
"random",
".",
"seed",
"(",
"3293",
")",
"# average the method scores over several train/test splits",
"method_reps",
"=",
"[",
"]",
"data_hash",
"=",
"hashlib",
".",
"sha256",
"(",
"__toarray",
"(",
"X",
")",
".",
"flatten",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"+",
"hashlib",
".",
"sha256",
"(",
"__toarray",
"(",
"y",
")",
")",
".",
"hexdigest",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"nreps",
")",
":",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"y_test",
"=",
"train_test_split",
"(",
"__toarray",
"(",
"X",
")",
",",
"y",
",",
"test_size",
"=",
"test_size",
",",
"random_state",
"=",
"i",
")",
"# define the model we are going to explain, caching so we onlu build it once",
"model_id",
"=",
"\"model_cache__v\"",
"+",
"\"__\"",
".",
"join",
"(",
"[",
"__version__",
",",
"data_hash",
",",
"model_generator",
".",
"__name__",
"]",
")",
"+",
"\".pickle\"",
"cache_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_dir",
",",
"model_id",
"+",
"\".pickle\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cache_file",
")",
":",
"with",
"open",
"(",
"cache_file",
",",
"\"rb\"",
")",
"as",
"f",
":",
"model",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"else",
":",
"model",
"=",
"model_generator",
"(",
")",
"model",
".",
"fit",
"(",
"X_train",
",",
"y_train",
")",
"with",
"open",
"(",
"cache_file",
",",
"\"wb\"",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"model",
",",
"f",
")",
"attr_key",
"=",
"\"_\"",
".",
"join",
"(",
"[",
"model_generator",
".",
"__name__",
",",
"method_name",
",",
"str",
"(",
"test_size",
")",
",",
"str",
"(",
"nreps",
")",
",",
"str",
"(",
"i",
")",
",",
"data_hash",
"]",
")",
"def",
"score",
"(",
"attr_function",
")",
":",
"def",
"cached_attr_function",
"(",
"X_inner",
")",
":",
"if",
"attr_key",
"not",
"in",
"_attribution_cache",
":",
"_attribution_cache",
"[",
"attr_key",
"]",
"=",
"attr_function",
"(",
"X_inner",
")",
"return",
"_attribution_cache",
"[",
"attr_key",
"]",
"#cached_attr_function = lambda X: __check_cache(attr_function, X)",
"if",
"fcounts",
"is",
"None",
":",
"return",
"score_function",
"(",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"y_test",
",",
"cached_attr_function",
",",
"model",
",",
"i",
")",
"else",
":",
"scores",
"=",
"[",
"]",
"for",
"f",
"in",
"fcounts",
":",
"scores",
".",
"append",
"(",
"score_function",
"(",
"f",
",",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"y_test",
",",
"cached_attr_function",
",",
"model",
",",
"i",
")",
")",
"return",
"np",
".",
"array",
"(",
"scores",
")",
"# evaluate the method (only building the attribution function if we need to)",
"if",
"attr_key",
"not",
"in",
"_attribution_cache",
":",
"method_reps",
".",
"append",
"(",
"score",
"(",
"getattr",
"(",
"methods",
",",
"method_name",
")",
"(",
"model",
",",
"X_train",
")",
")",
")",
"else",
":",
"method_reps",
".",
"append",
"(",
"score",
"(",
"None",
")",
")",
"np",
".",
"random",
".",
"seed",
"(",
"old_seed",
")",
"return",
"np",
".",
"array",
"(",
"method_reps",
")",
".",
"mean",
"(",
"0",
")"
] |
Test an explanation method.
|
[
"Test",
"an",
"explanation",
"method",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L446-L495
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_and_00
|
def human_and_00(X, y, model_generator, method_name):
""" AND (false/false)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 0
"""
return _human_and(X, model_generator, method_name, False, False)
|
python
|
def human_and_00(X, y, model_generator, method_name):
""" AND (false/false)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 0
"""
return _human_and(X, model_generator, method_name, False, False)
|
[
"def",
"human_and_00",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_and",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"False",
")"
] |
AND (false/false)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 0
|
[
"AND",
"(",
"false",
"/",
"false",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L578-L592
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_and_01
|
def human_and_01(X, y, model_generator, method_name):
""" AND (false/true)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 1
"""
return _human_and(X, model_generator, method_name, False, True)
|
python
|
def human_and_01(X, y, model_generator, method_name):
""" AND (false/true)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 1
"""
return _human_and(X, model_generator, method_name, False, True)
|
[
"def",
"human_and_01",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_and",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"True",
")"
] |
AND (false/true)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 1
|
[
"AND",
"(",
"false",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L594-L608
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_and_11
|
def human_and_11(X, y, model_generator, method_name):
""" AND (true/true)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 2
"""
return _human_and(X, model_generator, method_name, True, True)
|
python
|
def human_and_11(X, y, model_generator, method_name):
""" AND (true/true)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 2
"""
return _human_and(X, model_generator, method_name, True, True)
|
[
"def",
"human_and_11",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_and",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"True",
",",
"True",
")"
] |
AND (true/true)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 2
|
[
"AND",
"(",
"true",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L610-L624
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_or_00
|
def human_or_00(X, y, model_generator, method_name):
""" OR (false/false)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 0
"""
return _human_or(X, model_generator, method_name, False, False)
|
python
|
def human_or_00(X, y, model_generator, method_name):
""" OR (false/false)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 0
"""
return _human_or(X, model_generator, method_name, False, False)
|
[
"def",
"human_or_00",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_or",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"False",
")"
] |
OR (false/false)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 0
|
[
"OR",
"(",
"false",
"/",
"false",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L649-L663
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_or_01
|
def human_or_01(X, y, model_generator, method_name):
""" OR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 1
"""
return _human_or(X, model_generator, method_name, False, True)
|
python
|
def human_or_01(X, y, model_generator, method_name):
""" OR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 1
"""
return _human_or(X, model_generator, method_name, False, True)
|
[
"def",
"human_or_01",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_or",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"True",
")"
] |
OR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 1
|
[
"OR",
"(",
"false",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L665-L679
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_or_11
|
def human_or_11(X, y, model_generator, method_name):
""" OR (true/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 2
"""
return _human_or(X, model_generator, method_name, True, True)
|
python
|
def human_or_11(X, y, model_generator, method_name):
""" OR (true/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 2
"""
return _human_or(X, model_generator, method_name, True, True)
|
[
"def",
"human_or_11",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_or",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"True",
",",
"True",
")"
] |
OR (true/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 2
|
[
"OR",
"(",
"true",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L681-L695
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_xor_00
|
def human_xor_00(X, y, model_generator, method_name):
""" XOR (false/false)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 3
"""
return _human_xor(X, model_generator, method_name, False, False)
|
python
|
def human_xor_00(X, y, model_generator, method_name):
""" XOR (false/false)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 3
"""
return _human_xor(X, model_generator, method_name, False, False)
|
[
"def",
"human_xor_00",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_xor",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"False",
")"
] |
XOR (false/false)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 3
|
[
"XOR",
"(",
"false",
"/",
"false",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L720-L734
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_xor_01
|
def human_xor_01(X, y, model_generator, method_name):
""" XOR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 4
"""
return _human_xor(X, model_generator, method_name, False, True)
|
python
|
def human_xor_01(X, y, model_generator, method_name):
""" XOR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 4
"""
return _human_xor(X, model_generator, method_name, False, True)
|
[
"def",
"human_xor_01",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_xor",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"True",
")"
] |
XOR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 4
|
[
"XOR",
"(",
"false",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L736-L750
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_xor_11
|
def human_xor_11(X, y, model_generator, method_name):
""" XOR (true/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 5
"""
return _human_xor(X, model_generator, method_name, True, True)
|
python
|
def human_xor_11(X, y, model_generator, method_name):
""" XOR (true/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 5
"""
return _human_xor(X, model_generator, method_name, True, True)
|
[
"def",
"human_xor_11",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_xor",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"True",
",",
"True",
")"
] |
XOR (true/true)
This tests how well a feature attribution method agrees with human intuition
for an eXclusive OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough but not both: +6 points
transform = "identity"
sort_order = 5
|
[
"XOR",
"(",
"true",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L752-L766
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_sum_00
|
def human_sum_00(X, y, model_generator, method_name):
""" SUM (false/false)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 0
"""
return _human_sum(X, model_generator, method_name, False, False)
|
python
|
def human_sum_00(X, y, model_generator, method_name):
""" SUM (false/false)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 0
"""
return _human_sum(X, model_generator, method_name, False, False)
|
[
"def",
"human_sum_00",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_sum",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"False",
")"
] |
SUM (false/false)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 0
|
[
"SUM",
"(",
"false",
"/",
"false",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L791-L804
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_sum_01
|
def human_sum_01(X, y, model_generator, method_name):
""" SUM (false/true)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 1
"""
return _human_sum(X, model_generator, method_name, False, True)
|
python
|
def human_sum_01(X, y, model_generator, method_name):
""" SUM (false/true)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 1
"""
return _human_sum(X, model_generator, method_name, False, True)
|
[
"def",
"human_sum_01",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_sum",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"False",
",",
"True",
")"
] |
SUM (false/true)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 1
|
[
"SUM",
"(",
"false",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L806-L819
|
train
|
slundberg/shap
|
shap/benchmark/metrics.py
|
human_sum_11
|
def human_sum_11(X, y, model_generator, method_name):
""" SUM (true/true)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 2
"""
return _human_sum(X, model_generator, method_name, True, True)
|
python
|
def human_sum_11(X, y, model_generator, method_name):
""" SUM (true/true)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 2
"""
return _human_sum(X, model_generator, method_name, True, True)
|
[
"def",
"human_sum_11",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"return",
"_human_sum",
"(",
"X",
",",
"model_generator",
",",
"method_name",
",",
"True",
",",
"True",
")"
] |
SUM (true/true)
This tests how well a feature attribution method agrees with human intuition
for a SUM operation. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
transform = "identity"
sort_order = 2
|
[
"SUM",
"(",
"true",
"/",
"true",
")"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L821-L834
|
train
|
slundberg/shap
|
shap/explainers/linear.py
|
LinearExplainer._estimate_transforms
|
def _estimate_transforms(self, nsamples):
""" Uses block matrix inversion identities to quickly estimate transforms.
After a bit of matrix math we can isolate a transform matrix (# features x # features)
that is independent of any sample we are explaining. It is the result of averaging over
all feature permutations, but we just use a fixed number of samples to estimate the value.
TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could
happen through a recursive method that uses the same block matrix inversion as below.
"""
M = len(self.coef)
mean_transform = np.zeros((M,M))
x_transform = np.zeros((M,M))
inds = np.arange(M, dtype=np.int)
for _ in tqdm(range(nsamples), "Estimating transforms"):
np.random.shuffle(inds)
cov_inv_SiSi = np.zeros((0,0))
cov_Si = np.zeros((M,0))
for j in range(M):
i = inds[j]
# use the last Si as the new S
cov_S = cov_Si
cov_inv_SS = cov_inv_SiSi
# get the new cov_Si
cov_Si = self.cov[:,inds[:j+1]]
# compute the new cov_inv_SiSi from cov_inv_SS
d = cov_Si[i,:-1].T
t = np.matmul(cov_inv_SS, d)
Z = self.cov[i, i]
u = Z - np.matmul(t.T, d)
cov_inv_SiSi = np.zeros((j+1, j+1))
if j > 0:
cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u
cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1,:-1] = -t / u
cov_inv_SiSi[-1, -1] = 1 / u
# + coef @ (Q(bar(Sui)) - Q(bar(S)))
mean_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
coef_R_Si = np.matmul(self.coef[inds[j+1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j+1:]])
mean_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]])
mean_transform[i, inds[:j]] -= coef_R_S
# - coef @ (Q(Sui) - Q(S))
x_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
x_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
x_transform[i, inds[:j]] -= coef_R_S
mean_transform /= nsamples
x_transform /= nsamples
return mean_transform, x_transform
|
python
|
def _estimate_transforms(self, nsamples):
""" Uses block matrix inversion identities to quickly estimate transforms.
After a bit of matrix math we can isolate a transform matrix (# features x # features)
that is independent of any sample we are explaining. It is the result of averaging over
all feature permutations, but we just use a fixed number of samples to estimate the value.
TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could
happen through a recursive method that uses the same block matrix inversion as below.
"""
M = len(self.coef)
mean_transform = np.zeros((M,M))
x_transform = np.zeros((M,M))
inds = np.arange(M, dtype=np.int)
for _ in tqdm(range(nsamples), "Estimating transforms"):
np.random.shuffle(inds)
cov_inv_SiSi = np.zeros((0,0))
cov_Si = np.zeros((M,0))
for j in range(M):
i = inds[j]
# use the last Si as the new S
cov_S = cov_Si
cov_inv_SS = cov_inv_SiSi
# get the new cov_Si
cov_Si = self.cov[:,inds[:j+1]]
# compute the new cov_inv_SiSi from cov_inv_SS
d = cov_Si[i,:-1].T
t = np.matmul(cov_inv_SS, d)
Z = self.cov[i, i]
u = Z - np.matmul(t.T, d)
cov_inv_SiSi = np.zeros((j+1, j+1))
if j > 0:
cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u
cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1,:-1] = -t / u
cov_inv_SiSi[-1, -1] = 1 / u
# + coef @ (Q(bar(Sui)) - Q(bar(S)))
mean_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
coef_R_Si = np.matmul(self.coef[inds[j+1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j+1:]])
mean_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]])
mean_transform[i, inds[:j]] -= coef_R_S
# - coef @ (Q(Sui) - Q(S))
x_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
x_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
x_transform[i, inds[:j]] -= coef_R_S
mean_transform /= nsamples
x_transform /= nsamples
return mean_transform, x_transform
|
[
"def",
"_estimate_transforms",
"(",
"self",
",",
"nsamples",
")",
":",
"M",
"=",
"len",
"(",
"self",
".",
"coef",
")",
"mean_transform",
"=",
"np",
".",
"zeros",
"(",
"(",
"M",
",",
"M",
")",
")",
"x_transform",
"=",
"np",
".",
"zeros",
"(",
"(",
"M",
",",
"M",
")",
")",
"inds",
"=",
"np",
".",
"arange",
"(",
"M",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"_",
"in",
"tqdm",
"(",
"range",
"(",
"nsamples",
")",
",",
"\"Estimating transforms\"",
")",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"inds",
")",
"cov_inv_SiSi",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
"0",
")",
")",
"cov_Si",
"=",
"np",
".",
"zeros",
"(",
"(",
"M",
",",
"0",
")",
")",
"for",
"j",
"in",
"range",
"(",
"M",
")",
":",
"i",
"=",
"inds",
"[",
"j",
"]",
"# use the last Si as the new S",
"cov_S",
"=",
"cov_Si",
"cov_inv_SS",
"=",
"cov_inv_SiSi",
"# get the new cov_Si",
"cov_Si",
"=",
"self",
".",
"cov",
"[",
":",
",",
"inds",
"[",
":",
"j",
"+",
"1",
"]",
"]",
"# compute the new cov_inv_SiSi from cov_inv_SS",
"d",
"=",
"cov_Si",
"[",
"i",
",",
":",
"-",
"1",
"]",
".",
"T",
"t",
"=",
"np",
".",
"matmul",
"(",
"cov_inv_SS",
",",
"d",
")",
"Z",
"=",
"self",
".",
"cov",
"[",
"i",
",",
"i",
"]",
"u",
"=",
"Z",
"-",
"np",
".",
"matmul",
"(",
"t",
".",
"T",
",",
"d",
")",
"cov_inv_SiSi",
"=",
"np",
".",
"zeros",
"(",
"(",
"j",
"+",
"1",
",",
"j",
"+",
"1",
")",
")",
"if",
"j",
">",
"0",
":",
"cov_inv_SiSi",
"[",
":",
"-",
"1",
",",
":",
"-",
"1",
"]",
"=",
"cov_inv_SS",
"+",
"np",
".",
"outer",
"(",
"t",
",",
"t",
")",
"/",
"u",
"cov_inv_SiSi",
"[",
":",
"-",
"1",
",",
"-",
"1",
"]",
"=",
"cov_inv_SiSi",
"[",
"-",
"1",
",",
":",
"-",
"1",
"]",
"=",
"-",
"t",
"/",
"u",
"cov_inv_SiSi",
"[",
"-",
"1",
",",
"-",
"1",
"]",
"=",
"1",
"/",
"u",
"# + coef @ (Q(bar(Sui)) - Q(bar(S)))",
"mean_transform",
"[",
"i",
",",
"i",
"]",
"+=",
"self",
".",
"coef",
"[",
"i",
"]",
"# + coef @ R(Sui)",
"coef_R_Si",
"=",
"np",
".",
"matmul",
"(",
"self",
".",
"coef",
"[",
"inds",
"[",
"j",
"+",
"1",
":",
"]",
"]",
",",
"np",
".",
"matmul",
"(",
"cov_Si",
",",
"cov_inv_SiSi",
")",
"[",
"inds",
"[",
"j",
"+",
"1",
":",
"]",
"]",
")",
"mean_transform",
"[",
"i",
",",
"inds",
"[",
":",
"j",
"+",
"1",
"]",
"]",
"+=",
"coef_R_Si",
"# - coef @ R(S)",
"coef_R_S",
"=",
"np",
".",
"matmul",
"(",
"self",
".",
"coef",
"[",
"inds",
"[",
"j",
":",
"]",
"]",
",",
"np",
".",
"matmul",
"(",
"cov_S",
",",
"cov_inv_SS",
")",
"[",
"inds",
"[",
"j",
":",
"]",
"]",
")",
"mean_transform",
"[",
"i",
",",
"inds",
"[",
":",
"j",
"]",
"]",
"-=",
"coef_R_S",
"# - coef @ (Q(Sui) - Q(S))",
"x_transform",
"[",
"i",
",",
"i",
"]",
"+=",
"self",
".",
"coef",
"[",
"i",
"]",
"# + coef @ R(Sui)",
"x_transform",
"[",
"i",
",",
"inds",
"[",
":",
"j",
"+",
"1",
"]",
"]",
"+=",
"coef_R_Si",
"# - coef @ R(S)",
"x_transform",
"[",
"i",
",",
"inds",
"[",
":",
"j",
"]",
"]",
"-=",
"coef_R_S",
"mean_transform",
"/=",
"nsamples",
"x_transform",
"/=",
"nsamples",
"return",
"mean_transform",
",",
"x_transform"
] |
Uses block matrix inversion identities to quickly estimate transforms.
After a bit of matrix math we can isolate a transform matrix (# features x # features)
that is independent of any sample we are explaining. It is the result of averaging over
all feature permutations, but we just use a fixed number of samples to estimate the value.
TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could
happen through a recursive method that uses the same block matrix inversion as below.
|
[
"Uses",
"block",
"matrix",
"inversion",
"identities",
"to",
"quickly",
"estimate",
"transforms",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/linear.py#L113-L175
|
train
|
slundberg/shap
|
shap/explainers/linear.py
|
LinearExplainer.shap_values
|
def shap_values(self, X):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer).
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
X = X.values
#assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
if self.feature_dependence == "correlation":
phi = np.matmul(np.matmul(X[:,self.valid_inds], self.avg_proj.T), self.x_transform.T) - self.mean_transformed
phi = np.matmul(phi, self.avg_proj)
full_phi = np.zeros(((phi.shape[0], self.M)))
full_phi[:,self.valid_inds] = phi
return full_phi
elif self.feature_dependence == "independent":
if len(self.coef.shape) == 1:
return np.array(X - self.mean) * self.coef
else:
return [np.array(X - self.mean) * self.coef[i] for i in range(self.coef.shape[0])]
|
python
|
def shap_values(self, X):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer).
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
X = X.values
#assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
if self.feature_dependence == "correlation":
phi = np.matmul(np.matmul(X[:,self.valid_inds], self.avg_proj.T), self.x_transform.T) - self.mean_transformed
phi = np.matmul(phi, self.avg_proj)
full_phi = np.zeros(((phi.shape[0], self.M)))
full_phi[:,self.valid_inds] = phi
return full_phi
elif self.feature_dependence == "independent":
if len(self.coef.shape) == 1:
return np.array(X - self.mean) * self.coef
else:
return [np.array(X - self.mean) * self.coef[i] for i in range(self.coef.shape[0])]
|
[
"def",
"shap_values",
"(",
"self",
",",
"X",
")",
":",
"# convert dataframes",
"if",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"pandas.core.series.Series'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"X",
")",
")",
".",
"endswith",
"(",
"\"'pandas.core.frame.DataFrame'>\"",
")",
":",
"X",
"=",
"X",
".",
"values",
"#assert str(type(X)).endswith(\"'numpy.ndarray'>\"), \"Unknown instance type: \" + str(type(X))",
"assert",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"1",
"or",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"2",
",",
"\"Instance must have 1 or 2 dimensions!\"",
"if",
"self",
".",
"feature_dependence",
"==",
"\"correlation\"",
":",
"phi",
"=",
"np",
".",
"matmul",
"(",
"np",
".",
"matmul",
"(",
"X",
"[",
":",
",",
"self",
".",
"valid_inds",
"]",
",",
"self",
".",
"avg_proj",
".",
"T",
")",
",",
"self",
".",
"x_transform",
".",
"T",
")",
"-",
"self",
".",
"mean_transformed",
"phi",
"=",
"np",
".",
"matmul",
"(",
"phi",
",",
"self",
".",
"avg_proj",
")",
"full_phi",
"=",
"np",
".",
"zeros",
"(",
"(",
"(",
"phi",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"M",
")",
")",
")",
"full_phi",
"[",
":",
",",
"self",
".",
"valid_inds",
"]",
"=",
"phi",
"return",
"full_phi",
"elif",
"self",
".",
"feature_dependence",
"==",
"\"independent\"",
":",
"if",
"len",
"(",
"self",
".",
"coef",
".",
"shape",
")",
"==",
"1",
":",
"return",
"np",
".",
"array",
"(",
"X",
"-",
"self",
".",
"mean",
")",
"*",
"self",
".",
"coef",
"else",
":",
"return",
"[",
"np",
".",
"array",
"(",
"X",
"-",
"self",
".",
"mean",
")",
"*",
"self",
".",
"coef",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"coef",
".",
"shape",
"[",
"0",
"]",
")",
"]"
] |
Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer).
|
[
"Estimate",
"the",
"SHAP",
"values",
"for",
"a",
"set",
"of",
"samples",
"."
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/linear.py#L177-L215
|
train
|
slundberg/shap
|
shap/benchmark/models.py
|
independentlinear60__ffnn
|
def independentlinear60__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['mean_squared_error'])
return KerasWrap(model, 30, flatten_output=True)
|
python
|
def independentlinear60__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['mean_squared_error'])
return KerasWrap(model, 30, flatten_output=True)
|
[
"def",
"independentlinear60__ffnn",
"(",
")",
":",
"from",
"keras",
".",
"models",
"import",
"Sequential",
"from",
"keras",
".",
"layers",
"import",
"Dense",
"model",
"=",
"Sequential",
"(",
")",
"model",
".",
"add",
"(",
"Dense",
"(",
"32",
",",
"activation",
"=",
"'relu'",
",",
"input_dim",
"=",
"60",
")",
")",
"model",
".",
"add",
"(",
"Dense",
"(",
"20",
",",
"activation",
"=",
"'relu'",
")",
")",
"model",
".",
"add",
"(",
"Dense",
"(",
"20",
",",
"activation",
"=",
"'relu'",
")",
")",
"model",
".",
"add",
"(",
"Dense",
"(",
"1",
")",
")",
"model",
".",
"compile",
"(",
"optimizer",
"=",
"'adam'",
",",
"loss",
"=",
"'mean_squared_error'",
",",
"metrics",
"=",
"[",
"'mean_squared_error'",
"]",
")",
"return",
"KerasWrap",
"(",
"model",
",",
"30",
",",
"flatten_output",
"=",
"True",
")"
] |
4-Layer Neural Network
|
[
"4",
"-",
"Layer",
"Neural",
"Network"
] |
b280cb81d498b9d98565cad8dd16fc88ae52649f
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/models.py#L114-L130
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.