after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __init__(
self,
keys: KeysCollection,
angle: Union[Sequence[float], float],
keep_size: bool = True,
mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
align_corners: Union[Sequence[bool], bool] = False,
dtype: Union[Sequence[Optional[np.dtype]], Optional[np.dtype]] = np.float64,
) -> None:
super().__init__(keys)
self.rotator = Rotate(angle=angle, keep_size=keep_size)
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
self.dtype = ensure_tuple_rep(dtype, len(self.keys))
|
def __init__(
self,
keys: KeysCollection,
angle: Union[Sequence[float], float],
keep_size: bool = True,
mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
align_corners: Union[Sequence[bool], bool] = False,
) -> None:
super().__init__(keys)
self.rotator = Rotate(angle=angle, keep_size=keep_size)
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
|
https://github.com/Project-MONAI/MONAI/issues/832
|
======================================================================
FAIL: test_training (__main__.IntegrationSegmentation3D)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/test_integration_segmentation_3d.py", line 257, in test_training
rtol=1e-3,
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 1533, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 846, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=0
Mismatched elements: 6 / 6 (100%)
Max absolute difference: 0.01720129
Max relative difference: 0.04047175
x: array([0.543151, 0.471052, 0.453605, 0.438546, 0.437794, 0.407818])
y: array([0.544673, 0.475109, 0.444963, 0.427036, 0.433381, 0.42502 ])
----------------------------------------------------------------------
Ran 1 test in 92.377s
FAILED (failures=1)
|
AssertionError
|
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
d[key] = self.rotator(
d[key],
mode=self.mode[idx],
padding_mode=self.padding_mode[idx],
align_corners=self.align_corners[idx],
dtype=self.dtype[idx],
)
return d
|
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
d[key] = self.rotator(
d[key],
mode=self.mode[idx],
padding_mode=self.padding_mode[idx],
align_corners=self.align_corners[idx],
)
return d
|
https://github.com/Project-MONAI/MONAI/issues/832
|
======================================================================
FAIL: test_training (__main__.IntegrationSegmentation3D)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/test_integration_segmentation_3d.py", line 257, in test_training
rtol=1e-3,
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 1533, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 846, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=0
Mismatched elements: 6 / 6 (100%)
Max absolute difference: 0.01720129
Max relative difference: 0.04047175
x: array([0.543151, 0.471052, 0.453605, 0.438546, 0.437794, 0.407818])
y: array([0.544673, 0.475109, 0.444963, 0.427036, 0.433381, 0.42502 ])
----------------------------------------------------------------------
Ran 1 test in 92.377s
FAILED (failures=1)
|
AssertionError
|
def __init__(
self,
keys: KeysCollection,
range_x: Union[Tuple[float, float], float] = 0.0,
range_y: Union[Tuple[float, float], float] = 0.0,
range_z: Union[Tuple[float, float], float] = 0.0,
prob: float = 0.1,
keep_size: bool = True,
mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
align_corners: Union[Sequence[bool], bool] = False,
dtype: Union[Sequence[Optional[np.dtype]], Optional[np.dtype]] = np.float64,
) -> None:
super().__init__(keys)
self.range_x = ensure_tuple(range_x)
if len(self.range_x) == 1:
self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
self.range_y = ensure_tuple(range_y)
if len(self.range_y) == 1:
self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
self.range_z = ensure_tuple(range_z)
if len(self.range_z) == 1:
self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
self.prob = prob
self.keep_size = keep_size
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
self.dtype = ensure_tuple_rep(dtype, len(self.keys))
self._do_transform = False
self.x = 0.0
self.y = 0.0
self.z = 0.0
|
def __init__(
self,
keys: KeysCollection,
range_x: Union[Tuple[float, float], float] = 0.0,
range_y: Union[Tuple[float, float], float] = 0.0,
range_z: Union[Tuple[float, float], float] = 0.0,
prob: float = 0.1,
keep_size: bool = True,
mode: GridSampleModeSequence = GridSampleMode.BILINEAR,
padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER,
align_corners: Union[Sequence[bool], bool] = False,
) -> None:
super().__init__(keys)
self.range_x = ensure_tuple(range_x)
if len(self.range_x) == 1:
self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
self.range_y = ensure_tuple(range_y)
if len(self.range_y) == 1:
self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
self.range_z = ensure_tuple(range_z)
if len(self.range_z) == 1:
self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
self.prob = prob
self.keep_size = keep_size
self.mode = ensure_tuple_rep(mode, len(self.keys))
self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))
self._do_transform = False
self.x = 0.0
self.y = 0.0
self.z = 0.0
|
https://github.com/Project-MONAI/MONAI/issues/832
|
======================================================================
FAIL: test_training (__main__.IntegrationSegmentation3D)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/test_integration_segmentation_3d.py", line 257, in test_training
rtol=1e-3,
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 1533, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 846, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=0
Mismatched elements: 6 / 6 (100%)
Max absolute difference: 0.01720129
Max relative difference: 0.04047175
x: array([0.543151, 0.471052, 0.453605, 0.438546, 0.437794, 0.407818])
y: array([0.544673, 0.475109, 0.444963, 0.427036, 0.433381, 0.42502 ])
----------------------------------------------------------------------
Ran 1 test in 92.377s
FAILED (failures=1)
|
AssertionError
|
def __call__(self, data):
self.randomize()
d = dict(data)
if not self._do_transform:
return d
rotator = Rotate(
angle=self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z),
keep_size=self.keep_size,
)
for idx, key in enumerate(self.keys):
d[key] = rotator(
d[key],
mode=self.mode[idx],
padding_mode=self.padding_mode[idx],
align_corners=self.align_corners[idx],
dtype=self.dtype[idx],
)
return d
|
def __call__(self, data):
self.randomize()
d = dict(data)
if not self._do_transform:
return d
rotator = Rotate(
angle=self.x if d[self.keys[0]].ndim == 3 else (self.x, self.y, self.z),
keep_size=self.keep_size,
)
for idx, key in enumerate(self.keys):
d[key] = rotator(
d[key],
mode=self.mode[idx],
padding_mode=self.padding_mode[idx],
align_corners=self.align_corners[idx],
)
return d
|
https://github.com/Project-MONAI/MONAI/issues/832
|
======================================================================
FAIL: test_training (__main__.IntegrationSegmentation3D)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/test_integration_segmentation_3d.py", line 257, in test_training
rtol=1e-3,
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 1533, in assert_allclose
verbose=verbose, header=header, equal_nan=equal_nan)
File "/opt/conda/lib/python3.6/site-packages/numpy/testing/_private/utils.py", line 846, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=0
Mismatched elements: 6 / 6 (100%)
Max absolute difference: 0.01720129
Max relative difference: 0.04047175
x: array([0.543151, 0.471052, 0.453605, 0.438546, 0.437794, 0.407818])
y: array([0.544673, 0.475109, 0.444963, 0.427036, 0.433381, 0.42502 ])
----------------------------------------------------------------------
Ran 1 test in 92.377s
FAILED (failures=1)
|
AssertionError
|
def paga_path(
adata: AnnData,
nodes: Sequence[Union[str, int]],
keys: Sequence[str],
use_raw: bool = True,
annotations: Sequence[str] = ("dpt_pseudotime",),
color_map: Union[str, Colormap, None] = None,
color_maps_annotations: Mapping[str, Union[str, Colormap]] = MappingProxyType(
dict(dpt_pseudotime="Greys")
),
palette_groups: Optional[Sequence[str]] = None,
n_avg: int = 1,
groups_key: Optional[str] = None,
xlim: Tuple[Optional[int], Optional[int]] = (None, None),
title: Optional[str] = None,
left_margin=None,
ytick_fontsize: Optional[int] = None,
title_fontsize: Optional[int] = None,
show_node_names: bool = True,
show_yticks: bool = True,
show_colorbar: bool = True,
legend_fontsize: Union[int, float, _FontSize, None] = None,
legend_fontweight: Union[int, _FontWeight, None] = None,
normalize_to_zero_one: bool = False,
as_heatmap: bool = True,
return_data: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
ax: Optional[Axes] = None,
) -> Optional[Axes]:
"""\
Gene expression and annotation changes along paths in the abstracted graph.
Parameters
----------
adata
An annotated data matrix.
nodes
A path through nodes of the abstracted graph, that is, names or indices
(within `.categories`) of groups that have been used to run PAGA.
keys
Either variables in `adata.var_names` or annotations in
`adata.obs`. They are plotted using `color_map`.
use_raw
Use `adata.raw` for retrieving gene expressions if it has been set.
annotations
Plot these keys with `color_maps_annotations`. Need to be keys for
`adata.obs`.
color_map
Matplotlib colormap.
color_maps_annotations
Color maps for plotting the annotations. Keys of the dictionary must
appear in `annotations`.
palette_groups
Ususally, use the same `sc.pl.palettes...` as used for coloring the
abstracted graph.
n_avg
Number of data points to include in computation of running average.
groups_key
Key of the grouping used to run PAGA. If `None`, defaults to
`adata.uns['paga']['groups']`.
as_heatmap
Plot the timeseries as heatmap. If not plotting as heatmap,
`annotations` have no effect.
show_node_names
Plot the node names on the nodes bar.
show_colorbar
Show the colorbar.
show_yticks
Show the y ticks.
normalize_to_zero_one
Shift and scale the running average to [0, 1] per gene.
return_data
Return the timeseries data in addition to the axes if `True`.
show
Show the plot, do not return axis.
save
If `True` or a `str`, save the figure.
A string is appended to the default filename.
Infer the filetype if ending on \\{`'.pdf'`, `'.png'`, `'.svg'`\\}.
ax
A matplotlib axes object.
Returns
-------
A :class:`~matplotlib.axes.Axes` object, if `ax` is `None`, else `None`.
If `return_data`, return the timeseries data in addition to an axes.
"""
ax_was_none = ax is None
if groups_key is None:
if "groups" not in adata.uns["paga"]:
raise KeyError(
"Pass the key of the grouping with which you ran PAGA, "
"using the parameter `groups_key`."
)
groups_key = adata.uns["paga"]["groups"]
groups_names = adata.obs[groups_key].cat.categories
if "dpt_pseudotime" not in adata.obs.keys():
raise ValueError(
"`pl.paga_path` requires computation of a pseudotime `tl.dpt` "
"for ordering at single-cell resolution"
)
if palette_groups is None:
_utils.add_colors_for_categorical_sample_annotation(adata, groups_key)
palette_groups = adata.uns[f"{groups_key}_colors"]
def moving_average(a):
return _sc_utils.moving_average(a, n_avg)
ax = pl.gca() if ax is None else ax
X = []
x_tick_locs = [0]
x_tick_labels = []
groups = []
anno_dict = {anno: [] for anno in annotations}
if isinstance(nodes[0], str):
nodes_ints = []
groups_names_set = set(groups_names)
for node in nodes:
if node not in groups_names_set:
raise ValueError(
f"Each node/group needs to be in {groups_names.tolist()} "
f"(`groups_key`={groups_key!r}) not {node!r}."
)
nodes_ints.append(groups_names.get_loc(node))
nodes_strs = nodes
else:
nodes_ints = nodes
nodes_strs = [groups_names[node] for node in nodes]
adata_X = adata
if use_raw and adata.raw is not None:
adata_X = adata.raw
for ikey, key in enumerate(keys):
x = []
for igroup, group in enumerate(nodes_ints):
idcs = np.arange(adata.n_obs)[
adata.obs[groups_key].values == nodes_strs[igroup]
]
if len(idcs) == 0:
raise ValueError(
"Did not find data points that match "
f"`adata.obs[{groups_key!r}].values == {str(group)!r}`. "
f"Check whether `adata.obs[{groups_key!r}]` "
"actually contains what you expect."
)
idcs_group = np.argsort(
adata.obs["dpt_pseudotime"].values[
adata.obs[groups_key].values == nodes_strs[igroup]
]
)
idcs = idcs[idcs_group]
values = (
adata.obs[key].values if key in adata.obs_keys() else adata_X[:, key].X
)[idcs]
x += (values.A if issparse(values) else values).tolist()
if ikey == 0:
groups += [group] * len(idcs)
x_tick_locs.append(len(x))
for anno in annotations:
series = adata.obs[anno]
if is_categorical_dtype(series):
series = series.cat.codes
anno_dict[anno] += list(series.values[idcs])
if n_avg > 1:
x = moving_average(x)
if ikey == 0:
for key in annotations:
if not isinstance(anno_dict[key][0], str):
anno_dict[key] = moving_average(anno_dict[key])
if normalize_to_zero_one:
x -= np.min(x)
x /= np.max(x)
X.append(x)
if not as_heatmap:
ax.plot(x[xlim[0] : xlim[1]], label=key)
if ikey == 0:
for igroup, group in enumerate(nodes):
if len(groups_names) > 0 and group not in groups_names:
label = groups_names[group]
else:
label = group
x_tick_labels.append(label)
X = np.asarray(X).squeeze()
if as_heatmap:
img = ax.imshow(X, aspect="auto", interpolation="nearest", cmap=color_map)
if show_yticks:
ax.set_yticks(range(len(X)))
ax.set_yticklabels(keys, fontsize=ytick_fontsize)
else:
ax.set_yticks([])
ax.set_frame_on(False)
ax.set_xticks([])
ax.tick_params(axis="both", which="both", length=0)
ax.grid(False)
if show_colorbar:
pl.colorbar(img, ax=ax)
left_margin = 0.2 if left_margin is None else left_margin
pl.subplots_adjust(left=left_margin)
else:
left_margin = 0.4 if left_margin is None else left_margin
if len(keys) > 1:
pl.legend(
frameon=False,
loc="center left",
bbox_to_anchor=(-left_margin, 0.5),
fontsize=legend_fontsize,
)
xlabel = groups_key
if not as_heatmap:
ax.set_xlabel(xlabel)
pl.yticks([])
if len(keys) == 1:
pl.ylabel(keys[0] + " (a.u.)")
else:
import matplotlib.colors
# groups bar
ax_bounds = ax.get_position().bounds
groups_axis = pl.axes(
(
ax_bounds[0],
ax_bounds[1] - ax_bounds[3] / len(keys),
ax_bounds[2],
ax_bounds[3] / len(keys),
)
)
groups = np.array(groups)[None, :]
groups_axis.imshow(
groups,
aspect="auto",
interpolation="nearest",
cmap=matplotlib.colors.ListedColormap(
# the following line doesn't work because of normalization
# adata.uns['paga_groups_colors'])
palette_groups[np.min(groups).astype(int) :],
N=int(np.max(groups) + 1 - np.min(groups)),
),
)
if show_yticks:
groups_axis.set_yticklabels(["", xlabel, ""], fontsize=ytick_fontsize)
else:
groups_axis.set_yticks([])
groups_axis.set_frame_on(False)
if show_node_names:
ypos = (groups_axis.get_ylim()[1] + groups_axis.get_ylim()[0]) / 2
x_tick_locs = _sc_utils.moving_average(x_tick_locs, n=2)
for ilabel, label in enumerate(x_tick_labels):
groups_axis.text(
x_tick_locs[ilabel],
ypos,
x_tick_labels[ilabel],
fontdict=dict(
horizontalalignment="center",
verticalalignment="center",
),
)
groups_axis.set_xticks([])
groups_axis.grid(False)
groups_axis.tick_params(axis="both", which="both", length=0)
# further annotations
y_shift = ax_bounds[3] / len(keys)
for ianno, anno in enumerate(annotations):
if ianno > 0:
y_shift = ax_bounds[3] / len(keys) / 2
anno_axis = pl.axes(
(
ax_bounds[0],
ax_bounds[1] - (ianno + 2) * y_shift,
ax_bounds[2],
y_shift,
)
)
arr = np.array(anno_dict[anno])[None, :]
if anno not in color_maps_annotations:
color_map_anno = (
"Vega10" if is_categorical_dtype(adata.obs[anno]) else "Greys"
)
else:
color_map_anno = color_maps_annotations[anno]
img = anno_axis.imshow(
arr,
aspect="auto",
interpolation="nearest",
cmap=color_map_anno,
)
if show_yticks:
anno_axis.set_yticklabels(["", anno, ""], fontsize=ytick_fontsize)
anno_axis.tick_params(axis="both", which="both", length=0)
else:
anno_axis.set_yticks([])
anno_axis.set_frame_on(False)
anno_axis.set_xticks([])
anno_axis.grid(False)
if title is not None:
ax.set_title(title, fontsize=title_fontsize)
if show is None and not ax_was_none:
show = False
else:
show = settings.autoshow if show is None else show
_utils.savefig_or_show("paga_path", show=show, save=save)
if return_data:
df = pd.DataFrame(data=X.T, columns=keys)
df["groups"] = moving_average(groups) # groups is without moving average, yet
if "dpt_pseudotime" in anno_dict:
df["distance"] = anno_dict["dpt_pseudotime"].T
return ax, df if ax_was_none and not show else df
else:
return ax if ax_was_none and not show else None
|
def paga_path(
adata: AnnData,
nodes: Sequence[Union[str, int]],
keys: Sequence[str],
use_raw: bool = True,
annotations: Sequence[str] = ("dpt_pseudotime",),
color_map: Union[str, Colormap, None] = None,
color_maps_annotations: Mapping[str, Union[str, Colormap]] = MappingProxyType(
dict(dpt_pseudotime="Greys")
),
palette_groups: Optional[Sequence[str]] = None,
n_avg: int = 1,
groups_key: Optional[str] = None,
xlim: Tuple[Optional[int], Optional[int]] = (None, None),
title: Optional[str] = None,
left_margin=None,
ytick_fontsize: Optional[int] = None,
title_fontsize: Optional[int] = None,
show_node_names: bool = True,
show_yticks: bool = True,
show_colorbar: bool = True,
legend_fontsize: Union[int, float, _FontSize, None] = None,
legend_fontweight: Union[int, _FontWeight, None] = None,
normalize_to_zero_one: bool = False,
as_heatmap: bool = True,
return_data: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
ax: Optional[Axes] = None,
) -> Optional[Axes]:
"""\
Gene expression and annotation changes along paths in the abstracted graph.
Parameters
----------
adata
An annotated data matrix.
nodes
A path through nodes of the abstracted graph, that is, names or indices
(within `.categories`) of groups that have been used to run PAGA.
keys
Either variables in `adata.var_names` or annotations in
`adata.obs`. They are plotted using `color_map`.
use_raw
Use `adata.raw` for retrieving gene expressions if it has been set.
annotations
Plot these keys with `color_maps_annotations`. Need to be keys for
`adata.obs`.
color_map
Matplotlib colormap.
color_maps_annotations
Color maps for plotting the annotations. Keys of the dictionary must
appear in `annotations`.
palette_groups
Ususally, use the same `sc.pl.palettes...` as used for coloring the
abstracted graph.
n_avg
Number of data points to include in computation of running average.
groups_key
Key of the grouping used to run PAGA. If `None`, defaults to
`adata.uns['paga']['groups']`.
as_heatmap
Plot the timeseries as heatmap. If not plotting as heatmap,
`annotations` have no effect.
show_node_names
Plot the node names on the nodes bar.
show_colorbar
Show the colorbar.
show_yticks
Show the y ticks.
normalize_to_zero_one
Shift and scale the running average to [0, 1] per gene.
return_data
Return the timeseries data in addition to the axes if `True`.
show
Show the plot, do not return axis.
save
If `True` or a `str`, save the figure.
A string is appended to the default filename.
Infer the filetype if ending on \\{`'.pdf'`, `'.png'`, `'.svg'`\\}.
ax
A matplotlib axes object.
Returns
-------
A :class:`~matplotlib.axes.Axes` object, if `ax` is `None`, else `None`.
If `return_data`, return the timeseries data in addition to an axes.
"""
ax_was_none = ax is None
if groups_key is None:
if "groups" not in adata.uns["paga"]:
raise KeyError(
"Pass the key of the grouping with which you ran PAGA, "
"using the parameter `groups_key`."
)
groups_key = adata.uns["paga"]["groups"]
groups_names = adata.obs[groups_key].cat.categories
if "dpt_pseudotime" not in adata.obs.keys():
raise ValueError(
"`pl.paga_path` requires computation of a pseudotime `tl.dpt` "
"for ordering at single-cell resolution"
)
if palette_groups is None:
_utils.add_colors_for_categorical_sample_annotation(adata, groups_key)
palette_groups = adata.uns[f"{groups_key}_colors"]
def moving_average(a):
return _sc_utils.moving_average(a, n_avg)
ax = pl.gca() if ax is None else ax
X = []
x_tick_locs = [0]
x_tick_labels = []
groups = []
anno_dict = {anno: [] for anno in annotations}
if isinstance(nodes[0], str):
nodes_ints = []
groups_names_set = set(groups_names)
for node in nodes:
if node not in groups_names_set:
raise ValueError(
f"Each node/group needs to be in {groups_names.tolist()} "
f"(`groups_key`={groups_key!r}) not {node!r}."
)
nodes_ints.append(groups_names.get_loc(node))
nodes_strs = nodes
else:
nodes_ints = nodes
nodes_strs = [groups_names[node] for node in nodes]
adata_X = adata
if use_raw and adata.raw is not None:
adata_X = adata.raw
for ikey, key in enumerate(keys):
x = []
for igroup, group in enumerate(nodes_ints):
idcs = np.arange(adata.n_obs)[
adata.obs[groups_key].values == nodes_strs[igroup]
]
if len(idcs) == 0:
raise ValueError(
"Did not find data points that match "
f"`adata.obs[{groups_key!r}].values == {str(group)!r}`. "
f"Check whether `adata.obs[{groups_key!r}]` "
"actually contains what you expect."
)
idcs_group = np.argsort(
adata.obs["dpt_pseudotime"].values[
adata.obs[groups_key].values == nodes_strs[igroup]
]
)
idcs = idcs[idcs_group]
if key in adata.obs_keys():
x += list(adata.obs[key].values[idcs])
else:
x += list(adata_X[:, key].X[idcs])
if ikey == 0:
groups += [group for i in range(len(idcs))]
x_tick_locs.append(len(x))
for anno in annotations:
series = adata.obs[anno]
if is_categorical_dtype(series):
series = series.cat.codes
anno_dict[anno] += list(series.values[idcs])
if n_avg > 1:
x = moving_average(x)
if ikey == 0:
for key in annotations:
if not isinstance(anno_dict[key][0], str):
anno_dict[key] = moving_average(anno_dict[key])
if normalize_to_zero_one:
x -= np.min(x)
x /= np.max(x)
X.append(x)
if not as_heatmap:
ax.plot(x[xlim[0] : xlim[1]], label=key)
if ikey == 0:
for igroup, group in enumerate(nodes):
if len(groups_names) > 0 and group not in groups_names:
label = groups_names[group]
else:
label = group
x_tick_labels.append(label)
X = np.array(X)
if as_heatmap:
img = ax.imshow(X, aspect="auto", interpolation="nearest", cmap=color_map)
if show_yticks:
ax.set_yticks(range(len(X)))
ax.set_yticklabels(keys, fontsize=ytick_fontsize)
else:
ax.set_yticks([])
ax.set_frame_on(False)
ax.set_xticks([])
ax.tick_params(axis="both", which="both", length=0)
ax.grid(False)
if show_colorbar:
pl.colorbar(img, ax=ax)
left_margin = 0.2 if left_margin is None else left_margin
pl.subplots_adjust(left=left_margin)
else:
left_margin = 0.4 if left_margin is None else left_margin
if len(keys) > 1:
pl.legend(
frameon=False,
loc="center left",
bbox_to_anchor=(-left_margin, 0.5),
fontsize=legend_fontsize,
)
xlabel = groups_key
if not as_heatmap:
ax.set_xlabel(xlabel)
pl.yticks([])
if len(keys) == 1:
pl.ylabel(keys[0] + " (a.u.)")
else:
import matplotlib.colors
# groups bar
ax_bounds = ax.get_position().bounds
groups_axis = pl.axes(
(
ax_bounds[0],
ax_bounds[1] - ax_bounds[3] / len(keys),
ax_bounds[2],
ax_bounds[3] / len(keys),
)
)
groups = np.array(groups)[None, :]
groups_axis.imshow(
groups,
aspect="auto",
interpolation="nearest",
cmap=matplotlib.colors.ListedColormap(
# the following line doesn't work because of normalization
# adata.uns['paga_groups_colors'])
palette_groups[np.min(groups).astype(int) :],
N=int(np.max(groups) + 1 - np.min(groups)),
),
)
if show_yticks:
groups_axis.set_yticklabels(["", xlabel, ""], fontsize=ytick_fontsize)
else:
groups_axis.set_yticks([])
groups_axis.set_frame_on(False)
if show_node_names:
ypos = (groups_axis.get_ylim()[1] + groups_axis.get_ylim()[0]) / 2
x_tick_locs = _sc_utils.moving_average(x_tick_locs, n=2)
for ilabel, label in enumerate(x_tick_labels):
groups_axis.text(
x_tick_locs[ilabel],
ypos,
x_tick_labels[ilabel],
fontdict=dict(
horizontalalignment="center",
verticalalignment="center",
),
)
groups_axis.set_xticks([])
groups_axis.grid(False)
groups_axis.tick_params(axis="both", which="both", length=0)
# further annotations
y_shift = ax_bounds[3] / len(keys)
for ianno, anno in enumerate(annotations):
if ianno > 0:
y_shift = ax_bounds[3] / len(keys) / 2
anno_axis = pl.axes(
(
ax_bounds[0],
ax_bounds[1] - (ianno + 2) * y_shift,
ax_bounds[2],
y_shift,
)
)
arr = np.array(anno_dict[anno])[None, :]
if anno not in color_maps_annotations:
color_map_anno = (
"Vega10" if is_categorical_dtype(adata.obs[anno]) else "Greys"
)
else:
color_map_anno = color_maps_annotations[anno]
img = anno_axis.imshow(
arr,
aspect="auto",
interpolation="nearest",
cmap=color_map_anno,
)
if show_yticks:
anno_axis.set_yticklabels(["", anno, ""], fontsize=ytick_fontsize)
anno_axis.tick_params(axis="both", which="both", length=0)
else:
anno_axis.set_yticks([])
anno_axis.set_frame_on(False)
anno_axis.set_xticks([])
anno_axis.grid(False)
if title is not None:
ax.set_title(title, fontsize=title_fontsize)
if show is None and not ax_was_none:
show = False
else:
show = settings.autoshow if show is None else show
_utils.savefig_or_show("paga_path", show=show, save=save)
if return_data:
df = pd.DataFrame(data=X.T, columns=keys)
df["groups"] = moving_average(groups) # groups is without moving average, yet
if "dpt_pseudotime" in anno_dict:
df["distance"] = anno_dict["dpt_pseudotime"].T
return ax, df if ax_was_none and not show else df
else:
return ax if ax_was_none and not show else None
|
https://github.com/theislab/scanpy/issues/953
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-a9471349c389> in <module>
----> 1 sc.pl.paga_path(adata, nodes=['1Ery'], keys=['Gata2', 'Btg2', 'Btg1'])
~/Documents/Python/scanpy/scanpy/plotting/_tools/paga.py in paga_path(adata, nodes, keys, use_raw, annotations, color_map, color_maps_annotations, palette_groups, n_avg, groups_key, xlim, title, left_margin, ytick_fontsize, title_fontsize, show_node_names, show_yticks, show_colorbar, legend_fontsize, legend_fontweight, normalize_to_zero_one, as_heatmap, return_data, show, save, ax)
1057 if as_heatmap:
1058 img = ax.imshow(
-> 1059 X, aspect='auto', interpolation='nearest', cmap=color_map
1060 )
1061 if show_yticks:
~/anaconda3/lib/python3.7/site-packages/matplotlib/__init__.py in inner(ax, data, *args, **kwargs)
1599 def inner(ax, *args, data=None, **kwargs):
1600 if data is None:
-> 1601 return func(ax, *map(sanitize_sequence, args), **kwargs)
1602
1603 bound = new_sig.bind(ax, *args, **kwargs)
~/anaconda3/lib/python3.7/site-packages/matplotlib/cbook/deprecation.py in wrapper(*args, **kwargs)
367 f"%(removal)s. If any parameter follows {name!r}, they "
368 f"should be pass as keyword, not positionally.")
--> 369 return func(*args, **kwargs)
370
371 return wrapper
~/anaconda3/lib/python3.7/site-packages/matplotlib/cbook/deprecation.py in wrapper(*args, **kwargs)
367 f"%(removal)s. If any parameter follows {name!r}, they "
368 f"should be pass as keyword, not positionally.")
--> 369 return func(*args, **kwargs)
370
371 return wrapper
~/anaconda3/lib/python3.7/site-packages/matplotlib/axes/_axes.py in imshow(self, X, cmap, norm, aspect, interpolation, alpha, vmin, vmax, origin, extent, shape, filternorm, filterrad, imlim, resample, url, **kwargs)
5669 resample=resample, **kwargs)
5670
-> 5671 im.set_data(X)
5672 im.set_alpha(alpha)
5673 if im.get_clip_path() is None:
~/anaconda3/lib/python3.7/site-packages/matplotlib/image.py in set_data(self, A)
688 or self._A.ndim == 3 and self._A.shape[-1] in [3, 4]):
689 raise TypeError("Invalid shape {} for image data"
--> 690 .format(self._A.shape))
691
692 if self._A.ndim == 3:
TypeError: Invalid shape (3, 43, 1) for image data
|
TypeError
|
def print_versions(*, file=None):
"""Print print versions of imported packages"""
if file is None: # Inform people about the behavior change
warning("If you miss a compact list, please try `print_header`!")
stdout = sys.stdout
try:
buf = sys.stdout = io.StringIO()
sinfo(
dependencies=True,
excludes=[
"builtins",
"stdlib_list",
"importlib_metadata",
# Special module present if test coverage being calculated
# https://gitlab.com/joelostblom/sinfo/-/issues/10
"$coverage",
],
)
finally:
sys.stdout = stdout
output = buf.getvalue()
print(output, file=file)
|
def print_versions(*, file=None):
"""Print print versions of imported packages"""
if file is None: # Inform people about the behavior change
warning("If you miss a compact list, please try `print_header`!")
stdout = sys.stdout
try:
buf = sys.stdout = io.StringIO()
sinfo(dependencies=True)
finally:
sys.stdout = stdout
output = buf.getvalue()
print(output, file=file)
|
https://github.com/theislab/scanpy/issues/1437
|
WARNING: If you miss a compact list, please try `print_header`!
Traceback (most recent call last):
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py", line 195, in sinfo
mod_version = _find_version(mod.__version__)
AttributeError: module 'importlib_metadata' has no attribute '__version__'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/scanpy/logging.py", line 161, in print_versions
sinfo(dependencies=True)
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py", line 198, in sinfo
mod_version = _find_version(mod.version)
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py", line 42, in _find_version
return mod_version_attr()
TypeError: version() missing 1 required positional argument: 'distribution_name'
|
AttributeError
|
def obs_df(
adata: AnnData,
keys: Iterable[str] = (),
obsm_keys: Iterable[Tuple[str, int]] = (),
*,
layer: str = None,
gene_symbols: str = None,
use_raw: bool = False,
) -> pd.DataFrame:
"""\
Return values for observations in adata.
Params
------
adata
AnnData object to get values from.
keys
Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.
obsm_keys
Tuple of `(key from obsm, column index of obsm[key])`.
layer
Layer of `adata` to use as expression values.
gene_symbols
Column of `adata.var` to search for `keys` in.
use_raw
Whether to get expression values from `adata.raw`.
Returns
-------
A dataframe with `adata.obs_names` as index, and values specified by `keys`
and `obsm_keys`.
Examples
--------
Getting value for plotting:
>>> pbmc = sc.datasets.pbmc68k_reduced()
>>> plotdf = sc.get.obs_df(
pbmc,
keys=["CD8B", "n_genes"],
obsm_keys=[("X_umap", 0), ("X_umap", 1)]
)
>>> plotdf.plot.scatter("X_umap0", "X_umap1", c="CD8B")
Calculating mean expression for marker genes by cluster:
>>> pbmc = sc.datasets.pbmc68k_reduced()
>>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']
>>> genedf = sc.get.obs_df(
pbmc,
keys=["louvain", *marker_genes]
)
>>> grouped = genedf.groupby("louvain")
>>> mean, var = grouped.mean(), grouped.var()
"""
if use_raw:
assert layer is None, (
"Cannot specify use_raw=True and a layer at the same time."
)
var = adata.raw.var
else:
var = adata.var
if gene_symbols is not None:
alias_index = pd.Index(var[gene_symbols])
else:
alias_index = None
obs_cols, var_idx_keys, var_symbols = _check_indices(
adata.obs,
var.index,
"obs",
keys,
alias_index=alias_index,
use_raw=use_raw,
)
# Make df
df = pd.DataFrame(index=adata.obs_names)
# add var values
if len(var_idx_keys) > 0:
matrix = _get_array_values(
_get_obs_rep(adata, layer=layer, use_raw=use_raw),
var.index,
var_idx_keys,
axis=1,
backed=adata.isbacked,
)
df = pd.concat(
[df, pd.DataFrame(matrix, columns=var_symbols, index=adata.obs_names)],
axis=1,
)
# add obs values
if len(obs_cols) > 0:
df = pd.concat([df, adata.obs[obs_cols]], axis=1)
# reorder columns to given order (including duplicates keys if present)
if keys:
df = df[keys]
for k, idx in obsm_keys:
added_k = f"{k}-{idx}"
val = adata.obsm[k]
if isinstance(val, np.ndarray):
df[added_k] = np.ravel(val[:, idx])
elif isinstance(val, spmatrix):
df[added_k] = np.ravel(val[:, idx].toarray())
elif isinstance(val, pd.DataFrame):
df[added_k] = val.loc[:, idx]
return df
|
def obs_df(
adata: AnnData,
keys: Iterable[str] = (),
obsm_keys: Iterable[Tuple[str, int]] = (),
*,
layer: str = None,
gene_symbols: str = None,
use_raw: bool = False,
) -> pd.DataFrame:
"""\
Return values for observations in adata.
Params
------
adata
AnnData object to get values from.
keys
Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`.
obsm_keys
Tuple of `(key from obsm, column index of obsm[key])`.
layer
Layer of `adata` to use as expression values.
gene_symbols
Column of `adata.var` to search for `keys` in.
use_raw
Whether to get expression values from `adata.raw`.
Returns
-------
A dataframe with `adata.obs_names` as index, and values specified by `keys`
and `obsm_keys`.
Examples
--------
Getting value for plotting:
>>> pbmc = sc.datasets.pbmc68k_reduced()
>>> plotdf = sc.get.obs_df(
pbmc,
keys=["CD8B", "n_genes"],
obsm_keys=[("X_umap", 0), ("X_umap", 1)]
)
>>> plotdf.plot.scatter("X_umap0", "X_umap1", c="CD8B")
Calculating mean expression for marker genes by cluster:
>>> pbmc = sc.datasets.pbmc68k_reduced()
>>> marker_genes = ['CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ']
>>> genedf = sc.get.obs_df(
pbmc,
keys=["louvain", *marker_genes]
)
>>> grouped = genedf.groupby("louvain")
>>> mean, var = grouped.mean(), grouped.var()
"""
if use_raw:
assert layer is None, (
"Cannot specify use_raw=True and a layer at the same time."
)
var = adata.raw.var
else:
var = adata.var
if gene_symbols is not None:
alias_index = pd.Index(var[gene_symbols])
else:
alias_index = None
obs_cols, var_idx_keys, var_symbols = _check_indices(
adata.obs,
var.index,
"obs",
keys,
alias_index=alias_index,
use_raw=use_raw,
)
# Make df
df = pd.DataFrame(index=adata.obs_names)
# add var values
if len(var_idx_keys) > 0:
matrix = _get_array_values(
_get_obs_rep(adata, layer=layer, use_raw=use_raw),
var.index,
var_idx_keys,
axis=1,
backed=adata.isbacked,
)
df = pd.concat(
[df, pd.DataFrame(matrix, columns=var_symbols, index=adata.obs_names)],
axis=1,
)
# add obs values
if len(obs_cols) > 0:
df = pd.concat([df, adata.obs[obs_cols]], axis=1)
# reorder columns to given order (including duplicates keys if present)
df = df[keys]
for k, idx in obsm_keys:
added_k = f"{k}-{idx}"
val = adata.obsm[k]
if isinstance(val, np.ndarray):
df[added_k] = np.ravel(val[:, idx])
elif isinstance(val, spmatrix):
df[added_k] = np.ravel(val[:, idx].toarray())
elif isinstance(val, pd.DataFrame):
df[added_k] = val.loc[:, idx]
return df
|
https://github.com/theislab/scanpy/issues/1634
|
~/scanpy/scanpy/get/get.py in obs_df(adata, keys, obsm_keys, layer, gene_symbols, use_raw)
301
302 # reorder columns to given order (including duplicates keys if present)
--> 303 df = df[keys]
304 for k, idx in obsm_keys:
305 added_k = f"{k}-{idx}"
KeyError: ()
|
KeyError
|
def var_df(
adata: AnnData,
keys: Iterable[str] = (),
varm_keys: Iterable[Tuple[str, int]] = (),
*,
layer: str = None,
) -> pd.DataFrame:
"""\
Return values for observations in adata.
Params
------
adata
AnnData object to get values from.
keys
Keys from either `.obs_names`, or `.var.columns`.
varm_keys
Tuple of `(key from varm, column index of varm[key])`.
layer
Layer of `adata` to use as expression values.
Returns
-------
A dataframe with `adata.var_names` as index, and values specified by `keys`
and `varm_keys`.
"""
# Argument handling
var_cols, obs_idx_keys, _ = _check_indices(adata.var, adata.obs_names, "var", keys)
# initialize df
df = pd.DataFrame(index=adata.var.index)
if len(obs_idx_keys) > 0:
matrix = _get_array_values(
_get_obs_rep(adata, layer=layer),
adata.obs_names,
obs_idx_keys,
axis=0,
backed=adata.isbacked,
).T
df = pd.concat(
[df, pd.DataFrame(matrix, columns=obs_idx_keys, index=adata.var_names)],
axis=1,
)
# add obs values
if len(var_cols) > 0:
df = pd.concat([df, adata.var[var_cols]], axis=1)
# reorder columns to given order
if keys:
df = df[keys]
for k, idx in varm_keys:
added_k = f"{k}-{idx}"
val = adata.varm[k]
if isinstance(val, np.ndarray):
df[added_k] = np.ravel(val[:, idx])
elif isinstance(val, spmatrix):
df[added_k] = np.ravel(val[:, idx].toarray())
elif isinstance(val, pd.DataFrame):
df[added_k] = val.loc[:, idx]
return df
|
def var_df(
adata: AnnData,
keys: Iterable[str] = (),
varm_keys: Iterable[Tuple[str, int]] = (),
*,
layer: str = None,
) -> pd.DataFrame:
"""\
Return values for observations in adata.
Params
------
adata
AnnData object to get values from.
keys
Keys from either `.obs_names`, or `.var.columns`.
varm_keys
Tuple of `(key from varm, column index of varm[key])`.
layer
Layer of `adata` to use as expression values.
Returns
-------
A dataframe with `adata.var_names` as index, and values specified by `keys`
and `varm_keys`.
"""
# Argument handling
var_cols, obs_idx_keys, _ = _check_indices(adata.var, adata.obs_names, "var", keys)
# initialize df
df = pd.DataFrame(index=adata.var.index)
if len(obs_idx_keys) > 0:
matrix = _get_array_values(
_get_obs_rep(adata, layer=layer),
adata.obs_names,
obs_idx_keys,
axis=0,
backed=adata.isbacked,
).T
df = pd.concat(
[df, pd.DataFrame(matrix, columns=obs_idx_keys, index=adata.var_names)],
axis=1,
)
# add obs values
if len(var_cols) > 0:
df = pd.concat([df, adata.var[var_cols]], axis=1)
# reorder columns to given order
df = df[keys]
for k, idx in varm_keys:
added_k = f"{k}-{idx}"
val = adata.varm[k]
if isinstance(val, np.ndarray):
df[added_k] = np.ravel(val[:, idx])
elif isinstance(val, spmatrix):
df[added_k] = np.ravel(val[:, idx].toarray())
elif isinstance(val, pd.DataFrame):
df[added_k] = val.loc[:, idx]
return df
|
https://github.com/theislab/scanpy/issues/1634
|
~/scanpy/scanpy/get/get.py in obs_df(adata, keys, obsm_keys, layer, gene_symbols, use_raw)
301
302 # reorder columns to given order (including duplicates keys if present)
--> 303 df = df[keys]
304 for k, idx in obsm_keys:
305 added_k = f"{k}-{idx}"
KeyError: ()
|
KeyError
|
def heatmap(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
dendrogram: Union[bool, str] = False,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
standard_scale: Optional[Literal["var", "obs"]] = None,
swap_axes: bool = False,
show_gene_labels: Optional[bool] = None,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
figsize: Optional[Tuple[float, float]] = None,
**kwds,
):
"""\
Heatmap of the expression values of genes.
If `groupby` is given, the heatmap is ordered by the respective group. For
example, a list of marker genes can be plotted, ordered by clustering. If
the `groupby` observation annotation is not categorical the observation
annotation is turned into a categorical by binning the data into the number
specified in `num_categories`.
Parameters
----------
{common_plot_args}
standard_scale
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or observation,
subtract the minimum and divide each by its maximum.
swap_axes
By default, the x axis contains `var_names` (e.g. genes) and the y axis the `groupby`
categories (if any). By setting `swap_axes` then x are the `groupby` categories and y the `var_names`.
show_gene_labels
By default gene labels are shown when there are 50 or less genes. Otherwise the labels are removed.
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.imshow`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True, swap_axes=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True)
See also
--------
rank_genes_groups_heatmap: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
var_names, var_group_labels, var_group_positions = _check_var_names_type(
var_names, var_group_labels, var_group_positions
)
categories, obs_tidy = _prepare_dataframe(
adata,
var_names,
groupby,
use_raw,
log,
num_categories,
gene_symbols=gene_symbols,
layer=layer,
)
# check if var_group_labels are a subset of categories:
if var_group_labels is not None:
if set(var_group_labels).issubset(categories):
var_groups_subset_of_groupby = True
else:
var_groups_subset_of_groupby = False
if standard_scale == "obs":
obs_tidy = obs_tidy.sub(obs_tidy.min(1), axis=0)
obs_tidy = obs_tidy.div(obs_tidy.max(1), axis=0).fillna(0)
elif standard_scale == "var":
obs_tidy -= obs_tidy.min(0)
obs_tidy = (obs_tidy / obs_tidy.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warning("Unknown type for standard_scale, ignored")
if groupby is None or len(categories) <= 1:
categorical = False
# dendrogram can only be computed between groupby categories
dendrogram = False
else:
categorical = True
# get categories colors
if isinstance(groupby, str) and is_categorical_dtype(adata.obs[groupby]):
# saved category colors only work when groupby is valid adata.obs
# categorical column. When groupby is a numerical column
# or when groupby is a list of columns the colors are assigned on the fly,
# which may create inconsistencies in multiple runs that require sorting
# of the categories (eg. when dendrogram is plotted).
if groupby + "_colors" not in adata.uns:
# if colors are not found, assign a new palette
# and save it using the same code for embeddings
from ._tools.scatterplots import _get_palette
_get_palette(adata, groupby)
groupby_colors = adata.uns[groupby + "_colors"]
else:
# this case happen when adata.obs[groupby] is numeric
# the values are converted into a category on the fly
groupby_colors = None
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(
adata,
groupby,
dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions,
categories=categories,
)
var_group_labels = dendro_data["var_group_labels"]
var_group_positions = dendro_data["var_group_positions"]
# reorder obs_tidy
if dendro_data["var_names_idx_ordered"] is not None:
obs_tidy = obs_tidy.iloc[:, dendro_data["var_names_idx_ordered"]]
var_names = [var_names[x] for x in dendro_data["var_names_idx_ordered"]]
obs_tidy.index = obs_tidy.index.reorder_categories(
[categories[x] for x in dendro_data["categories_idx_ordered"]],
ordered=True,
)
# reorder groupby colors
if groupby_colors is not None:
groupby_colors = [
groupby_colors[x] for x in dendro_data["categories_idx_ordered"]
]
if show_gene_labels is None:
if len(var_names) <= 50:
show_gene_labels = True
else:
show_gene_labels = False
logg.warning(
"Gene labels are not shown when more than 50 genes are visualized. "
"To show gene labels set `show_gene_labels=True`"
)
if categorical:
obs_tidy = obs_tidy.sort_index()
colorbar_width = 0.2
if not swap_axes:
# define a layout of 2 rows x 4 columns
# first row is for 'brackets' (if no brackets needed, the height of this row
# is zero) second row is for main content. This second row is divided into
# three axes:
# first ax is for the categories defined by `groupby`
# second ax is for the heatmap
# third ax is for the dendrogram
# fourth ax is for colorbar
dendro_width = 1 if dendrogram else 0
groupby_width = 0.2 if categorical else 0
if figsize is None:
height = 6
if show_gene_labels:
heatmap_width = len(var_names) * 0.3
else:
heatmap_width = 8
width = heatmap_width + dendro_width + groupby_width
else:
width, height = figsize
heatmap_width = width - (dendro_width + groupby_width)
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.15, height]
else:
height_ratios = [0, height]
width_ratios = [
groupby_width,
heatmap_width,
dendro_width,
colorbar_width,
]
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(
nrows=2,
ncols=4,
width_ratios=width_ratios,
wspace=0.15 / width,
hspace=0.13 / height,
height_ratios=height_ratios,
)
heatmap_ax = fig.add_subplot(axs[1, 1])
kwds.setdefault("interpolation", "nearest")
im = heatmap_ax.imshow(obs_tidy.values, aspect="auto", **kwds)
heatmap_ax.set_ylim(obs_tidy.shape[0] - 0.5, -0.5)
heatmap_ax.set_xlim(-0.5, obs_tidy.shape[1] - 0.5)
heatmap_ax.tick_params(axis="y", left=False, labelleft=False)
heatmap_ax.set_ylabel("")
heatmap_ax.grid(False)
if show_gene_labels:
heatmap_ax.tick_params(axis="x", labelsize="small")
heatmap_ax.set_xticks(np.arange(len(var_names)))
heatmap_ax.set_xticklabels(var_names, rotation=90)
else:
heatmap_ax.tick_params(axis="x", labelbottom=False, bottom=False)
# plot colorbar
_plot_colorbar(im, fig, axs[1, 3])
if categorical:
groupby_ax = fig.add_subplot(axs[1, 0])
(
label2code,
ticks,
labels,
groupby_cmap,
norm,
) = _plot_categories_as_colorblocks(
groupby_ax, obs_tidy, colors=groupby_colors, orientation="left"
)
# add lines to main heatmap
line_positions = (
np.cumsum(obs_tidy.index.value_counts(sort=False))[:-1] - 0.5
)
heatmap_ax.hlines(
line_positions,
-0.5,
len(var_names) - 0.5,
lw=1,
color="black",
zorder=10,
clip_on=False,
)
if dendrogram:
dendro_ax = fig.add_subplot(axs[1, 2], sharey=heatmap_ax)
_plot_dendrogram(
dendro_ax, adata, groupby, ticks=ticks, dendrogram_key=dendrogram
)
# plot group legends on top of heatmap_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 1], sharex=heatmap_ax)
_plot_gene_groups_brackets(
gene_groups_ax,
group_positions=var_group_positions,
group_labels=var_group_labels,
rotation=var_group_rotation,
left_adjustment=-0.3,
right_adjustment=0.3,
)
# swap axes case
else:
# define a layout of 3 rows x 3 columns
# The first row is for the dendrogram (if not dendrogram height is zero)
# second row is for main content. This col is divided into three axes:
# first ax is for the heatmap
# second ax is for 'brackets' if any (othwerise width is zero)
# third ax is for colorbar
dendro_height = 0.8 if dendrogram else 0
groupby_height = 0.13 if categorical else 0
if figsize is None:
if show_gene_labels:
heatmap_height = len(var_names) * 0.18
else:
heatmap_height = 4
width = 10
height = heatmap_height + dendro_height + groupby_height
else:
width, height = figsize
heatmap_height = height - (dendro_height + groupby_height)
height_ratios = [dendro_height, heatmap_height, groupby_height]
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
width_ratios = [width, 0.14, colorbar_width]
else:
width_ratios = [width, 0, colorbar_width]
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(
nrows=3,
ncols=3,
wspace=0.25 / width,
hspace=0.3 / height,
width_ratios=width_ratios,
height_ratios=height_ratios,
)
# plot heatmap
heatmap_ax = fig.add_subplot(axs[1, 0])
kwds.setdefault("interpolation", "nearest")
im = heatmap_ax.imshow(obs_tidy.T.values, aspect="auto", **kwds)
heatmap_ax.set_xlim(0 - 0.5, obs_tidy.shape[0] - 0.5)
heatmap_ax.set_ylim(obs_tidy.shape[1] - 0.5, -0.5)
heatmap_ax.tick_params(axis="x", bottom=False, labelbottom=False)
heatmap_ax.set_xlabel("")
heatmap_ax.grid(False)
if show_gene_labels:
heatmap_ax.tick_params(axis="y", labelsize="small", length=1)
heatmap_ax.set_yticks(np.arange(len(var_names)))
heatmap_ax.set_yticklabels(var_names, rotation=0)
else:
heatmap_ax.tick_params(axis="y", labelleft=False, left=False)
if categorical:
groupby_ax = fig.add_subplot(axs[2, 0])
(
label2code,
ticks,
labels,
groupby_cmap,
norm,
) = _plot_categories_as_colorblocks(
groupby_ax, obs_tidy, colors=groupby_colors, orientation="bottom"
)
# add lines to main heatmap
line_positions = (
np.cumsum(obs_tidy.index.value_counts(sort=False))[:-1] - 0.5
)
heatmap_ax.vlines(
line_positions,
-0.5,
len(var_names) - 0.5,
lw=1,
color="black",
zorder=10,
clip_on=False,
)
if dendrogram:
dendro_ax = fig.add_subplot(axs[0, 0], sharex=heatmap_ax)
_plot_dendrogram(
dendro_ax,
adata,
groupby,
dendrogram_key=dendrogram,
ticks=ticks,
orientation="top",
)
# plot group legends next to the heatmap_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[1, 1])
arr = []
for idx, (label, pos) in enumerate(
zip(var_group_labels, var_group_positions)
):
if var_groups_subset_of_groupby:
label_code = label2code[label]
else:
label_code = idx
arr += [label_code] * (pos[1] + 1 - pos[0])
gene_groups_ax.imshow(
np.array([arr]).T, aspect="auto", cmap=groupby_cmap, norm=norm
)
gene_groups_ax.axis("off")
# plot colorbar
_plot_colorbar(im, fig, axs[1, 2])
return_ax_dict = {"heatmap_ax": heatmap_ax}
if categorical:
return_ax_dict["groupby_ax"] = groupby_ax
if dendrogram:
return_ax_dict["dendrogram_ax"] = dendro_ax
if var_group_positions is not None and len(var_group_positions) > 0:
return_ax_dict["gene_groups_ax"] = gene_groups_ax
_utils.savefig_or_show("heatmap", show=show, save=save)
show = settings.autoshow if show is None else show
if not show:
return return_ax_dict
|
def heatmap(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
dendrogram: Union[bool, str] = False,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
standard_scale: Optional[Literal["var", "obs"]] = None,
swap_axes: bool = False,
show_gene_labels: Optional[bool] = None,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
figsize: Optional[Tuple[float, float]] = None,
**kwds,
):
"""\
Heatmap of the expression values of genes.
If `groupby` is given, the heatmap is ordered by the respective group. For
example, a list of marker genes can be plotted, ordered by clustering. If
the `groupby` observation annotation is not categorical the observation
annotation is turned into a categorical by binning the data into the number
specified in `num_categories`.
Parameters
----------
{common_plot_args}
standard_scale
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or observation,
subtract the minimum and divide each by its maximum.
swap_axes
By default, the x axis contains `var_names` (e.g. genes) and the y axis the `groupby`
categories (if any). By setting `swap_axes` then x are the `groupby` categories and y the `var_names`.
show_gene_labels
By default gene labels are shown when there are 50 or less genes. Otherwise the labels are removed.
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.imshow`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True, swap_axes=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True)
See also
--------
rank_genes_groups_heatmap: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
var_names, var_group_labels, var_group_positions = _check_var_names_type(
var_names, var_group_labels, var_group_positions
)
categories, obs_tidy = _prepare_dataframe(
adata,
var_names,
groupby,
use_raw,
log,
num_categories,
gene_symbols=gene_symbols,
layer=layer,
)
# check if var_group_labels are a subset of categories:
if var_group_labels is not None:
if set(var_group_labels).issubset(categories):
var_groups_subset_of_groupby = True
else:
var_groups_subset_of_groupby = False
if standard_scale == "obs":
obs_tidy = obs_tidy.sub(obs_tidy.min(1), axis=0)
obs_tidy = obs_tidy.div(obs_tidy.max(1), axis=0).fillna(0)
elif standard_scale == "var":
obs_tidy -= obs_tidy.min(0)
obs_tidy = (obs_tidy / obs_tidy.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warning("Unknown type for standard_scale, ignored")
if groupby is None or len(categories) <= 1:
categorical = False
# dendrogram can only be computed between groupby categories
dendrogram = False
else:
categorical = True
# get categories colors:
if groupby + "_colors" in adata.uns:
groupby_colors = adata.uns[groupby + "_colors"]
else:
groupby_colors = None
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(
adata,
groupby,
dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions,
categories=categories,
)
var_group_labels = dendro_data["var_group_labels"]
var_group_positions = dendro_data["var_group_positions"]
# reorder obs_tidy
if dendro_data["var_names_idx_ordered"] is not None:
obs_tidy = obs_tidy.iloc[:, dendro_data["var_names_idx_ordered"]]
var_names = [var_names[x] for x in dendro_data["var_names_idx_ordered"]]
obs_tidy.index = obs_tidy.index.reorder_categories(
[categories[x] for x in dendro_data["categories_idx_ordered"]],
ordered=True,
)
# reorder groupby colors
if groupby_colors is not None:
groupby_colors = [
groupby_colors[x] for x in dendro_data["categories_idx_ordered"]
]
if show_gene_labels is None:
if len(var_names) <= 50:
show_gene_labels = True
else:
show_gene_labels = False
logg.warning(
"Gene labels are not shown when more than 50 genes are visualized. "
"To show gene labels set `show_gene_labels=True`"
)
if categorical:
obs_tidy = obs_tidy.sort_index()
colorbar_width = 0.2
if not swap_axes:
# define a layout of 2 rows x 4 columns
# first row is for 'brackets' (if no brackets needed, the height of this row is zero)
# second row is for main content. This second row is divided into three axes:
# first ax is for the categories defined by `groupby`
# second ax is for the heatmap
# third ax is for the dendrogram
# fourth ax is for colorbar
dendro_width = 1 if dendrogram else 0
groupby_width = 0.2 if categorical else 0
if figsize is None:
height = 6
if show_gene_labels:
heatmap_width = len(var_names) * 0.3
else:
heatmap_width = 8
width = heatmap_width + dendro_width + groupby_width
else:
width, height = figsize
heatmap_width = width - (dendro_width + groupby_width)
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.15, height]
else:
height_ratios = [0, height]
width_ratios = [
groupby_width,
heatmap_width,
dendro_width,
colorbar_width,
]
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(
nrows=2,
ncols=4,
width_ratios=width_ratios,
wspace=0.15 / width,
hspace=0.13 / height,
height_ratios=height_ratios,
)
heatmap_ax = fig.add_subplot(axs[1, 1])
kwds.setdefault("interpolation", "nearest")
im = heatmap_ax.imshow(obs_tidy.values, aspect="auto", **kwds)
heatmap_ax.set_ylim(obs_tidy.shape[0] - 0.5, -0.5)
heatmap_ax.set_xlim(-0.5, obs_tidy.shape[1] - 0.5)
heatmap_ax.tick_params(axis="y", left=False, labelleft=False)
heatmap_ax.set_ylabel("")
heatmap_ax.grid(False)
# sns.heatmap(obs_tidy, yticklabels="auto", ax=heatmap_ax, cbar_ax=heatmap_cbar_ax, **kwds)
if show_gene_labels:
heatmap_ax.tick_params(axis="x", labelsize="small")
heatmap_ax.set_xticks(np.arange(len(var_names)))
heatmap_ax.set_xticklabels(var_names, rotation=90)
else:
heatmap_ax.tick_params(axis="x", labelbottom=False, bottom=False)
# plot colorbar
_plot_colorbar(im, fig, axs[1, 3])
if categorical:
groupby_ax = fig.add_subplot(axs[1, 0])
(
label2code,
ticks,
labels,
groupby_cmap,
norm,
) = _plot_categories_as_colorblocks(
groupby_ax, obs_tidy, colors=groupby_colors, orientation="left"
)
# add lines to main heatmap
line_positions = (
np.cumsum(obs_tidy.index.value_counts(sort=False))[:-1] - 0.5
)
heatmap_ax.hlines(
line_positions,
-0.73,
len(var_names) - 0.5,
lw=0.6,
zorder=10,
clip_on=False,
)
if dendrogram:
dendro_ax = fig.add_subplot(axs[1, 2], sharey=heatmap_ax)
_plot_dendrogram(
dendro_ax, adata, groupby, ticks=ticks, dendrogram_key=dendrogram
)
# plot group legends on top of heatmap_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 1], sharex=heatmap_ax)
_plot_gene_groups_brackets(
gene_groups_ax,
group_positions=var_group_positions,
group_labels=var_group_labels,
rotation=var_group_rotation,
left_adjustment=-0.3,
right_adjustment=0.3,
)
# swap axes case
else:
# define a layout of 3 rows x 3 columns
# The first row is for the dendrogram (if not dendrogram height is zero)
# second row is for main content. This col is divided into three axes:
# first ax is for the heatmap
# second ax is for 'brackets' if any (othwerise width is zero)
# third ax is for colorbar
dendro_height = 0.8 if dendrogram else 0
groupby_height = 0.13 if categorical else 0
if figsize is None:
if show_gene_labels:
heatmap_height = len(var_names) * 0.18
else:
heatmap_height = 4
width = 10
height = heatmap_height + dendro_height + groupby_height
else:
width, height = figsize
heatmap_height = height - (dendro_height + groupby_height)
height_ratios = [dendro_height, heatmap_height, groupby_height]
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
width_ratios = [width, 0.14, colorbar_width]
else:
width_ratios = [width, 0, colorbar_width]
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(
nrows=3,
ncols=3,
wspace=0.25 / width,
hspace=0.3 / height,
width_ratios=width_ratios,
height_ratios=height_ratios,
)
# plot heatmap
heatmap_ax = fig.add_subplot(axs[1, 0])
kwds.setdefault("interpolation", "nearest")
im = heatmap_ax.imshow(obs_tidy.T.values, aspect="auto", **kwds)
heatmap_ax.set_xlim(0, obs_tidy.shape[0])
heatmap_ax.set_ylim(obs_tidy.shape[1] - 0.5, -0.5)
heatmap_ax.tick_params(axis="x", bottom=False, labelbottom=False)
heatmap_ax.set_xlabel("")
heatmap_ax.grid(False)
if show_gene_labels:
heatmap_ax.tick_params(axis="y", labelsize="small", length=1)
heatmap_ax.set_yticks(np.arange(len(var_names)))
heatmap_ax.set_yticklabels(var_names, rotation=0)
else:
heatmap_ax.tick_params(axis="y", labelleft=False, left=False)
if categorical:
groupby_ax = fig.add_subplot(axs[2, 0])
(
label2code,
ticks,
labels,
groupby_cmap,
norm,
) = _plot_categories_as_colorblocks(
groupby_ax, obs_tidy, colors=groupby_colors, orientation="bottom"
)
# add lines to main heatmap
line_positions = (
np.cumsum(obs_tidy.index.value_counts(sort=False))[:-1] - 0.5
)
heatmap_ax.vlines(
line_positions,
-0.5,
len(var_names) + 0.35,
lw=0.6,
zorder=10,
clip_on=False,
)
if dendrogram:
dendro_ax = fig.add_subplot(axs[0, 0], sharex=heatmap_ax)
_plot_dendrogram(
dendro_ax,
adata,
groupby,
dendrogram_key=dendrogram,
ticks=ticks,
orientation="top",
)
# plot group legends next to the heatmap_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[1, 1])
arr = []
for idx, (label, pos) in enumerate(
zip(var_group_labels, var_group_positions)
):
if var_groups_subset_of_groupby:
label_code = label2code[label]
else:
label_code = idx
arr += [label_code] * (pos[1] + 1 - pos[0])
gene_groups_ax.imshow(
np.array([arr]).T, aspect="auto", cmap=groupby_cmap, norm=norm
)
gene_groups_ax.axis("off")
# plot colorbar
_plot_colorbar(im, fig, axs[1, 2])
return_ax_dict = {"heatmap_ax": heatmap_ax}
if categorical:
return_ax_dict["groupby_ax"] = groupby_ax
if dendrogram:
return_ax_dict["dendrogram_ax"] = dendro_ax
if var_group_positions is not None and len(var_group_positions) > 0:
return_ax_dict["gene_groups_ax"] = gene_groups_ax
_utils.savefig_or_show("heatmap", show=show, save=save)
show = settings.autoshow if show is None else show
if not show:
return return_ax_dict
|
https://github.com/theislab/scanpy/issues/1591
|
TypeError Traceback (most recent call last)
<ipython-input-56-f1ba710dac43> in <module>
9
10 sc.pl.heatmap(a, var_names=a.var_names[:5], groupby=None, swap_axes=True) # is shifted to the left (fig. 1)
---> 11 sc.pl.heatmap(a, var_names=a.var_names[:5], groupby="foo", swap_axes=True)
12
13 a.X[:16, :] = 0
~/.miniconda3/envs/cellrank/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in heatmap(adata, var_names, groupby, use_raw, log, num_categories, dendrogram, gene_symbols, var_group_positions, var_group_labels, var_group_rotation, layer, standard_scale, swap_axes, show_gene_labels, show, save, figsize, **kwds)
1220 groupby_cmap,
1221 norm,
-> 1222 ) = _plot_categories_as_colorblocks(
1223 groupby_ax, obs_tidy, colors=groupby_colors, orientation='bottom'
1224 )
~/.miniconda3/envs/cellrank/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in _plot_categories_as_colorblocks(groupby_ax, obs_tidy, colors, orientation, cmap_name)
2361 if len(labels) > 1:
2362 groupby_ax.set_xticks(ticks)
-> 2363 if max([len(x) for x in labels]) < 3:
2364 # if the labels are small do not rotate them
2365 rotation = 0
~/.miniconda3/envs/cellrank/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in <listcomp>(.0)
2361 if len(labels) > 1:
2362 groupby_ax.set_xticks(ticks)
-> 2363 if max([len(x) for x in labels]) < 3:
2364 # if the labels are small do not rotate them
2365 rotation = 0
TypeError: object of type 'int' has no len()
|
TypeError
|
def _plot_categories_as_colorblocks(
groupby_ax: Axes,
obs_tidy: pd.DataFrame,
colors=None,
orientation: Literal["top", "bottom", "left", "right"] = "left",
cmap_name: str = "tab20",
):
"""\
Plots categories as colored blocks. If orientation is 'left', the categories
are plotted vertically, otherwise they are plotted horizontally.
Parameters
----------
groupby_ax
obs_tidy
colors
Sequence of valid color names to use for each category.
orientation
cmap_name
Name of colormap to use, in case colors is None
Returns
-------
ticks position, labels, colormap
"""
groupby = obs_tidy.index.name
from matplotlib.colors import ListedColormap, BoundaryNorm
if colors is None:
groupby_cmap = pl.get_cmap(cmap_name)
else:
groupby_cmap = ListedColormap(colors, groupby + "_cmap")
norm = BoundaryNorm(np.arange(groupby_cmap.N + 1) - 0.5, groupby_cmap.N)
# determine groupby label positions such that they appear
# centered next/below to the color code rectangle assigned to the category
value_sum = 0
ticks = [] # list of centered position of the labels
labels = []
label2code = {} # dictionary of numerical values asigned to each label
for code, (label, value) in enumerate(
obs_tidy.index.value_counts(sort=False).iteritems()
):
ticks.append(value_sum + (value / 2))
labels.append(label)
value_sum += value
label2code[label] = code
groupby_ax.grid(False)
if orientation == "left":
groupby_ax.imshow(
np.array([[label2code[lab] for lab in obs_tidy.index]]).T,
aspect="auto",
cmap=groupby_cmap,
norm=norm,
)
if len(labels) > 1:
groupby_ax.set_yticks(ticks)
groupby_ax.set_yticklabels(labels)
# remove y ticks
groupby_ax.tick_params(axis="y", left=False, labelsize="small")
# remove x ticks and labels
groupby_ax.tick_params(axis="x", bottom=False, labelbottom=False)
# remove surrounding lines
groupby_ax.spines["right"].set_visible(False)
groupby_ax.spines["top"].set_visible(False)
groupby_ax.spines["left"].set_visible(False)
groupby_ax.spines["bottom"].set_visible(False)
groupby_ax.set_ylabel(groupby)
else:
groupby_ax.imshow(
np.array([[label2code[lab] for lab in obs_tidy.index]]),
aspect="auto",
cmap=groupby_cmap,
norm=norm,
)
if len(labels) > 1:
groupby_ax.set_xticks(ticks)
if max([len(str(x)) for x in labels]) < 3:
# if the labels are small do not rotate them
rotation = 0
else:
rotation = 90
groupby_ax.set_xticklabels(labels, rotation=rotation)
# remove x ticks
groupby_ax.tick_params(axis="x", bottom=False, labelsize="small")
# remove y ticks and labels
groupby_ax.tick_params(axis="y", left=False, labelleft=False)
# remove surrounding lines
groupby_ax.spines["right"].set_visible(False)
groupby_ax.spines["top"].set_visible(False)
groupby_ax.spines["left"].set_visible(False)
groupby_ax.spines["bottom"].set_visible(False)
groupby_ax.set_xlabel(groupby)
return label2code, ticks, labels, groupby_cmap, norm
|
def _plot_categories_as_colorblocks(
groupby_ax: Axes,
obs_tidy: pd.DataFrame,
colors=None,
orientation: Literal["top", "bottom", "left", "right"] = "left",
cmap_name: str = "tab20",
):
"""\
Plots categories as colored blocks. If orientation is 'left', the categories
are plotted vertically, otherwise they are plotted horizontally.
Parameters
----------
groupby_ax
obs_tidy
colors
Sequence of valid color names to use for each category.
orientation
cmap_name
Name of colormap to use, in case colors is None
Returns
-------
ticks position, labels, colormap
"""
groupby = obs_tidy.index.name
from matplotlib.colors import ListedColormap, BoundaryNorm
if colors is None:
groupby_cmap = pl.get_cmap(cmap_name)
else:
groupby_cmap = ListedColormap(colors, groupby + "_cmap")
norm = BoundaryNorm(np.arange(groupby_cmap.N + 1) - 0.5, groupby_cmap.N)
# determine groupby label positions such that they appear
# centered next/below to the color code rectangle assigned to the category
value_sum = 0
ticks = [] # list of centered position of the labels
labels = []
label2code = {} # dictionary of numerical values asigned to each label
for code, (label, value) in enumerate(
obs_tidy.index.value_counts(sort=False).iteritems()
):
ticks.append(value_sum + (value / 2))
labels.append(label)
value_sum += value
label2code[label] = code
groupby_ax.grid(False)
if orientation == "left":
groupby_ax.imshow(
np.array([[label2code[lab] for lab in obs_tidy.index]]).T,
aspect="auto",
cmap=groupby_cmap,
norm=norm,
)
if len(labels) > 1:
groupby_ax.set_yticks(ticks)
groupby_ax.set_yticklabels(labels)
# remove y ticks
groupby_ax.tick_params(axis="y", left=False, labelsize="small")
# remove x ticks and labels
groupby_ax.tick_params(axis="x", bottom=False, labelbottom=False)
# remove surrounding lines
groupby_ax.spines["right"].set_visible(False)
groupby_ax.spines["top"].set_visible(False)
groupby_ax.spines["left"].set_visible(False)
groupby_ax.spines["bottom"].set_visible(False)
groupby_ax.set_ylabel(groupby)
else:
groupby_ax.imshow(
np.array([[label2code[lab] for lab in obs_tidy.index]]),
aspect="auto",
cmap=groupby_cmap,
norm=norm,
)
if len(labels) > 1:
groupby_ax.set_xticks(ticks)
if max([len(x) for x in labels]) < 3:
# if the labels are small do not rotate them
rotation = 0
else:
rotation = 90
groupby_ax.set_xticklabels(labels, rotation=rotation)
# remove x ticks
groupby_ax.tick_params(axis="x", bottom=False, labelsize="small")
# remove y ticks and labels
groupby_ax.tick_params(axis="y", left=False, labelleft=False)
# remove surrounding lines
groupby_ax.spines["right"].set_visible(False)
groupby_ax.spines["top"].set_visible(False)
groupby_ax.spines["left"].set_visible(False)
groupby_ax.spines["bottom"].set_visible(False)
groupby_ax.set_xlabel(groupby)
return label2code, ticks, labels, groupby_cmap, norm
|
https://github.com/theislab/scanpy/issues/1591
|
TypeError Traceback (most recent call last)
<ipython-input-56-f1ba710dac43> in <module>
9
10 sc.pl.heatmap(a, var_names=a.var_names[:5], groupby=None, swap_axes=True) # is shifted to the left (fig. 1)
---> 11 sc.pl.heatmap(a, var_names=a.var_names[:5], groupby="foo", swap_axes=True)
12
13 a.X[:16, :] = 0
~/.miniconda3/envs/cellrank/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in heatmap(adata, var_names, groupby, use_raw, log, num_categories, dendrogram, gene_symbols, var_group_positions, var_group_labels, var_group_rotation, layer, standard_scale, swap_axes, show_gene_labels, show, save, figsize, **kwds)
1220 groupby_cmap,
1221 norm,
-> 1222 ) = _plot_categories_as_colorblocks(
1223 groupby_ax, obs_tidy, colors=groupby_colors, orientation='bottom'
1224 )
~/.miniconda3/envs/cellrank/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in _plot_categories_as_colorblocks(groupby_ax, obs_tidy, colors, orientation, cmap_name)
2361 if len(labels) > 1:
2362 groupby_ax.set_xticks(ticks)
-> 2363 if max([len(x) for x in labels]) < 3:
2364 # if the labels are small do not rotate them
2365 rotation = 0
~/.miniconda3/envs/cellrank/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in <listcomp>(.0)
2361 if len(labels) > 1:
2362 groupby_ax.set_xticks(ticks)
-> 2363 if max([len(x) for x in labels]) < 3:
2364 # if the labels are small do not rotate them
2365 rotation = 0
TypeError: object of type 'int' has no len()
|
TypeError
|
def recipe_seurat(
adata: AnnData,
log: bool = True,
plot: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Normalization and filtering as of Seurat [Satija15]_.
This uses a particular preprocessing.
Expects non-logarithmized data.
If using logarithmized data, pass `log=False`.
"""
if copy:
adata = adata.copy()
pp.filter_cells(adata, min_genes=200)
pp.filter_genes(adata, min_cells=3)
normalize_total(adata, target_sum=1e4)
filter_result = filter_genes_dispersion(
adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log
)
if plot:
from ..plotting import (
_preprocessing as ppp,
) # should not import at the top of the file
ppp.filter_genes_dispersion(filter_result, log=not log)
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
if log:
pp.log1p(adata)
pp.scale(adata, max_value=10)
return adata if copy else None
|
def recipe_seurat(
adata: AnnData,
log: bool = True,
plot: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Normalization and filtering as of Seurat [Satija15]_.
This uses a particular preprocessing.
Expects non-logarithmized data.
If using logarithmized data, pass `log=False`.
"""
if copy:
adata = adata.copy()
pp.filter_cells(adata, min_genes=200)
pp.filter_genes(adata, min_cells=3)
normalize_total(adata, target_sum=1e4)
filter_result = filter_genes_dispersion(
adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log
)
if plot:
from ..plotting import (
_preprocessing as ppp,
) # should not import at the top of the file
ppp.filter_genes_dispersion(filter_result, log=not log)
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
if log:
pp.log1p(adata)
pp.scale(adata, max_value=10)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1326
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-bfa4168a87e6> in <module>
1 adata.raw = adata
----> 2 sc.pp.recipe_weinreb17(adata, log=False)
3 sc.tl.pca(adata)
/opt/anaconda3/lib/python3.7/site-packages/scanpy/preprocessing/_recipes.py in recipe_weinreb17(adata, log, mean_threshold, cv_threshold, n_pcs, svd_solver, random_state, copy)
48 )
49 adata._inplace_subset_var(gene_subset) # this modifies the object itself
---> 50 X_pca = pp.pca(
51 pp.zscore_deprecated(adata.X),
52 n_comps=n_pcs,
AttributeError: module 'scanpy.preprocessing._simple' has no attribute 'pca'
|
AttributeError
|
def swap_axes(self, swap_axes: Optional[bool] = True):
"""
Plots a transposed image.
By default, the x axis contains `var_names` (e.g. genes) and the y
axis the `groupby` categories. By setting `swap_axes` then x are
the `groupby` categories and y the `var_names`.
Parameters
----------
swap_axes
Boolean to turn on (True) or off (False) 'add_dendrogram'. Default True
Returns
-------
BasePlot
"""
self.DEFAULT_CATEGORY_HEIGHT, self.DEFAULT_CATEGORY_WIDTH = (
self.DEFAULT_CATEGORY_WIDTH,
self.DEFAULT_CATEGORY_HEIGHT,
)
self.are_axes_swapped = swap_axes
return self
|
def swap_axes(self, swap_axes: Optional[bool] = True):
"""
Plots a transposed image.
By default, the x axis contains `var_names` (e.g. genes) and the y
axis the `groupby` categories. By setting `swap_axes` then x are
the `groupby` categories and y the `var_names`.
Parameters
----------
swap_axes : bool, default: True
Returns
-------
BasePlot
"""
self.DEFAULT_CATEGORY_HEIGHT, self.DEFAULT_CATEGORY_WIDTH = (
self.DEFAULT_CATEGORY_WIDTH,
self.DEFAULT_CATEGORY_HEIGHT,
)
self.are_axes_swapped = swap_axes
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def add_dendrogram(
self,
show: Optional[bool] = True,
dendrogram_key: Optional[str] = None,
size: Optional[float] = 0.8,
):
"""\
Show dendrogram based on the hierarchical clustering between the `groupby`
categories. Categories are reordered to match the dendrogram order.
The dendrogram information is computed using :func:`scanpy.tl.dendrogram`.
If `sc.tl.dendrogram` has not been called previously the function is called
with default parameters.
The dendrogram is by default shown on the right side of the plot or on top
if the axes are swapped.
`var_names` are reordered to produce a more pleasing output if:
* The data contains `var_groups`
* the `var_groups` match the categories.
The previous conditions happen by default when using Plot
to show the results from :func:`~scanpy.tl.rank_genes_groups` (aka gene markers), by
calling `scanpy.tl.rank_genes_groups_(plot_name)`.
Parameters
----------
show
Boolean to turn on (True) or off (False) 'add_dendrogram'
dendrogram_key
Needed if `sc.tl.dendrogram` saved the dendrogram using a key different
than the default name.
size
size of the dendrogram. Corresponds to width when dendrogram shown on
the right of the plot, or height when shown on top.
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').add_dendrogram().show()
"""
if not show:
self.plot_group_extra = None
return self
if self.groupby is None or len(self.categories) <= 2:
# dendrogram can only be computed between groupby categories
logg.warning(
"Dendrogram not added. Dendrogram is added only "
"when the number of categories to plot > 2"
)
return self
self.group_extra_size = size
# to correctly plot the dendrogram the categories need to be ordered
# according to the dendrogram ordering.
self._reorder_categories_after_dendrogram(dendrogram_key)
dendro_ticks = np.arange(len(self.categories)) + 0.5
self.group_extra_size = size
self.plot_group_extra = {
"kind": "dendrogram",
"width": size,
"dendrogram_key": dendrogram_key,
"dendrogram_ticks": dendro_ticks,
}
return self
|
def add_dendrogram(
self,
show: Optional[bool] = True,
dendrogram_key: Optional[str] = None,
size: Optional[float] = 0.8,
):
"""
Show dendrogram based on the hierarchical clustering between the `groupby`
categories. Categories are reordered to match the dendrogram order.
The dendrogram information is computed using :func:`scanpy.tl.dendrogram`.
If `sc.tl.dendrogram` has not been called previously the function is called
with default parameters.
The dendrogram is by default shown on the right side of the plot or on top
if the axes are swapped.
`var_names` are reordered to produce a more pleasing output if:
* The data contains `var_groups`
* the `var_groups` match the categories.
The previous conditions happen by default when using Plot
to show the results from `sc.tl.rank_genes_groups` (aka gene markers), by
calling `sc.tl.rank_genes_groups_(plot_name)`.
Parameters
----------
show : bool, default True
dendrogram_key : str, default None
Needed if `sc.tl.dendrogram` saved the dendrogram using a key different
than the default name.
size : size of the dendrogram. Corresponds to width when dendrogram shown on
the right of the plot, or height when shown on top.
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').add_dendrogram().show()
"""
if not show:
self.plot_group_extra = None
return self
if self.groupby is None or len(self.categories) <= 2:
# dendrogram can only be computed between groupby categories
logg.warning(
"Dendrogram not added. Dendrogram is added only "
"when the number of categories to plot > 2"
)
return self
self.group_extra_size = size
# to correctly plot the dendrogram the categories need to be ordered
# according to the dendrogram ordering.
self._reorder_categories_after_dendrogram(dendrogram_key)
dendro_ticks = np.arange(len(self.categories)) + 0.5
self.group_extra_size = size
self.plot_group_extra = {
"kind": "dendrogram",
"width": size,
"dendrogram_key": dendrogram_key,
"dendrogram_ticks": dendro_ticks,
}
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def add_totals(
self,
show: Optional[bool] = True,
sort: Literal["ascending", "descending"] = None,
size: Optional[float] = 0.8,
color: Optional[Union[ColorLike, Sequence[ColorLike]]] = None,
):
"""\
Show barplot for the number of cells in in `groupby` category.
The barplot is by default shown on the right side of the plot or on top
if the axes are swapped.
Parameters
----------
show
Boolean to turn on (True) or off (False) 'add_dendrogram'
sort
Set to either 'ascending' or 'descending' to reorder the categories
by cell number
size
size of the barplot. Corresponds to width when shown on
the right of the plot, or height when shown on top.
color
Color for the bar plots or list of colors for each of the bar plots.
By default, each bar plot uses the colors assigned in
`adata.uns[{groupby}_colors]`.
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').add_totals().show()
"""
self.group_extra_size = size
if not show:
# hide totals
self.plot_group_extra = None
self.group_extra_size = 0
return self
_sort = True if sort is not None else False
_ascending = True if sort == "ascending" else False
counts_df = self.obs_tidy.index.value_counts(sort=_sort, ascending=_ascending)
if _sort:
self.categories_order = counts_df.index
self.plot_group_extra = {
"kind": "group_totals",
"width": size,
"sort": sort,
"counts_df": counts_df,
"color": color,
}
return self
|
def add_totals(
self,
show: Optional[bool] = True,
sort: Literal["ascending", "descending"] = None,
size: Optional[float] = 0.8,
color: Optional[Union[ColorLike, Sequence[ColorLike]]] = None,
):
"""
Show barplot for the number of cells in in `groupby` category.
The barplot is by default shown on the right side of the plot or on top
if the axes are swapped.
Parameters
----------
show : bool, default True
sort : Set to either 'ascending' or 'descending' to reorder the categories
by cell number
size : size of the barplot. Corresponds to width when shown on
the right of the plot, or height when shown on top.
color: Color for the bar plots or list of colors for each of the bar plots.
By default, each bar plot uses the colors assigned in `adata.uns[{groupby}_colors.
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.BasePlot(adata, markers, groupby='bulk_labels').add_totals().show()
"""
self.group_extra_size = size
if not show:
# hide totals
self.plot_group_extra = None
self.group_extra_size = 0
return self
_sort = True if sort is not None else False
_ascending = True if sort == "ascending" else False
counts_df = self.obs_tidy.index.value_counts(sort=_sort, ascending=_ascending)
if _sort:
self.categories_order = counts_df.index
self.plot_group_extra = {
"kind": "group_totals",
"width": size,
"sort": sort,
"counts_df": counts_df,
"color": color,
}
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def style(self, cmap: Optional[str] = DEFAULT_COLORMAP):
"""\
Set visual style parameters
Parameters
----------
cmap
colormap
Returns
-------
BasePlot
"""
self.cmap = cmap
|
def style(self, cmap: Optional[str] = DEFAULT_COLORMAP):
self.cmap = cmap
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def legend(
self,
show: Optional[bool] = True,
title: Optional[str] = DEFAULT_COLOR_LEGEND_TITLE,
width: Optional[float] = DEFAULT_LEGENDS_WIDTH,
):
"""\
Configure legend parameters
Parameters
----------
show
Set to 'False' to hide the default plot of the legend. This sets the
legend width to zero which will result in a wider main plot.
title
Legend title. Appears on top of the color bar. Use '\\n' to add line breaks.
width
Width of the legend. The value is a proportion with respect
to the figure width. E.g. 0.5 means the legend width is 50% of the figure
width.
Returns
-------
BasePlot
Examples
--------
Set legend title:
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> dp = sc.pl.BasePlot(adata, markers, groupby='bulk_labels')
>>> dp.legend(colorbar_title='log(UMI counts + 1)').show()
"""
if not show:
# turn of legends by setting width to 0
self.legends_width = 0
else:
self.color_legend_title = title
self.legends_width = width
return self
|
def legend(
self,
show: Optional[bool] = True,
title: Optional[str] = DEFAULT_COLOR_LEGEND_TITLE,
width: Optional[float] = DEFAULT_LEGENDS_WIDTH,
):
"""
Configure legend parameters.
Parameters
----------
show
Set to `False` to hide the default plot of the legend.
title
Title for the dot size legend. Use "\n" to add line breaks.
width
Width of the legend.
Returns
-------
BasePlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> dp = sc.pl.BasePlot(adata, markers, groupby='bulk_labels')
>>> dp.legend(colorbar_title='log(UMI counts + 1)').show()
"""
if not show:
# turn of legends by setting width to 0
self.legends_width = 0
else:
self.color_legend_title = title
self.legends_width = width
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def style(
self,
cmap: str = DEFAULT_COLORMAP,
color_on: Optional[Literal["dot", "square"]] = DEFAULT_COLOR_ON,
dot_max: Optional[float] = DEFAULT_DOT_MAX,
dot_min: Optional[float] = DEFAULT_DOT_MIN,
smallest_dot: Optional[float] = DEFAULT_SMALLEST_DOT,
largest_dot: Optional[float] = DEFAULT_LARGEST_DOT,
dot_edge_color: Optional[ColorLike] = DEFAULT_DOT_EDGECOLOR,
dot_edge_lw: Optional[float] = DEFAULT_DOT_EDGELW,
size_exponent: Optional[float] = DEFAULT_SIZE_EXPONENT,
grid: Optional[float] = False,
):
"""\
Modifies plot visual parameters
Parameters
----------
cmap
String denoting matplotlib color map.
color_on
Options are 'dot' or 'square'. Be default the colomap is applied to
the color of the dot. Optionally, the colormap can be applied to an
square behind the dot, in which case the dot is transparent and only
the edge is shown.
dot_max
If none, the maximum dot size is set to the maximum fraction value found
(e.g. 0.6). If given, the value should be a number between 0 and 1.
All fractions larger than dot_max are clipped to this value.
dot_min
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1.
All fractions smaller than dot_min are clipped to this value.
smallest_dot
If none, the smallest dot has size 0.
All expression fractions with `dot_min` are plotted with this size.
largest_dot
If none, the largest dot has size 200.
All expression fractions with `dot_max` are plotted with this size.
dot_edge_color
Dot edge color. When `color_on='dot'` the default is no edge. When
`color_on='square'`, edge color is white for darker colors and black
for lighter background square colors.
dot_edge_lw
Dot edge line width. When `color_on='dot'` the default is no edge. When
`color_on='square'`, line width = 1.5.
size_exponent
Dot size is computed as:
fraction ** size exponent and afterwards scaled to match the
`smallest_dot` and `largest_dot` size parameters.
Using a different size exponent changes the relative sizes of the dots
to each other.
grid
Set to true to show grid lines. By default grid lines are not shown.
Further configuration of the grid lines can be achieved directly on the
returned ax.
Returns
-------
:class:`~scanpy.pl.DotPlot`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
Change color map and apply it to the square behind the dot
>>> sc.pl.DotPlot(adata, markers, groupby='bulk_labels')\
... .style(cmap='RdBu_r', color_on='square').show()
Add edge to dots
>>> sc.pl.DotPlot(adata, markers, groupby='bulk_labels')\
... .style(dot_edge_color='black', dot_edge_lw=1).show()
"""
self.cmap = cmap
self.dot_max = dot_max
self.dot_min = dot_min
self.smallest_dot = smallest_dot
self.largest_dot = largest_dot
self.color_on = color_on
self.size_exponent = size_exponent
self.dot_edge_color = dot_edge_color
self.dot_edge_lw = dot_edge_lw
self.grid = grid
return self
|
def style(
self,
cmap: str = DEFAULT_COLORMAP,
color_on: Optional[Literal["dot", "square"]] = DEFAULT_COLOR_ON,
dot_max: Optional[float] = DEFAULT_DOT_MAX,
dot_min: Optional[float] = DEFAULT_DOT_MIN,
smallest_dot: Optional[float] = DEFAULT_SMALLEST_DOT,
largest_dot: Optional[float] = DEFAULT_LARGEST_DOT,
dot_edge_color: Optional[ColorLike] = DEFAULT_DOT_EDGECOLOR,
dot_edge_lw: Optional[float] = DEFAULT_DOT_EDGELW,
size_exponent: Optional[float] = DEFAULT_SIZE_EXPONENT,
grid: Optional[float] = False,
):
"""
Modifies plot style
Parameters
----------
cmap
String denoting matplotlib color map.
color_on
Options are 'dot' or 'square'. Be default the colomap is applied to
the color of the dot. Optionally, the colormap can be applied to an
square behind the dot, in which case the dot is transparent and only
the edge is shown.
dot_max
If none, the maximum dot size is set to the maximum fraction value found
(e.g. 0.6). If given, the value should be a number between 0 and 1.
All fractions larger than dot_max are clipped to this value.
dot_min
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1.
All fractions smaller than dot_min are clipped to this value.
smallest_dot
If none, the smallest dot has size 0.
All expression fractions with `dot_min` are plotted with this size.
largest_dot
If none, the largest dot has size 200.
All expression fractions with `dot_max` are plotted with this size.
dot_edge_color
Dot edge color. When `color_on='dot'` the default is no edge. When
`color_on='square'`, edge color is white for darker colors and black
for lighter background square colors.
dot_edge_lw
Dot edge line width. When `color_on='dot'` the default is no edge. When
`color_on='square'`, line width = 1.5.
size_exponent
Dot size is computed as:
fraction ** size exponent and afterwards scaled to match the
`smallest_dot` and `largest_dot` size parameters.
Using a different size exponent changes the relative sizes of the dots
to each other.
grid
Set to true to show grid lines. By default grid lines are not shown.
Further configuration of the grid lines can be achived directly on the
returned ax.
Returns
-------
DotPlot
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
Change color map and apply it to the square behind the dot
>>> sc.pl.DotPlot(adata, markers, groupby='bulk_labels')\
... .style(cmap='RdBu_r', color_on='square').show()
Add edge to dots
>>> sc.pl.DotPlot(adata, markers, groupby='bulk_labels')\
... .style(dot_edge_color='black', dot_edge_lw=1).show()
"""
self.cmap = cmap
self.dot_max = dot_max
self.dot_min = dot_min
self.smallest_dot = smallest_dot
self.largest_dot = largest_dot
self.color_on = color_on
self.size_exponent = size_exponent
self.dot_edge_color = dot_edge_color
self.dot_edge_lw = dot_edge_lw
self.grid = grid
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def legend(
self,
show: Optional[bool] = True,
show_size_legend: Optional[bool] = True,
show_colorbar: Optional[bool] = True,
size_title: Optional[str] = DEFAULT_SIZE_LEGEND_TITLE,
colorbar_title: Optional[str] = DEFAULT_COLOR_LEGEND_TITLE,
width: Optional[float] = DEFAULT_LEGENDS_WIDTH,
):
"""\
Configures dot size and the colorbar legends
Parameters
----------
show
Set to `False` to hide the default plot of the legends. This sets the
legend width to zero, which will result in a wider main plot.
show_size_legend
Set to `False` to hide the dot size legend
show_colorbar
Set to `False` to hide the colorbar legend
size_title
Title for the dot size legend. Use '\\n' to add line breaks. Appears on top
of dot sizes
colorbar_title
Title for the color bar. Use '\\n' to add line breaks. Appears on top of the
color bar
width
Width of the legends area. The value is a proportion with respect
to the figure width. E.g. 0.5 means the legend width is 50% of the figure
Returns
-------
:class:`~scanpy.pl.DotPlot`
Examples
--------
Set color bar title:
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> dp = sc.pl.DotPlot(adata, markers, groupby='bulk_labels')
>>> dp.legend(colorbar_title='log(UMI counts + 1)').show()
"""
if not show:
# turn of legends by setting width to 0
self.legends_width = 0
else:
self.color_legend_title = colorbar_title
self.size_title = size_title
self.legends_width = width
self.show_size_legend = show_size_legend
self.show_colorbar = show_colorbar
return self
|
def legend(
self,
show: Optional[bool] = True,
show_size_legend: Optional[bool] = True,
show_colorbar: Optional[bool] = True,
size_title: Optional[str] = DEFAULT_SIZE_LEGEND_TITLE,
colorbar_title: Optional[str] = DEFAULT_COLOR_LEGEND_TITLE,
width: Optional[float] = DEFAULT_LEGENDS_WIDTH,
):
"""
Configure legend parameters.
Parameters
----------
show
Set to `False` to hide the default plot of the legends.
show_size_legend
Set to `False` to hide the the size legend
show_colorbar
Set to `False` to hide the the colorbar
size_title
Title for the dot size legend. Use "\n" to add line breaks.
colorbar_title
Title for the color bar. Use "\n" to add line breaks.
width
Width of the legends.
Returns
-------
DotPlot
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> dp = sc.pl.DotPlot(adata, markers, groupby='bulk_labels')
>>> dp.legend(colorbar_title='log(UMI counts + 1)').show()
"""
if not show:
# turn of legends by setting width to 0
self.legends_width = 0
else:
self.color_legend_title = colorbar_title
self.size_title = size_title
self.legends_width = width
self.show_size_legend = show_size_legend
self.show_colorbar = show_colorbar
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def _dotplot(
dot_size,
dot_color,
dot_ax,
cmap: str = "Reds",
color_on: Optional[str] = "dot",
y_label: Optional[str] = None,
dot_max: Optional[float] = None,
dot_min: Optional[float] = None,
standard_scale: Literal["var", "group"] = None,
smallest_dot: Optional[float] = 0.0,
largest_dot: Optional[float] = 200,
size_exponent: Optional[float] = 2,
edge_color: Optional[ColorLike] = None,
edge_lw: Optional[float] = None,
grid: Optional[bool] = False,
**kwds,
):
"""\
Makes a *dot plot* given two data frames, one containing
the doc size and other containing the dot color. The indices and
columns of the data frame are used to label the output image
The dots are plotted using :func:`matplotlib.pyplot.scatter`. Thus, additional
arguments can be passed.
Parameters
----------
dot_size: Data frame containing the dot_size.
dot_color: Data frame containing the dot_color, should have the same,
shape, columns and indices as dot_size.
dot_ax: matplotlib axis
y_lebel:
cmap
String denoting matplotlib color map.
color_on
Options are 'dot' or 'square'. Be default the colomap is applied to
the color of the dot. Optionally, the colormap can be applied to an
square behind the dot, in which case the dot is transparent and only
the edge is shown.
y_label: String. Label for y axis
dot_max
If none, the maximum dot size is set to the maximum fraction value found
(e.g. 0.6). If given, the value should be a number between 0 and 1.
All fractions larger than dot_max are clipped to this value.
dot_min
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1.
All fractions smaller than dot_min are clipped to this value.
standard_scale
Whether or not to standardize that dimension between 0 and 1,
meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot
If none, the smallest dot has size 0.
All expression levels with `dot_min` are plotted with this size.
edge_color
Dot edge color. When `color_on='dot'` the default is no edge. When
`color_on='square'`, edge color is white
edge_lw
Dot edge line width. When `color_on='dot'` the default is no edge. When
`color_on='square'`, line width = 1.5
grid
Adds a grid to the plot
kwds
Are passed to :func:`matplotlib.pyplot.scatter`.
Returns
-------
matplotlib.colors.Normalize, dot_min, dot_max
"""
assert dot_size.shape == dot_color.shape, (
"please check that dot_size and dot_color dataframes have the same shape"
)
assert list(dot_size.index) == list(dot_color.index), (
"please check that dot_size and dot_color dataframes have the same index"
)
assert list(dot_size.columns) == list(dot_color.columns), (
"please check that the dot_size and dot_color dataframes have the same columns"
)
if standard_scale == "group":
dot_color = dot_color.sub(dot_color.min(1), axis=0)
dot_color = dot_color.div(dot_color.max(1), axis=0).fillna(0)
elif standard_scale == "var":
dot_color -= dot_color.min(0)
dot_color = (dot_color / dot_color.max(0)).fillna(0)
elif standard_scale is None:
pass
# make scatter plot in which
# x = var_names
# y = groupby category
# size = fraction
# color = mean expression
y, x = np.indices(dot_color.shape)
y = y.flatten() + 0.5
x = x.flatten() + 0.5
frac = dot_size.values.flatten()
mean_flat = dot_color.values.flatten()
cmap = pl.get_cmap(kwds.get("cmap", cmap))
if "cmap" in kwds:
del kwds["cmap"]
if dot_max is None:
dot_max = np.ceil(max(frac) * 10) / 10
else:
if dot_max < 0 or dot_max > 1:
raise ValueError("`dot_max` value has to be between 0 and 1")
if dot_min is None:
dot_min = 0
else:
if dot_min < 0 or dot_min > 1:
raise ValueError("`dot_min` value has to be between 0 and 1")
if dot_min != 0 or dot_max != 1:
# clip frac between dot_min and dot_max
frac = np.clip(frac, dot_min, dot_max)
old_range = dot_max - dot_min
# re-scale frac between 0 and 1
frac = (frac - dot_min) / old_range
size = frac**size_exponent
# rescale size to match smallest_dot and largest_dot
size = size * (largest_dot - smallest_dot) + smallest_dot
import matplotlib.colors
normalize = matplotlib.colors.Normalize(
vmin=kwds.get("vmin"), vmax=kwds.get("vmax")
)
if color_on == "square":
if edge_color is None:
from seaborn.utils import relative_luminance
# use either black or white for the edge color
# depending on the luminance of the background
# square color
edge_color = []
for color_value in cmap(normalize(mean_flat)):
lum = relative_luminance(color_value)
edge_color.append(".15" if lum > 0.408 else "w")
edge_lw = 1.5 if edge_lw is None else edge_lw
# first make a heatmap similar to `sc.pl.matrixplot`
# (squares with the asigned colormap). Circles will be plotted
# on top
dot_ax.pcolor(dot_color.values, cmap=cmap, norm=normalize)
for axis in ["top", "bottom", "left", "right"]:
dot_ax.spines[axis].set_linewidth(1.5)
kwds = fix_kwds(
kwds,
s=size,
cmap=cmap,
norm=None,
linewidth=edge_lw,
facecolor="none",
edgecolor=edge_color,
)
dot_ax.scatter(x, y, **kwds)
else:
edge_color = "none" if edge_color is None else edge_color
edge_lw = 0.5 if edge_lw is None else edge_lw
color = cmap(normalize(mean_flat))
kwds = fix_kwds(
kwds,
s=size,
cmap=cmap,
color=color,
norm=None,
linewidth=edge_lw,
edgecolor=edge_color,
)
dot_ax.scatter(x, y, **kwds)
y_ticks = np.arange(dot_color.shape[0]) + 0.5
dot_ax.set_yticks(y_ticks)
dot_ax.set_yticklabels(
[dot_color.index[idx] for idx, _ in enumerate(y_ticks)], minor=False
)
x_ticks = np.arange(dot_color.shape[1]) + 0.5
dot_ax.set_xticks(x_ticks)
dot_ax.set_xticklabels(
[dot_color.columns[idx] for idx, _ in enumerate(x_ticks)],
rotation=90,
ha="center",
minor=False,
)
dot_ax.tick_params(axis="both", labelsize="small")
dot_ax.grid(False)
dot_ax.set_ylabel(y_label)
# to be consistent with the heatmap plot, is better to
# invert the order of the y-axis, such that the first group is on
# top
dot_ax.set_ylim(dot_color.shape[0], 0)
dot_ax.set_xlim(0, dot_color.shape[1])
if color_on == "dot":
# add more distance to the x and y lims with the color is on the
# dots
dot_ax.set_ylim(dot_color.shape[0] + 0.5, -0.5)
dot_ax.set_xlim(-0.3, dot_color.shape[1] + 0.3)
if grid:
dot_ax.grid(True, color="gray", linewidth=0.1)
dot_ax.set_axisbelow(True)
return normalize, dot_min, dot_max
|
def _dotplot(
dot_size,
dot_color,
dot_ax,
cmap: str = "Reds",
color_on: Optional[str] = "dot",
y_label: Optional[str] = None,
dot_max: Optional[float] = None,
dot_min: Optional[float] = None,
standard_scale: Literal["var", "group"] = None,
smallest_dot: Optional[float] = 0.0,
largest_dot: Optional[float] = 200,
size_exponent: Optional[float] = 2,
edge_color: Optional[ColorLike] = None,
edge_lw: Optional[float] = None,
grid: Optional[bool] = False,
**kwds,
):
"""\
Makes a *dot plot* given two data frames, one containing
the doc size and other containing the dot color. The indices and
columns of the data frame are used to label the output image
The dots are plotted
using matplotlib.pyplot.scatter. Thus, additional arguments can be passed.
Parameters
----------
dot_size: Data frame containing the dot_size.
dot_color: Data frame containing the dot_color, should have the same,
shape, columns and indices as dot_size.
dot_ax: matplotlib axis
y_lebel:
cmap
String denoting matplotlib color map.
color_on
Options are 'dot' or 'square'. Be default the colomap is applied to
the color of the dot. Optionally, the colormap can be applied to an
square behind the dot, in which case the dot is transparent and only
the edge is shown.
y_label: String. Label for y axis
dot_max
If none, the maximum dot size is set to the maximum fraction value found
(e.g. 0.6). If given, the value should be a number between 0 and 1.
All fractions larger than dot_max are clipped to this value.
dot_min
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1.
All fractions smaller than dot_min are clipped to this value.
standard_scale
Whether or not to standardize that dimension between 0 and 1,
meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot
If none, the smallest dot has size 0.
All expression levels with `dot_min` are plotted with this size.
edge_color
Dot edge color. When `color_on='dot'` the default is no edge. When
`color_on='square'`, edge color is white
edge_lw
Dot edge line width. When `color_on='dot'` the default is no edge. When
`color_on='square'`, line width = 1.5
grid
Adds a grid to the plot
**kwds
Are passed to :func:`matplotlib.pyplot.scatter`.
Returns
-------
matplotlib.colors.Normalize, dot_min, dot_max
"""
assert dot_size.shape == dot_color.shape, (
"please check that dot_size and dot_color dataframes have the same shape"
)
assert list(dot_size.index) == list(dot_color.index), (
"please check that dot_size and dot_color dataframes have the same index"
)
assert list(dot_size.columns) == list(dot_color.columns), (
"please check that the dot_size and dot_color dataframes have the same columns"
)
if standard_scale == "group":
dot_color = dot_color.sub(dot_color.min(1), axis=0)
dot_color = dot_color.div(dot_color.max(1), axis=0).fillna(0)
elif standard_scale == "var":
dot_color -= dot_color.min(0)
dot_color = (dot_color / dot_color.max(0)).fillna(0)
elif standard_scale is None:
pass
# make scatter plot in which
# x = var_names
# y = groupby category
# size = fraction
# color = mean expression
y, x = np.indices(dot_color.shape)
y = y.flatten() + 0.5
x = x.flatten() + 0.5
frac = dot_size.values.flatten()
mean_flat = dot_color.values.flatten()
cmap = pl.get_cmap(kwds.get("cmap", cmap))
if "cmap" in kwds:
del kwds["cmap"]
if dot_max is None:
dot_max = np.ceil(max(frac) * 10) / 10
else:
if dot_max < 0 or dot_max > 1:
raise ValueError("`dot_max` value has to be between 0 and 1")
if dot_min is None:
dot_min = 0
else:
if dot_min < 0 or dot_min > 1:
raise ValueError("`dot_min` value has to be between 0 and 1")
if dot_min != 0 or dot_max != 1:
# clip frac between dot_min and dot_max
frac = np.clip(frac, dot_min, dot_max)
old_range = dot_max - dot_min
# re-scale frac between 0 and 1
frac = (frac - dot_min) / old_range
size = frac**size_exponent
# rescale size to match smallest_dot and largest_dot
size = size * (largest_dot - smallest_dot) + smallest_dot
import matplotlib.colors
normalize = matplotlib.colors.Normalize(
vmin=kwds.get("vmin"), vmax=kwds.get("vmax")
)
if color_on == "square":
if edge_color is None:
from seaborn.utils import relative_luminance
# use either black or white for the edge color
# depending on the luminance of the background
# square color
edge_color = []
for color_value in cmap(normalize(mean_flat)):
lum = relative_luminance(color_value)
edge_color.append(".15" if lum > 0.408 else "w")
edge_lw = 1.5 if edge_lw is None else edge_lw
# first make a heatmap similar to `sc.pl.matrixplot`
# (squares with the asigned colormap). Circles will be plotted
# on top
dot_ax.pcolor(dot_color.values, cmap=cmap, norm=normalize)
for axis in ["top", "bottom", "left", "right"]:
dot_ax.spines[axis].set_linewidth(1.5)
kwds = fix_kwds(
kwds,
s=size,
cmap=cmap,
norm=None,
linewidth=edge_lw,
facecolor="none",
edgecolor=edge_color,
)
dot_ax.scatter(x, y, **kwds)
else:
edge_color = "none" if edge_color is None else edge_color
edge_lw = 0.5 if edge_lw is None else edge_lw
color = cmap(normalize(mean_flat))
kwds = fix_kwds(
kwds,
s=size,
cmap=cmap,
color=color,
norm=None,
linewidth=edge_lw,
edgecolor=edge_color,
)
dot_ax.scatter(x, y, **kwds)
y_ticks = np.arange(dot_color.shape[0]) + 0.5
dot_ax.set_yticks(y_ticks)
dot_ax.set_yticklabels(
[dot_color.index[idx] for idx, _ in enumerate(y_ticks)], minor=False
)
x_ticks = np.arange(dot_color.shape[1]) + 0.5
dot_ax.set_xticks(x_ticks)
dot_ax.set_xticklabels(
[dot_color.columns[idx] for idx, _ in enumerate(x_ticks)],
rotation=90,
ha="center",
minor=False,
)
dot_ax.tick_params(axis="both", labelsize="small")
dot_ax.grid(False)
dot_ax.set_ylabel(y_label)
# to be consistent with the heatmap plot, is better to
# invert the order of the y-axis, such that the first group is on
# top
dot_ax.set_ylim(dot_color.shape[0], 0)
dot_ax.set_xlim(0, dot_color.shape[1])
if color_on == "dot":
# add more distance to the x and y lims with the color is on the
# dots
dot_ax.set_ylim(dot_color.shape[0] + 0.5, -0.5)
dot_ax.set_xlim(-0.3, dot_color.shape[1] + 0.3)
if grid:
dot_ax.grid(True, color="gray", linewidth=0.1)
dot_ax.set_axisbelow(True)
return normalize, dot_min, dot_max
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def dotplot(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
expression_cutoff: float = 0.0,
mean_only_expressed: bool = False,
cmap: str = "Reds",
dot_max: Optional[float] = None,
dot_min: Optional[float] = None,
standard_scale: Optional[Literal["var", "group"]] = None,
smallest_dot: Optional[float] = DotPlot.DEFAULT_SMALLEST_DOT,
title: Optional[str] = None,
colorbar_title: Optional[str] = DotPlot.DEFAULT_COLOR_LEGEND_TITLE,
size_title: Optional[str] = DotPlot.DEFAULT_SIZE_LEGEND_TITLE,
figsize: Optional[Tuple[float, float]] = None,
dendrogram: Union[bool, str] = False,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
swap_axes: Optional[bool] = False,
dot_color_df: Optional[pd.DataFrame] = None,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
ax: Optional[_AxesSubplot] = None,
return_fig: Optional[bool] = False,
**kwds,
) -> Union[DotPlot, dict, None]:
"""\
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted.
Each dot represents two values: mean expression within each category
(visualized by color) and fraction of cells expressing the `var_name` in the
category (visualized by the size of the dot). If `groupby` is not given,
the dotplot assumes that all data belongs to a single category.
.. note::
A gene is considered expressed if the expression value in the `adata` (or
`adata.raw`) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene
across multiple clusters.
This function provides a convenient interface to the :class:`~scanpy.pl.DotPlot`
class. If you need more flexibility, you should use :class:`~scanpy.pl.DotPlot`
directly.
Parameters
----------
{common_plot_args}
{groupby_plots_args}
size_title
Title for the size legend. New line character (\\n) can be used.
expression_cutoff
Expression cutoff that is used for binarizing the gene expression and
determining the fraction of cells expressing given genes. A gene is
expressed only if the expression value is greater than this threshold.
mean_only_expressed
If True, gene expression is averaged only over the cells
expressing the given genes.
dot_max
If none, the maximum dot size is set to the maximum fraction value found
(e.g. 0.6). If given, the value should be a number between 0 and 1.
All fractions larger than dot_max are clipped to this value.
dot_min
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1.
All fractions smaller than dot_min are clipped to this value.
smallest_dot
If none, the smallest dot has size 0.
All expression levels with `dot_min` are plotted with this size.
{show_save_ax}
kwds
Are passed to :func:`matplotlib.pyplot.scatter`.
Returns
-------
If `return_fig` is `True`, returns a :class:`~scanpy.pl.DotPlot` object,
else if `show` is false, return axes dict
See also
--------
:class:`~scanpy.pl.DotPlot`: The DotPlot class can be used to to control
several visual parameters not available in this function.
:func:`~scanpy.pl.rank_genes_groups_dotplot`: to plot marker genes
identified using the :func:`~scanpy.tl.rank_genes_groups` function.
Examples
--------
Create a dot plot using the given markers and the PBMC example dataset:
..plot::
:context: close-figs
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
..plot::
:context: close-figs
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Get DotPlot object for fine tuning
>>> dp = sc.pl.dotplot(adata, markers, 'bulk_labels', return_fig=True)
>>> dp.add_totals().style(dot_edge_color='black', dot_edge_lw=0.5).show()
The axes used can be obtained using the get_axes() method
>>> axes_dict = dp.get_axes()
>>> print(axes_dict)
"""
# backwards compatibility: previous version of dotplot used `color_map`
# instead of `cmap`
cmap = kwds.get("color_map", cmap)
if "color_map" in kwds:
del kwds["color_map"]
dp = DotPlot(
adata,
var_names,
groupby,
use_raw=use_raw,
log=log,
num_categories=num_categories,
expression_cutoff=expression_cutoff,
mean_only_expressed=mean_only_expressed,
standard_scale=standard_scale,
title=title,
figsize=figsize,
gene_symbols=gene_symbols,
var_group_positions=var_group_positions,
var_group_labels=var_group_labels,
var_group_rotation=var_group_rotation,
layer=layer,
dot_color_df=dot_color_df,
ax=ax,
**kwds,
)
if dendrogram:
dp.add_dendrogram(dendrogram_key=dendrogram)
if swap_axes:
dp.swap_axes()
dp = dp.style(
cmap=cmap,
dot_max=dot_max,
dot_min=dot_min,
smallest_dot=smallest_dot,
).legend(
colorbar_title=colorbar_title,
size_title=size_title,
)
if return_fig:
return dp
else:
return dp.show(show=show, save=save)
|
def dotplot(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
expression_cutoff: float = 0.0,
mean_only_expressed: bool = False,
cmap: str = "Reds",
dot_max: Optional[float] = None,
dot_min: Optional[float] = None,
standard_scale: Optional[Literal["var", "group"]] = None,
smallest_dot: Optional[float] = DotPlot.DEFAULT_SMALLEST_DOT,
title: Optional[str] = None,
colorbar_title: Optional[str] = DotPlot.DEFAULT_COLOR_LEGEND_TITLE,
size_title: Optional[str] = DotPlot.DEFAULT_SIZE_LEGEND_TITLE,
figsize: Optional[Tuple[float, float]] = None,
dendrogram: Union[bool, str] = False,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
swap_axes: Optional[bool] = False,
dot_color_df: Optional[pd.DataFrame] = None,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
ax: Optional[_AxesSubplot] = None,
return_fig: Optional[bool] = False,
**kwds,
) -> Union[DotPlot, dict, None]:
"""\
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted.
Each dot represents two values: mean expression within each category
(visualized by color) and fraction of cells expressing the `var_name` in the
category (visualized by the size of the dot). If `groupby` is not given,
the dotplot assumes that all data belongs to a single category.
.. note::
A gene is considered expressed if the expression value in the `adata` (or
`adata.raw`) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene
across multiple clusters.
This function provides a convenient interface to the :class:`DotPlot`
class. If you need more flexibility, you should use :class:`DotPlot` directly.
Parameters
----------
{common_plot_args}
{groupby_plots_args}
size_title
Title for the size legend. New line character (\\n) can be used.
expression_cutoff
Expression cutoff that is used for binarizing the gene expression and
determining the fraction of cells expressing given genes. A gene is
expressed only if the expression value is greater than this threshold.
mean_only_expressed
If True, gene expression is averaged only over the cells
expressing the given genes.
dot_max
If none, the maximum dot size is set to the maximum fraction value found
(e.g. 0.6). If given, the value should be a number between 0 and 1.
All fractions larger than dot_max are clipped to this value.
dot_min
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1.
All fractions smaller than dot_min are clipped to this value.
smallest_dot
If none, the smallest dot has size 0.
All expression levels with `dot_min` are plotted with this size.
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.scatter`.
Returns
-------
If `return_fig` is `True`, returns a :class:`DotPlot` object,
else if `show` is false, return axes dict
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Get DotPlot object for fine tuning
>>> dp = sc.pl.dotplot(adata, markers, 'bulk_labels', return_fig=True)
>>> dp.add_totals().style(dot_edge_color='black', dot_edge_lw=0.5).show()
The axes used can be obtained using the get_axes() method
>>> axes_dict = dp.get_axes()
See also
--------
:func:`~scanpy.pl.rank_genes_groups_dotplot`: to plot marker genes
identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
# backwards compatibily: previous version of dotplot used `color_map`
# instead of `cmap`
cmap = kwds.get("color_map", cmap)
if "color_map" in kwds:
del kwds["color_map"]
dp = DotPlot(
adata,
var_names,
groupby,
use_raw=use_raw,
log=log,
num_categories=num_categories,
expression_cutoff=expression_cutoff,
mean_only_expressed=mean_only_expressed,
standard_scale=standard_scale,
title=title,
figsize=figsize,
gene_symbols=gene_symbols,
var_group_positions=var_group_positions,
var_group_labels=var_group_labels,
var_group_rotation=var_group_rotation,
layer=layer,
dot_color_df=dot_color_df,
ax=ax,
**kwds,
)
if dendrogram:
dp.add_dendrogram(dendrogram_key=dendrogram)
if swap_axes:
dp.swap_axes()
dp = dp.style(
cmap=cmap,
dot_max=dot_max,
dot_min=dot_min,
smallest_dot=smallest_dot,
).legend(
colorbar_title=colorbar_title,
size_title=size_title,
)
if return_fig:
return dp
else:
return dp.show(show=show, save=save)
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def style(
self,
cmap: str = DEFAULT_COLORMAP,
edge_color: Optional[ColorLike] = DEFAULT_EDGE_COLOR,
edge_lw: Optional[float] = DEFAULT_EDGE_LW,
):
"""\
Modifies plot visual parameters.
Parameters
----------
cmap
String denoting matplotlib color map.
edge_color
Edge color betweem the squares of matrix plot. Default is gray
edge_lw
Edge line width.
Returns
-------
:class:`~scanpy.pl.MatrixPlot`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
Change color map and turn off edges:
>>> sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels')\
... .style(cmap='Blues', edge_color='none').show()
"""
self.cmap = cmap
self.edge_color = edge_color
self.edge_lw = edge_lw
return self
|
def style(
self,
cmap: str = DEFAULT_COLORMAP,
edge_color: Optional[ColorLike] = DEFAULT_EDGE_COLOR,
edge_lw: Optional[float] = DEFAULT_EDGE_LW,
):
"""
Modifies plot graphical parameters
Parameters
----------
cmap
String denoting matplotlib color map.
edge_color
Edge color betweem the squares of matrix plot. Default is gray
edge_lw
Edge line width.
Returns
-------
MatrixPlot
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
Change color map and turn off edges
>>> sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels')\
... .style(cmap='Blues', edge_color='none').show()
"""
self.cmap = cmap
self.edge_color = edge_color
self.edge_lw = edge_lw
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def matrixplot(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
figsize: Optional[Tuple[float, float]] = None,
dendrogram: Union[bool, str] = False,
title: Optional[str] = None,
cmap: Optional[str] = MatrixPlot.DEFAULT_COLORMAP,
colorbar_title: Optional[str] = MatrixPlot.DEFAULT_COLOR_LEGEND_TITLE,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
standard_scale: Literal["var", "group"] = None,
values_df: Optional[pd.DataFrame] = None,
swap_axes: bool = False,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
ax: Optional[_AxesSubplot] = None,
return_fig: Optional[bool] = False,
**kwds,
) -> Union[MatrixPlot, dict, None]:
"""\
Creates a heatmap of the mean expression values per cluster of each var_names.
This function provides a convenient interface to the :class:`~scanpy.pl.MatrixPlot`
class. If you need more flexibility, you should use :class:`~scanpy.pl.MatrixPlot`
directly.
Parameters
----------
{common_plot_args}
{groupby_plots_args}
{show_save_ax}
kwds
Are passed to :func:`matplotlib.pyplot.pcolor`.
Returns
-------
If `return_fig` is `True`, returns a :class:`~scanpy.pl.MatrixPlot` object,
else if `show` is false, return axes dict
Examples
--------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Get Matrix object for fine tuning:
>>> mp = sc.pl.matrix(adata, markers, 'bulk_labels', return_fig=True)
>>> mp.add_totals().style(edge_color='black').show()
The axes used can be obtained using the get_axes() method
>>> axes_dict = mp.get_axes()
See also
--------
:func:`~scanpy.pl.rank_genes_groups_matrixplot`: to plot marker genes
identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
mp = MatrixPlot(
adata,
var_names,
groupby=groupby,
use_raw=use_raw,
log=log,
num_categories=num_categories,
standard_scale=standard_scale,
title=title,
figsize=figsize,
gene_symbols=gene_symbols,
var_group_positions=var_group_positions,
var_group_labels=var_group_labels,
var_group_rotation=var_group_rotation,
layer=layer,
values_df=values_df,
ax=ax,
**kwds,
)
if dendrogram:
mp.add_dendrogram(dendrogram_key=dendrogram)
if swap_axes:
mp.swap_axes()
mp = mp.style(cmap=cmap).legend(title=colorbar_title)
if return_fig:
return mp
else:
return mp.show(show=show, save=save)
|
def matrixplot(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
figsize: Optional[Tuple[float, float]] = None,
dendrogram: Union[bool, str] = False,
title: Optional[str] = None,
cmap: Optional[str] = MatrixPlot.DEFAULT_COLORMAP,
colorbar_title: Optional[str] = MatrixPlot.DEFAULT_COLOR_LEGEND_TITLE,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
standard_scale: Literal["var", "group"] = None,
values_df: Optional[pd.DataFrame] = None,
swap_axes: bool = False,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
ax: Optional[_AxesSubplot] = None,
return_fig: Optional[bool] = False,
**kwds,
) -> Union[MatrixPlot, dict, None]:
"""\
Creates a heatmap of the mean expression values per cluster of each var_names.
This function provides a convenient interface to the :class:`MatrixPlot`
class. If you need more flexibility, you should use :class:`MatrixPlot` directly.
Parameters
----------
{common_plot_args}
{groupby_plots_args}
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.pcolor`.
Returns
-------
if `show` is `False`, returns a :class:`MatrixPlot` object
Examples
--------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Get Matrix object for fine tuning
>>> mp = sc.pl.matrix(adata, markers, 'bulk_labels', return_fig=True)
>>> mp.add_totals().style(edge_color='black').show()
The axes used can be obtained using the get_axes() method
>>> axes_dict = mp.get_axes()
See also
--------
:func:`~scanpy.pl.rank_genes_groups_matrixplot`: to plot marker genes
identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
mp = MatrixPlot(
adata,
var_names,
groupby=groupby,
use_raw=use_raw,
log=log,
num_categories=num_categories,
standard_scale=standard_scale,
title=title,
figsize=figsize,
gene_symbols=gene_symbols,
var_group_positions=var_group_positions,
var_group_labels=var_group_labels,
var_group_rotation=var_group_rotation,
layer=layer,
values_df=values_df,
ax=ax,
**kwds,
)
if dendrogram:
mp.add_dendrogram(dendrogram_key=dendrogram)
if swap_axes:
mp.swap_axes()
mp = mp.style(cmap=cmap).legend(title=colorbar_title)
if return_fig:
return mp
else:
return mp.show(show=show, save=save)
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def style(
self,
cmap: str = DEFAULT_COLORMAP,
stripplot: Optional[bool] = DEFAULT_STRIPPLOT,
jitter: Optional[Union[float, bool]] = DEFAULT_JITTER,
jitter_size: Optional[int] = DEFAULT_JITTER_SIZE,
linewidth: Optional[float] = DEFAULT_LINE_WIDTH,
row_palette: Optional[str] = DEFAULT_ROW_PALETTE,
scale: Optional[Literal["area", "count", "width"]] = DEFAULT_SCALE,
yticklabels: Optional[bool] = DEFAULT_PLOT_YTICKLABELS,
ylim: Optional[Tuple[float, float]] = DEFAULT_YLIM,
):
"""\
Modifies plot visual parameters
Parameters
----------
cmap
String denoting matplotlib color map.
stripplot
Add a stripplot on top of the violin plot.
See :func:`~seaborn.stripplot`.
jitter
Add jitter to the stripplot (only when stripplot is True)
See :func:`~seaborn.stripplot`.
jitter_size
Size of the jitter points.
linewidth
linewidth for the violin plots.
row_palette
The row palette determines the colors to use for the stacked violins.
The value should be a valid seaborn or matplotlib palette name
(see :func:`~seaborn.color_palette`).
Alternatively, a single color name or hex value can be passed,
e.g. `'red'` or `'#cc33ff'`.
scale
The method used to scale the width of each violin.
If 'width' (the default), each violin will have the same width.
If 'area', each violin will have the same area.
If 'count', a violin’s width corresponds to the number of observations.
yticklabels
Because the plots are on top of each other the yticks labels tend to
overlap and are not plotted. Set to true to view the labels.
ylim
minimum and maximum values for the y-axis. If set. All rows will have
the same y-axis range. Example: ylim=(0, 5)
Returns
-------
:class:`~scanpy.pl.StackedViolin`
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
Change color map and turn off edges
>>> sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels')\
... .style(row_palette='Blues', linewidth=0).show()
"""
self.cmap = cmap
self.row_palette = row_palette
self.kwds["color"] = self.row_palette
self.stripplot = stripplot
self.jitter = jitter
self.jitter_size = jitter_size
self.plot_yticklabels = yticklabels
self.ylim = ylim
self.kwds["linewidth"] = linewidth
self.kwds["scale"] = scale
return self
|
def style(
self,
cmap: str = DEFAULT_COLORMAP,
stripplot: Optional[bool] = DEFAULT_STRIPPLOT,
jitter: Optional[Union[float, bool]] = DEFAULT_JITTER,
jitter_size: Optional[int] = DEFAULT_JITTER_SIZE,
linewidth: Optional[float] = DEFAULT_LINE_WIDTH,
row_palette: Optional[str] = DEFAULT_ROW_PALETTE,
scale: Optional[Literal["area", "count", "width"]] = DEFAULT_SCALE,
yticklabels: Optional[bool] = DEFAULT_PLOT_YTICKLABELS,
ylim: Optional[Tuple[float, float]] = DEFAULT_YLIM,
):
"""
Modifies plot graphical parameters
Parameters
----------
cmap
String denoting matplotlib color map.
stripplot
Add a stripplot on top of the violin plot.
See :func:`~seaborn.stripplot`.
jitter
Add jitter to the stripplot (only when stripplot is True)
See :func:`~seaborn.stripplot`.
jitter_size
Size of the jitter points.
linewidth
linewidth for the violin plots.
row_palette
The row palette determines the colors to use for the stacked violins.
The value should be a valid seaborn or matplotlib palette name
(see :func:`~seaborn.color_palette`).
Alternatively, a single color name or hex value can be passed,
e.g. `'red'` or `'#cc33ff'`.
scale
The method used to scale the width of each violin.
If 'width' (the default), each violin will have the same width.
If 'area', each violin will have the same area.
If 'count', a violin’s width corresponds to the number of observations.
yticklabels
Because the plots are on top of each other the yticks labels tend to
overlap and are not plotted. Set to true to view the labels.
ylim
minimum and maximum values for the y-axis. If set. All rows will have
the same y-axis range. Example: ylim=(0, 5)
Returns
-------
StackedViolin
Examples
-------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
Change color map and turn off edges
>>> sc.pl.MatrixPlot(adata, markers, groupby='bulk_labels')\
... .style(row_palette='Blues', linewisth=0).show()
"""
self.cmap = cmap
self.row_palette = row_palette
self.kwds["color"] = self.row_palette
self.stripplot = stripplot
self.jitter = jitter
self.jitter_size = jitter_size
self.plot_yticklabels = yticklabels
self.ylim = ylim
self.kwds["linewidth"] = linewidth
self.kwds["scale"] = scale
return self
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def stacked_violin(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
log: bool = False,
use_raw: Optional[bool] = None,
num_categories: int = 7,
title: Optional[str] = None,
colorbar_title: Optional[str] = StackedViolin.DEFAULT_COLOR_LEGEND_TITLE,
figsize: Optional[Tuple[float, float]] = None,
dendrogram: Union[bool, str] = False,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
standard_scale: Optional[Literal["var", "obs"]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
stripplot: bool = False,
jitter: Union[float, bool] = False,
size: int = 1,
scale: Literal["area", "count", "width"] = "width",
order: Optional[Sequence[str]] = None,
swap_axes: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
return_fig: Optional[bool] = False,
row_palette: Optional[str] = None,
cmap: Optional[str] = StackedViolin.DEFAULT_COLORMAP,
ax: Optional[_AxesSubplot] = None,
**kwds,
) -> Union[StackedViolin, dict, None]:
"""\
Stacked violin plots.
Makes a compact image composed of individual violin plots
(from :func:`~seaborn.violinplot`) stacked on top of each other.
Useful to visualize gene expression per cluster.
Wraps :func:`seaborn.violinplot` for :class:`~anndata.AnnData`.
This function provides a convenient interface to the
:class:`~scanpy.pl.StackedViolin` class. If you need more flexibility,
you should use :class:`~scanpy.pl.StackedViolin` directly.
Parameters
----------
{common_plot_args}
{groupby_plots_args}
stripplot
Add a stripplot on top of the violin plot.
See :func:`~seaborn.stripplot`.
jitter
Add jitter to the stripplot (only when stripplot is True)
See :func:`~seaborn.stripplot`.
size
Size of the jitter points.
order
Order in which to show the categories. Note: if `dendrogram=True`
the categories order will be given by the dendrogram and `order`
will be ignored.
scale
The method used to scale the width of each violin.
If 'width' (the default), each violin will have the same width.
If 'area', each violin will have the same area.
If 'count', a violin’s width corresponds to the number of observations.
row_palette
Be default, median values are mapped to the violin color using a
color map (see `cmap` argument). Alternatively, a 'row_palette` can
be given to color each violin plot row using a different colors.
The value should be a valid seaborn or matplotlib palette name
(see :func:`~seaborn.color_palette`).
Alternatively, a single color name or hex value can be passed,
e.g. `'red'` or `'#cc33ff'`.
{show_save_ax}
kwds
Are passed to :func:`~seaborn.violinplot`.
Returns
-------
If `return_fig` is `True`, returns a :class:`~scanpy.pl.StackedViolin` object,
else if `show` is false, return axes dict
See also
--------
:class:`~scanpy.pl.StackedViolin`
:func:`~scanpy.pl.rank_genes_groups_stacked_violin` to plot marker genes identified
using the :func:`~scanpy.tl.rank_genes_groups` function.
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)
Get StackedViolin object for fine tuning
>>> vp = sc.pl.stacked_violin(adata, markers, 'bulk_labels', return_fig=True)
>>> vp.add_totals().style(ylim=(0,5)).show()
The axes used can be obtained using the get_axes() method:
>>> axes_dict = vp.get_axes()
"""
vp = StackedViolin(
adata,
var_names,
groupby=groupby,
use_raw=use_raw,
log=log,
num_categories=num_categories,
standard_scale=standard_scale,
title=title,
figsize=figsize,
gene_symbols=gene_symbols,
var_group_positions=var_group_positions,
var_group_labels=var_group_labels,
var_group_rotation=var_group_rotation,
layer=layer,
ax=ax,
**kwds,
)
if dendrogram:
vp.add_dendrogram(dendrogram_key=dendrogram)
if swap_axes:
vp.swap_axes()
vp = vp.style(
cmap=cmap,
stripplot=stripplot,
jitter=jitter,
jitter_size=size,
row_palette=row_palette,
scale=scale,
).legend(title=colorbar_title)
if return_fig:
return vp
else:
return vp.show(show=show, save=save)
|
def stacked_violin(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Union[str, Sequence[str]],
log: bool = False,
use_raw: Optional[bool] = None,
num_categories: int = 7,
title: Optional[str] = None,
colorbar_title: Optional[str] = StackedViolin.DEFAULT_COLOR_LEGEND_TITLE,
figsize: Optional[Tuple[float, float]] = None,
dendrogram: Union[bool, str] = False,
gene_symbols: Optional[str] = None,
var_group_positions: Optional[Sequence[Tuple[int, int]]] = None,
var_group_labels: Optional[Sequence[str]] = None,
standard_scale: Optional[Literal["var", "obs"]] = None,
var_group_rotation: Optional[float] = None,
layer: Optional[str] = None,
stripplot: bool = False,
jitter: Union[float, bool] = False,
size: int = 1,
scale: Literal["area", "count", "width"] = "width",
order: Optional[Sequence[str]] = None,
swap_axes: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
return_fig: Optional[bool] = False,
row_palette: Optional[str] = None,
cmap: Optional[str] = StackedViolin.DEFAULT_COLORMAP,
ax: Optional[_AxesSubplot] = None,
**kwds,
) -> Union[StackedViolin, dict, None]:
"""\
Stacked violin plots.
Makes a compact image composed of individual violin plots
(from :func:`~seaborn.violinplot`) stacked on top of each other.
Useful to visualize gene expression per cluster.
Wraps :func:`seaborn.violinplot` for :class:`~anndata.AnnData`.
This function provides a convenient interface to the :class:`StackedViolin`
class. If you need more flexibility, you should use :class:`StackedViolin` directly.
Parameters
----------
{common_plot_args}
{groupby_plots_args}
stripplot
Add a stripplot on top of the violin plot.
See :func:`~seaborn.stripplot`.
jitter
Add jitter to the stripplot (only when stripplot is True)
See :func:`~seaborn.stripplot`.
size
Size of the jitter points.
order
Order in which to show the categories. Note: if `dendrogram=True`
the categories order will be given by the dendrogram and `order`
will be ignored.
scale
The method used to scale the width of each violin.
If 'width' (the default), each violin will have the same width.
If 'area', each violin will have the same area.
If 'count', a violin’s width corresponds to the number of observations.
row_palette
Be default, median values are mapped to the violin color using a
color map (see `cmap` argument). Alternatively, a 'row_palette` can
be given to color each violin plot row using a different colors.
The value should be a valid seaborn or matplotlib palette name
(see :func:`~seaborn.color_palette`).
Alternatively, a single color name or hex value can be passed,
e.g. `'red'` or `'#cc33ff'`.
{show_save_ax}
**kwds
Are passed to :func:`~seaborn.violinplot`.
Returns
-------
If `return_fig` is `True`, returns a :class:`StackedViolin` object,
else if `show` is false, return axes dict
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)
Get StackedViolin object for fine tuning
>>> vp = sc.pl.stacked_violin(adata, markers, 'bulk_labels', return_fig=True)
>>> vp.add_totals().style(ylim=(0,5)).show()
The axes used can be obtained using the get_axes() method
>>> axes_dict = vp.get_axes()
See also
--------
rank_genes_groups_stacked_violin: to plot marker genes identified using
the :func:`~scanpy.tl.rank_genes_groups` function.
"""
vp = StackedViolin(
adata,
var_names,
groupby=groupby,
use_raw=use_raw,
log=log,
num_categories=num_categories,
standard_scale=standard_scale,
title=title,
figsize=figsize,
gene_symbols=gene_symbols,
var_group_positions=var_group_positions,
var_group_labels=var_group_labels,
var_group_rotation=var_group_rotation,
layer=layer,
ax=ax,
**kwds,
)
if dendrogram:
vp.add_dendrogram(dendrogram_key=dendrogram)
if swap_axes:
vp.swap_axes()
vp = vp.style(
cmap=cmap,
stripplot=stripplot,
jitter=jitter,
jitter_size=size,
row_palette=row_palette,
scale=scale,
).legend(title=colorbar_title)
if return_fig:
return vp
else:
return vp.show(show=show, save=save)
|
https://github.com/theislab/scanpy/issues/1307
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/docs/conf.py:112: RemovedInSphinx40Warning: The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
app.add_stylesheet('css/custom.css')
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/cmd/build.py", line 280, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/application.py", line 348, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 213, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 178, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/1204/lib/python3.6/site-packages/sphinx/util/logging.py", line 415, in filter
raise SphinxWarning(location + ":" + str(message))
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/1204/scanpy/plotting/_dotplot.py:docstring of scanpy.pl.dotplot:122:Inline strong start-string without end-string.
|
sphinx.errors.SphinxWarning
|
def _prepare_dataframe(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Optional[str] = None,
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
layer=None,
gene_symbols: Optional[str] = None,
):
"""
Given the anndata object, prepares a data frame in which the row index are the categories
defined by group by and the columns correspond to var_names.
Parameters
----------
adata
Annotated data matrix.
var_names
`var_names` should be a valid subset of `adata.var_names`.
groupby
The key of the observation grouping to consider. It is expected that
groupby is a categorical. If groupby is not a categorical observation,
it would be subdivided into `num_categories`.
use_raw
Use `raw` attribute of `adata` if present.
log
Use the log of the values
num_categories
Only used if groupby observation is not categorical. This value
determines the number of groups into which the groupby observation
should be subdivided.
gene_symbols
Key for field in .var that stores gene symbols.
Returns
-------
Tuple of `pandas.DataFrame` and list of categories.
"""
from scipy.sparse import issparse
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None:
use_raw = True
if isinstance(var_names, str):
var_names = [var_names]
if groupby is not None:
if groupby not in adata.obs_keys():
raise ValueError(
"groupby has to be a valid observation. "
f"Given {groupby}, valid observations: {adata.obs_keys()}"
)
if gene_symbols is not None and gene_symbols in adata.var.columns:
# translate gene_symbols to var_names
# slow method but gives a meaningful error if no gene symbol is found:
translated_var_names = []
# if we're using raw to plot, we should also do gene symbol translations
# using raw
if use_raw:
adata_or_raw = adata.raw
else:
adata_or_raw = adata
for symbol in var_names:
if symbol not in adata_or_raw.var[gene_symbols].values:
logg.error(
f"Gene symbol {symbol!r} not found in given "
f"gene_symbols column: {gene_symbols!r}"
)
return
translated_var_names.append(
adata_or_raw.var[adata_or_raw.var[gene_symbols] == symbol].index[0]
)
symbols = var_names
var_names = translated_var_names
if layer is not None:
if layer not in adata.layers.keys():
raise KeyError(
f"Selected layer: {layer} is not in the layers list. "
f"The list of valid layers is: {adata.layers.keys()}"
)
matrix = adata[:, var_names].layers[layer]
elif use_raw:
matrix = adata.raw[:, var_names].X
else:
matrix = adata[:, var_names].X
if issparse(matrix):
matrix = matrix.toarray()
if log:
matrix = np.log1p(matrix)
obs_tidy = pd.DataFrame(matrix, columns=var_names)
if groupby is None:
groupby = ""
categorical = pd.Series(np.repeat("", len(obs_tidy))).astype("category")
else:
if not is_categorical_dtype(adata.obs[groupby]):
# if the groupby column is not categorical, turn it into one
# by subdividing into `num_categories` categories
categorical = pd.cut(adata.obs[groupby], num_categories)
else:
categorical = adata.obs[groupby]
obs_tidy.set_index(categorical, groupby, inplace=True)
if gene_symbols is not None:
# translate the column names to the symbol names
obs_tidy.rename(
columns=dict([(var_names[x], symbols[x]) for x in range(len(var_names))]),
inplace=True,
)
categories = obs_tidy.index.categories
return categories, obs_tidy
|
def _prepare_dataframe(
adata: AnnData,
var_names: Union[_VarNames, Mapping[str, _VarNames]],
groupby: Optional[str] = None,
use_raw: Optional[bool] = None,
log: bool = False,
num_categories: int = 7,
layer=None,
gene_symbols: Optional[str] = None,
):
"""
Given the anndata object, prepares a data frame in which the row index are the categories
defined by group by and the columns correspond to var_names.
Parameters
----------
adata
Annotated data matrix.
var_names
`var_names` should be a valid subset of `adata.var_names`.
groupby
The key of the observation grouping to consider. It is expected that
groupby is a categorical. If groupby is not a categorical observation,
it would be subdivided into `num_categories`.
use_raw
Use `raw` attribute of `adata` if present.
log
Use the log of the values
num_categories
Only used if groupby observation is not categorical. This value
determines the number of groups into which the groupby observation
should be subdivided.
gene_symbols
Key for field in .var that stores gene symbols.
Returns
-------
Tuple of `pandas.DataFrame` and list of categories.
"""
from scipy.sparse import issparse
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None:
use_raw = True
if isinstance(var_names, str):
var_names = [var_names]
if groupby is not None:
if groupby not in adata.obs_keys():
raise ValueError(
"groupby has to be a valid observation. "
f"Given {groupby}, valid observations: {adata.obs_keys()}"
)
if gene_symbols is not None and gene_symbols in adata.var.columns:
# translate gene_symbols to var_names
# slow method but gives a meaningful error if no gene symbol is found:
translated_var_names = []
for symbol in var_names:
if symbol not in adata.var[gene_symbols].values:
logg.error(
f"Gene symbol {symbol!r} not found in given "
f"gene_symbols column: {gene_symbols!r}"
)
return
translated_var_names.append(
adata.var[adata.var[gene_symbols] == symbol].index[0]
)
symbols = var_names
var_names = translated_var_names
if layer is not None:
if layer not in adata.layers.keys():
raise KeyError(
f"Selected layer: {layer} is not in the layers list. "
f"The list of valid layers is: {adata.layers.keys()}"
)
matrix = adata[:, var_names].layers[layer]
elif use_raw:
matrix = adata.raw[:, var_names].X
else:
matrix = adata[:, var_names].X
if issparse(matrix):
matrix = matrix.toarray()
if log:
matrix = np.log1p(matrix)
obs_tidy = pd.DataFrame(matrix, columns=var_names)
if groupby is None:
groupby = ""
categorical = pd.Series(np.repeat("", len(obs_tidy))).astype("category")
else:
if not is_categorical_dtype(adata.obs[groupby]):
# if the groupby column is not categorical, turn it into one
# by subdividing into `num_categories` categories
categorical = pd.cut(adata.obs[groupby], num_categories)
else:
categorical = adata.obs[groupby]
obs_tidy.set_index(categorical, groupby, inplace=True)
if gene_symbols is not None:
# translate the column names to the symbol names
obs_tidy.rename(
columns=dict([(var_names[x], symbols[x]) for x in range(len(var_names))]),
inplace=True,
)
categories = obs_tidy.index.categories
return categories, obs_tidy
|
https://github.com/theislab/scanpy/issues/1277
|
ERROR: Gene symbol 'ENSGALG00000048305' not found in given gene_symbols column: 'varnames'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-80ce653c9d2e> in <module>
----> 1 sc.pl.heatmap(
2 adata,
3 var_names=marker_genes_table.iloc[:, :5].values.flatten(),
4 groupby='cluster_anno',
5 show_gene_labels=True,
~/anaconda3/envs/scanpy/lib/python3.8/site-packages/scanpy/plotting/_anndata.py in heatmap(adata, var_names, groupby, use_raw, log, num_categories, dendrogram, gene_symbols, var_group_positions, var_group_labels, var_group_rotation, layer, standard_scale, swap_axes, show_gene_labels, show, save, figsize, **kwds)
1413 )
1414
-> 1415 categories, obs_tidy = _prepare_dataframe(
1416 adata,
1417 var_names,
TypeError: cannot unpack non-iterable NoneType object
|
TypeError
|
def circles(x, y, s, ax, marker=None, c="b", vmin=None, vmax=None, **kwargs):
"""
Taken from here: https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to pl.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
pl.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped]
collection = PatchCollection(patches, **kwargs)
if isinstance(c, np.ndarray) and np.issubdtype(c.dtype, np.number):
collection.set_array(c)
collection.set_clim(vmin, vmax)
else:
collection.set_facecolor(c)
ax.add_collection(collection)
return collection
|
def circles(x, y, s, ax, marker=None, c="b", vmin=None, vmax=None, **kwargs):
"""
Taken from here: https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to pl.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
pl.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped]
collection = PatchCollection(patches, **kwargs)
if c is not None and np.issubdtype(c.dtype, np.number):
collection.set_array(c)
collection.set_clim(vmin, vmax)
else:
collection.set_facecolor(c)
ax.add_collection(collection)
return collection
|
https://github.com/theislab/scanpy/issues/1225
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-1ffa4586cef4> in <module>
----> 1 sc.pl.spatial(adata)
~/github/scanpy/scanpy/plotting/_tools/scatterplots.py in spatial(adata, img_key, library_id, crop_coord, alpha_img, bw, **kwargs)
785 bw=bw,
786 library_id=library_id,
--> 787 **kwargs,
788 )
789
~/github/scanpy/scanpy/plotting/_tools/scatterplots.py in embedding(adata, basis, color, gene_symbols, use_raw, sort_order, edges, edges_width, edges_color, neighbors_key, arrows, arrows_kwds, groups, components, layer, projection, img_key, crop_coord, alpha_img, bw, library_id, color_map, palette, size, frameon, legend_fontsize, legend_fontweight, legend_loc, legend_fontoutline, vmax, vmin, add_outline, outline_width, outline_color, ncols, hspace, wspace, title, show, save, ax, return_fig, **kwargs)
397 c=color_vector,
398 rasterized=settings._vector_friendly,
--> 399 **kwargs,
400 )
401
~/github/scanpy/scanpy/plotting/_utils.py in circles(x, y, s, ax, marker, c, vmin, vmax, **kwargs)
1127 patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped]
1128 collection = PatchCollection(patches, **kwargs)
-> 1129 if c is not None and np.issubdtype(c.dtype, np.number):
1130 collection.set_array(c)
1131 collection.set_clim(vmin, vmax)
AttributeError: 'str' object has no attribute 'dtype'
|
AttributeError
|
def _standardize_data(
model: pd.DataFrame,
data: pd.DataFrame,
batch_key: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]:
"""\
Standardizes the data per gene.
The aim here is to make mean and variance be comparable across batches.
Parameters
--------
model
Contains the batch annotation
data
Contains the Data
batch_key
Name of the batch column in the model matrix
Returns
--------
s_data
Standardized Data
design
Batch assignment as one-hot encodings
var_pooled
Pooled variance per gene
stand_mean
Gene-wise mean
"""
# compute the design matrix
batch_items = model.groupby(batch_key).groups.items()
batch_levels, batch_info = zip(*batch_items)
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
n_array = float(sum(n_batches))
design = _design_matrix(model, batch_key, batch_levels)
# compute pooled variance estimator
B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
var_pooled = (data - np.dot(design, B_hat).T) ** 2
var_pooled = np.dot(var_pooled, np.ones((int(n_array), 1)) / int(n_array))
# Compute the means
if np.sum(var_pooled == 0) > 0:
print(f"Found {np.sum(var_pooled == 0)} genes with zero variance.")
stand_mean = np.dot(
grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array)))
)
tmp = np.array(design.copy())
tmp[:, :n_batch] = 0
stand_mean += np.dot(tmp, B_hat).T
# need to be a bit careful with the zero variance genes
# just set the zero variance genes to zero in the standardized data
s_data = np.where(
var_pooled == 0,
0,
((data - stand_mean) / np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array))))),
)
s_data = pd.DataFrame(s_data, index=data.index, columns=data.columns)
return s_data, design, var_pooled, stand_mean
|
def _standardize_data(
model: pd.DataFrame,
data: pd.DataFrame,
batch_key: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]:
"""\
Standardizes the data per gene.
The aim here is to make mean and variance be comparable across batches.
Parameters
--------
model
Contains the batch annotation
data
Contains the Data
batch_key
Name of the batch column in the model matrix
Returns
--------
s_data
Standardized Data
design
Batch assignment as one-hot encodings
var_pooled
Pooled variance per gene
stand_mean
Gene-wise mean
"""
# compute the design matrix
batch_items = model.groupby(batch_key).groups.items()
batch_levels, batch_info = zip(*batch_items)
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
n_array = float(sum(n_batches))
design = _design_matrix(model, batch_key, batch_levels)
# compute pooled variance estimator
B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
var_pooled = (data - np.dot(design, B_hat).T) ** 2
var_pooled = np.dot(var_pooled, np.ones((int(n_array), 1)) / int(n_array))
# Compute the means
if np.sum(var_pooled == 0) > 0:
print("Found {} genes with zero variance.".format(np.sum(var_pooled == 0)))
stand_mean = np.dot(
grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array)))
)
tmp = np.array(design.copy())
tmp[:, :n_batch] = 0
stand_mean += np.dot(tmp, B_hat).T
# need to be a bit careful with the zero variance genes
# just set the zero variance genes to zero in the standardized data
s_data = np.where(
var_pooled == 0,
0,
((data - stand_mean) / np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array))))),
)
s_data = pd.DataFrame(s_data, index=data.index, columns=data.columns)
return s_data, design, var_pooled, stand_mean
|
https://github.com/theislab/scanpy/issues/1170
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-350690ae55dc> in <module>
1 # ComBat batch correction
----> 2 sc.pp.combat(adata, key='sample')
~/anaconda2/envs/scanpy/lib/python3.6/site-packages/scanpy/preprocessing/_combat.py in combat(adata, key, covariates, inplace)
266 denom = np.dot(dsq, np.ones((1, n_batches[j])))
267 numer = np.array(bayesdata[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star).T)
--> 268 bayesdata[batch_idxs] = numer / denom
269
270 vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
ValueError: operands could not be broadcast together with shapes (23259,18243) (23259,15479)
|
ValueError
|
def combat(
adata: AnnData,
key: str = "batch",
covariates: Optional[Collection[str]] = None,
inplace: bool = True,
) -> Union[AnnData, np.ndarray, None]:
"""\
ComBat function for batch effect correction [Johnson07]_ [Leek12]_
[Pedersen12]_.
Corrects for batch effects by fitting linear models, gains statistical power
via an EB framework where information is borrowed across genes.
This uses the implementation `combat.py`_ [Pedersen12]_.
.. _combat.py: https://github.com/brentp/combat.py
Parameters
----------
adata
Annotated data matrix
key
Key to a categorical annotation from :attr:`~anndata.AnnData.obs`
that will be used for batch effect removal.
covariates
Additional covariates besides the batch variable such as adjustment
variables or biological condition. This parameter refers to the design
matrix `X` in Equation 2.1 in [Johnson07]_ and to the `mod` argument in
the original combat function in the sva R package.
Note that not including covariates may introduce bias or lead to the
removal of biological signal in unbalanced designs.
inplace
Whether to replace adata.X or to return the corrected data
Returns
-------
Depending on the value of `inplace`, either returns the corrected matrix or
or modifies `adata.X`.
"""
# check the input
if key not in adata.obs_keys():
raise ValueError("Could not find the key {!r} in adata.obs".format(key))
if covariates is not None:
cov_exist = np.isin(covariates, adata.obs_keys())
if np.any(~cov_exist):
missing_cov = np.array(covariates)[~cov_exist].tolist()
raise ValueError(
"Could not find the covariate(s) {!r} in adata.obs".format(missing_cov)
)
if key in covariates:
raise ValueError("Batch key and covariates cannot overlap")
if len(covariates) != len(set(covariates)):
raise ValueError("Covariates must be unique")
# only works on dense matrices so far
if issparse(adata.X):
X = adata.X.A.T
else:
X = adata.X.T
data = pd.DataFrame(
data=X,
index=adata.var_names,
columns=adata.obs_names,
)
sanitize_anndata(adata)
# construct a pandas series of the batch annotation
model = adata.obs[[key] + (covariates if covariates else [])]
batch_info = model.groupby(key).indices.values()
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
n_array = float(sum(n_batches))
# standardize across genes using a pooled variance estimator
logg.info("Standardizing Data across genes.\n")
s_data, design, var_pooled, stand_mean = _standardize_data(model, data, key)
# fitting the parameters on the standardized data
logg.info("Fitting L/S model and finding priors\n")
batch_design = design[design.columns[:n_batch]]
# first estimate of the additive batch effect
gamma_hat = (
la.inv(batch_design.T @ batch_design) @ batch_design.T @ s_data.T
).values
delta_hat = []
# first estimate for the multiplicative batch effect
for i, batch_idxs in enumerate(batch_info):
delta_hat.append(s_data.iloc[:, batch_idxs].var(axis=1))
# empirically fix the prior hyperparameters
gamma_bar = gamma_hat.mean(axis=1)
t2 = gamma_hat.var(axis=1)
# a_prior and b_prior are the priors on lambda and theta from Johnson and Li (2006)
a_prior = list(map(_aprior, delta_hat))
b_prior = list(map(_bprior, delta_hat))
logg.info("Finding parametric adjustments\n")
# gamma star and delta star will be our empirical bayes (EB) estimators
# for the additive and multiplicative batch effect per batch and cell
gamma_star, delta_star = [], []
for i, batch_idxs in enumerate(batch_info):
# temp stores our estimates for the batch effect parameters.
# temp[0] is the additive batch effect
# temp[1] is the multiplicative batch effect
gamma, delta = _it_sol(
s_data.iloc[:, batch_idxs].values,
gamma_hat[i],
delta_hat[i].values,
gamma_bar[i],
t2[i],
a_prior[i],
b_prior[i],
)
gamma_star.append(gamma)
delta_star.append(delta)
logg.info("Adjusting data\n")
bayesdata = s_data
gamma_star = np.array(gamma_star)
delta_star = np.array(delta_star)
# we now apply the parametric adjustment to the standardized data from above
# loop over all batches in the data
for j, batch_idxs in enumerate(batch_info):
# we basically substract the additive batch effect, rescale by the ratio
# of multiplicative batch effect to pooled variance and add the overall gene
# wise mean
dsq = np.sqrt(delta_star[j, :])
dsq = dsq.reshape((len(dsq), 1))
denom = np.dot(dsq, np.ones((1, n_batches[j])))
numer = np.array(
bayesdata.iloc[:, batch_idxs]
- np.dot(batch_design.iloc[batch_idxs], gamma_star).T
)
bayesdata.iloc[:, batch_idxs] = numer / denom
vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean
# put back into the adata object or return
if inplace:
adata.X = bayesdata.values.transpose()
else:
return bayesdata.values.transpose()
|
def combat(
adata: AnnData,
key: str = "batch",
covariates: Optional[Collection[str]] = None,
inplace: bool = True,
) -> Union[AnnData, np.ndarray, None]:
"""\
ComBat function for batch effect correction [Johnson07]_ [Leek12]_
[Pedersen12]_.
Corrects for batch effects by fitting linear models, gains statistical power
via an EB framework where information is borrowed across genes.
This uses the implementation `combat.py`_ [Pedersen12]_.
.. _combat.py: https://github.com/brentp/combat.py
Parameters
----------
adata
Annotated data matrix
key
Key to a categorical annotation from :attr:`~anndata.AnnData.obs`
that will be used for batch effect removal.
covariates
Additional covariates besides the batch variable such as adjustment
variables or biological condition. This parameter refers to the design
matrix `X` in Equation 2.1 in [Johnson07]_ and to the `mod` argument in
the original combat function in the sva R package.
Note that not including covariates may introduce bias or lead to the
removal of biological signal in unbalanced designs.
inplace
Whether to replace adata.X or to return the corrected data
Returns
-------
Depending on the value of `inplace`, either returns the corrected matrix or
or modifies `adata.X`.
"""
# check the input
if key not in adata.obs_keys():
raise ValueError("Could not find the key {!r} in adata.obs".format(key))
if covariates is not None:
cov_exist = np.isin(covariates, adata.obs_keys())
if np.any(~cov_exist):
missing_cov = np.array(covariates)[~cov_exist].tolist()
raise ValueError(
"Could not find the covariate(s) {!r} in adata.obs".format(missing_cov)
)
if key in covariates:
raise ValueError("Batch key and covariates cannot overlap")
if len(covariates) != len(set(covariates)):
raise ValueError("Covariates must be unique")
# only works on dense matrices so far
if issparse(adata.X):
X = adata.X.A.T
else:
X = adata.X.T
data = pd.DataFrame(
data=X,
index=adata.var_names,
columns=adata.obs_names,
)
sanitize_anndata(adata)
# construct a pandas series of the batch annotation
model = adata.obs[[key] + (covariates if covariates else [])]
batch_info = model.groupby(key).indices.values()
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
n_array = float(sum(n_batches))
# standardize across genes using a pooled variance estimator
logg.info("Standardizing Data across genes.\n")
s_data, design, var_pooled, stand_mean = _standardize_data(model, data, key)
# fitting the parameters on the standardized data
logg.info("Fitting L/S model and finding priors\n")
batch_design = design[design.columns[:n_batch]]
# first estimate of the additive batch effect
gamma_hat = np.dot(
np.dot(la.inv(np.dot(batch_design.T, batch_design)), batch_design.T), s_data.T
)
delta_hat = []
# first estimate for the multiplicative batch effect
for i, batch_idxs in enumerate(batch_info):
delta_hat.append(s_data.iloc[:, batch_idxs].var(axis=1))
# empirically fix the prior hyperparameters
gamma_bar = gamma_hat.mean(axis=1)
t2 = gamma_hat.var(axis=1)
# a_prior and b_prior are the priors on lambda and theta from Johnson and Li (2006)
a_prior = list(map(_aprior, delta_hat))
b_prior = list(map(_bprior, delta_hat))
logg.info("Finding parametric adjustments\n")
# gamma star and delta star will be our empirical bayes (EB) estimators
# for the additive and multiplicative batch effect per batch and cell
gamma_star, delta_star = [], []
for i, batch_idxs in enumerate(batch_info):
# temp stores our estimates for the batch effect parameters.
# temp[0] is the additive batch effect
# temp[1] is the multiplicative batch effect
gamma, delta = _it_sol(
s_data.iloc[:, batch_idxs].values,
gamma_hat[i],
delta_hat[i].values,
gamma_bar[i],
t2[i],
a_prior[i],
b_prior[i],
)
gamma_star.append(gamma)
delta_star.append(delta)
logg.info("Adjusting data\n")
bayesdata = s_data
gamma_star = np.array(gamma_star)
delta_star = np.array(delta_star)
# we now apply the parametric adjustment to the standardized data from above
# loop over all batches in the data
for j, batch_idxs in enumerate(batch_info):
# we basically substract the additive batch effect, rescale by the ratio
# of multiplicative batch effect to pooled variance and add the overall gene
# wise mean
dsq = np.sqrt(delta_star[j, :])
dsq = dsq.reshape((len(dsq), 1))
denom = np.dot(dsq, np.ones((1, n_batches[j])))
numer = np.array(
bayesdata.iloc[:, batch_idxs]
- np.dot(batch_design.iloc[batch_idxs], gamma_star).T
)
bayesdata.iloc[:, batch_idxs] = numer / denom
vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean
# put back into the adata object or return
if inplace:
adata.X = bayesdata.values.transpose()
else:
return bayesdata.values.transpose()
|
https://github.com/theislab/scanpy/issues/1170
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-350690ae55dc> in <module>
1 # ComBat batch correction
----> 2 sc.pp.combat(adata, key='sample')
~/anaconda2/envs/scanpy/lib/python3.6/site-packages/scanpy/preprocessing/_combat.py in combat(adata, key, covariates, inplace)
266 denom = np.dot(dsq, np.ones((1, n_batches[j])))
267 numer = np.array(bayesdata[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star).T)
--> 268 bayesdata[batch_idxs] = numer / denom
269
270 vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
ValueError: operands could not be broadcast together with shapes (23259,18243) (23259,15479)
|
ValueError
|
def _download_visium_dataset(sample_id: str, base_dir: Optional[Path] = None):
"""
Params
------
sample_id
String name of example visium dataset.
base_dir
Where to download the dataset to.
"""
import tarfile
if base_dir is None:
base_dir = settings.datasetdir
url_prefix = f"http://cf.10xgenomics.com/samples/spatial-exp/1.0.0/{sample_id}/"
sample_dir = base_dir / sample_id
sample_dir.mkdir(exist_ok=True, parents=True)
# Download spatial data
tar_filename = f"{sample_id}_spatial.tar.gz"
tar_pth = sample_dir / tar_filename
_utils.check_presence_download(
filename=tar_pth, backup_url=url_prefix + tar_filename
)
with tarfile.open(tar_pth) as f:
for el in f:
if not (sample_dir / el.name).exists():
f.extract(el, sample_dir)
# Download counts
_utils.check_presence_download(
filename=sample_dir / "filtered_feature_bc_matrix.h5",
backup_url=url_prefix + f"{sample_id}_filtered_feature_bc_matrix.h5",
)
|
def _download_visium_dataset(sample_id: str, base_dir: Optional[Path] = None):
"""
Params
------
sample_id
String name of example visium dataset.
base_dir
Where to download the dataset to.
"""
import tarfile
if base_dir is None:
base_dir = settings.datasetdir
url_prefix = f"http://cf.10xgenomics.com/samples/spatial-exp/1.0.0/{sample_id}/"
sample_dir = base_dir / sample_id
sample_dir.mkdir(exist_ok=True)
# Download spatial data
tar_filename = f"{sample_id}_spatial.tar.gz"
tar_pth = sample_dir / tar_filename
_utils.check_presence_download(
filename=tar_pth, backup_url=url_prefix + tar_filename
)
with tarfile.open(tar_pth) as f:
for el in f:
if not (sample_dir / el.name).exists():
f.extract(el, sample_dir)
# Download counts
_utils.check_presence_download(
filename=sample_dir / "filtered_feature_bc_matrix.h5",
backup_url=url_prefix + f"{sample_id}_filtered_feature_bc_matrix.h5",
)
|
https://github.com/theislab/scanpy/issues/1184
|
FileNotFoundError: [Errno 2] No such file or directory: '/Users/lisa/data/V1_Breast_Cancer_Block_A_Section_1'
|
FileNotFoundError
|
def _download(url: str, path: Path):
from tqdm.auto import tqdm
from urllib.request import urlretrieve
path.parent.mkdir(parents=True, exist_ok=True)
with tqdm(unit="B", unit_scale=True, miniters=1, desc=path.name) as t:
def update_to(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
t.update(b * bsize - t.n)
try:
urlretrieve(url, str(path), reporthook=update_to)
except Exception:
# Make sure file doesn’t exist half-downloaded
if path.is_file():
path.unlink()
raise
|
def _download(url: str, path: Path):
from tqdm.auto import tqdm
from urllib.request import urlretrieve
path.parent.mkdir(parents=True, exist_ok=True)
with tqdm(unit="B", unit_scale=True, miniters=1, desc=path.name) as t:
def update_to(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
t.update(b * bsize - t.n)
try:
urlretrieve(url, str(path), reporthook=update_to)
except Exception:
# Make sure file doesn’t exist half-downloaded
path.unlink(missing_ok=True)
raise
|
https://github.com/theislab/scanpy/issues/1082
|
/scanpy/scanpy/datasets/_ebi_expression_atlas.py in download_experiment(accession)
41
42 _download(
---> 43 download_url + "experiment-design", experiment_dir / "experimental_design.tsv",
44 )
45 _download(
/scanpy/scanpy/readwrite.py in _download(url, path)
877 except Exception:
878 # Make sure file doesn’t exist half-downloaded
--> 879 path.unlink(missing_ok=True)
880 raise
881
TypeError: unlink() got an unexpected keyword argument 'missing_ok'
...
|
TypeError
|
def _init_umap(self, adata):
from umap import UMAP
self._umap = UMAP(
metric=adata.uns["neighbors"]["params"]["metric"],
random_state=adata.uns["umap"]["params"].get("random_state", 0),
)
self._umap.embedding_ = adata.obsm["X_umap"]
self._umap._raw_data = self._rep
self._umap._sparse_data = issparse(self._rep)
self._umap._small_data = self._rep.shape[0] < 4096
self._umap._metric_kwds = adata.uns["neighbors"]["params"].get("metric_kwds", {})
self._umap._n_neighbors = adata.uns["neighbors"]["params"]["n_neighbors"]
self._umap._initial_alpha = self._umap.learning_rate
if self._random_init is not None or self._tree_init is not None:
self._umap._random_init = self._random_init
self._umap._tree_init = self._tree_init
self._umap._search = self._search
self._umap._rp_forest = self._rp_forest
self._umap._search_graph = self._search_graph
self._umap._a = adata.uns["umap"]["params"]["a"]
self._umap._b = adata.uns["umap"]["params"]["b"]
self._umap._input_hash = None
|
def _init_umap(self, adata):
from umap import UMAP
self._umap = UMAP(metric=adata.uns["neighbors"]["params"]["metric"])
self._umap.embedding_ = adata.obsm["X_umap"]
self._umap._raw_data = self._rep
self._umap._sparse_data = issparse(self._rep)
self._umap._small_data = self._rep.shape[0] < 4096
self._umap._metric_kwds = adata.uns["neighbors"]["params"].get("metric_kwds", {})
self._umap._n_neighbors = adata.uns["neighbors"]["params"]["n_neighbors"]
self._umap._initial_alpha = self._umap.learning_rate
if self._random_init is not None or self._tree_init is not None:
self._umap._random_init = self._random_init
self._umap._tree_init = self._tree_init
self._umap._search = self._search
self._umap._rp_forest = self._rp_forest
self._umap._search_graph = self._search_graph
self._umap._a = adata.uns["umap"]["params"]["a"]
self._umap._b = adata.uns["umap"]["params"]["b"]
self._umap._input_hash = None
|
https://github.com/theislab/scanpy/issues/1036
|
------------------------------------------------------------------------------------------------------------------- Captured stderr call -------------------------------------------------------------------------------------------------------------------
running ingest
______________________________________________________________________________________________________________ test_ingest_map_embedding_umap ______________________________________________________________________________________________________________
def test_ingest_map_embedding_umap():
adata_ref = sc.AnnData(X)
adata_new = sc.AnnData(T)
sc.pp.neighbors(
adata_ref, method='umap', use_rep='X', n_neighbors=4, random_state=0
)
sc.tl.umap(adata_ref, random_state=0)
ing = sc.tl.Ingest(adata_ref)
scanpy/tests/test_ingest.py:132:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
scanpy/tools/_ingest.py:270: in __init__
self._init_neighbors(adata)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <scanpy.tools._ingest.Ingest object at 0x140357550>, adata = AnnData object with n_obs × n_vars = 6 × 5
uns: 'neighbors', 'umap'
obsm: 'X_umap'
def _init_neighbors(self, adata):
from umap.distances import named_distances
from umap.nndescent import (
make_initialisations,
make_initialized_nnd_search,
)
E ImportError: cannot import name 'make_initialisations' from 'umap.nndescent' (/usr/local/lib/python3.7/site-packages/umap/nndescent.py)
scanpy/tools/_ingest.py:210: ImportError
|
ImportError
|
def umap(
adata: AnnData,
min_dist: float = 0.5,
spread: float = 1.0,
n_components: int = 2,
maxiter: Optional[int] = None,
alpha: float = 1.0,
gamma: float = 1.0,
negative_sample_rate: int = 5,
init_pos: Union[_InitPos, np.ndarray, None] = "spectral",
random_state: AnyRandom = 0,
a: Optional[float] = None,
b: Optional[float] = None,
copy: bool = False,
method: Literal["umap", "rapids"] = "umap",
) -> Optional[AnnData]:
"""\
Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout Scanpy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Parameters
----------
adata
Annotated data matrix.
min_dist
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
on a more even dispersal of points. The value should be set relative to
the ``spread`` value, which determines the scale at which embedded
points will be spread out. The default of in the `umap-learn` package is
0.1.
spread
The effective scale of embedded points. In combination with `min_dist`
this determines how clustered/clumped the embedded points are.
n_components
The number of dimensions of the embedding.
maxiter
The number of iterations (epochs) of the optimization. Called `n_epochs`
in the original UMAP.
alpha
The initial learning rate for the embedding optimization.
gamma
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate
The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos
How to initialize the low dimensional embedding. Called `init` in the
original UMAP. Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState` or `Generator`, `random_state` is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
a
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
b
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
copy
Return a copy instead of writing to adata.
method
Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field
UMAP coordinates of data.
"""
adata = adata.copy() if copy else adata
if "neighbors" not in adata.uns:
raise ValueError(
"Did not find 'neighbors/connectivities'. Run `sc.pp.neighbors` first."
)
start = logg.info("computing UMAP")
if (
"params" not in adata.uns["neighbors"]
or adata.uns["neighbors"]["params"]["method"] != "umap"
):
logg.warning("neighbors/connectivities have not been computed using umap")
from umap.umap_ import find_ab_params, simplicial_set_embedding
if a is None or b is None:
a, b = find_ab_params(spread, min_dist)
else:
a = a
b = b
adata.uns["umap"] = {"params": {"a": a, "b": b}}
if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif isinstance(init_pos, str) and init_pos == "paga":
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
if random_state != 0:
adata.uns["umap"]["params"]["random_state"] = random_state
random_state = check_random_state(random_state)
neigh_params = adata.uns["neighbors"]["params"]
X = _choose_representation(
adata,
neigh_params.get("use_rep", None),
neigh_params.get("n_pcs", None),
silent=True,
)
if method == "umap":
# the data matrix X is really only used for determining the number of connected components
# for the init condition in the UMAP embedding
n_epochs = 0 if maxiter is None else maxiter
X_umap = simplicial_set_embedding(
X,
adata.uns["neighbors"]["connectivities"].tocoo(),
n_components,
alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init_coords,
random_state,
neigh_params.get("metric", "euclidean"),
neigh_params.get("metric_kwds", {}),
verbose=settings.verbosity > 3,
)
elif method == "rapids":
metric = neigh_params.get("metric", "euclidean")
if metric != "euclidean":
raise ValueError(
f"`sc.pp.neighbors` was called with `metric` {metric!r}, "
"but umap `method` 'rapids' only supports the 'euclidean' metric."
)
from cuml import UMAP
n_neighbors = adata.uns["neighbors"]["params"]["n_neighbors"]
n_epochs = (
500 if maxiter is None else maxiter
) # 0 is not a valid value for rapids, unlike original umap
X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
umap = UMAP(
n_neighbors=n_neighbors,
n_components=n_components,
n_epochs=n_epochs,
learning_rate=alpha,
init=init_pos,
min_dist=min_dist,
spread=spread,
negative_sample_rate=negative_sample_rate,
a=a,
b=b,
verbose=settings.verbosity > 3,
)
X_umap = umap.fit_transform(X_contiguous)
adata.obsm["X_umap"] = X_umap # annotate samples with UMAP coordinates
logg.info(
" finished",
time=start,
deep=("added\n 'X_umap', UMAP coordinates (adata.obsm)"),
)
return adata if copy else None
|
def umap(
adata: AnnData,
min_dist: float = 0.5,
spread: float = 1.0,
n_components: int = 2,
maxiter: Optional[int] = None,
alpha: float = 1.0,
gamma: float = 1.0,
negative_sample_rate: int = 5,
init_pos: Union[_InitPos, np.ndarray, None] = "spectral",
random_state: AnyRandom = 0,
a: Optional[float] = None,
b: Optional[float] = None,
copy: bool = False,
method: Literal["umap", "rapids"] = "umap",
) -> Optional[AnnData]:
"""\
Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout Scanpy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Parameters
----------
adata
Annotated data matrix.
min_dist
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
on a more even dispersal of points. The value should be set relative to
the ``spread`` value, which determines the scale at which embedded
points will be spread out. The default of in the `umap-learn` package is
0.1.
spread
The effective scale of embedded points. In combination with `min_dist`
this determines how clustered/clumped the embedded points are.
n_components
The number of dimensions of the embedding.
maxiter
The number of iterations (epochs) of the optimization. Called `n_epochs`
in the original UMAP.
alpha
The initial learning rate for the embedding optimization.
gamma
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate
The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos
How to initialize the low dimensional embedding. Called `init` in the
original UMAP. Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState` or `Generator`, `random_state` is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
a
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
b
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
copy
Return a copy instead of writing to adata.
method
Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field
UMAP coordinates of data.
"""
adata = adata.copy() if copy else adata
if "neighbors" not in adata.uns:
raise ValueError(
"Did not find 'neighbors/connectivities'. Run `sc.pp.neighbors` first."
)
start = logg.info("computing UMAP")
if (
"params" not in adata.uns["neighbors"]
or adata.uns["neighbors"]["params"]["method"] != "umap"
):
logg.warning("neighbors/connectivities have not been computed using umap")
from umap.umap_ import find_ab_params, simplicial_set_embedding
if a is None or b is None:
a, b = find_ab_params(spread, min_dist)
else:
a = a
b = b
adata.uns["umap"] = {"params": {"a": a, "b": b}}
if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif isinstance(init_pos, str) and init_pos == "paga":
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
neigh_params = adata.uns["neighbors"]["params"]
X = _choose_representation(
adata,
neigh_params.get("use_rep", None),
neigh_params.get("n_pcs", None),
silent=True,
)
if method == "umap":
# the data matrix X is really only used for determining the number of connected components
# for the init condition in the UMAP embedding
n_epochs = 0 if maxiter is None else maxiter
X_umap = simplicial_set_embedding(
X,
adata.uns["neighbors"]["connectivities"].tocoo(),
n_components,
alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init_coords,
random_state,
neigh_params.get("metric", "euclidean"),
neigh_params.get("metric_kwds", {}),
verbose=settings.verbosity > 3,
)
elif method == "rapids":
metric = neigh_params.get("metric", "euclidean")
if metric != "euclidean":
raise ValueError(
f"`sc.pp.neighbors` was called with `metric` {metric!r}, "
"but umap `method` 'rapids' only supports the 'euclidean' metric."
)
from cuml import UMAP
n_neighbors = adata.uns["neighbors"]["params"]["n_neighbors"]
n_epochs = (
500 if maxiter is None else maxiter
) # 0 is not a valid value for rapids, unlike original umap
X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
umap = UMAP(
n_neighbors=n_neighbors,
n_components=n_components,
n_epochs=n_epochs,
learning_rate=alpha,
init=init_pos,
min_dist=min_dist,
spread=spread,
negative_sample_rate=negative_sample_rate,
a=a,
b=b,
verbose=settings.verbosity > 3,
)
X_umap = umap.fit_transform(X_contiguous)
adata.obsm["X_umap"] = X_umap # annotate samples with UMAP coordinates
logg.info(
" finished",
time=start,
deep=("added\n 'X_umap', UMAP coordinates (adata.obsm)"),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1036
|
------------------------------------------------------------------------------------------------------------------- Captured stderr call -------------------------------------------------------------------------------------------------------------------
running ingest
______________________________________________________________________________________________________________ test_ingest_map_embedding_umap ______________________________________________________________________________________________________________
def test_ingest_map_embedding_umap():
adata_ref = sc.AnnData(X)
adata_new = sc.AnnData(T)
sc.pp.neighbors(
adata_ref, method='umap', use_rep='X', n_neighbors=4, random_state=0
)
sc.tl.umap(adata_ref, random_state=0)
ing = sc.tl.Ingest(adata_ref)
scanpy/tests/test_ingest.py:132:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
scanpy/tools/_ingest.py:270: in __init__
self._init_neighbors(adata)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <scanpy.tools._ingest.Ingest object at 0x140357550>, adata = AnnData object with n_obs × n_vars = 6 × 5
uns: 'neighbors', 'umap'
obsm: 'X_umap'
def _init_neighbors(self, adata):
from umap.distances import named_distances
from umap.nndescent import (
make_initialisations,
make_initialized_nnd_search,
)
E ImportError: cannot import name 'make_initialisations' from 'umap.nndescent' (/usr/local/lib/python3.7/site-packages/umap/nndescent.py)
scanpy/tools/_ingest.py:210: ImportError
|
ImportError
|
def dca(
adata: AnnData,
mode: Literal["denoise", "latent"] = "denoise",
ae_type: _AEType = "zinb-conddisp",
normalize_per_cell: bool = True,
scale: bool = True,
log1p: bool = True,
# network args
hidden_size: Sequence[int] = (64, 32, 64),
hidden_dropout: Union[float, Sequence[float]] = 0.0,
batchnorm: bool = True,
activation: str = "relu",
init: str = "glorot_uniform",
network_kwds: Mapping[str, Any] = MappingProxyType({}),
# training args
epochs: int = 300,
reduce_lr: int = 10,
early_stop: int = 15,
batch_size: int = 32,
optimizer: str = "rmsprop",
random_state: AnyRandom = 0,
threads: Optional[int] = None,
learning_rate: Optional[float] = None,
verbose: bool = False,
training_kwds: Mapping[str, Any] = MappingProxyType({}),
return_model: bool = False,
return_info: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Deep count autoencoder [Eraslan18]_.
Fits a count autoencoder to the raw count data given in the anndata object
in order to denoise the data and to capture hidden representation of
cells in low dimensions. Type of the autoencoder and return values are
determined by the parameters.
.. note::
More information and bug reports `here <https://github.com/theislab/dca>`__.
Parameters
----------
adata
An anndata file with `.raw` attribute representing raw counts.
mode
`denoise` overwrites `adata.X` with denoised expression values.
In `latent` mode DCA adds `adata.obsm['X_dca']` to given adata
object. This matrix represent latent representation of cells via DCA.
ae_type
Type of the autoencoder. Return values and the architecture is
determined by the type e.g. `nb` does not provide dropout
probabilities. Types that end with "-conddisp", assumes that dispersion is mean dependant.
normalize_per_cell
If true, library size normalization is performed using
the `sc.pp.normalize_per_cell` function in Scanpy and saved into adata
object. Mean layer is re-introduces library size differences by
scaling the mean value of each cell in the output layer. See the
manuscript for more details.
scale
If true, the input of the autoencoder is centered using
`sc.pp.scale` function of Scanpy. Note that the output is kept as raw
counts as loss functions are designed for the count data.
log1p
If true, the input of the autoencoder is log transformed with a
pseudocount of one using `sc.pp.log1p` function of Scanpy.
hidden_size
Width of hidden layers.
hidden_dropout
Probability of weight dropout in the autoencoder (per layer if list
or tuple).
batchnorm
If true, batch normalization is performed.
activation
Activation function of hidden layers.
init
Initialization method used to initialize weights.
network_kwds
Additional keyword arguments for the autoencoder.
epochs
Number of total epochs in training.
reduce_lr
Reduces learning rate if validation loss does not improve in given number of epochs.
early_stop
Stops training if validation loss does not improve in given number of epochs.
batch_size
Number of samples in the batch used for SGD.
optimizer
Type of optimization method used for training.
random_state
Seed for python, numpy and tensorflow.
threads
Number of threads to use in training. All cores are used by default.
learning_rate
Learning rate to use in the training.
verbose
If true, prints additional information about training and architecture.
training_kwds
Additional keyword arguments for the training process.
return_model
If true, trained autoencoder object is returned. See "Returns".
return_info
If true, all additional parameters of DCA are stored in `adata.obsm` such as dropout
probabilities (obsm['X_dca_dropout']) and estimated dispersion values
(obsm['X_dca_dispersion']), in case that autoencoder is of type
zinb or zinb-conddisp.
copy
If true, a copy of anndata is returned.
Returns
-------
If `copy` is true and `return_model` is false, AnnData object is returned.
In "denoise" mode, `adata.X` is overwritten with the denoised values.
In "latent" mode, latent low dimensional representation of cells are stored
in `adata.obsm['X_dca']` and `adata.X` is not modified.
Note that these values are not corrected for library size effects.
If `return_info` is true, all estimated distribution parameters are stored
in AnnData like this:
`.obsm["X_dca_dropout"]`
The mixture coefficient (pi) of the zero component in ZINB,
i.e. dropout probability (if `ae_type` is `zinb` or `zinb-conddisp`).
`.obsm["X_dca_dispersion"]`
The dispersion parameter of NB.
`.uns["dca_loss_history"]`
The loss history of the training.
See `.history` attribute of Keras History class for mode details.
Finally, the raw counts are stored in `.raw` attribute of AnnData object.
If `return_model` is given, trained model is returned.
When both `copy` and `return_model` are true,
a tuple of anndata and model is returned in that order.
"""
try:
from dca.api import dca
except ImportError:
raise ImportError("Please install dca package (>= 0.2.1) via `pip install dca`")
return dca(
adata,
mode=mode,
ae_type=ae_type,
normalize_per_cell=normalize_per_cell,
scale=scale,
log1p=log1p,
hidden_size=hidden_size,
hidden_dropout=hidden_dropout,
batchnorm=batchnorm,
activation=activation,
init=init,
network_kwds=network_kwds,
epochs=epochs,
reduce_lr=reduce_lr,
early_stop=early_stop,
batch_size=batch_size,
optimizer=optimizer,
random_state=random_state,
threads=threads,
learning_rate=learning_rate,
verbose=verbose,
training_kwds=training_kwds,
return_model=return_model,
return_info=return_info,
copy=copy,
)
|
def dca(
adata: AnnData,
mode: Literal["denoise", "latent"] = "denoise",
ae_type: _AEType = "zinb-conddisp",
normalize_per_cell: bool = True,
scale: bool = True,
log1p: bool = True,
# network args
hidden_size: Sequence[int] = (64, 32, 64),
hidden_dropout: Union[float, Sequence[float]] = 0.0,
batchnorm: bool = True,
activation: str = "relu",
init: str = "glorot_uniform",
network_kwds: Mapping[str, Any] = MappingProxyType({}),
# training args
epochs: int = 300,
reduce_lr: int = 10,
early_stop: int = 15,
batch_size: int = 32,
optimizer: str = "rmsprop",
random_state: Union[int, RandomState] = 0,
threads: Optional[int] = None,
learning_rate: Optional[float] = None,
verbose: bool = False,
training_kwds: Mapping[str, Any] = MappingProxyType({}),
return_model: bool = False,
return_info: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Deep count autoencoder [Eraslan18]_.
Fits a count autoencoder to the raw count data given in the anndata object
in order to denoise the data and to capture hidden representation of
cells in low dimensions. Type of the autoencoder and return values are
determined by the parameters.
.. note::
More information and bug reports `here <https://github.com/theislab/dca>`__.
Parameters
----------
adata
An anndata file with `.raw` attribute representing raw counts.
mode
`denoise` overwrites `adata.X` with denoised expression values.
In `latent` mode DCA adds `adata.obsm['X_dca']` to given adata
object. This matrix represent latent representation of cells via DCA.
ae_type
Type of the autoencoder. Return values and the architecture is
determined by the type e.g. `nb` does not provide dropout
probabilities. Types that end with "-conddisp", assumes that dispersion is mean dependant.
normalize_per_cell
If true, library size normalization is performed using
the `sc.pp.normalize_per_cell` function in Scanpy and saved into adata
object. Mean layer is re-introduces library size differences by
scaling the mean value of each cell in the output layer. See the
manuscript for more details.
scale
If true, the input of the autoencoder is centered using
`sc.pp.scale` function of Scanpy. Note that the output is kept as raw
counts as loss functions are designed for the count data.
log1p
If true, the input of the autoencoder is log transformed with a
pseudocount of one using `sc.pp.log1p` function of Scanpy.
hidden_size
Width of hidden layers.
hidden_dropout
Probability of weight dropout in the autoencoder (per layer if list
or tuple).
batchnorm
If true, batch normalization is performed.
activation
Activation function of hidden layers.
init
Initialization method used to initialize weights.
network_kwds
Additional keyword arguments for the autoencoder.
epochs
Number of total epochs in training.
reduce_lr
Reduces learning rate if validation loss does not improve in given number of epochs.
early_stop
Stops training if validation loss does not improve in given number of epochs.
batch_size
Number of samples in the batch used for SGD.
optimizer
Type of optimization method used for training.
random_state
Seed for python, numpy and tensorflow.
threads
Number of threads to use in training. All cores are used by default.
learning_rate
Learning rate to use in the training.
verbose
If true, prints additional information about training and architecture.
training_kwds
Additional keyword arguments for the training process.
return_model
If true, trained autoencoder object is returned. See "Returns".
return_info
If true, all additional parameters of DCA are stored in `adata.obsm` such as dropout
probabilities (obsm['X_dca_dropout']) and estimated dispersion values
(obsm['X_dca_dispersion']), in case that autoencoder is of type
zinb or zinb-conddisp.
copy
If true, a copy of anndata is returned.
Returns
-------
If `copy` is true and `return_model` is false, AnnData object is returned.
In "denoise" mode, `adata.X` is overwritten with the denoised values.
In "latent" mode, latent low dimensional representation of cells are stored
in `adata.obsm['X_dca']` and `adata.X` is not modified.
Note that these values are not corrected for library size effects.
If `return_info` is true, all estimated distribution parameters are stored
in AnnData like this:
`.obsm["X_dca_dropout"]`
The mixture coefficient (pi) of the zero component in ZINB,
i.e. dropout probability (if `ae_type` is `zinb` or `zinb-conddisp`).
`.obsm["X_dca_dispersion"]`
The dispersion parameter of NB.
`.uns["dca_loss_history"]`
The loss history of the training.
See `.history` attribute of Keras History class for mode details.
Finally, the raw counts are stored in `.raw` attribute of AnnData object.
If `return_model` is given, trained model is returned.
When both `copy` and `return_model` are true,
a tuple of anndata and model is returned in that order.
"""
try:
from dca.api import dca
except ImportError:
raise ImportError("Please install dca package (>= 0.2.1) via `pip install dca`")
return dca(
adata,
mode=mode,
ae_type=ae_type,
normalize_per_cell=normalize_per_cell,
scale=scale,
log1p=log1p,
hidden_size=hidden_size,
hidden_dropout=hidden_dropout,
batchnorm=batchnorm,
activation=activation,
init=init,
network_kwds=network_kwds,
epochs=epochs,
reduce_lr=reduce_lr,
early_stop=early_stop,
batch_size=batch_size,
optimizer=optimizer,
random_state=random_state,
threads=threads,
learning_rate=learning_rate,
verbose=verbose,
training_kwds=training_kwds,
return_model=return_model,
return_info=return_info,
copy=copy,
)
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def magic(
adata: AnnData,
name_list: Union[Literal["all_genes", "pca_only"], Sequence[str], None] = None,
*,
knn: int = 5,
decay: Optional[float] = 1,
knn_max: Optional[int] = None,
t: Union[Literal["auto"], int] = 3,
n_pca: Optional[int] = 100,
solver: Literal["exact", "approximate"] = "exact",
knn_dist: str = "euclidean",
random_state: AnyRandom = None,
n_jobs: Optional[int] = None,
verbose: bool = False,
copy: Optional[bool] = None,
**kwargs,
) -> Optional[AnnData]:
"""\
Markov Affinity-based Graph Imputation of Cells (MAGIC) API [vanDijk18]_.
MAGIC is an algorithm for denoising and transcript recover of single cells
applied to single-cell sequencing data. MAGIC builds a graph from the data
and uses diffusion to smooth out noise and recover the data manifold.
The algorithm implemented here has changed primarily in two ways
compared to the algorithm described in [vanDijk18]_. Firstly, we use
the adaptive kernel described in Moon et al, 2019 [Moon17]_ for
improved stability. Secondly, data diffusion is applied
in the PCA space, rather than the data space, for speed and
memory improvements.
More information and bug reports
`here <https://github.com/KrishnaswamyLab/MAGIC>`__. For help, visit
<https://krishnaswamylab.org/get-help>.
Parameters
----------
adata
An anndata file with `.raw` attribute representing raw counts.
name_list
Denoised genes to return. The default `'all_genes'`/`None`
may require a large amount of memory if the input data is sparse.
Another possibility is `'pca_only'`.
knn
number of nearest neighbors on which to build kernel.
decay
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used.
knn_max
maximum number of nearest neighbors with nonzero connection.
If `None`, will be set to 3 * `knn`.
t
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the Procrustes disparity of the diffused data.
n_pca
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time. If `None`, no PCA is performed.
solver
Which solver to use. "exact" uses the implementation described
in van Dijk et al. (2018) [vanDijk18]_. "approximate" uses a faster
implementation that performs imputation in the PCA space and then
projects back to the gene space. Note, the "approximate" solver may
return negative values.
knn_dist
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix.
random_state
Random seed. Defaults to the global `numpy` random number generator.
n_jobs
Number of threads to use in training. All cores are used by default.
verbose
If `True` or an integer `>= 2`, print status messages.
If `None`, `sc.settings.verbosity` is used.
copy
If true, a copy of anndata is returned. If `None`, `copy` is True if
`genes` is not `'all_genes'` or `'pca_only'`. `copy` may only be False
if `genes` is `'all_genes'` or `'pca_only'`, as the resultant data
will otherwise have different column names from the input data.
kwargs
Additional arguments to `magic.MAGIC`.
Returns
-------
If `copy` is True, AnnData object is returned.
If `subset_genes` is not `all_genes`, PCA on MAGIC values of cells are
stored in `adata.obsm['X_magic']` and `adata.X` is not modified.
The raw counts are stored in `.raw` attribute of AnnData object.
Examples
--------
>>> import scanpy as sc
>>> import scanpy.external as sce
>>> adata = sc.datasets.paul15()
>>> sc.pp.normalize_per_cell(adata)
>>> sc.pp.sqrt(adata) # or sc.pp.log1p(adata)
>>> adata_magic = sce.pp.magic(adata, name_list=['Mpo', 'Klf1', 'Ifitm1'], knn=5)
>>> adata_magic.shape
(2730, 3)
>>> sce.pp.magic(adata, name_list='pca_only', knn=5)
>>> adata.obsm['X_magic'].shape
(2730, 100)
>>> sce.pp.magic(adata, name_list='all_genes', knn=5)
>>> adata.X.shape
(2730, 3451)
"""
try:
from magic import MAGIC, __version__
except ImportError:
raise ImportError(
"Please install magic package via `pip install --user "
"git+git://github.com/KrishnaswamyLab/MAGIC.git#subdirectory=python`"
)
else:
if not version.parse(__version__) >= version.parse(MIN_VERSION):
raise ImportError(
"scanpy requires magic-impute >= "
f"v{MIN_VERSION} (detected: v{__version__}). "
"Please update magic package via `pip install --user "
"--upgrade magic-impute`"
)
start = logg.info("computing MAGIC")
all_or_pca = isinstance(name_list, (str, type(None)))
if all_or_pca and name_list not in {"all_genes", "pca_only", None}:
raise ValueError(
"Invalid string value for `name_list`: "
"Only `'all_genes'` and `'pca_only'` are allowed."
)
if copy is None:
copy = not all_or_pca
elif not all_or_pca and not copy:
raise ValueError(
"Can only perform MAGIC in-place with `name_list=='all_genes' or "
f"`name_list=='pca_only'` (got {name_list}). Consider setting "
"`copy=True`"
)
adata = adata.copy() if copy else adata
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
X_magic = MAGIC(
knn=knn,
decay=decay,
knn_max=knn_max,
t=t,
n_pca=n_pca,
solver=solver,
knn_dist=knn_dist,
random_state=random_state,
n_jobs=n_jobs,
verbose=verbose,
**kwargs,
).fit_transform(adata, genes=name_list)
logg.info(
" finished",
time=start,
deep=(
"added\n 'X_magic', PCA on MAGIC coordinates (adata.obsm)"
if name_list == "pca_only"
else ""
),
)
# update AnnData instance
if name_list == "pca_only":
# special case – update adata.obsm with smoothed values
adata.obsm["X_magic"] = X_magic.X
elif copy:
# just return X_magic
X_magic.raw = adata
adata = X_magic
else:
# replace data with smoothed data
adata.raw = adata
adata.X = X_magic.X
if copy:
return adata
|
def magic(
adata: AnnData,
name_list: Union[Literal["all_genes", "pca_only"], Sequence[str], None] = None,
*,
knn: int = 5,
decay: Optional[float] = 1,
knn_max: Optional[int] = None,
t: Union[Literal["auto"], int] = 3,
n_pca: Optional[int] = 100,
solver: Literal["exact", "approximate"] = "exact",
knn_dist: str = "euclidean",
random_state: Optional[Union[int, RandomState]] = None,
n_jobs: Optional[int] = None,
verbose: bool = False,
copy: Optional[bool] = None,
**kwargs,
) -> Optional[AnnData]:
"""\
Markov Affinity-based Graph Imputation of Cells (MAGIC) API [vanDijk18]_.
MAGIC is an algorithm for denoising and transcript recover of single cells
applied to single-cell sequencing data. MAGIC builds a graph from the data
and uses diffusion to smooth out noise and recover the data manifold.
The algorithm implemented here has changed primarily in two ways
compared to the algorithm described in [vanDijk18]_. Firstly, we use
the adaptive kernel described in Moon et al, 2019 [Moon17]_ for
improved stability. Secondly, data diffusion is applied
in the PCA space, rather than the data space, for speed and
memory improvements.
More information and bug reports
`here <https://github.com/KrishnaswamyLab/MAGIC>`__. For help, visit
<https://krishnaswamylab.org/get-help>.
Parameters
----------
adata
An anndata file with `.raw` attribute representing raw counts.
name_list
Denoised genes to return. The default `'all_genes'`/`None`
may require a large amount of memory if the input data is sparse.
Another possibility is `'pca_only'`.
knn
number of nearest neighbors on which to build kernel.
decay
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used.
knn_max
maximum number of nearest neighbors with nonzero connection.
If `None`, will be set to 3 * `knn`.
t
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the Procrustes disparity of the diffused data.
n_pca
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time. If `None`, no PCA is performed.
solver
Which solver to use. "exact" uses the implementation described
in van Dijk et al. (2018) [vanDijk18]_. "approximate" uses a faster
implementation that performs imputation in the PCA space and then
projects back to the gene space. Note, the "approximate" solver may
return negative values.
knn_dist
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix.
random_state
Random seed. Defaults to the global `numpy` random number generator.
n_jobs
Number of threads to use in training. All cores are used by default.
verbose
If `True` or an integer `>= 2`, print status messages.
If `None`, `sc.settings.verbosity` is used.
copy
If true, a copy of anndata is returned. If `None`, `copy` is True if
`genes` is not `'all_genes'` or `'pca_only'`. `copy` may only be False
if `genes` is `'all_genes'` or `'pca_only'`, as the resultant data
will otherwise have different column names from the input data.
kwargs
Additional arguments to `magic.MAGIC`.
Returns
-------
If `copy` is True, AnnData object is returned.
If `subset_genes` is not `all_genes`, PCA on MAGIC values of cells are
stored in `adata.obsm['X_magic']` and `adata.X` is not modified.
The raw counts are stored in `.raw` attribute of AnnData object.
Examples
--------
>>> import scanpy as sc
>>> import scanpy.external as sce
>>> adata = sc.datasets.paul15()
>>> sc.pp.normalize_per_cell(adata)
>>> sc.pp.sqrt(adata) # or sc.pp.log1p(adata)
>>> adata_magic = sce.pp.magic(adata, name_list=['Mpo', 'Klf1', 'Ifitm1'], knn=5)
>>> adata_magic.shape
(2730, 3)
>>> sce.pp.magic(adata, name_list='pca_only', knn=5)
>>> adata.obsm['X_magic'].shape
(2730, 100)
>>> sce.pp.magic(adata, name_list='all_genes', knn=5)
>>> adata.X.shape
(2730, 3451)
"""
try:
from magic import MAGIC, __version__
except ImportError:
raise ImportError(
"Please install magic package via `pip install --user "
"git+git://github.com/KrishnaswamyLab/MAGIC.git#subdirectory=python`"
)
else:
if not version.parse(__version__) >= version.parse(MIN_VERSION):
raise ImportError(
"scanpy requires magic-impute >= "
f"v{MIN_VERSION} (detected: v{__version__}). "
"Please update magic package via `pip install --user "
"--upgrade magic-impute`"
)
start = logg.info("computing MAGIC")
all_or_pca = isinstance(name_list, (str, type(None)))
if all_or_pca and name_list not in {"all_genes", "pca_only", None}:
raise ValueError(
"Invalid string value for `name_list`: "
"Only `'all_genes'` and `'pca_only'` are allowed."
)
if copy is None:
copy = not all_or_pca
elif not all_or_pca and not copy:
raise ValueError(
"Can only perform MAGIC in-place with `name_list=='all_genes' or "
f"`name_list=='pca_only'` (got {name_list}). Consider setting "
"`copy=True`"
)
adata = adata.copy() if copy else adata
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
X_magic = MAGIC(
knn=knn,
decay=decay,
knn_max=knn_max,
t=t,
n_pca=n_pca,
solver=solver,
knn_dist=knn_dist,
random_state=random_state,
n_jobs=n_jobs,
verbose=verbose,
**kwargs,
).fit_transform(adata, genes=name_list)
logg.info(
" finished",
time=start,
deep=(
"added\n 'X_magic', PCA on MAGIC coordinates (adata.obsm)"
if name_list == "pca_only"
else ""
),
)
# update AnnData instance
if name_list == "pca_only":
# special case – update adata.obsm with smoothed values
adata.obsm["X_magic"] = X_magic.X
elif copy:
# just return X_magic
X_magic.raw = adata
adata = X_magic
else:
# replace data with smoothed data
adata.raw = adata
adata.X = X_magic.X
if copy:
return adata
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def phate(
adata: AnnData,
n_components: int = 2,
k: int = 5,
a: int = 15,
n_landmark: int = 2000,
t: Union[int, str] = "auto",
gamma: float = 1.0,
n_pca: int = 100,
knn_dist: str = "euclidean",
mds_dist: str = "euclidean",
mds: Literal["classic", "metric", "nonmetric"] = "metric",
n_jobs: Optional[int] = None,
random_state: AnyRandom = None,
verbose: Union[bool, int, None] = None,
copy: bool = False,
**kwargs,
) -> Optional[AnnData]:
"""\
PHATE [Moon17]_.
Potential of Heat-diffusion for Affinity-based Trajectory Embedding (PHATE)
embeds high dimensional single-cell data into two or three dimensions for
visualization of biological progressions.
For more information and access to the object-oriented interface, read the
`PHATE documentation <https://phate.readthedocs.io/>`__. For
tutorials, bug reports, and R/MATLAB implementations, visit the `PHATE
GitHub page <https://github.com/KrishnaswamyLab/PHATE/>`__. For help
using PHATE, go `here <https://krishnaswamylab.org/get-help>`__.
Parameters
----------
adata
Annotated data matrix.
n_components
number of dimensions in which the data will be embedded
k
number of nearest neighbors on which to build kernel
a
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark
number of landmarks to use in fast PHATE
t
power to which the diffusion operator is powered
sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
log(n_samples) time.
knn_dist
recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph
mds_dist
recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used
distance metric for MDS
mds
Selects which MDS algorithm is used for dimensionality reduction.
n_jobs
The number of jobs to use for the computation.
If `None`, `sc.settings.n_jobs` is used.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state
Random seed. Defaults to the global `numpy` random number generator
verbose
If `True` or an `int`/`Verbosity` ≥ 2/`hint`, print status messages.
If `None`, `sc.settings.verbosity` is used.
copy
Return a copy instead of writing to `adata`.
kwargs
Additional arguments to `phate.PHATE`
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_phate** : `np.ndarray`, (`adata.obs`, shape=[n_samples, n_components], dtype `float`)
PHATE coordinates of data.
Examples
--------
>>> from anndata import AnnData
>>> import scanpy.external as sce
>>> import phate
>>> tree_data, tree_clusters = phate.tree.gen_dla(
... n_dim=100,
... n_branch=20,
... branch_length=100,
... )
>>> tree_data.shape
(2000, 100)
>>> adata = AnnData(tree_data)
>>> sce.tl.phate(adata, k=5, a=20, t=150)
>>> adata.obsm['X_phate'].shape
(2000, 2)
>>> sce.pl.phate(adata)
"""
start = logg.info("computing PHATE")
adata = adata.copy() if copy else adata
verbosity = settings.verbosity if verbose is None else verbose
verbose = verbosity if isinstance(verbosity, bool) else verbosity >= 2
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
try:
import phate
except ImportError:
raise ImportError(
"You need to install the package `phate`: please run `pip install "
"--user phate` in a terminal."
)
X_phate = phate.PHATE(
n_components=n_components,
k=k,
a=a,
n_landmark=n_landmark,
t=t,
gamma=gamma,
n_pca=n_pca,
knn_dist=knn_dist,
mds_dist=mds_dist,
mds=mds,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
**kwargs,
).fit_transform(adata)
# update AnnData instance
adata.obsm["X_phate"] = X_phate # annotate samples with PHATE coordinates
logg.info(
" finished",
time=start,
deep=("added\n 'X_phate', PHATE coordinates (adata.obsm)"),
)
return adata if copy else None
|
def phate(
adata: AnnData,
n_components: int = 2,
k: int = 5,
a: int = 15,
n_landmark: int = 2000,
t: Union[int, str] = "auto",
gamma: float = 1.0,
n_pca: int = 100,
knn_dist: str = "euclidean",
mds_dist: str = "euclidean",
mds: Literal["classic", "metric", "nonmetric"] = "metric",
n_jobs: Optional[int] = None,
random_state: Optional[Union[int, RandomState]] = None,
verbose: Union[bool, int, None] = None,
copy: bool = False,
**kwargs,
) -> Optional[AnnData]:
"""\
PHATE [Moon17]_.
Potential of Heat-diffusion for Affinity-based Trajectory Embedding (PHATE)
embeds high dimensional single-cell data into two or three dimensions for
visualization of biological progressions.
For more information and access to the object-oriented interface, read the
`PHATE documentation <https://phate.readthedocs.io/>`__. For
tutorials, bug reports, and R/MATLAB implementations, visit the `PHATE
GitHub page <https://github.com/KrishnaswamyLab/PHATE/>`__. For help
using PHATE, go `here <https://krishnaswamylab.org/get-help>`__.
Parameters
----------
adata
Annotated data matrix.
n_components
number of dimensions in which the data will be embedded
k
number of nearest neighbors on which to build kernel
a
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark
number of landmarks to use in fast PHATE
t
power to which the diffusion operator is powered
sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
log(n_samples) time.
knn_dist
recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph
mds_dist
recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used
distance metric for MDS
mds
Selects which MDS algorithm is used for dimensionality reduction.
n_jobs
The number of jobs to use for the computation.
If `None`, `sc.settings.n_jobs` is used.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state
Random seed. Defaults to the global `numpy` random number generator
verbose
If `True` or an `int`/`Verbosity` ≥ 2/`hint`, print status messages.
If `None`, `sc.settings.verbosity` is used.
copy
Return a copy instead of writing to `adata`.
kwargs
Additional arguments to `phate.PHATE`
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_phate** : `np.ndarray`, (`adata.obs`, shape=[n_samples, n_components], dtype `float`)
PHATE coordinates of data.
Examples
--------
>>> from anndata import AnnData
>>> import scanpy.external as sce
>>> import phate
>>> tree_data, tree_clusters = phate.tree.gen_dla(
... n_dim=100,
... n_branch=20,
... branch_length=100,
... )
>>> tree_data.shape
(2000, 100)
>>> adata = AnnData(tree_data)
>>> sce.tl.phate(adata, k=5, a=20, t=150)
>>> adata.obsm['X_phate'].shape
(2000, 2)
>>> sce.pl.phate(adata)
"""
start = logg.info("computing PHATE")
adata = adata.copy() if copy else adata
verbosity = settings.verbosity if verbose is None else verbose
verbose = verbosity if isinstance(verbosity, bool) else verbosity >= 2
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
try:
import phate
except ImportError:
raise ImportError(
"You need to install the package `phate`: please run `pip install "
"--user phate` in a terminal."
)
X_phate = phate.PHATE(
n_components=n_components,
k=k,
a=a,
n_landmark=n_landmark,
t=t,
gamma=gamma,
n_pca=n_pca,
knn_dist=knn_dist,
mds_dist=mds_dist,
mds=mds,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
**kwargs,
).fit_transform(adata)
# update AnnData instance
adata.obsm["X_phate"] = X_phate # annotate samples with PHATE coordinates
logg.info(
" finished",
time=start,
deep=("added\n 'X_phate', PHATE coordinates (adata.obsm)"),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def neighbors(
adata: AnnData,
n_neighbors: int = 15,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
knn: bool = True,
random_state: AnyRandom = 0,
method: Optional[_Method] = "umap",
metric: Union[_Metric, _MetricFn] = "euclidean",
metric_kwds: Mapping[str, Any] = MappingProxyType({}),
copy: bool = False,
) -> Optional[AnnData]:
"""\
Compute a neighborhood graph of observations [McInnes18]_.
The neighbor search efficiency of this heavily relies on UMAP [McInnes18]_,
which also provides a method for estimating connectivities of data points -
the connectivity of the manifold (`method=='umap'`). If `method=='gauss'`,
connectivities are computed according to [Coifman05]_, in the adaption of
[Haghverdi16]_.
Parameters
----------
adata
Annotated data matrix.
n_neighbors
The size of local neighborhood (in terms of number of neighboring data
points) used for manifold approximation. Larger values result in more
global views of the manifold, while smaller values result in more local
data being preserved. In general values should be in the range 2 to 100.
If `knn` is `True`, number of nearest neighbors to be searched. If `knn`
is `False`, a Gaussian kernel width is set to the distance of the
`n_neighbors` neighbor.
{n_pcs}
{use_rep}
knn
If `True`, use a hard threshold to restrict the number of neighbors to
`n_neighbors`, that is, consider a knn graph. Otherwise, use a Gaussian
Kernel to assign low weights to neighbors more distant than the
`n_neighbors` nearest neighbor.
random_state
A numpy random seed.
method
Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_
with adaptive width [Haghverdi16]_) for computing connectivities.
Use 'rapids' for the RAPIDS implementation of UMAP (experimental, GPU
only).
metric
A known metric’s name or a callable that returns a distance.
metric_kwds
Options for the metric.
copy
Return a copy instead of writing to adata.
Returns
-------
Depending on `copy`, updates or returns `adata` with the following:
**connectivities** : sparse matrix (`.uns['neighbors']`, dtype `float32`)
Weighted adjacency matrix of the neighborhood graph of data
points. Weights should be interpreted as connectivities.
**distances** : sparse matrix (`.uns['neighbors']`, dtype `float32`)
Instead of decaying weights, this stores distances for each pair of
neighbors.
"""
start = logg.info("computing neighbors")
adata = adata.copy() if copy else adata
if adata.is_view: # we shouldn't need this here...
adata._init_as_actual(adata.copy())
neighbors = Neighbors(adata)
neighbors.compute_neighbors(
n_neighbors=n_neighbors,
knn=knn,
n_pcs=n_pcs,
use_rep=use_rep,
method=method,
metric=metric,
metric_kwds=metric_kwds,
random_state=random_state,
)
adata.uns["neighbors"] = {}
adata.uns["neighbors"]["params"] = {
"n_neighbors": neighbors.n_neighbors,
"method": method,
}
adata.uns["neighbors"]["params"]["metric"] = metric
if metric_kwds:
adata.uns["neighbors"]["params"]["metric_kwds"] = metric_kwds
if use_rep is not None:
adata.uns["neighbors"]["params"]["use_rep"] = use_rep
if n_pcs is not None:
adata.uns["neighbors"]["params"]["n_pcs"] = n_pcs
adata.uns["neighbors"]["distances"] = neighbors.distances
adata.uns["neighbors"]["connectivities"] = neighbors.connectivities
if neighbors.rp_forest is not None:
adata.uns["neighbors"]["rp_forest"] = neighbors.rp_forest
logg.info(
" finished",
time=start,
deep=(
"added to `.uns['neighbors']`\n"
" 'distances', distances for each pair of neighbors\n"
" 'connectivities', weighted adjacency matrix"
),
)
return adata if copy else None
|
def neighbors(
adata: AnnData,
n_neighbors: int = 15,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
knn: bool = True,
random_state: Optional[Union[int, RandomState]] = 0,
method: Optional[_Method] = "umap",
metric: Union[_Metric, _MetricFn] = "euclidean",
metric_kwds: Mapping[str, Any] = MappingProxyType({}),
copy: bool = False,
) -> Optional[AnnData]:
"""\
Compute a neighborhood graph of observations [McInnes18]_.
The neighbor search efficiency of this heavily relies on UMAP [McInnes18]_,
which also provides a method for estimating connectivities of data points -
the connectivity of the manifold (`method=='umap'`). If `method=='gauss'`,
connectivities are computed according to [Coifman05]_, in the adaption of
[Haghverdi16]_.
Parameters
----------
adata
Annotated data matrix.
n_neighbors
The size of local neighborhood (in terms of number of neighboring data
points) used for manifold approximation. Larger values result in more
global views of the manifold, while smaller values result in more local
data being preserved. In general values should be in the range 2 to 100.
If `knn` is `True`, number of nearest neighbors to be searched. If `knn`
is `False`, a Gaussian kernel width is set to the distance of the
`n_neighbors` neighbor.
{n_pcs}
{use_rep}
knn
If `True`, use a hard threshold to restrict the number of neighbors to
`n_neighbors`, that is, consider a knn graph. Otherwise, use a Gaussian
Kernel to assign low weights to neighbors more distant than the
`n_neighbors` nearest neighbor.
random_state
A numpy random seed.
method
Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_
with adaptive width [Haghverdi16]_) for computing connectivities.
Use 'rapids' for the RAPIDS implementation of UMAP (experimental, GPU
only).
metric
A known metric’s name or a callable that returns a distance.
metric_kwds
Options for the metric.
copy
Return a copy instead of writing to adata.
Returns
-------
Depending on `copy`, updates or returns `adata` with the following:
**connectivities** : sparse matrix (`.uns['neighbors']`, dtype `float32`)
Weighted adjacency matrix of the neighborhood graph of data
points. Weights should be interpreted as connectivities.
**distances** : sparse matrix (`.uns['neighbors']`, dtype `float32`)
Instead of decaying weights, this stores distances for each pair of
neighbors.
"""
start = logg.info("computing neighbors")
adata = adata.copy() if copy else adata
if adata.is_view: # we shouldn't need this here...
adata._init_as_actual(adata.copy())
neighbors = Neighbors(adata)
neighbors.compute_neighbors(
n_neighbors=n_neighbors,
knn=knn,
n_pcs=n_pcs,
use_rep=use_rep,
method=method,
metric=metric,
metric_kwds=metric_kwds,
random_state=random_state,
)
adata.uns["neighbors"] = {}
adata.uns["neighbors"]["params"] = {
"n_neighbors": neighbors.n_neighbors,
"method": method,
}
adata.uns["neighbors"]["params"]["metric"] = metric
if metric_kwds:
adata.uns["neighbors"]["params"]["metric_kwds"] = metric_kwds
if use_rep is not None:
adata.uns["neighbors"]["params"]["use_rep"] = use_rep
if n_pcs is not None:
adata.uns["neighbors"]["params"]["n_pcs"] = n_pcs
adata.uns["neighbors"]["distances"] = neighbors.distances
adata.uns["neighbors"]["connectivities"] = neighbors.connectivities
if neighbors.rp_forest is not None:
adata.uns["neighbors"]["rp_forest"] = neighbors.rp_forest
logg.info(
" finished",
time=start,
deep=(
"added to `.uns['neighbors']`\n"
" 'distances', distances for each pair of neighbors\n"
" 'connectivities', weighted adjacency matrix"
),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def compute_neighbors_umap(
X: Union[np.ndarray, csr_matrix],
n_neighbors: int,
random_state: AnyRandom = None,
metric: Union[_Metric, _MetricFn] = "euclidean",
metric_kwds: Mapping[str, Any] = MappingProxyType({}),
angular: bool = False,
verbose: bool = False,
):
"""This is from umap.fuzzy_simplicial_set [McInnes18]_.
Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
Parameters
----------
X: array of shape (n_samples, n_features)
The data to be modelled as a fuzzy simplicial set.
n_neighbors
The number of neighbors to use to approximate geodesic distance.
Larger numbers induce more global estimates of the manifold that can
miss finer detail, while smaller values will focus on fine manifold
structure to the detriment of the larger picture.
random_state
A state capable being used as a numpy random state.
metric
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean
* manhattan
* chebyshev
* minkowski
* canberra
* braycurtis
* mahalanobis
* wminkowski
* seuclidean
* cosine
* correlation
* haversine
* hamming
* jaccard
* dice
* russelrao
* kulsinski
* rogerstanimoto
* sokalmichener
* sokalsneath
* yule
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
metric_kwds
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance.
angular
Whether to use angular/cosine distance for the random projection
forest for seeding NN-descent to determine approximate nearest
neighbors.
verbose
Whether to report information on the current progress of the algorithm.
Returns
-------
**knn_indices**, **knn_dists** : np.arrays of shape (n_observations, n_neighbors)
"""
from umap.umap_ import nearest_neighbors
random_state = check_random_state(random_state)
knn_indices, knn_dists, forest = nearest_neighbors(
X,
n_neighbors,
random_state=random_state,
metric=metric,
metric_kwds=metric_kwds,
angular=angular,
verbose=verbose,
)
return knn_indices, knn_dists, forest
|
def compute_neighbors_umap(
X: Union[np.ndarray, csr_matrix],
n_neighbors: int,
random_state: Optional[Union[int, RandomState]] = None,
metric: Union[_Metric, _MetricFn] = "euclidean",
metric_kwds: Mapping[str, Any] = MappingProxyType({}),
angular: bool = False,
verbose: bool = False,
):
"""This is from umap.fuzzy_simplicial_set [McInnes18]_.
Given a set of data X, a neighborhood size, and a measure of distance
compute the fuzzy simplicial set (here represented as a fuzzy graph in
the form of a sparse matrix) associated to the data. This is done by
locally approximating geodesic distance at each point, creating a fuzzy
simplicial set for each such point, and then combining all the local
fuzzy simplicial sets into a global one via a fuzzy union.
Parameters
----------
X: array of shape (n_samples, n_features)
The data to be modelled as a fuzzy simplicial set.
n_neighbors
The number of neighbors to use to approximate geodesic distance.
Larger numbers induce more global estimates of the manifold that can
miss finer detail, while smaller values will focus on fine manifold
structure to the detriment of the larger picture.
random_state
A state capable being used as a numpy random state.
metric
The metric to use to compute distances in high dimensional space.
If a string is passed it must match a valid predefined metric. If
a general metric is required a function that takes two 1d arrays and
returns a float can be provided. For performance purposes it is
required that this be a numba jit'd function. Valid string metrics
include:
* euclidean
* manhattan
* chebyshev
* minkowski
* canberra
* braycurtis
* mahalanobis
* wminkowski
* seuclidean
* cosine
* correlation
* haversine
* hamming
* jaccard
* dice
* russelrao
* kulsinski
* rogerstanimoto
* sokalmichener
* sokalsneath
* yule
Metrics that take arguments (such as minkowski, mahalanobis etc.)
can have arguments passed via the metric_kwds dictionary. At this
time care must be taken and dictionary elements must be ordered
appropriately; this will hopefully be fixed in the future.
metric_kwds
Arguments to pass on to the metric, such as the ``p`` value for
Minkowski distance.
angular
Whether to use angular/cosine distance for the random projection
forest for seeding NN-descent to determine approximate nearest
neighbors.
verbose
Whether to report information on the current progress of the algorithm.
Returns
-------
**knn_indices**, **knn_dists** : np.arrays of shape (n_observations, n_neighbors)
"""
from umap.umap_ import nearest_neighbors
random_state = check_random_state(random_state)
knn_indices, knn_dists, forest = nearest_neighbors(
X,
n_neighbors,
random_state=random_state,
metric=metric,
metric_kwds=metric_kwds,
angular=angular,
verbose=verbose,
)
return knn_indices, knn_dists, forest
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def compute_neighbors(
self,
n_neighbors: int = 30,
knn: bool = True,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
method: _Method = "umap",
random_state: AnyRandom = 0,
write_knn_indices: bool = False,
metric: _Metric = "euclidean",
metric_kwds: Mapping[str, Any] = MappingProxyType({}),
) -> None:
"""\
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`.
"""
from sklearn.metrics import pairwise_distances
start_neighbors = logg.debug("computing neighbors")
if n_neighbors > self._adata.shape[0]: # very small datasets
n_neighbors = 1 + int(0.5 * self._adata.shape[0])
logg.warning(f"n_obs too small: adjusting to `n_neighbors = {n_neighbors}`")
if method == "umap" and not knn:
raise ValueError("`method = 'umap' only with `knn = True`.")
if method == "rapids" and metric != "euclidean":
raise ValueError("`method` 'rapids' only supports the 'euclidean' `metric`.")
if method not in {"umap", "gauss", "rapids"}:
raise ValueError("`method` needs to be 'umap', 'gauss', or 'rapids'.")
if self._adata.shape[0] >= 10000 and not knn:
logg.warning("Using high n_obs without `knn=True` takes a lot of memory...")
self.n_neighbors = n_neighbors
self.knn = knn
X = _choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs)
# neighbor search
use_dense_distances = (metric == "euclidean" and X.shape[0] < 8192) or knn == False
if use_dense_distances:
_distances = pairwise_distances(X, metric=metric, **metric_kwds)
knn_indices, knn_distances = _get_indices_distances_from_dense_matrix(
_distances, n_neighbors
)
if knn:
self._distances = _get_sparse_matrix_from_indices_distances_numpy(
knn_indices, knn_distances, X.shape[0], n_neighbors
)
else:
self._distances = _distances
elif method == "rapids":
knn_indices, knn_distances = compute_neighbors_rapids(X, n_neighbors)
else:
# non-euclidean case and approx nearest neighbors
if X.shape[0] < 4096:
X = pairwise_distances(X, metric=metric, **metric_kwds)
metric = "precomputed"
knn_indices, knn_distances, forest = compute_neighbors_umap(
X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds
)
# very cautious here
try:
if forest:
self._rp_forest = _make_forest_dict(forest)
except:
pass
# write indices as attributes
if write_knn_indices:
self.knn_indices = knn_indices
self.knn_distances = knn_distances
start_connect = logg.debug("computed neighbors", time=start_neighbors)
if not use_dense_distances or method in {"umap", "rapids"}:
# we need self._distances also for method == 'gauss' if we didn't
# use dense distances
self._distances, self._connectivities = _compute_connectivities_umap(
knn_indices,
knn_distances,
self._adata.shape[0],
self.n_neighbors,
)
# overwrite the umap connectivities if method is 'gauss'
# self._distances is unaffected by this
if method == "gauss":
self._compute_connectivities_diffmap()
logg.debug("computed connectivities", time=start_connect)
self._number_connected_components = 1
if issparse(self._connectivities):
from scipy.sparse.csgraph import connected_components
self._connected_components = connected_components(self._connectivities)
self._number_connected_components = self._connected_components[0]
|
def compute_neighbors(
self,
n_neighbors: int = 30,
knn: bool = True,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
method: _Method = "umap",
random_state: Optional[Union[int, RandomState]] = 0,
write_knn_indices: bool = False,
metric: _Metric = "euclidean",
metric_kwds: Mapping[str, Any] = MappingProxyType({}),
) -> None:
"""\
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`.
"""
from sklearn.metrics import pairwise_distances
start_neighbors = logg.debug("computing neighbors")
if n_neighbors > self._adata.shape[0]: # very small datasets
n_neighbors = 1 + int(0.5 * self._adata.shape[0])
logg.warning(f"n_obs too small: adjusting to `n_neighbors = {n_neighbors}`")
if method == "umap" and not knn:
raise ValueError("`method = 'umap' only with `knn = True`.")
if method == "rapids" and metric != "euclidean":
raise ValueError("`method` 'rapids' only supports the 'euclidean' `metric`.")
if method not in {"umap", "gauss", "rapids"}:
raise ValueError("`method` needs to be 'umap', 'gauss', or 'rapids'.")
if self._adata.shape[0] >= 10000 and not knn:
logg.warning("Using high n_obs without `knn=True` takes a lot of memory...")
self.n_neighbors = n_neighbors
self.knn = knn
X = _choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs)
# neighbor search
use_dense_distances = (metric == "euclidean" and X.shape[0] < 8192) or knn == False
if use_dense_distances:
_distances = pairwise_distances(X, metric=metric, **metric_kwds)
knn_indices, knn_distances = _get_indices_distances_from_dense_matrix(
_distances, n_neighbors
)
if knn:
self._distances = _get_sparse_matrix_from_indices_distances_numpy(
knn_indices, knn_distances, X.shape[0], n_neighbors
)
else:
self._distances = _distances
elif method == "rapids":
knn_indices, knn_distances = compute_neighbors_rapids(X, n_neighbors)
else:
# non-euclidean case and approx nearest neighbors
if X.shape[0] < 4096:
X = pairwise_distances(X, metric=metric, **metric_kwds)
metric = "precomputed"
knn_indices, knn_distances, forest = compute_neighbors_umap(
X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds
)
# very cautious here
try:
if forest:
self._rp_forest = _make_forest_dict(forest)
except:
pass
# write indices as attributes
if write_knn_indices:
self.knn_indices = knn_indices
self.knn_distances = knn_distances
start_connect = logg.debug("computed neighbors", time=start_neighbors)
if not use_dense_distances or method in {"umap", "rapids"}:
# we need self._distances also for method == 'gauss' if we didn't
# use dense distances
self._distances, self._connectivities = _compute_connectivities_umap(
knn_indices,
knn_distances,
self._adata.shape[0],
self.n_neighbors,
)
# overwrite the umap connectivities if method is 'gauss'
# self._distances is unaffected by this
if method == "gauss":
self._compute_connectivities_diffmap()
logg.debug("computed connectivities", time=start_connect)
self._number_connected_components = 1
if issparse(self._connectivities):
from scipy.sparse.csgraph import connected_components
self._connected_components = connected_components(self._connectivities)
self._number_connected_components = self._connected_components[0]
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def recipe_weinreb17(
adata: AnnData,
log: bool = True,
mean_threshold: float = 0.01,
cv_threshold: int = 2,
n_pcs: int = 50,
svd_solver="randomized",
random_state: AnyRandom = 0,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Normalization and filtering as of [Weinreb17]_.
Expects non-logarithmized data.
If using logarithmized data, pass `log=False`.
Parameters
----------
adata
Annotated data matrix.
log
Logarithmize data?
copy
Return a copy if true.
"""
from scipy.sparse import issparse
if issparse(adata.X):
raise ValueError("`recipe_weinreb16 does not support sparse matrices.")
if copy:
adata = adata.copy()
if log:
pp.log1p(adata)
adata.X = pp.normalize_per_cell_weinreb16_deprecated(
adata.X, max_fraction=0.05, mult_with_mean=True
)
gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
adata._inplace_subset_var(gene_subset) # this modifies the object itself
X_pca = pp.pca(
pp.zscore_deprecated(adata.X),
n_comps=n_pcs,
svd_solver=svd_solver,
random_state=random_state,
)
# update adata
adata.obsm["X_pca"] = X_pca
return adata if copy else None
|
def recipe_weinreb17(
adata: AnnData,
log: bool = True,
mean_threshold: float = 0.01,
cv_threshold: int = 2,
n_pcs: int = 50,
svd_solver="randomized",
random_state: Union[int, RandomState] = 0,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Normalization and filtering as of [Weinreb17]_.
Expects non-logarithmized data.
If using logarithmized data, pass `log=False`.
Parameters
----------
adata
Annotated data matrix.
log
Logarithmize data?
copy
Return a copy if true.
"""
from scipy.sparse import issparse
if issparse(adata.X):
raise ValueError("`recipe_weinreb16 does not support sparse matrices.")
if copy:
adata = adata.copy()
if log:
pp.log1p(adata)
adata.X = pp.normalize_per_cell_weinreb16_deprecated(
adata.X, max_fraction=0.05, mult_with_mean=True
)
gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
adata._inplace_subset_var(gene_subset) # this modifies the object itself
X_pca = pp.pca(
pp.zscore_deprecated(adata.X),
n_comps=n_pcs,
svd_solver=svd_solver,
random_state=random_state,
)
# update adata
adata.obsm["X_pca"] = X_pca
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def pca(
data: Union[AnnData, np.ndarray, spmatrix],
n_comps: Optional[int] = None,
zero_center: Optional[bool] = True,
svd_solver: str = "arpack",
random_state: AnyRandom = 0,
return_info: bool = False,
use_highly_variable: Optional[bool] = None,
dtype: str = "float32",
copy: bool = False,
chunked: bool = False,
chunk_size: Optional[int] = None,
) -> Union[AnnData, np.ndarray, spmatrix]:
"""\
Principal component analysis [Pedregosa11]_.
Computes PCA coordinates, loadings and variance decomposition.
Uses the implementation of *scikit-learn* [Pedregosa11]_.
Parameters
----------
data
The (annotated) data matrix of shape `n_obs` × `n_vars`.
Rows correspond to cells and columns to genes.
n_comps
Number of principal components to compute. Defaults to 50, or 1 - minimum
dimension size of selected representation.
zero_center
If `True`, compute standard PCA from covariance matrix.
If `False`, omit zero-centering variables
(uses :class:`~sklearn.decomposition.TruncatedSVD`),
which allows to handle sparse input efficiently.
Passing `None` decides automatically based on sparseness of the data.
svd_solver
SVD solver to use:
`'arpack'`
for the ARPACK wrapper in SciPy (:func:`~scipy.sparse.linalg.svds`)
`'randomized'`
for the randomized algorithm due to Halko (2009).
`'auto'` (the default)
chooses automatically depending on the size of the problem.
.. versionchanged:: 1.4.5
Default value changed from `'auto'` to `'arpack'`.
random_state
Change to use different initial states for the optimization.
return_info
Only relevant when not passing an :class:`~anndata.AnnData`:
see “**Returns**”.
use_highly_variable
Whether to use highly variable genes only, stored in
`.var['highly_variable']`.
By default uses them if they have been determined beforehand.
dtype
Numpy data type string to which to convert the result.
copy
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned. Is ignored otherwise.
chunked
If `True`, perform an incremental PCA on segments of `chunk_size`.
The incremental PCA automatically zero centers and ignores settings of
`random_seed` and `svd_solver`. If `False`, perform a full PCA.
chunk_size
Number of observations to include in each chunk.
Required if `chunked=True` was passed.
Returns
-------
X_pca : :class:`~scipy.sparse.spmatrix`, :class:`~numpy.ndarray`
If `data` is array-like and `return_info=False` was passed,
this function only returns `X_pca`…
adata : anndata.AnnData
…otherwise if `copy=True` it returns or else adds fields to `adata`:
`.obsm['X_pca']`
PCA representation of data.
`.varm['PCs']`
The principal components containing the loadings.
`.uns['pca']['variance_ratio']`
Ratio of explained variance.
`.uns['pca']['variance']`
Explained variance, equivalent to the eigenvalues of the
covariance matrix.
"""
# chunked calculation is not randomized, anyways
if svd_solver in {"auto", "randomized"} and not chunked:
logg.info(
"Note that scikit-learn's randomized PCA might not be exactly "
"reproducible across different computational platforms. For exact "
"reproducibility, choose `svd_solver='arpack'.`"
)
data_is_AnnData = isinstance(data, AnnData)
if data_is_AnnData:
adata = data.copy() if copy else data
else:
adata = AnnData(data)
if use_highly_variable is True and "highly_variable" not in adata.var.keys():
raise ValueError(
"Did not find adata.var['highly_variable']. "
"Either your data already only consists of highly-variable genes "
"or consider running `pp.highly_variable_genes` first."
)
if use_highly_variable is None:
use_highly_variable = True if "highly_variable" in adata.var.keys() else False
if use_highly_variable:
logg.info(" on highly variable genes")
adata_comp = (
adata[:, adata.var["highly_variable"]] if use_highly_variable else adata
)
if n_comps is None:
min_dim = min(adata_comp.n_vars, adata_comp.n_obs)
if N_PCS >= min_dim:
n_comps = min_dim - 1
else:
n_comps = N_PCS
start = logg.info(f"computing PCA with n_comps = {n_comps}")
if chunked:
if not zero_center or random_state or svd_solver != "arpack":
logg.debug("Ignoring zero_center, random_state, svd_solver")
from sklearn.decomposition import IncrementalPCA
X_pca = np.zeros((adata_comp.X.shape[0], n_comps), adata_comp.X.dtype)
pca_ = IncrementalPCA(n_components=n_comps)
for chunk, _, _ in adata_comp.chunked_X(chunk_size):
chunk = chunk.toarray() if issparse(chunk) else chunk
pca_.partial_fit(chunk)
for chunk, start, end in adata_comp.chunked_X(chunk_size):
chunk = chunk.toarray() if issparse(chunk) else chunk
X_pca[start:end] = pca_.transform(chunk)
else:
if zero_center is None:
zero_center = not issparse(adata_comp.X)
if zero_center:
from sklearn.decomposition import PCA
if issparse(adata_comp.X):
logg.debug(
" as `zero_center=True`, "
"sparse input is densified and may "
"lead to huge memory consumption",
)
X = (
adata_comp.X.toarray()
) # Copying the whole adata_comp.X here, could cause memory problems
else:
X = adata_comp.X
pca_ = PCA(
n_components=n_comps, svd_solver=svd_solver, random_state=random_state
)
else:
from sklearn.decomposition import TruncatedSVD
logg.debug(
" without zero-centering: \n"
" the explained variance does not correspond to the exact statistical defintion\n"
" the first component, e.g., might be heavily influenced by different means\n"
" the following components often resemble the exact PCA very closely"
)
pca_ = TruncatedSVD(n_components=n_comps, random_state=random_state)
X = adata_comp.X
X_pca = pca_.fit_transform(X)
if X_pca.dtype.descr != np.dtype(dtype).descr:
X_pca = X_pca.astype(dtype)
if data_is_AnnData:
adata.obsm["X_pca"] = X_pca
adata.uns["pca"] = {}
adata.uns["pca"]["params"] = {
"zero_center": zero_center,
"use_highly_variable": use_highly_variable,
}
if use_highly_variable:
adata.varm["PCs"] = np.zeros(shape=(adata.n_vars, n_comps))
adata.varm["PCs"][adata.var["highly_variable"]] = pca_.components_.T
else:
adata.varm["PCs"] = pca_.components_.T
adata.uns["pca"]["variance"] = pca_.explained_variance_
adata.uns["pca"]["variance_ratio"] = pca_.explained_variance_ratio_
logg.info(" finished", time=start)
logg.debug(
"and added\n"
" 'X_pca', the PCA coordinates (adata.obs)\n"
" 'PC1', 'PC2', ..., the loadings (adata.var)\n"
" 'pca_variance', the variance / eigenvalues (adata.uns)\n"
" 'pca_variance_ratio', the variance ratio (adata.uns)"
)
return adata if copy else None
else:
logg.info(" finished", time=start)
if return_info:
return (
X_pca,
pca_.components_,
pca_.explained_variance_ratio_,
pca_.explained_variance_,
)
else:
return X_pca
|
def pca(
data: Union[AnnData, np.ndarray, spmatrix],
n_comps: Optional[int] = None,
zero_center: Optional[bool] = True,
svd_solver: str = "arpack",
random_state: Optional[Union[int, RandomState]] = 0,
return_info: bool = False,
use_highly_variable: Optional[bool] = None,
dtype: str = "float32",
copy: bool = False,
chunked: bool = False,
chunk_size: Optional[int] = None,
) -> Union[AnnData, np.ndarray, spmatrix]:
"""\
Principal component analysis [Pedregosa11]_.
Computes PCA coordinates, loadings and variance decomposition.
Uses the implementation of *scikit-learn* [Pedregosa11]_.
Parameters
----------
data
The (annotated) data matrix of shape `n_obs` × `n_vars`.
Rows correspond to cells and columns to genes.
n_comps
Number of principal components to compute. Defaults to 50, or 1 - minimum
dimension size of selected representation.
zero_center
If `True`, compute standard PCA from covariance matrix.
If `False`, omit zero-centering variables
(uses :class:`~sklearn.decomposition.TruncatedSVD`),
which allows to handle sparse input efficiently.
Passing `None` decides automatically based on sparseness of the data.
svd_solver
SVD solver to use:
`'arpack'`
for the ARPACK wrapper in SciPy (:func:`~scipy.sparse.linalg.svds`)
`'randomized'`
for the randomized algorithm due to Halko (2009).
`'auto'` (the default)
chooses automatically depending on the size of the problem.
.. versionchanged:: 1.4.5
Default value changed from `'auto'` to `'arpack'`.
random_state
Change to use different initial states for the optimization.
return_info
Only relevant when not passing an :class:`~anndata.AnnData`:
see “**Returns**”.
use_highly_variable
Whether to use highly variable genes only, stored in
`.var['highly_variable']`.
By default uses them if they have been determined beforehand.
dtype
Numpy data type string to which to convert the result.
copy
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned. Is ignored otherwise.
chunked
If `True`, perform an incremental PCA on segments of `chunk_size`.
The incremental PCA automatically zero centers and ignores settings of
`random_seed` and `svd_solver`. If `False`, perform a full PCA.
chunk_size
Number of observations to include in each chunk.
Required if `chunked=True` was passed.
Returns
-------
X_pca : :class:`~scipy.sparse.spmatrix`, :class:`~numpy.ndarray`
If `data` is array-like and `return_info=False` was passed,
this function only returns `X_pca`…
adata : anndata.AnnData
…otherwise if `copy=True` it returns or else adds fields to `adata`:
`.obsm['X_pca']`
PCA representation of data.
`.varm['PCs']`
The principal components containing the loadings.
`.uns['pca']['variance_ratio']`
Ratio of explained variance.
`.uns['pca']['variance']`
Explained variance, equivalent to the eigenvalues of the
covariance matrix.
"""
# chunked calculation is not randomized, anyways
if svd_solver in {"auto", "randomized"} and not chunked:
logg.info(
"Note that scikit-learn's randomized PCA might not be exactly "
"reproducible across different computational platforms. For exact "
"reproducibility, choose `svd_solver='arpack'.`"
)
data_is_AnnData = isinstance(data, AnnData)
if data_is_AnnData:
adata = data.copy() if copy else data
else:
adata = AnnData(data)
if use_highly_variable is True and "highly_variable" not in adata.var.keys():
raise ValueError(
"Did not find adata.var['highly_variable']. "
"Either your data already only consists of highly-variable genes "
"or consider running `pp.highly_variable_genes` first."
)
if use_highly_variable is None:
use_highly_variable = True if "highly_variable" in adata.var.keys() else False
if use_highly_variable:
logg.info(" on highly variable genes")
adata_comp = (
adata[:, adata.var["highly_variable"]] if use_highly_variable else adata
)
if n_comps is None:
min_dim = min(adata_comp.n_vars, adata_comp.n_obs)
if N_PCS >= min_dim:
n_comps = min_dim - 1
else:
n_comps = N_PCS
start = logg.info(f"computing PCA with n_comps = {n_comps}")
if chunked:
if not zero_center or random_state or svd_solver != "arpack":
logg.debug("Ignoring zero_center, random_state, svd_solver")
from sklearn.decomposition import IncrementalPCA
X_pca = np.zeros((adata_comp.X.shape[0], n_comps), adata_comp.X.dtype)
pca_ = IncrementalPCA(n_components=n_comps)
for chunk, _, _ in adata_comp.chunked_X(chunk_size):
chunk = chunk.toarray() if issparse(chunk) else chunk
pca_.partial_fit(chunk)
for chunk, start, end in adata_comp.chunked_X(chunk_size):
chunk = chunk.toarray() if issparse(chunk) else chunk
X_pca[start:end] = pca_.transform(chunk)
else:
if zero_center is None:
zero_center = not issparse(adata_comp.X)
if zero_center:
from sklearn.decomposition import PCA
if issparse(adata_comp.X):
logg.debug(
" as `zero_center=True`, "
"sparse input is densified and may "
"lead to huge memory consumption",
)
X = (
adata_comp.X.toarray()
) # Copying the whole adata_comp.X here, could cause memory problems
else:
X = adata_comp.X
pca_ = PCA(
n_components=n_comps, svd_solver=svd_solver, random_state=random_state
)
else:
from sklearn.decomposition import TruncatedSVD
logg.debug(
" without zero-centering: \n"
" the explained variance does not correspond to the exact statistical defintion\n"
" the first component, e.g., might be heavily influenced by different means\n"
" the following components often resemble the exact PCA very closely"
)
pca_ = TruncatedSVD(n_components=n_comps, random_state=random_state)
X = adata_comp.X
X_pca = pca_.fit_transform(X)
if X_pca.dtype.descr != np.dtype(dtype).descr:
X_pca = X_pca.astype(dtype)
if data_is_AnnData:
adata.obsm["X_pca"] = X_pca
adata.uns["pca"] = {}
adata.uns["pca"]["params"] = {
"zero_center": zero_center,
"use_highly_variable": use_highly_variable,
}
if use_highly_variable:
adata.varm["PCs"] = np.zeros(shape=(adata.n_vars, n_comps))
adata.varm["PCs"][adata.var["highly_variable"]] = pca_.components_.T
else:
adata.varm["PCs"] = pca_.components_.T
adata.uns["pca"]["variance"] = pca_.explained_variance_
adata.uns["pca"]["variance_ratio"] = pca_.explained_variance_ratio_
logg.info(" finished", time=start)
logg.debug(
"and added\n"
" 'X_pca', the PCA coordinates (adata.obs)\n"
" 'PC1', 'PC2', ..., the loadings (adata.var)\n"
" 'pca_variance', the variance / eigenvalues (adata.uns)\n"
" 'pca_variance_ratio', the variance ratio (adata.uns)"
)
return adata if copy else None
else:
logg.info(" finished", time=start)
if return_info:
return (
X_pca,
pca_.components_,
pca_.explained_variance_ratio_,
pca_.explained_variance_,
)
else:
return X_pca
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def subsample(
data: Union[AnnData, np.ndarray, spmatrix],
fraction: Optional[float] = None,
n_obs: Optional[int] = None,
random_state: AnyRandom = 0,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Subsample to a fraction of the number of observations.
Parameters
----------
data
The (annotated) data matrix of shape `n_obs` × `n_vars`.
Rows correspond to cells and columns to genes.
fraction
Subsample to this `fraction` of the number of observations.
n_obs
Subsample to this number of observations.
random_state
Random seed to change subsampling.
copy
If an :class:`~anndata.AnnData` is passed,
determines whether a copy is returned.
Returns
-------
Returns `X[obs_indices], obs_indices` if data is array-like, otherwise
subsamples the passed :class:`~anndata.AnnData` (`copy == False`) or
returns a subsampled copy of it (`copy == True`).
"""
np.random.seed(random_state)
old_n_obs = data.n_obs if isinstance(data, AnnData) else data.shape[0]
if n_obs is not None:
new_n_obs = n_obs
elif fraction is not None:
if fraction > 1 or fraction < 0:
raise ValueError(f"`fraction` needs to be within [0, 1], not {fraction}")
new_n_obs = int(fraction * old_n_obs)
logg.debug(f"... subsampled to {new_n_obs} data points")
else:
raise ValueError("Either pass `n_obs` or `fraction`.")
obs_indices = np.random.choice(old_n_obs, size=new_n_obs, replace=False)
if isinstance(data, AnnData):
if copy:
return data[obs_indices].copy()
else:
data._inplace_subset_obs(obs_indices)
else:
X = data
return X[obs_indices], obs_indices
|
def subsample(
data: Union[AnnData, np.ndarray, spmatrix],
fraction: Optional[float] = None,
n_obs: Optional[int] = None,
random_state: Union[int, RandomState] = 0,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Subsample to a fraction of the number of observations.
Parameters
----------
data
The (annotated) data matrix of shape `n_obs` × `n_vars`.
Rows correspond to cells and columns to genes.
fraction
Subsample to this `fraction` of the number of observations.
n_obs
Subsample to this number of observations.
random_state
Random seed to change subsampling.
copy
If an :class:`~anndata.AnnData` is passed,
determines whether a copy is returned.
Returns
-------
Returns `X[obs_indices], obs_indices` if data is array-like, otherwise
subsamples the passed :class:`~anndata.AnnData` (`copy == False`) or
returns a subsampled copy of it (`copy == True`).
"""
np.random.seed(random_state)
old_n_obs = data.n_obs if isinstance(data, AnnData) else data.shape[0]
if n_obs is not None:
new_n_obs = n_obs
elif fraction is not None:
if fraction > 1 or fraction < 0:
raise ValueError(f"`fraction` needs to be within [0, 1], not {fraction}")
new_n_obs = int(fraction * old_n_obs)
logg.debug(f"... subsampled to {new_n_obs} data points")
else:
raise ValueError("Either pass `n_obs` or `fraction`.")
obs_indices = np.random.choice(old_n_obs, size=new_n_obs, replace=False)
if isinstance(data, AnnData):
if copy:
return data[obs_indices].copy()
else:
data._inplace_subset_obs(obs_indices)
else:
X = data
return X[obs_indices], obs_indices
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def downsample_counts(
adata: AnnData,
counts_per_cell: Optional[Union[int, Collection[int]]] = None,
total_counts: Optional[int] = None,
*,
random_state: AnyRandom = 0,
replace: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Downsample counts from count matrix.
If `counts_per_cell` is specified, each cell will downsampled.
If `total_counts` is specified, expression matrix will be downsampled to
contain at most `total_counts`.
Parameters
----------
adata
Annotated data matrix.
counts_per_cell
Target total counts per cell. If a cell has more than 'counts_per_cell',
it will be downsampled to this number. Resulting counts can be specified
on a per cell basis by passing an array.Should be an integer or integer
ndarray with same length as number of obs.
total_counts
Target total counts. If the count matrix has more than `total_counts`
it will be downsampled to have this number.
random_state
Random seed for subsampling.
replace
Whether to sample the counts with replacement.
copy
Determines whether a copy of `adata` is returned.
Returns
-------
Depending on `copy` returns or updates an `adata` with downsampled `.X`.
"""
# This logic is all dispatch
total_counts_call = total_counts is not None
counts_per_cell_call = counts_per_cell is not None
if total_counts_call is counts_per_cell_call:
raise ValueError(
"Must specify exactly one of `total_counts` or `counts_per_cell`."
)
if copy:
adata = adata.copy()
if total_counts_call:
adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace)
elif counts_per_cell_call:
adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace)
if copy:
return adata
|
def downsample_counts(
adata: AnnData,
counts_per_cell: Optional[Union[int, Collection[int]]] = None,
total_counts: Optional[int] = None,
*,
random_state: Optional[Union[int, RandomState]] = 0,
replace: bool = False,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Downsample counts from count matrix.
If `counts_per_cell` is specified, each cell will downsampled.
If `total_counts` is specified, expression matrix will be downsampled to
contain at most `total_counts`.
Parameters
----------
adata
Annotated data matrix.
counts_per_cell
Target total counts per cell. If a cell has more than 'counts_per_cell',
it will be downsampled to this number. Resulting counts can be specified
on a per cell basis by passing an array.Should be an integer or integer
ndarray with same length as number of obs.
total_counts
Target total counts. If the count matrix has more than `total_counts`
it will be downsampled to have this number.
random_state
Random seed for subsampling.
replace
Whether to sample the counts with replacement.
copy
Determines whether a copy of `adata` is returned.
Returns
-------
Depending on `copy` returns or updates an `adata` with downsampled `.X`.
"""
# This logic is all dispatch
total_counts_call = total_counts is not None
counts_per_cell_call = counts_per_cell is not None
if total_counts_call is counts_per_cell_call:
raise ValueError(
"Must specify exactly one of `total_counts` or `counts_per_cell`."
)
if copy:
adata = adata.copy()
if total_counts_call:
adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace)
elif counts_per_cell_call:
adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace)
if copy:
return adata
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def _downsample_array(
col: np.ndarray,
target: int,
random_state: AnyRandom = 0,
replace: bool = True,
inplace: bool = False,
):
"""\
Evenly reduce counts in cell to target amount.
This is an internal function and has some restrictions:
* total counts in cell must be less than target
"""
np.random.seed(random_state)
cumcounts = col.cumsum()
if inplace:
col[:] = 0
else:
col = np.zeros_like(col)
total = np.int_(cumcounts[-1])
sample = np.random.choice(total, target, replace=replace)
sample.sort()
geneptr = 0
for count in sample:
while count >= cumcounts[geneptr]:
geneptr += 1
col[geneptr] += 1
return col
|
def _downsample_array(
col: np.ndarray,
target: int,
random_state: Optional[Union[int, RandomState]] = 0,
replace: bool = True,
inplace: bool = False,
):
"""\
Evenly reduce counts in cell to target amount.
This is an internal function and has some restrictions:
* total counts in cell must be less than target
"""
np.random.seed(random_state)
cumcounts = col.cumsum()
if inplace:
col[:] = 0
else:
col = np.zeros_like(col)
total = np.int_(cumcounts[-1])
sample = np.random.choice(total, target, replace=replace)
sample.sort()
geneptr = 0
for count in sample:
while count >= cumcounts[geneptr]:
geneptr += 1
col[geneptr] += 1
return col
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def draw_graph(
adata: AnnData,
layout: _Layout = "fa",
init_pos: Union[str, bool, None] = None,
root: Optional[int] = None,
random_state: AnyRandom = 0,
n_jobs: Optional[int] = None,
adjacency: Optional[spmatrix] = None,
key_added_ext: Optional[str] = None,
copy: bool = False,
**kwds,
):
"""\
Force-directed graph drawing [Islam11]_ [Jacomy14]_ [Chippada18]_.
An alternative to tSNE that often preserves the topology of the data
better. This requires to run :func:`~scanpy.pp.neighbors`, first.
The default layout ('fa', `ForceAtlas2`) [Jacomy14]_ uses the package |fa2|_
[Chippada18]_, which can be installed via `pip install fa2`.
`Force-directed graph drawing`_ describes a class of long-established
algorithms for visualizing graphs.
It has been suggested for visualizing single-cell data by [Islam11]_.
Many other layouts as implemented in igraph [Csardi06]_ are available.
Similar approaches have been used by [Zunder15]_ or [Weinreb17]_.
.. |fa2| replace:: `fa2`
.. _fa2: https://github.com/bhargavchippada/forceatlas2
.. _Force-directed graph drawing: https://en.wikipedia.org/wiki/Force-directed_graph_drawing
Parameters
----------
adata
Annotated data matrix.
layout
'fa' (`ForceAtlas2`) or any valid `igraph layout
<http://igraph.org/c/doc/igraph-Layout.html>`__. Of particular interest
are 'fr' (Fruchterman Reingold), 'grid_fr' (Grid Fruchterman Reingold,
faster than 'fr'), 'kk' (Kamadi Kawai', slower than 'fr'), 'lgl' (Large
Graph, very fast), 'drl' (Distributed Recursive Layout, pretty fast) and
'rt' (Reingold Tilford tree layout).
root
Root for tree layouts.
random_state
For layouts with random initialization like 'fr', change this to use
different intial states for the optimization. If `None`, no seed is set.
adjacency
Sparse adjacency matrix of the graph, defaults to
`adata.uns['neighbors']['connectivities']`.
key_added_ext
By default, append `layout`.
proceed
Continue computation, starting off with 'X_draw_graph_`layout`'.
init_pos
`'paga'`/`True`, `None`/`False`, or any valid 2d-`.obsm` key.
Use precomputed coordinates for initialization.
If `False`/`None` (the default), initialize randomly.
copy
Return a copy instead of writing to adata.
**kwds
Parameters of chosen igraph layout. See e.g. `fruchterman-reingold`_
[Fruchterman91]_. One of the most important ones is `maxiter`.
.. _fruchterman-reingold: http://igraph.org/python/doc/igraph.Graph-class.html#layout_fruchterman_reingold
Returns
-------
Depending on `copy`, returns or updates `adata` with the following field.
**X_draw_graph_layout** : `adata.obsm`
Coordinates of graph layout. E.g. for layout='fa' (the default),
the field is called 'X_draw_graph_fa'
"""
start = logg.info(f"drawing single-cell graph using layout {layout!r}")
if layout not in _LAYOUTS:
raise ValueError(f"Provide a valid layout, one of {_LAYOUTS}.")
adata = adata.copy() if copy else adata
if adjacency is None and "neighbors" not in adata.uns:
raise ValueError(
"You need to run `pp.neighbors` first to compute a neighborhood graph."
)
if adjacency is None:
adjacency = adata.uns["neighbors"]["connectivities"]
# init coordinates
if init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif init_pos == "paga" or init_pos:
init_coords = get_init_pos_from_paga(
adata, adjacency, random_state=random_state
)
else:
np.random.seed(random_state)
init_coords = np.random.random((adjacency.shape[0], 2))
# see whether fa2 is installed
if layout == "fa":
try:
from fa2 import ForceAtlas2
except ImportError:
logg.warning(
"Package 'fa2' is not installed, falling back to layout 'fr'."
"To use the faster and better ForceAtlas2 layout, "
"install package 'fa2' (`pip install fa2`)."
)
layout = "fr"
# actual drawing
if layout == "fa":
forceatlas2 = ForceAtlas2(
# Behavior alternatives
outboundAttractionDistribution=False, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.0,
strongGravityMode=False,
gravity=1.0,
# Log
verbose=False,
)
if "maxiter" in kwds:
iterations = kwds["maxiter"]
elif "iterations" in kwds:
iterations = kwds["iterations"]
else:
iterations = 500
positions = forceatlas2.forceatlas2(
adjacency, pos=init_coords, iterations=iterations
)
positions = np.array(positions)
else:
g = _utils.get_igraph_from_adjacency(adjacency)
if layout in {"fr", "drl", "kk", "grid_fr"}:
ig_layout = g.layout(layout, seed=init_coords.tolist(), **kwds)
elif "rt" in layout:
if root is not None:
root = [root]
ig_layout = g.layout(layout, root=root, **kwds)
else:
ig_layout = g.layout(layout, **kwds)
positions = np.array(ig_layout.coords)
adata.uns["draw_graph"] = {}
adata.uns["draw_graph"]["params"] = dict(layout=layout, random_state=random_state)
key_added = f"X_draw_graph_{key_added_ext or layout}"
adata.obsm[key_added] = positions
logg.info(
" finished",
time=start,
deep=f"added\n {key_added!r}, graph_drawing coordinates (adata.obsm)",
)
return adata if copy else None
|
def draw_graph(
adata: AnnData,
layout: _Layout = "fa",
init_pos: Union[str, bool, None] = None,
root: Optional[int] = None,
random_state: Optional[Union[int, RandomState]] = 0,
n_jobs: Optional[int] = None,
adjacency: Optional[spmatrix] = None,
key_added_ext: Optional[str] = None,
copy: bool = False,
**kwds,
):
"""\
Force-directed graph drawing [Islam11]_ [Jacomy14]_ [Chippada18]_.
An alternative to tSNE that often preserves the topology of the data
better. This requires to run :func:`~scanpy.pp.neighbors`, first.
The default layout ('fa', `ForceAtlas2`) [Jacomy14]_ uses the package |fa2|_
[Chippada18]_, which can be installed via `pip install fa2`.
`Force-directed graph drawing`_ describes a class of long-established
algorithms for visualizing graphs.
It has been suggested for visualizing single-cell data by [Islam11]_.
Many other layouts as implemented in igraph [Csardi06]_ are available.
Similar approaches have been used by [Zunder15]_ or [Weinreb17]_.
.. |fa2| replace:: `fa2`
.. _fa2: https://github.com/bhargavchippada/forceatlas2
.. _Force-directed graph drawing: https://en.wikipedia.org/wiki/Force-directed_graph_drawing
Parameters
----------
adata
Annotated data matrix.
layout
'fa' (`ForceAtlas2`) or any valid `igraph layout
<http://igraph.org/c/doc/igraph-Layout.html>`__. Of particular interest
are 'fr' (Fruchterman Reingold), 'grid_fr' (Grid Fruchterman Reingold,
faster than 'fr'), 'kk' (Kamadi Kawai', slower than 'fr'), 'lgl' (Large
Graph, very fast), 'drl' (Distributed Recursive Layout, pretty fast) and
'rt' (Reingold Tilford tree layout).
root
Root for tree layouts.
random_state
For layouts with random initialization like 'fr', change this to use
different intial states for the optimization. If `None`, no seed is set.
adjacency
Sparse adjacency matrix of the graph, defaults to
`adata.uns['neighbors']['connectivities']`.
key_added_ext
By default, append `layout`.
proceed
Continue computation, starting off with 'X_draw_graph_`layout`'.
init_pos
`'paga'`/`True`, `None`/`False`, or any valid 2d-`.obsm` key.
Use precomputed coordinates for initialization.
If `False`/`None` (the default), initialize randomly.
copy
Return a copy instead of writing to adata.
**kwds
Parameters of chosen igraph layout. See e.g. `fruchterman-reingold`_
[Fruchterman91]_. One of the most important ones is `maxiter`.
.. _fruchterman-reingold: http://igraph.org/python/doc/igraph.Graph-class.html#layout_fruchterman_reingold
Returns
-------
Depending on `copy`, returns or updates `adata` with the following field.
**X_draw_graph_layout** : `adata.obsm`
Coordinates of graph layout. E.g. for layout='fa' (the default),
the field is called 'X_draw_graph_fa'
"""
start = logg.info(f"drawing single-cell graph using layout {layout!r}")
if layout not in _LAYOUTS:
raise ValueError(f"Provide a valid layout, one of {_LAYOUTS}.")
adata = adata.copy() if copy else adata
if adjacency is None and "neighbors" not in adata.uns:
raise ValueError(
"You need to run `pp.neighbors` first to compute a neighborhood graph."
)
if adjacency is None:
adjacency = adata.uns["neighbors"]["connectivities"]
# init coordinates
if init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif init_pos == "paga" or init_pos:
init_coords = get_init_pos_from_paga(
adata, adjacency, random_state=random_state
)
else:
np.random.seed(random_state)
init_coords = np.random.random((adjacency.shape[0], 2))
# see whether fa2 is installed
if layout == "fa":
try:
from fa2 import ForceAtlas2
except ImportError:
logg.warning(
"Package 'fa2' is not installed, falling back to layout 'fr'."
"To use the faster and better ForceAtlas2 layout, "
"install package 'fa2' (`pip install fa2`)."
)
layout = "fr"
# actual drawing
if layout == "fa":
forceatlas2 = ForceAtlas2(
# Behavior alternatives
outboundAttractionDistribution=False, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.0,
strongGravityMode=False,
gravity=1.0,
# Log
verbose=False,
)
if "maxiter" in kwds:
iterations = kwds["maxiter"]
elif "iterations" in kwds:
iterations = kwds["iterations"]
else:
iterations = 500
positions = forceatlas2.forceatlas2(
adjacency, pos=init_coords, iterations=iterations
)
positions = np.array(positions)
else:
g = _utils.get_igraph_from_adjacency(adjacency)
if layout in {"fr", "drl", "kk", "grid_fr"}:
ig_layout = g.layout(layout, seed=init_coords.tolist(), **kwds)
elif "rt" in layout:
if root is not None:
root = [root]
ig_layout = g.layout(layout, root=root, **kwds)
else:
ig_layout = g.layout(layout, **kwds)
positions = np.array(ig_layout.coords)
adata.uns["draw_graph"] = {}
adata.uns["draw_graph"]["params"] = dict(layout=layout, random_state=random_state)
key_added = f"X_draw_graph_{key_added_ext or layout}"
adata.obsm[key_added] = positions
logg.info(
" finished",
time=start,
deep=f"added\n {key_added!r}, graph_drawing coordinates (adata.obsm)",
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def leiden(
adata: AnnData,
resolution: float = 1,
*,
restrict_to: Optional[Tuple[str, Sequence[str]]] = None,
random_state: _utils.AnyRandom = 0,
key_added: str = "leiden",
adjacency: Optional[sparse.spmatrix] = None,
directed: bool = True,
use_weights: bool = True,
n_iterations: int = -1,
partition_type: Optional[Type[MutableVertexPartition]] = None,
copy: bool = False,
**partition_kwargs,
) -> Optional[AnnData]:
"""\
Cluster cells into subgroups [Traag18]_.
Cluster cells using the Leiden algorithm [Traag18]_,
an improved version of the Louvain algorithm [Blondel08]_.
It has been proposed for single-cell analysis by [Levine15]_.
This requires having ran :func:`~scanpy.pp.neighbors` or
:func:`~scanpy.external.pp.bbknn` first.
Parameters
----------
adata
The annotated data matrix.
resolution
A parameter value controlling the coarseness of the clustering.
Higher values lead to more clusters.
Set to `None` if overriding `partition_type`
to one that doesn’t accept a `resolution_parameter`.
random_state
Change the initialization of the optimization.
restrict_to
Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain `(obs_key, list_of_categories)`.
key_added
`adata.obs` key under which to add the cluster labels.
adjacency
Sparse adjacency matrix of the graph, defaults to
`adata.uns['neighbors']['connectivities']`.
directed
Whether to treat the graph as directed or undirected.
use_weights
If `True`, edge weights from the graph are used in the computation
(placing more emphasis on stronger edges).
n_iterations
How many iterations of the Leiden clustering algorithm to perform.
Positive values above 2 define the total number of iterations to perform,
-1 has the algorithm run until it reaches its optimal clustering.
partition_type
Type of partition to use.
Defaults to :class:`~leidenalg.RBConfigurationVertexPartition`.
For the available options, consult the documentation for
:func:`~leidenalg.find_partition`.
copy
Whether to copy `adata` or modify it inplace.
**partition_kwargs
Any further arguments to pass to `~leidenalg.find_partition`
(which in turn passes arguments to the `partition_type`).
Returns
-------
`adata.obs[key_added]`
Array of dim (number of samples) that stores the subgroup id
(`'0'`, `'1'`, ...) for each cell.
`adata.uns['leiden']['params']`
A dict with the values for the parameters `resolution`, `random_state`,
and `n_iterations`.
"""
try:
import leidenalg
except ImportError:
raise ImportError(
"Please install the leiden algorithm: `conda install -c conda-forge leidenalg` or `pip3 install leidenalg`."
)
partition_kwargs = dict(partition_kwargs)
start = logg.info("running Leiden clustering")
adata = adata.copy() if copy else adata
# are we clustering a user-provided graph or the default AnnData one?
if adjacency is None:
if "neighbors" not in adata.uns:
raise ValueError(
"You need to run `pp.neighbors` first to compute a neighborhood graph."
)
adjacency = adata.uns["neighbors"]["connectivities"]
if restrict_to is not None:
restrict_key, restrict_categories = restrict_to
adjacency, restrict_indices = restrict_adjacency(
adata,
restrict_key,
restrict_categories,
adjacency,
)
# convert it to igraph
g = _utils.get_igraph_from_adjacency(adjacency, directed=directed)
# flip to the default partition type if not overriden by the user
if partition_type is None:
partition_type = leidenalg.RBConfigurationVertexPartition
# Prepare find_partition arguments as a dictionary,
# appending to whatever the user provided. It needs to be this way
# as this allows for the accounting of a None resolution
# (in the case of a partition variant that doesn't take it on input)
if use_weights:
partition_kwargs["weights"] = np.array(g.es["weight"]).astype(np.float64)
partition_kwargs["n_iterations"] = n_iterations
partition_kwargs["seed"] = random_state
if resolution is not None:
partition_kwargs["resolution_parameter"] = resolution
# clustering proper
part = leidenalg.find_partition(g, partition_type, **partition_kwargs)
# store output into adata.obs
groups = np.array(part.membership)
if restrict_to is not None:
if key_added == "leiden":
key_added += "_R"
groups = rename_groups(
adata,
key_added,
restrict_key,
restrict_categories,
restrict_indices,
groups,
)
adata.obs[key_added] = pd.Categorical(
values=groups.astype("U"),
categories=natsorted(map(str, np.unique(groups))),
)
# store information on the clustering parameters
adata.uns["leiden"] = {}
adata.uns["leiden"]["params"] = dict(
resolution=resolution,
random_state=random_state,
n_iterations=n_iterations,
)
logg.info(
" finished",
time=start,
deep=(
f"found {len(np.unique(groups))} clusters and added\n"
f" {key_added!r}, the cluster labels (adata.obs, categorical)"
),
)
return adata if copy else None
|
def leiden(
adata: AnnData,
resolution: float = 1,
*,
restrict_to: Optional[Tuple[str, Sequence[str]]] = None,
random_state: Optional[Union[int, RandomState]] = 0,
key_added: str = "leiden",
adjacency: Optional[sparse.spmatrix] = None,
directed: bool = True,
use_weights: bool = True,
n_iterations: int = -1,
partition_type: Optional[Type[MutableVertexPartition]] = None,
copy: bool = False,
**partition_kwargs,
) -> Optional[AnnData]:
"""\
Cluster cells into subgroups [Traag18]_.
Cluster cells using the Leiden algorithm [Traag18]_,
an improved version of the Louvain algorithm [Blondel08]_.
It has been proposed for single-cell analysis by [Levine15]_.
This requires having ran :func:`~scanpy.pp.neighbors` or
:func:`~scanpy.external.pp.bbknn` first.
Parameters
----------
adata
The annotated data matrix.
resolution
A parameter value controlling the coarseness of the clustering.
Higher values lead to more clusters.
Set to `None` if overriding `partition_type`
to one that doesn’t accept a `resolution_parameter`.
random_state
Change the initialization of the optimization.
restrict_to
Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain `(obs_key, list_of_categories)`.
key_added
`adata.obs` key under which to add the cluster labels.
adjacency
Sparse adjacency matrix of the graph, defaults to
`adata.uns['neighbors']['connectivities']`.
directed
Whether to treat the graph as directed or undirected.
use_weights
If `True`, edge weights from the graph are used in the computation
(placing more emphasis on stronger edges).
n_iterations
How many iterations of the Leiden clustering algorithm to perform.
Positive values above 2 define the total number of iterations to perform,
-1 has the algorithm run until it reaches its optimal clustering.
partition_type
Type of partition to use.
Defaults to :class:`~leidenalg.RBConfigurationVertexPartition`.
For the available options, consult the documentation for
:func:`~leidenalg.find_partition`.
copy
Whether to copy `adata` or modify it inplace.
**partition_kwargs
Any further arguments to pass to `~leidenalg.find_partition`
(which in turn passes arguments to the `partition_type`).
Returns
-------
`adata.obs[key_added]`
Array of dim (number of samples) that stores the subgroup id
(`'0'`, `'1'`, ...) for each cell.
`adata.uns['leiden']['params']`
A dict with the values for the parameters `resolution`, `random_state`,
and `n_iterations`.
"""
try:
import leidenalg
except ImportError:
raise ImportError(
"Please install the leiden algorithm: `conda install -c conda-forge leidenalg` or `pip3 install leidenalg`."
)
partition_kwargs = dict(partition_kwargs)
start = logg.info("running Leiden clustering")
adata = adata.copy() if copy else adata
# are we clustering a user-provided graph or the default AnnData one?
if adjacency is None:
if "neighbors" not in adata.uns:
raise ValueError(
"You need to run `pp.neighbors` first to compute a neighborhood graph."
)
adjacency = adata.uns["neighbors"]["connectivities"]
if restrict_to is not None:
restrict_key, restrict_categories = restrict_to
adjacency, restrict_indices = restrict_adjacency(
adata,
restrict_key,
restrict_categories,
adjacency,
)
# convert it to igraph
g = _utils.get_igraph_from_adjacency(adjacency, directed=directed)
# flip to the default partition type if not overriden by the user
if partition_type is None:
partition_type = leidenalg.RBConfigurationVertexPartition
# Prepare find_partition arguments as a dictionary,
# appending to whatever the user provided. It needs to be this way
# as this allows for the accounting of a None resolution
# (in the case of a partition variant that doesn't take it on input)
if use_weights:
partition_kwargs["weights"] = np.array(g.es["weight"]).astype(np.float64)
partition_kwargs["n_iterations"] = n_iterations
partition_kwargs["seed"] = random_state
if resolution is not None:
partition_kwargs["resolution_parameter"] = resolution
# clustering proper
part = leidenalg.find_partition(g, partition_type, **partition_kwargs)
# store output into adata.obs
groups = np.array(part.membership)
if restrict_to is not None:
if key_added == "leiden":
key_added += "_R"
groups = rename_groups(
adata,
key_added,
restrict_key,
restrict_categories,
restrict_indices,
groups,
)
adata.obs[key_added] = pd.Categorical(
values=groups.astype("U"),
categories=natsorted(map(str, np.unique(groups))),
)
# store information on the clustering parameters
adata.uns["leiden"] = {}
adata.uns["leiden"]["params"] = dict(
resolution=resolution,
random_state=random_state,
n_iterations=n_iterations,
)
logg.info(
" finished",
time=start,
deep=(
f"found {len(np.unique(groups))} clusters and added\n"
f" {key_added!r}, the cluster labels (adata.obs, categorical)"
),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def louvain(
adata: AnnData,
resolution: Optional[float] = None,
random_state: _utils.AnyRandom = 0,
restrict_to: Optional[Tuple[str, Sequence[str]]] = None,
key_added: str = "louvain",
adjacency: Optional[spmatrix] = None,
flavor: Literal["vtraag", "igraph", "rapids"] = "vtraag",
directed: bool = True,
use_weights: bool = False,
partition_type: Optional[Type[MutableVertexPartition]] = None,
partition_kwargs: Mapping[str, Any] = MappingProxyType({}),
copy: bool = False,
) -> Optional[AnnData]:
"""\
Cluster cells into subgroups [Blondel08]_ [Levine15]_ [Traag17]_.
Cluster cells using the Louvain algorithm [Blondel08]_ in the implementation
of [Traag17]_. The Louvain algorithm has been proposed for single-cell
analysis by [Levine15]_.
This requires having ran :func:`~scanpy.pp.neighbors` or
:func:`~scanpy.external.pp.bbknn` first,
or explicitly passing a ``adjacency`` matrix.
Parameters
----------
adata
The annotated data matrix.
resolution
For the default flavor (``'vtraag'``), you can provide a resolution
(higher resolution means finding more and smaller clusters),
which defaults to 1.0.
See “Time as a resolution parameter” in [Lambiotte09]_.
random_state
Change the initialization of the optimization.
restrict_to
Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain ``(obs_key, list_of_categories)``.
key_added
Key under which to add the cluster labels. (default: ``'louvain'``)
adjacency
Sparse adjacency matrix of the graph, defaults to
``adata.uns['neighbors']['connectivities']``.
flavor
Choose between to packages for computing the clustering.
``'vtraag'`` is much more powerful, and the default.
directed
Interpret the ``adjacency`` matrix as directed graph?
use_weights
Use weights from knn graph.
partition_type
Type of partition to use.
Only a valid argument if ``flavor`` is ``'vtraag'``.
partition_kwargs
Key word arguments to pass to partitioning,
if ``vtraag`` method is being used.
copy
Copy adata or modify it inplace.
Returns
-------
:obj:`None`
By default (``copy=False``), updates ``adata`` with the following fields:
``adata.obs['louvain']`` (:class:`pandas.Series`, dtype ``category``)
Array of dim (number of samples) that stores the subgroup id
(``'0'``, ``'1'``, ...) for each cell.
:class:`~anndata.AnnData`
When ``copy=True`` is set, a copy of ``adata`` with those fields is returned.
"""
partition_kwargs = dict(partition_kwargs)
start = logg.info("running Louvain clustering")
if (flavor != "vtraag") and (partition_type is not None):
raise ValueError(
'`partition_type` is only a valid argument when `flavour` is "vtraag"'
)
adata = adata.copy() if copy else adata
if adjacency is None and "neighbors" not in adata.uns:
raise ValueError(
"You need to run `pp.neighbors` first to compute a neighborhood graph."
)
if adjacency is None:
adjacency = adata.uns["neighbors"]["connectivities"]
if restrict_to is not None:
restrict_key, restrict_categories = restrict_to
adjacency, restrict_indices = restrict_adjacency(
adata,
restrict_key,
restrict_categories,
adjacency,
)
if flavor in {"vtraag", "igraph"}:
if flavor == "igraph" and resolution is not None:
logg.warning('`resolution` parameter has no effect for flavor "igraph"')
if directed and flavor == "igraph":
directed = False
if not directed:
logg.debug(" using the undirected graph")
g = _utils.get_igraph_from_adjacency(adjacency, directed=directed)
if use_weights:
weights = np.array(g.es["weight"]).astype(np.float64)
else:
weights = None
if flavor == "vtraag":
import louvain
if partition_type is None:
partition_type = louvain.RBConfigurationVertexPartition
if resolution is not None:
partition_kwargs["resolution_parameter"] = resolution
if use_weights:
partition_kwargs["weights"] = weights
logg.info(' using the "louvain" package of Traag (2017)')
louvain.set_rng_seed(random_state)
part = louvain.find_partition(
g,
partition_type,
**partition_kwargs,
)
# adata.uns['louvain_quality'] = part.quality()
else:
part = g.community_multilevel(weights=weights)
groups = np.array(part.membership)
elif flavor == "rapids":
# nvLouvain only works with undirected graphs,
# and `adjacency` must have a directed edge in both directions
import cudf
import cugraph
offsets = cudf.Series(adjacency.indptr)
indices = cudf.Series(adjacency.indices)
if use_weights:
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
weights = cudf.Series(weights)
else:
weights = None
g = cugraph.Graph()
g.add_adj_list(offsets, indices, weights)
logg.info(' using the "louvain" package of rapids')
louvain_parts, _ = cugraph.nvLouvain(g)
groups = (
louvain_parts.to_pandas()
.sort_values("vertex")[["partition"]]
.to_numpy()
.ravel()
)
elif flavor == "taynaud":
# this is deprecated
import networkx as nx
import community
g = nx.Graph(adjacency)
partition = community.best_partition(g)
groups = np.zeros(len(partition), dtype=int)
for k, v in partition.items():
groups[k] = v
else:
raise ValueError('`flavor` needs to be "vtraag" or "igraph" or "taynaud".')
if restrict_to is not None:
if key_added == "louvain":
key_added += "_R"
groups = rename_groups(
adata,
key_added,
restrict_key,
restrict_categories,
restrict_indices,
groups,
)
adata.obs[key_added] = pd.Categorical(
values=groups.astype("U"),
categories=natsorted(map(str, np.unique(groups))),
)
adata.uns["louvain"] = {}
adata.uns["louvain"]["params"] = dict(
resolution=resolution,
random_state=random_state,
)
logg.info(
" finished",
time=start,
deep=(
f"found {len(np.unique(groups))} clusters and added\n"
f" {key_added!r}, the cluster labels (adata.obs, categorical)"
),
)
return adata if copy else None
|
def louvain(
adata: AnnData,
resolution: Optional[float] = None,
random_state: Optional[Union[int, RandomState]] = 0,
restrict_to: Optional[Tuple[str, Sequence[str]]] = None,
key_added: str = "louvain",
adjacency: Optional[spmatrix] = None,
flavor: Literal["vtraag", "igraph", "rapids"] = "vtraag",
directed: bool = True,
use_weights: bool = False,
partition_type: Optional[Type[MutableVertexPartition]] = None,
partition_kwargs: Mapping[str, Any] = MappingProxyType({}),
copy: bool = False,
) -> Optional[AnnData]:
"""\
Cluster cells into subgroups [Blondel08]_ [Levine15]_ [Traag17]_.
Cluster cells using the Louvain algorithm [Blondel08]_ in the implementation
of [Traag17]_. The Louvain algorithm has been proposed for single-cell
analysis by [Levine15]_.
This requires having ran :func:`~scanpy.pp.neighbors` or
:func:`~scanpy.external.pp.bbknn` first,
or explicitly passing a ``adjacency`` matrix.
Parameters
----------
adata
The annotated data matrix.
resolution
For the default flavor (``'vtraag'``), you can provide a resolution
(higher resolution means finding more and smaller clusters),
which defaults to 1.0.
See “Time as a resolution parameter” in [Lambiotte09]_.
random_state
Change the initialization of the optimization.
restrict_to
Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain ``(obs_key, list_of_categories)``.
key_added
Key under which to add the cluster labels. (default: ``'louvain'``)
adjacency
Sparse adjacency matrix of the graph, defaults to
``adata.uns['neighbors']['connectivities']``.
flavor
Choose between to packages for computing the clustering.
``'vtraag'`` is much more powerful, and the default.
directed
Interpret the ``adjacency`` matrix as directed graph?
use_weights
Use weights from knn graph.
partition_type
Type of partition to use.
Only a valid argument if ``flavor`` is ``'vtraag'``.
partition_kwargs
Key word arguments to pass to partitioning,
if ``vtraag`` method is being used.
copy
Copy adata or modify it inplace.
Returns
-------
:obj:`None`
By default (``copy=False``), updates ``adata`` with the following fields:
``adata.obs['louvain']`` (:class:`pandas.Series`, dtype ``category``)
Array of dim (number of samples) that stores the subgroup id
(``'0'``, ``'1'``, ...) for each cell.
:class:`~anndata.AnnData`
When ``copy=True`` is set, a copy of ``adata`` with those fields is returned.
"""
partition_kwargs = dict(partition_kwargs)
start = logg.info("running Louvain clustering")
if (flavor != "vtraag") and (partition_type is not None):
raise ValueError(
'`partition_type` is only a valid argument when `flavour` is "vtraag"'
)
adata = adata.copy() if copy else adata
if adjacency is None and "neighbors" not in adata.uns:
raise ValueError(
"You need to run `pp.neighbors` first to compute a neighborhood graph."
)
if adjacency is None:
adjacency = adata.uns["neighbors"]["connectivities"]
if restrict_to is not None:
restrict_key, restrict_categories = restrict_to
adjacency, restrict_indices = restrict_adjacency(
adata,
restrict_key,
restrict_categories,
adjacency,
)
if flavor in {"vtraag", "igraph"}:
if flavor == "igraph" and resolution is not None:
logg.warning('`resolution` parameter has no effect for flavor "igraph"')
if directed and flavor == "igraph":
directed = False
if not directed:
logg.debug(" using the undirected graph")
g = _utils.get_igraph_from_adjacency(adjacency, directed=directed)
if use_weights:
weights = np.array(g.es["weight"]).astype(np.float64)
else:
weights = None
if flavor == "vtraag":
import louvain
if partition_type is None:
partition_type = louvain.RBConfigurationVertexPartition
if resolution is not None:
partition_kwargs["resolution_parameter"] = resolution
if use_weights:
partition_kwargs["weights"] = weights
logg.info(' using the "louvain" package of Traag (2017)')
louvain.set_rng_seed(random_state)
part = louvain.find_partition(
g,
partition_type,
**partition_kwargs,
)
# adata.uns['louvain_quality'] = part.quality()
else:
part = g.community_multilevel(weights=weights)
groups = np.array(part.membership)
elif flavor == "rapids":
# nvLouvain only works with undirected graphs,
# and `adjacency` must have a directed edge in both directions
import cudf
import cugraph
offsets = cudf.Series(adjacency.indptr)
indices = cudf.Series(adjacency.indices)
if use_weights:
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
weights = cudf.Series(weights)
else:
weights = None
g = cugraph.Graph()
g.add_adj_list(offsets, indices, weights)
logg.info(' using the "louvain" package of rapids')
louvain_parts, _ = cugraph.nvLouvain(g)
groups = (
louvain_parts.to_pandas()
.sort_values("vertex")[["partition"]]
.to_numpy()
.ravel()
)
elif flavor == "taynaud":
# this is deprecated
import networkx as nx
import community
g = nx.Graph(adjacency)
partition = community.best_partition(g)
groups = np.zeros(len(partition), dtype=int)
for k, v in partition.items():
groups[k] = v
else:
raise ValueError('`flavor` needs to be "vtraag" or "igraph" or "taynaud".')
if restrict_to is not None:
if key_added == "louvain":
key_added += "_R"
groups = rename_groups(
adata,
key_added,
restrict_key,
restrict_categories,
restrict_indices,
groups,
)
adata.obs[key_added] = pd.Categorical(
values=groups.astype("U"),
categories=natsorted(map(str, np.unique(groups))),
)
adata.uns["louvain"] = {}
adata.uns["louvain"]["params"] = dict(
resolution=resolution,
random_state=random_state,
)
logg.info(
" finished",
time=start,
deep=(
f"found {len(np.unique(groups))} clusters and added\n"
f" {key_added!r}, the cluster labels (adata.obs, categorical)"
),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def score_genes(
adata: AnnData,
gene_list: Sequence[str],
ctrl_size: int = 50,
gene_pool: Optional[Sequence[str]] = None,
n_bins: int = 25,
score_name: str = "score",
random_state: AnyRandom = 0,
copy: bool = False,
use_raw: bool = None,
) -> Optional[AnnData]:
"""\
Score a set of genes [Satija15]_.
The score is the average expression of a set of genes subtracted with the
average expression of a reference set of genes. The reference set is
randomly sampled from the `gene_pool` for each binned expression value.
This reproduces the approach in Seurat [Satija15]_ and has been implemented
for Scanpy by Davide Cittaro.
Parameters
----------
adata
The annotated data matrix.
gene_list
The list of gene names used for score calculation.
ctrl_size
Number of reference genes to be sampled. If `len(gene_list)` is not too
low, you can set `ctrl_size=len(gene_list)`.
gene_pool
Genes for sampling the reference set. Default is all genes.
n_bins
Number of expression level bins for sampling.
score_name
Name of the field to be added in `.obs`.
random_state
The random seed for sampling.
copy
Copy `adata` or modify it inplace.
use_raw
Use `raw` attribute of `adata` if present.
.. versionchanged:: 1.4.5
Default value changed from `False` to `None`.
Returns
-------
Depending on `copy`, returns or updates `adata` with an additional field
`score_name`.
Examples
--------
See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`__.
"""
start = logg.info(f"computing score {score_name!r}")
adata = adata.copy() if copy else adata
if random_state is not None:
np.random.seed(random_state)
gene_list_in_var = []
var_names = adata.raw.var_names if use_raw else adata.var_names
genes_to_ignore = []
for gene in gene_list:
if gene in var_names:
gene_list_in_var.append(gene)
else:
genes_to_ignore.append(gene)
if len(genes_to_ignore) > 0:
logg.warning(f"genes are not in var_names and ignored: {genes_to_ignore}")
gene_list = set(gene_list_in_var[:])
if len(gene_list) == 0:
logg.warning("provided gene list has length 0, scores as 0")
adata.obs[score_name] = 0
return adata if copy else None
if gene_pool is None:
gene_pool = list(var_names)
else:
gene_pool = [x for x in gene_pool if x in var_names]
# Trying here to match the Seurat approach in scoring cells.
# Basically we need to compare genes against random genes in a matched
# interval of expression.
if use_raw is None:
use_raw = True if adata.raw is not None else False
_adata = adata.raw if use_raw else adata
_adata_subset = (
_adata[:, gene_pool] if len(gene_pool) < len(_adata.var_names) else _adata
)
if issparse(_adata_subset.X):
obs_avg = pd.Series(
np.array(_adata_subset.X.mean(axis=0)).flatten(), index=gene_pool
) # average expression of genes
else:
obs_avg = pd.Series(
np.nanmean(_adata_subset.X, axis=0), index=gene_pool
) # average expression of genes
obs_avg = obs_avg[
np.isfinite(obs_avg)
] # Sometimes (and I don't know how) missing data may be there, with nansfor
n_items = int(np.round(len(obs_avg) / (n_bins - 1)))
obs_cut = obs_avg.rank(method="min") // n_items
control_genes = set()
# now pick `ctrl_size` genes from every cut
for cut in np.unique(obs_cut.loc[gene_list]):
r_genes = np.array(obs_cut[obs_cut == cut].index)
np.random.shuffle(r_genes)
# uses full r_genes if ctrl_size > len(r_genes)
control_genes.update(set(r_genes[:ctrl_size]))
# To index, we need a list – indexing implies an order.
control_genes = list(control_genes - gene_list)
gene_list = list(gene_list)
X_list = _adata[:, gene_list].X
if issparse(X_list):
X_list = X_list.toarray()
X_control = _adata[:, control_genes].X
if issparse(X_control):
X_control = X_control.toarray()
X_control = np.nanmean(X_control, axis=1)
if len(gene_list) == 0:
# We shouldn't even get here, but just in case
logg.hint(f"could not add \n {score_name!r}, score of gene set (adata.obs)")
return adata if copy else None
elif len(gene_list) == 1:
if _adata[:, gene_list].X.ndim == 2:
vector = _adata[:, gene_list].X.toarray()[:, 0] # new anndata
else:
vector = _adata[:, gene_list].X # old anndata
score = vector - X_control
else:
score = np.nanmean(X_list, axis=1) - X_control
adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
logg.info(
" finished",
time=start,
deep=(f"added\n {score_name!r}, score of gene set (adata.obs)"),
)
return adata if copy else None
|
def score_genes(
adata: AnnData,
gene_list: Sequence[str],
ctrl_size: int = 50,
gene_pool: Optional[Sequence[str]] = None,
n_bins: int = 25,
score_name: str = "score",
random_state: Optional[Union[int, RandomState]] = 0,
copy: bool = False,
use_raw: bool = None,
) -> Optional[AnnData]:
"""\
Score a set of genes [Satija15]_.
The score is the average expression of a set of genes subtracted with the
average expression of a reference set of genes. The reference set is
randomly sampled from the `gene_pool` for each binned expression value.
This reproduces the approach in Seurat [Satija15]_ and has been implemented
for Scanpy by Davide Cittaro.
Parameters
----------
adata
The annotated data matrix.
gene_list
The list of gene names used for score calculation.
ctrl_size
Number of reference genes to be sampled. If `len(gene_list)` is not too
low, you can set `ctrl_size=len(gene_list)`.
gene_pool
Genes for sampling the reference set. Default is all genes.
n_bins
Number of expression level bins for sampling.
score_name
Name of the field to be added in `.obs`.
random_state
The random seed for sampling.
copy
Copy `adata` or modify it inplace.
use_raw
Use `raw` attribute of `adata` if present.
.. versionchanged:: 1.4.5
Default value changed from `False` to `None`.
Returns
-------
Depending on `copy`, returns or updates `adata` with an additional field
`score_name`.
Examples
--------
See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`__.
"""
start = logg.info(f"computing score {score_name!r}")
adata = adata.copy() if copy else adata
if random_state is not None:
np.random.seed(random_state)
gene_list_in_var = []
var_names = adata.raw.var_names if use_raw else adata.var_names
genes_to_ignore = []
for gene in gene_list:
if gene in var_names:
gene_list_in_var.append(gene)
else:
genes_to_ignore.append(gene)
if len(genes_to_ignore) > 0:
logg.warning(f"genes are not in var_names and ignored: {genes_to_ignore}")
gene_list = set(gene_list_in_var[:])
if len(gene_list) == 0:
logg.warning("provided gene list has length 0, scores as 0")
adata.obs[score_name] = 0
return adata if copy else None
if gene_pool is None:
gene_pool = list(var_names)
else:
gene_pool = [x for x in gene_pool if x in var_names]
# Trying here to match the Seurat approach in scoring cells.
# Basically we need to compare genes against random genes in a matched
# interval of expression.
if use_raw is None:
use_raw = True if adata.raw is not None else False
_adata = adata.raw if use_raw else adata
_adata_subset = (
_adata[:, gene_pool] if len(gene_pool) < len(_adata.var_names) else _adata
)
if issparse(_adata_subset.X):
obs_avg = pd.Series(
np.array(_adata_subset.X.mean(axis=0)).flatten(), index=gene_pool
) # average expression of genes
else:
obs_avg = pd.Series(
np.nanmean(_adata_subset.X, axis=0), index=gene_pool
) # average expression of genes
obs_avg = obs_avg[
np.isfinite(obs_avg)
] # Sometimes (and I don't know how) missing data may be there, with nansfor
n_items = int(np.round(len(obs_avg) / (n_bins - 1)))
obs_cut = obs_avg.rank(method="min") // n_items
control_genes = set()
# now pick `ctrl_size` genes from every cut
for cut in np.unique(obs_cut.loc[gene_list]):
r_genes = np.array(obs_cut[obs_cut == cut].index)
np.random.shuffle(r_genes)
# uses full r_genes if ctrl_size > len(r_genes)
control_genes.update(set(r_genes[:ctrl_size]))
# To index, we need a list – indexing implies an order.
control_genes = list(control_genes - gene_list)
gene_list = list(gene_list)
X_list = _adata[:, gene_list].X
if issparse(X_list):
X_list = X_list.toarray()
X_control = _adata[:, control_genes].X
if issparse(X_control):
X_control = X_control.toarray()
X_control = np.nanmean(X_control, axis=1)
if len(gene_list) == 0:
# We shouldn't even get here, but just in case
logg.hint(f"could not add \n {score_name!r}, score of gene set (adata.obs)")
return adata if copy else None
elif len(gene_list) == 1:
if _adata[:, gene_list].X.ndim == 2:
vector = _adata[:, gene_list].X.toarray()[:, 0] # new anndata
else:
vector = _adata[:, gene_list].X # old anndata
score = vector - X_control
else:
score = np.nanmean(X_list, axis=1) - X_control
adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
logg.info(
" finished",
time=start,
deep=(f"added\n {score_name!r}, score of gene set (adata.obs)"),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def tsne(
adata: AnnData,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
perplexity: Union[float, int] = 30,
early_exaggeration: Union[float, int] = 12,
learning_rate: Union[float, int] = 1000,
random_state: AnyRandom = 0,
use_fast_tsne: bool = True,
n_jobs: Optional[int] = None,
copy: bool = False,
) -> Optional[AnnData]:
"""\
t-SNE [Maaten08]_ [Amir13]_ [Pedregosa11]_.
t-distributed stochastic neighborhood embedding (tSNE) [Maaten08]_ has been
proposed for visualizating single-cell data by [Amir13]_. Here, by default,
we use the implementation of *scikit-learn* [Pedregosa11]_. You can achieve
a huge speedup and better convergence if you install `Multicore-tSNE
<https://github.com/DmitryUlyanov/Multicore-TSNE>`__ by [Ulyanov16]_, which
will be automatically detected by Scanpy.
Parameters
----------
adata
Annotated data matrix.
{doc_n_pcs}
{use_rep}
perplexity
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration
Controls how tight natural clusters in the original space are in the
embedded space and how much space will be between them. For larger
values, the space between natural clusters will be larger in the
embedded space. Again, the choice of this parameter is not very
critical. If the cost function increases during initial optimization,
the early exaggeration factor or the learning rate might be too high.
learning_rate
Note that the R-package "Rtsne" uses a default of 200.
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
random_state
Change this to use different intial states for the optimization.
If `None`, the initial state is not reproducible.
use_fast_tsne
Use the MulticoreTSNE package by D. Ulyanov if it is installed.
n_jobs
Number of jobs for parallel computation.
`None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.
copy
Return a copy instead of writing to `adata`.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_tsne** : `np.ndarray` (`adata.obs`, dtype `float`)
tSNE coordinates of data.
"""
start = logg.info("computing tSNE")
adata = adata.copy() if copy else adata
X = _choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs)
# params for sklearn
params_sklearn = dict(
perplexity=perplexity,
random_state=random_state,
verbose=settings.verbosity > 3,
early_exaggeration=early_exaggeration,
learning_rate=learning_rate,
)
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
# deal with different tSNE implementations
X_tsne = None
if n_jobs >= 1 and use_fast_tsne:
try:
from MulticoreTSNE import MulticoreTSNE as TSNE
tsne = TSNE(n_jobs=n_jobs, **params_sklearn)
logg.info(" using the 'MulticoreTSNE' package by Ulyanov (2017)")
# need to transform to float64 for MulticoreTSNE...
X_tsne = tsne.fit_transform(X.astype("float64"))
except ImportError:
logg.warning(
"Consider installing the package MulticoreTSNE "
"(https://github.com/DmitryUlyanov/Multicore-TSNE). "
"Even for n_jobs=1 this speeds up the computation considerably "
"and might yield better converged results."
)
if X_tsne is None:
from sklearn.manifold import TSNE
from . import _tsne_fix # fix by D. DeTomaso for sklearn < 0.19
# unfortunately, sklearn does not allow to set a minimum number
# of iterations for barnes-hut tSNE
tsne = TSNE(**params_sklearn)
logg.info(" using sklearn.manifold.TSNE with a fix by D. DeTomaso")
X_tsne = tsne.fit_transform(X)
# update AnnData instance
adata.obsm["X_tsne"] = X_tsne # annotate samples with tSNE coordinates
logg.info(
" finished",
time=start,
deep="added\n 'X_tsne', tSNE coordinates (adata.obsm)",
)
return adata if copy else None
|
def tsne(
adata: AnnData,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
perplexity: Union[float, int] = 30,
early_exaggeration: Union[float, int] = 12,
learning_rate: Union[float, int] = 1000,
random_state: Union[int, RandomState] = 0,
use_fast_tsne: bool = True,
n_jobs: Optional[int] = None,
copy: bool = False,
) -> Optional[AnnData]:
"""\
t-SNE [Maaten08]_ [Amir13]_ [Pedregosa11]_.
t-distributed stochastic neighborhood embedding (tSNE) [Maaten08]_ has been
proposed for visualizating single-cell data by [Amir13]_. Here, by default,
we use the implementation of *scikit-learn* [Pedregosa11]_. You can achieve
a huge speedup and better convergence if you install `Multicore-tSNE
<https://github.com/DmitryUlyanov/Multicore-TSNE>`__ by [Ulyanov16]_, which
will be automatically detected by Scanpy.
Parameters
----------
adata
Annotated data matrix.
{doc_n_pcs}
{use_rep}
perplexity
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration
Controls how tight natural clusters in the original space are in the
embedded space and how much space will be between them. For larger
values, the space between natural clusters will be larger in the
embedded space. Again, the choice of this parameter is not very
critical. If the cost function increases during initial optimization,
the early exaggeration factor or the learning rate might be too high.
learning_rate
Note that the R-package "Rtsne" uses a default of 200.
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
random_state
Change this to use different intial states for the optimization.
If `None`, the initial state is not reproducible.
use_fast_tsne
Use the MulticoreTSNE package by D. Ulyanov if it is installed.
n_jobs
Number of jobs for parallel computation.
`None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.
copy
Return a copy instead of writing to `adata`.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_tsne** : `np.ndarray` (`adata.obs`, dtype `float`)
tSNE coordinates of data.
"""
start = logg.info("computing tSNE")
adata = adata.copy() if copy else adata
X = _choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs)
# params for sklearn
params_sklearn = dict(
perplexity=perplexity,
random_state=random_state,
verbose=settings.verbosity > 3,
early_exaggeration=early_exaggeration,
learning_rate=learning_rate,
)
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
# deal with different tSNE implementations
X_tsne = None
if n_jobs >= 1 and use_fast_tsne:
try:
from MulticoreTSNE import MulticoreTSNE as TSNE
tsne = TSNE(n_jobs=n_jobs, **params_sklearn)
logg.info(" using the 'MulticoreTSNE' package by Ulyanov (2017)")
# need to transform to float64 for MulticoreTSNE...
X_tsne = tsne.fit_transform(X.astype("float64"))
except ImportError:
logg.warning(
"Consider installing the package MulticoreTSNE "
"(https://github.com/DmitryUlyanov/Multicore-TSNE). "
"Even for n_jobs=1 this speeds up the computation considerably "
"and might yield better converged results."
)
if X_tsne is None:
from sklearn.manifold import TSNE
from . import _tsne_fix # fix by D. DeTomaso for sklearn < 0.19
# unfortunately, sklearn does not allow to set a minimum number
# of iterations for barnes-hut tSNE
tsne = TSNE(**params_sklearn)
logg.info(" using sklearn.manifold.TSNE with a fix by D. DeTomaso")
X_tsne = tsne.fit_transform(X)
# update AnnData instance
adata.obsm["X_tsne"] = X_tsne # annotate samples with tSNE coordinates
logg.info(
" finished",
time=start,
deep="added\n 'X_tsne', tSNE coordinates (adata.obsm)",
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def umap(
adata: AnnData,
min_dist: float = 0.5,
spread: float = 1.0,
n_components: int = 2,
maxiter: Optional[int] = None,
alpha: float = 1.0,
gamma: float = 1.0,
negative_sample_rate: int = 5,
init_pos: Union[_InitPos, np.ndarray, None] = "spectral",
random_state: AnyRandom = 0,
a: Optional[float] = None,
b: Optional[float] = None,
copy: bool = False,
method: Literal["umap", "rapids"] = "umap",
) -> Optional[AnnData]:
"""\
Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout Scanpy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Parameters
----------
adata
Annotated data matrix.
min_dist
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
on a more even dispersal of points. The value should be set relative to
the ``spread`` value, which determines the scale at which embedded
points will be spread out. The default of in the `umap-learn` package is
0.1.
spread
The effective scale of embedded points. In combination with `min_dist`
this determines how clustered/clumped the embedded points are.
n_components
The number of dimensions of the embedding.
maxiter
The number of iterations (epochs) of the optimization. Called `n_epochs`
in the original UMAP.
alpha
The initial learning rate for the embedding optimization.
gamma
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate
The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos
How to initialize the low dimensional embedding. Called `init` in the
original UMAP. Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState` or `Generator`, `random_state` is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
a
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
b
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
copy
Return a copy instead of writing to adata.
method
Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field
UMAP coordinates of data.
"""
adata = adata.copy() if copy else adata
if "neighbors" not in adata.uns:
raise ValueError(
"Did not find 'neighbors/connectivities'. Run `sc.pp.neighbors` first."
)
start = logg.info("computing UMAP")
if (
"params" not in adata.uns["neighbors"]
or adata.uns["neighbors"]["params"]["method"] != "umap"
):
logg.warning("neighbors/connectivities have not been computed using umap")
from umap.umap_ import find_ab_params, simplicial_set_embedding
if a is None or b is None:
a, b = find_ab_params(spread, min_dist)
else:
a = a
b = b
adata.uns["umap"] = {"params": {"a": a, "b": b}}
if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif isinstance(init_pos, str) and init_pos == "paga":
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
neigh_params = adata.uns["neighbors"]["params"]
X = _choose_representation(
adata,
neigh_params.get("use_rep", None),
neigh_params.get("n_pcs", None),
silent=True,
)
if method == "umap":
# the data matrix X is really only used for determining the number of connected components
# for the init condition in the UMAP embedding
n_epochs = 0 if maxiter is None else maxiter
X_umap = simplicial_set_embedding(
X,
adata.uns["neighbors"]["connectivities"].tocoo(),
n_components,
alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init_coords,
random_state,
neigh_params.get("metric", "euclidean"),
neigh_params.get("metric_kwds", {}),
verbose=settings.verbosity > 3,
)
elif method == "rapids":
metric = neigh_params.get("metric", "euclidean")
if metric != "euclidean":
raise ValueError(
f"`sc.pp.neighbors` was called with `metric` {metric!r}, "
"but umap `method` 'rapids' only supports the 'euclidean' metric."
)
from cuml import UMAP
n_neighbors = adata.uns["neighbors"]["params"]["n_neighbors"]
n_epochs = (
500 if maxiter is None else maxiter
) # 0 is not a valid value for rapids, unlike original umap
X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
umap = UMAP(
n_neighbors=n_neighbors,
n_components=n_components,
n_epochs=n_epochs,
learning_rate=alpha,
init=init_pos,
min_dist=min_dist,
spread=spread,
negative_sample_rate=negative_sample_rate,
a=a,
b=b,
verbose=settings.verbosity > 3,
)
X_umap = umap.fit_transform(X_contiguous)
adata.obsm["X_umap"] = X_umap # annotate samples with UMAP coordinates
logg.info(
" finished",
time=start,
deep=("added\n 'X_umap', UMAP coordinates (adata.obsm)"),
)
return adata if copy else None
|
def umap(
adata: AnnData,
min_dist: float = 0.5,
spread: float = 1.0,
n_components: int = 2,
maxiter: Optional[int] = None,
alpha: float = 1.0,
gamma: float = 1.0,
negative_sample_rate: int = 5,
init_pos: Union[_InitPos, np.ndarray, None] = "spectral",
random_state: Optional[Union[int, RandomState]] = 0,
a: Optional[float] = None,
b: Optional[float] = None,
copy: bool = False,
method: Literal["umap", "rapids"] = "umap",
) -> Optional[AnnData]:
"""\
Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout Scanpy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Parameters
----------
adata
Annotated data matrix.
min_dist
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
on a more even dispersal of points. The value should be set relative to
the ``spread`` value, which determines the scale at which embedded
points will be spread out. The default of in the `umap-learn` package is
0.1.
spread
The effective scale of embedded points. In combination with `min_dist`
this determines how clustered/clumped the embedded points are.
n_components
The number of dimensions of the embedding.
maxiter
The number of iterations (epochs) of the optimization. Called `n_epochs`
in the original UMAP.
alpha
The initial learning rate for the embedding optimization.
gamma
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate
The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos
How to initialize the low dimensional embedding. Called `init` in the
original UMAP. Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState`, `random_state` is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
a
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
b
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
copy
Return a copy instead of writing to adata.
method
Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field
UMAP coordinates of data.
"""
adata = adata.copy() if copy else adata
if "neighbors" not in adata.uns:
raise ValueError(
"Did not find 'neighbors/connectivities'. Run `sc.pp.neighbors` first."
)
start = logg.info("computing UMAP")
if (
"params" not in adata.uns["neighbors"]
or adata.uns["neighbors"]["params"]["method"] != "umap"
):
logg.warning("neighbors/connectivities have not been computed using umap")
from umap.umap_ import find_ab_params, simplicial_set_embedding
if a is None or b is None:
a, b = find_ab_params(spread, min_dist)
else:
a = a
b = b
adata.uns["umap"] = {"params": {"a": a, "b": b}}
if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif isinstance(init_pos, str) and init_pos == "paga":
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
neigh_params = adata.uns["neighbors"]["params"]
X = _choose_representation(
adata,
neigh_params.get("use_rep", None),
neigh_params.get("n_pcs", None),
silent=True,
)
if method == "umap":
# the data matrix X is really only used for determining the number of connected components
# for the init condition in the UMAP embedding
n_epochs = 0 if maxiter is None else maxiter
X_umap = simplicial_set_embedding(
X,
adata.uns["neighbors"]["connectivities"].tocoo(),
n_components,
alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init_coords,
random_state,
neigh_params.get("metric", "euclidean"),
neigh_params.get("metric_kwds", {}),
verbose=settings.verbosity > 3,
)
elif method == "rapids":
metric = neigh_params.get("metric", "euclidean")
if metric != "euclidean":
raise ValueError(
f"`sc.pp.neighbors` was called with `metric` {metric!r}, "
"but umap `method` 'rapids' only supports the 'euclidean' metric."
)
from cuml import UMAP
n_neighbors = adata.uns["neighbors"]["params"]["n_neighbors"]
n_epochs = (
500 if maxiter is None else maxiter
) # 0 is not a valid value for rapids, unlike original umap
X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
umap = UMAP(
n_neighbors=n_neighbors,
n_components=n_components,
n_epochs=n_epochs,
learning_rate=alpha,
init=init_pos,
min_dist=min_dist,
spread=spread,
negative_sample_rate=negative_sample_rate,
a=a,
b=b,
verbose=settings.verbosity > 3,
)
X_umap = umap.fit_transform(X_contiguous)
adata.obsm["X_umap"] = X_umap # annotate samples with UMAP coordinates
logg.info(
" finished",
time=start,
deep=("added\n 'X_umap', UMAP coordinates (adata.obsm)"),
)
return adata if copy else None
|
https://github.com/theislab/scanpy/issues/1057
|
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/anndata/_core/anndata.py:21: FutureWarning: pandas.core.index is deprecated and will be removed in a future version. The public classes are available in the top-level namespace.
from pandas.core.index import RangeIndex
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/setuptools_scm/git.py:68: UserWarning: "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest" is shallow and may cause errors
warnings.warn('"{}" is shallow and may cause errors'.format(wd.path))
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 361, in build
self.write(docnames, list(updated_docnames), method)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 535, in write
self._write_serial(sorted(docnames))
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 545, in _write_serial
self.write_doc(docname, doctree)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/contextlib.py", line 88, in __exit__
next(self.gen)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 219, in pending_warnings
memhandler.flushTo(logger)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 184, in flushTo
logger.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1454, in handle
self.callHandlers(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 1516, in callHandlers
hdlr.handle(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 861, in handle
rv = self.filter(record)
File "/home/docs/.pyenv/versions/3.6.8/lib/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/envs/latest/lib/python3.6/site-packages/sphinx/util/logging.py", line 404, in filter
raise SphinxWarning(location + ":" + message)
sphinx.errors.SphinxWarning: /home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
Warning, treated as error:
/home/docs/checkouts/readthedocs.org/user_builds/icb-scanpy/checkouts/latest/scanpy/preprocessing/_simple.py:docstring of scanpy.pp.downsample_counts:17:py:class reference target not found: numpy.random.RandomState
|
sphinx.errors.SphinxWarning
|
def enrich(
container: Iterable[str],
*,
org: str = "hsapiens",
gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> pd.DataFrame:
"""\
Get enrichment for DE results.
This is a thin convenience wrapper around the very useful gprofiler_.
This method dispatches on the first argument, leading to the following two
signatures::
enrich(container, ...)
enrich(adata: AnnData, group, key: str, ...)
Where::
enrich(adata, group, key, ...) = enrich(adata.uns[key]["names"][group], ...)
.. _gprofiler: https://pypi.org/project/gprofiler-official/#description
Parameters
----------
container
Contains genes you'd like to search.
adata
AnnData object whose group will be looked for.
group
The group whose genes should be used for enrichment.
key
Key in `uns` to find group under.
{doc_org}
gprofiler_kwargs
Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.
**kwargs
All other keyword arguments are passed to `sc.get.rank_genes_groups_df`. E.g.
pval_cutoff, log2fc_min.
Returns
-------
Dataframe of enrichment results.
Examples
--------
Using `sc.queries.enrich` on a list of genes:
>>> import scanpy as sc
>>> sc.queries.enrich(['Klf4', 'Pax5', 'Sox2', 'Nanog'], org="hsapiens")
Using `sc.queries.enrich` on an :class:`anndata.AnnData` object:
>>> pbmcs = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(pbmcs, "bulk_labels")
>>> sc.queries.enrich(pbmcs, "CD34+")
"""
try:
from gprofiler import GProfiler
except ImportError:
raise ImportError(
"This method requires the `gprofiler-official` module to be installed."
)
gprofiler = GProfiler(user_agent="scanpy", return_dataframe=True)
gprofiler_kwargs = dict(gprofiler_kwargs)
for k in ["organism"]:
if gprofiler_kwargs.get(k) is not None:
raise ValueError(
f"Argument `{k}` should be passed directly through `enrich`, "
"not through `gprofiler_kwargs`"
)
return gprofiler.profile(list(container), organism=org, **gprofiler_kwargs)
|
def enrich(
container: Iterable[str],
*,
org: str = "hsapiens",
gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> pd.DataFrame:
"""\
Get enrichment for DE results.
This is a thin convenience wrapper around the very useful gprofiler_.
This method dispatches on the first argument, leading to the following two
signatures::
enrich(container, ...)
enrich(adata: AnnData, group, key: str, ...)
Where::
enrich(adata, group, key, ...) = enrich(adata.uns[key]["names"][group], ...)
.. _gprofiler: https://pypi.org/project/gprofiler-official/#description
Parameters
----------
container
Contains genes you'd like to search.
adata
AnnData object whose group will be looked for.
group
The group whose genes should be used for enrichment.
key
Key in `uns` to find group under.
{doc_org}
gprofiler_kwargs
Keyword arguments to pass to `GProfiler.profile`, see gprofiler_.
Returns
-------
Dataframe of enrichment results.
Examples
--------
Using `sc.queries.enrich` on a list of genes:
>>> import scanpy as sc
>>> sc.queries.enrich(['Klf4', 'Pax5', 'Sox2', 'Nanog'], org="hsapiens")
Using `sc.queries.enrich` on an :class:`anndata.AnnData` object:
>>> pbmcs = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(pbmcs, "bulk_labels")
>>> sc.queries.enrich(pbmcs, "CD34+")
"""
try:
from gprofiler import GProfiler
except ImportError:
raise ImportError(
"This method requires the `gprofiler-official` module to be installed."
)
gprofiler = GProfiler(user_agent="scanpy", return_dataframe=True)
gprofiler_kwargs = dict(gprofiler_kwargs)
for k in ["organism"]:
if gprofiler_kwargs.get(k) is not None:
raise ValueError(
f"Argument `{k}` should be passed directly through `enrich`, "
"not through `gprofiler_kwargs`"
)
return gprofiler.profile(list(container), organism=org, **gprofiler_kwargs)
|
https://github.com/theislab/scanpy/issues/1043
|
AssertionError: query failed with error 500
|
AssertionError
|
def _enrich_anndata(
adata: AnnData,
group: str,
*,
org: Optional[str] = "hsapiens",
key: str = "rank_genes_groups",
pval_cutoff: float = 0.05,
log2fc_min: Optional[float] = None,
log2fc_max: Optional[float] = None,
gene_symbols: Optional[str] = None,
gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> pd.DataFrame:
de = rank_genes_groups_df(
adata,
group=group,
key=key,
pval_cutoff=pval_cutoff,
log2fc_min=log2fc_min,
log2fc_max=log2fc_max,
gene_symbols=gene_symbols,
)
if gene_symbols is not None:
gene_list = list(de[gene_symbols].dropna())
else:
gene_list = list(de["names"].dropna())
return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)
|
def _enrich_anndata(
adata: AnnData,
group: str,
*,
org: Optional[str] = "hsapiens",
key: str = "rank_genes_groups",
pval_cutoff: float = 0.05,
log2fc_min: Optional[float] = None,
log2fc_max: Optional[float] = None,
gene_symbols: Optional[str] = None,
gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> pd.DataFrame:
de = rank_genes_groups_df(
adata,
group=group,
key=key,
pval_cutoff=pval_cutoff,
log2fc_min=log2fc_min,
log2fc_max=log2fc_max,
gene_symbols=gene_symbols,
)
if gene_symbols is not None:
gene_list = list(de[gene_symbols])
else:
gene_list = list(de["names"])
return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)
|
https://github.com/theislab/scanpy/issues/1043
|
AssertionError: query failed with error 500
|
AssertionError
|
def top_segment_proportions_sparse_csr(data, indptr, ns):
# work around https://github.com/numba/numba/issues/5056
indptr = indptr.astype(np.int64)
ns = np.sort(ns)
maxidx = ns[-1]
sums = np.zeros((indptr.size - 1), dtype=data.dtype)
values = np.zeros((indptr.size - 1, len(ns)), dtype=np.float64)
# Just to keep it simple, as a dense matrix
partitioned = np.zeros((indptr.size - 1, maxidx), dtype=data.dtype)
for i in numba.prange(indptr.size - 1):
start, end = indptr[i], indptr[i + 1]
sums[i] = np.sum(data[start:end])
if end - start <= maxidx:
partitioned[i, : end - start] = data[start:end]
elif (end - start) > maxidx:
partitioned[i, :] = -(np.partition(-data[start:end], maxidx))[:maxidx]
partitioned[i, :] = np.partition(partitioned[i, :], maxidx - ns)
partitioned = partitioned[:, ::-1][:, : ns[-1]]
acc = np.zeros((indptr.size - 1), dtype=data.dtype)
prev = 0
# can’t use enumerate due to https://github.com/numba/numba/issues/2625
for j in range(ns.size):
acc += partitioned[:, prev : ns[j]].sum(axis=1)
values[:, j] = acc
prev = ns[j]
return values / sums.reshape((indptr.size - 1, 1))
|
def top_segment_proportions_sparse_csr(data, indptr, ns):
ns = np.sort(ns)
maxidx = ns[-1]
sums = np.zeros((indptr.size - 1), dtype=data.dtype)
values = np.zeros((indptr.size - 1, len(ns)), dtype=np.float64)
# Just to keep it simple, as a dense matrix
partitioned = np.zeros((indptr.size - 1, maxidx), dtype=data.dtype)
for i in numba.prange(indptr.size - 1):
start, end = indptr[i], indptr[i + 1]
sums[i] = np.sum(data[start:end])
if end - start <= maxidx:
partitioned[i, : end - start] = data[start:end]
elif (end - start) > maxidx:
partitioned[i, :] = -(np.partition(-data[start:end], maxidx))[:maxidx]
partitioned[i, :] = np.partition(partitioned[i, :], maxidx - ns)
partitioned = partitioned[:, ::-1][:, : ns[-1]]
acc = np.zeros((indptr.size - 1), dtype=data.dtype)
prev = 0
for j, n in enumerate(ns):
acc += partitioned[:, prev:n].sum(axis=1)
values[:, j] = acc
prev = n
return values / sums.reshape((indptr.size - 1, 1))
|
https://github.com/theislab/scanpy/issues/978
|
---------------------------------------------------------------------------
TypingError Traceback (most recent call last)
<ipython-input-5-0d8cf2779f18> in <module>
1 adata = sc.datasets.pbmc3k()
----> 2 sc.pp.calculate_qc_metrics(adata, inplace=True)
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in calculate_qc_metrics(adata, expr_type, var_type, qc_vars, percent_top, layer, use_raw, inplace, parallel)
281 percent_top=percent_top,
282 inplace=inplace,
--> 283 X=X,
284 )
285 var_metrics = describe_var(
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in describe_obs(adata, expr_type, var_type, qc_vars, percent_top, layer, use_raw, inplace, X, parallel)
107 if percent_top:
108 percent_top = sorted(percent_top)
--> 109 proportions = top_segment_proportions(X, percent_top)
110 for i, n in enumerate(percent_top):
111 obs_metrics[f"pct_{expr_type}_in_top_{n}_{var_type}"] = (
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in top_segment_proportions(mtx, ns)
364 mtx = csr_matrix(mtx)
365 return top_segment_proportions_sparse_csr(
--> 366 mtx.data, mtx.indptr, np.array(ns, dtype=np.int)
367 )
368 else:
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
399 e.patch_message(msg)
400
--> 401 error_rewrite(e, 'typing')
402 except errors.UnsupportedError as e:
403 # Something unsupported is present in the user code, add help info
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
342 raise e
343 else:
--> 344 reraise(type(e), e, None)
345
346 argtypes = []
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/six.py in reraise(tp, value, tb)
666 value = tp()
667 if value.__traceback__ is not tb:
--> 668 raise value.with_traceback(tb)
669 raise value
670
TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<intrinsic wrap_index>) with argument(s) of type(s): (int32, int64)
* parameterized
In definition 0:
ValueError: Argument types for wrap_index must match
raised from /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/array_analysis.py:72
In definition 1:
ValueError: Argument types for wrap_index must match
raised from /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/array_analysis.py:72
This error is usually caused by passing an argument of a type that is unsupported by the named function.
[1] During: resolving callee type: Function(<intrinsic wrap_index>)
[2] During: typing of call at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (399)
File "../../../../../packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py", line 399:
def top_segment_proportions_sparse_csr(data, indptr, ns):
<source elided>
start, end = indptr[i], indptr[i + 1]
sums[i] = np.sum(data[start:end])
^
[1] During: lowering "id=13[LoopNest(index_variable = parfor_index.271, range = (0, $100.6, 1))]{386: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (403)>, 388: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (404)>, 264: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (401)>, 306: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (402)>, 118: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (397)>}Var(parfor_index.271, _qc.py:397)" at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (397)
This is not usually a problem with Numba itself but instead often caused by
the use of unsupported features or an issue in resolving types.
To see Python/NumPy features supported by the latest release of Numba visit:
http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
For more information about typing errors and how to debug them visit:
http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-doesn-t-compile
If you think your code should work with Numba, please report the error message
and traceback, along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
TypingError
|
def print_versions(*, file=None):
"""Print print versions of imported packages"""
if file is None:
sinfo(dependencies=True)
else:
stdout = sys.stdout
try:
sys.stdout = file
sinfo(dependencies=True)
finally:
sys.stdout = stdout
|
def print_versions():
"""\
Versions that might influence the numerical results.
Matplotlib and Seaborn are excluded from this.
"""
from ._settings import settings
modules = ["scanpy"] + _DEPENDENCIES_NUMERICS
print(
" ".join(f"{mod}=={ver}" for mod, ver in _versions_dependencies(modules)),
file=settings.logfile,
)
|
https://github.com/theislab/scanpy/issues/978
|
---------------------------------------------------------------------------
TypingError Traceback (most recent call last)
<ipython-input-5-0d8cf2779f18> in <module>
1 adata = sc.datasets.pbmc3k()
----> 2 sc.pp.calculate_qc_metrics(adata, inplace=True)
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in calculate_qc_metrics(adata, expr_type, var_type, qc_vars, percent_top, layer, use_raw, inplace, parallel)
281 percent_top=percent_top,
282 inplace=inplace,
--> 283 X=X,
284 )
285 var_metrics = describe_var(
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in describe_obs(adata, expr_type, var_type, qc_vars, percent_top, layer, use_raw, inplace, X, parallel)
107 if percent_top:
108 percent_top = sorted(percent_top)
--> 109 proportions = top_segment_proportions(X, percent_top)
110 for i, n in enumerate(percent_top):
111 obs_metrics[f"pct_{expr_type}_in_top_{n}_{var_type}"] = (
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in top_segment_proportions(mtx, ns)
364 mtx = csr_matrix(mtx)
365 return top_segment_proportions_sparse_csr(
--> 366 mtx.data, mtx.indptr, np.array(ns, dtype=np.int)
367 )
368 else:
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
399 e.patch_message(msg)
400
--> 401 error_rewrite(e, 'typing')
402 except errors.UnsupportedError as e:
403 # Something unsupported is present in the user code, add help info
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
342 raise e
343 else:
--> 344 reraise(type(e), e, None)
345
346 argtypes = []
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/six.py in reraise(tp, value, tb)
666 value = tp()
667 if value.__traceback__ is not tb:
--> 668 raise value.with_traceback(tb)
669 raise value
670
TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<intrinsic wrap_index>) with argument(s) of type(s): (int32, int64)
* parameterized
In definition 0:
ValueError: Argument types for wrap_index must match
raised from /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/array_analysis.py:72
In definition 1:
ValueError: Argument types for wrap_index must match
raised from /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/array_analysis.py:72
This error is usually caused by passing an argument of a type that is unsupported by the named function.
[1] During: resolving callee type: Function(<intrinsic wrap_index>)
[2] During: typing of call at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (399)
File "../../../../../packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py", line 399:
def top_segment_proportions_sparse_csr(data, indptr, ns):
<source elided>
start, end = indptr[i], indptr[i + 1]
sums[i] = np.sum(data[start:end])
^
[1] During: lowering "id=13[LoopNest(index_variable = parfor_index.271, range = (0, $100.6, 1))]{386: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (403)>, 388: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (404)>, 264: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (401)>, 306: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (402)>, 118: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (397)>}Var(parfor_index.271, _qc.py:397)" at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (397)
This is not usually a problem with Numba itself but instead often caused by
the use of unsupported features or an issue in resolving types.
To see Python/NumPy features supported by the latest release of Numba visit:
http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
For more information about typing errors and how to debug them visit:
http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-doesn-t-compile
If you think your code should work with Numba, please report the error message
and traceback, along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
TypingError
|
def print_version_and_date(*, file=None):
"""\
Useful for starting a notebook so you see when you started working.
"""
from . import __version__
if file is None:
file = sys.stdout
print(
f"Running Scanpy {__version__}, on {datetime.now():%Y-%m-%d %H:%M}.",
file=file,
)
|
def print_version_and_date():
"""\
Useful for starting a notebook so you see when you started working.
"""
from . import __version__
from ._settings import settings
print(
f"Running Scanpy {__version__}, on {datetime.now():%Y-%m-%d %H:%M}.",
file=settings.logfile,
)
|
https://github.com/theislab/scanpy/issues/978
|
---------------------------------------------------------------------------
TypingError Traceback (most recent call last)
<ipython-input-5-0d8cf2779f18> in <module>
1 adata = sc.datasets.pbmc3k()
----> 2 sc.pp.calculate_qc_metrics(adata, inplace=True)
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in calculate_qc_metrics(adata, expr_type, var_type, qc_vars, percent_top, layer, use_raw, inplace, parallel)
281 percent_top=percent_top,
282 inplace=inplace,
--> 283 X=X,
284 )
285 var_metrics = describe_var(
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in describe_obs(adata, expr_type, var_type, qc_vars, percent_top, layer, use_raw, inplace, X, parallel)
107 if percent_top:
108 percent_top = sorted(percent_top)
--> 109 proportions = top_segment_proportions(X, percent_top)
110 for i, n in enumerate(percent_top):
111 obs_metrics[f"pct_{expr_type}_in_top_{n}_{var_type}"] = (
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py in top_segment_proportions(mtx, ns)
364 mtx = csr_matrix(mtx)
365 return top_segment_proportions_sparse_csr(
--> 366 mtx.data, mtx.indptr, np.array(ns, dtype=np.int)
367 )
368 else:
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
399 e.patch_message(msg)
400
--> 401 error_rewrite(e, 'typing')
402 except errors.UnsupportedError as e:
403 # Something unsupported is present in the user code, add help info
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/dispatcher.py in error_rewrite(e, issue_type)
342 raise e
343 else:
--> 344 reraise(type(e), e, None)
345
346 argtypes = []
~/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/six.py in reraise(tp, value, tb)
666 value = tp()
667 if value.__traceback__ is not tb:
--> 668 raise value.with_traceback(tb)
669 raise value
670
TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<intrinsic wrap_index>) with argument(s) of type(s): (int32, int64)
* parameterized
In definition 0:
ValueError: Argument types for wrap_index must match
raised from /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/array_analysis.py:72
In definition 1:
ValueError: Argument types for wrap_index must match
raised from /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/numba/array_analysis.py:72
This error is usually caused by passing an argument of a type that is unsupported by the named function.
[1] During: resolving callee type: Function(<intrinsic wrap_index>)
[2] During: typing of call at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (399)
File "../../../../../packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py", line 399:
def top_segment_proportions_sparse_csr(data, indptr, ns):
<source elided>
start, end = indptr[i], indptr[i + 1]
sums[i] = np.sum(data[start:end])
^
[1] During: lowering "id=13[LoopNest(index_variable = parfor_index.271, range = (0, $100.6, 1))]{386: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (403)>, 388: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (404)>, 264: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (401)>, 306: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (402)>, 118: <ir.Block at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (397)>}Var(parfor_index.271, _qc.py:397)" at /home/gzhang/packages/anaconda3/envs/testscanpy145/lib/python3.6/site-packages/scanpy/preprocessing/_qc.py (397)
This is not usually a problem with Numba itself but instead often caused by
the use of unsupported features or an issue in resolving types.
To see Python/NumPy features supported by the latest release of Numba visit:
http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
For more information about typing errors and how to debug them visit:
http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-doesn-t-compile
If you think your code should work with Numba, please report the error message
and traceback, along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
|
TypingError
|
def train():
"""Training loop for awd language model."""
ntasgd = False
best_val = float("Inf")
start_train_time = time.time()
parameters = model.collect_params()
param_dict_avg = None
t = 0
avg_trigger = 0
n = 5
valid_losses = []
for epoch in range(args.epochs):
total_L = 0.0
start_epoch_time = time.time()
start_log_interval_time = time.time()
hiddens = [
model.begin_state(
args.batch_size // len(context), func=mx.nd.zeros, ctx=ctx
)
for ctx in context
]
batch_i, i = 0, 0
while i < len(train_data) - 1 - 1:
bptt = (
args.bptt if mx.nd.random.uniform().asscalar() < 0.95 else args.bptt / 2
)
seq_len = max(5, int(mx.nd.random.normal(bptt, 5).asscalar()))
lr_batch_start = trainer.learning_rate
trainer.set_learning_rate(lr_batch_start * seq_len / args.bptt)
data, target = get_batch(train_data, i, seq_len=seq_len)
data_list = gluon.utils.split_and_load(
data, context, batch_axis=1, even_split=True
)
target_list = gluon.utils.split_and_load(
target, context, batch_axis=1, even_split=True
)
hiddens = detach(hiddens)
Ls = []
with autograd.record():
for j, (X, y, h) in enumerate(zip(data_list, target_list, hiddens)):
output, h, encoder_hs, dropped_encoder_hs = model(X, h)
l = joint_loss(output, y, encoder_hs, dropped_encoder_hs)
Ls.append(l / (len(context) * X.size))
hiddens[j] = h
for L in Ls:
L.backward()
grads = [p.grad(d.context) for p in parameters.values() for d in data_list]
gluon.utils.clip_global_norm(grads, args.clip)
if args.ntasgd and ntasgd:
if param_dict_avg is None:
param_dict_avg = {
k.split(model._prefix)[1]: v.data(context[0]).copy()
for k, v in parameters.items()
}
trainer.step(1)
if args.ntasgd and ntasgd:
gamma = 1.0 / max(
1,
epoch * (len(train_data) // args.bptt) + batch_i - avg_trigger + 2,
)
for name, param_avg in param_dict_avg.items():
param_avg[:] += gamma * (
parameters["{}{}".format(model._prefix, name)].data(context[0])
- param_avg
)
total_L += sum([mx.nd.sum(L).asscalar() for L in Ls])
trainer.set_learning_rate(lr_batch_start)
if batch_i % args.log_interval == 0 and batch_i > 0:
cur_L = total_L / args.log_interval
print(
"[Epoch %d Batch %d/%d] current loss %.2f, ppl %.2f, "
"throughput %.2f samples/s, lr %.2f"
% (
epoch,
batch_i,
len(train_data) // args.bptt,
cur_L,
math.exp(cur_L),
args.batch_size
* args.log_interval
/ (time.time() - start_log_interval_time),
lr_batch_start * seq_len / args.bptt,
)
)
total_L = 0.0
start_log_interval_time = time.time()
i += seq_len
batch_i += 1
mx.nd.waitall()
print(
"[Epoch %d] throughput %.2f samples/s"
% (
epoch,
(args.batch_size * len(train_data)) / (time.time() - start_epoch_time),
)
)
if args.ntasgd and ntasgd:
mx.nd.save("{}.val.params".format(args.save), param_dict_avg)
else:
model.save_parameters("{}.val.params".format(args.save))
val_L = evaluate(
val_data, val_batch_size, "{}.val.params".format(args.save), context[0]
)
print(
"[Epoch %d] time cost %.2fs, valid loss %.2f, valid ppl %.2f, lr %.2f"
% (
epoch,
time.time() - start_epoch_time,
val_L,
math.exp(val_L),
trainer.learning_rate,
)
)
if args.ntasgd and avg_trigger == 0:
if t > n and val_L > min(valid_losses[-n:]):
if param_dict_avg is None:
param_dict_avg = {
k.split(model._prefix)[1]: v.data(context[0]).copy()
for k, v in parameters.items()
}
else:
for k, v in parameters.items():
param_dict_avg[k.split(model._prefix)[1]] = v.data(
context[0]
).copy()
avg_trigger = (
epoch * (len(train_data) // args.bptt)
+ len(train_data) // args.bptt
)
print("Switching to NTASGD and avg_trigger is : %d" % avg_trigger)
ntasgd = True
valid_losses.append(val_L)
t += 1
if val_L < best_val:
update_lr_epoch = 0
best_val = val_L
if args.ntasgd and ntasgd:
mx.nd.save(args.save, param_dict_avg)
else:
model.save_parameters(args.save)
test_L = evaluate(test_data, test_batch_size, args.save, context[0])
print(
"[Epoch %d] test loss %.2f, test ppl %.2f"
% (epoch, test_L, math.exp(test_L))
)
else:
update_lr_epoch += 1
if update_lr_epoch % args.lr_update_interval == 0 and update_lr_epoch != 0:
lr_scale = trainer.learning_rate * args.lr_update_factor
print("Learning rate after interval update %f" % lr_scale)
trainer.set_learning_rate(lr_scale)
update_lr_epoch = 0
print(
"Total training throughput %.2f samples/s"
% (
(args.batch_size * len(train_data) * args.epochs)
/ (time.time() - start_train_time)
)
)
|
def train():
"""Training loop for awd language model."""
ntasgd = False
best_val = float("Inf")
start_train_time = time.time()
parameters = model.collect_params()
param_dict_avg = None
t = 0
avg_trigger = 0
n = 5
valid_losses = []
for epoch in range(args.epochs):
total_L = 0.0
start_epoch_time = time.time()
start_log_interval_time = time.time()
hiddens = [
model.begin_state(
args.batch_size // len(context), func=mx.nd.zeros, ctx=ctx
)
for ctx in context
]
batch_i, i = 0, 0
while i < len(train_data) - 1 - 1:
bptt = (
args.bptt if mx.nd.random.uniform().asscalar() < 0.95 else args.bptt / 2
)
seq_len = max(5, int(mx.nd.random.normal(bptt, 5).asscalar()))
lr_batch_start = trainer.learning_rate
trainer.set_learning_rate(lr_batch_start * seq_len / args.bptt)
data, target = get_batch(train_data, i, seq_len=seq_len)
data_list = gluon.utils.split_and_load(
data, context, batch_axis=1, even_split=True
)
target_list = gluon.utils.split_and_load(
target, context, batch_axis=1, even_split=True
)
hiddens = detach(hiddens)
Ls = []
with autograd.record():
for j, (X, y, h) in enumerate(zip(data_list, target_list, hiddens)):
output, h, encoder_hs, dropped_encoder_hs = model(X, h)
l = joint_loss(output, y, encoder_hs, dropped_encoder_hs)
Ls.append(l / (len(context) * X.size))
hiddens[j] = h
for L in Ls:
L.backward()
grads = [p.grad(d.context) for p in parameters.values() for d in data_list]
gluon.utils.clip_global_norm(grads, args.clip)
if args.ntasgd and ntasgd:
if param_dict_avg is None:
param_dict_avg = {
k.split(model._prefix)[1]: v.data(context[0]).copy()
for k, v in parameters.items()
}
trainer.step(1)
if args.ntasgd and ntasgd:
gamma = 1.0 / max(
1,
epoch * (len(train_data) // args.bptt) + batch_i - avg_trigger + 2,
)
for name, param_avg in param_dict_avg.items():
param_avg[:] += gamma * (
parameters["{}{}".format(model._prefix, name)].data(context[0])
- param_avg
)
total_L += sum([mx.nd.sum(L).asscalar() for L in Ls])
trainer.set_learning_rate(lr_batch_start)
if batch_i % args.log_interval == 0 and batch_i > 0:
cur_L = total_L / args.log_interval
print(
"[Epoch %d Batch %d/%d] current loss %.2f, ppl %.2f, "
"throughput %.2f samples/s, lr %.2f"
% (
epoch,
batch_i,
len(train_data) // args.bptt,
cur_L,
math.exp(cur_L),
args.batch_size
* args.log_interval
/ (time.time() - start_log_interval_time),
lr_batch_start * seq_len / args.bptt,
)
)
total_L = 0.0
start_log_interval_time = time.time()
i += seq_len
batch_i += 1
mx.nd.waitall()
print(
"[Epoch %d] throughput %.2f samples/s"
% (
epoch,
(args.batch_size * len(train_data)) / (time.time() - start_epoch_time),
)
)
if args.ntasgd and ntasgd:
mx.nd.save("{}.val.params".format(args.save), param_dict_avg)
else:
model.save_parameters("{}.val.params".format(args.save))
val_L = evaluate(
val_data, val_batch_size, "{}.val.params".format(args.save), context[0]
)
print(
"[Epoch %d] time cost %.2fs, valid loss %.2f, valid ppl %.2f,lr %.2f"
% (
epoch,
time.time() - start_epoch_time,
val_L,
math.exp(val_L),
trainer.learning_rate,
)
)
if args.ntasgd and avg_trigger == 0:
if t > n and val_L > min(valid_losses[-n:]):
if param_dict_avg is None:
param_dict_avg = {
k.split(model._prefix)[1]: v.data(context[0]).copy()
for k, v in parameters.items()
}
else:
for k, v in parameters.items():
param_dict_avg[k.split(model._prefix)[1]] = v.data(
context[0]
).copy()
avg_trigger = (
epoch * (len(train_data) // args.bptt)
+ len(train_data) // args.bptt
)
print("Switching to NTASGD and avg_trigger is : %d" % avg_trigger)
ntasgd = True
valid_losses.append(val_L)
t += 1
if val_L < best_val:
update_lr_epoch = 0
best_val = val_L
if args.ntasgd and ntasgd:
mx.nd.save(args.save, param_dict_avg)
else:
model.save_parameters(args.save)
test_L = evaluate(test_data, test_batch_size, args.save, context[0])
print(
"[Epoch %d] test loss %.2f, test ppl %.2f"
% (epoch, test_L, math.exp(test_L))
)
else:
update_lr_epoch += 1
if update_lr_epoch % args.lr_update_interval == 0 and update_lr_epoch != 0:
lr_scale = trainer.learning_rate * args.lr_update_factor
print("Learning rate after interval update %f" % lr_scale)
trainer.set_learning_rate(lr_scale)
update_lr_epoch = 0
print(
"Total training throughput %.2f samples/s"
% (
(args.batch_size * len(train_data) * args.epochs)
/ (time.time() - start_train_time)
)
)
|
https://github.com/dmlc/gluon-nlp/issues/1055
|
Traceback (most recent call last):
File "word_language_model.py", line 468, in <module>
train()
File "word_language_model.py", line 426, in train
trainer.learning_rate))
UnicodeEncodeError: 'ascii' codec can't encode character '\uff0c' in position 62: ordinal not in range(128)
|
UnicodeEncodeError
|
def load_embedding_from_path(args):
"""Load a TokenEmbedding."""
if args.embedding_path.endswith(".bin"):
with utils.print_time("load fastText model."):
model = nlp.model.train.FasttextEmbeddingModel.load_fasttext_format(
args.embedding_path
)
idx_to_token = sorted(model._token_to_idx, key=model._token_to_idx.get)
# Analogy task is open-vocabulary, so must keep all known words.
# But if not evaluating analogy, no need to precompute now as all
# words for closed vocabulary task can be obtained via the unknown
# lookup
if not args.analogy_datasets:
# TODO(leezu): use shape (0, model.weight.shape[1]) once np shape
# is supported by TokenEmbedding
idx_to_token = ["<unk>"]
idx_to_vec = mx.nd.zeros((1, model.weight.shape[1]))
else:
if args.analogy_max_vocab_size:
idx_to_token = idx_to_token[: args.analogy_max_vocab_size]
with utils.print_time(
"compute vectors for {} known words.".format(len(idx_to_token))
):
idx_to_vec = model[idx_to_token]
embedding = nlp.embedding.TokenEmbedding(
unknown_token=None,
idx_to_token=idx_to_token,
idx_to_vec=idx_to_vec,
unknown_lookup=model,
)
else:
embedding = nlp.embedding.TokenEmbedding.from_file(args.embedding_path)
return embedding
|
def load_embedding_from_path(args):
"""Load a TokenEmbedding."""
if args.embedding_path.endswith(".bin"):
with utils.print_time("load fastText model."):
model = nlp.model.train.FasttextEmbeddingModel.load_fasttext_format(
args.embedding_path
)
idx_to_token = sorted(model._token_to_idx, key=model._token_to_idx.get)
embedding = nlp.embedding.TokenEmbedding(
unknown_token=None, unknown_lookup=model, allow_extend=True
)
# Analogy task is open-vocabulary, so must keep all known words.
# But if not evaluating analogy, no need to precompute now as all
# words for closed vocabulary task can be obtained via the unknown
# lookup
if not args.analogy_datasets:
idx_to_token = []
elif args.analogy_datasets and args.analogy_max_vocab_size:
idx_to_token = idx_to_token[: args.analogy_max_vocab_size]
embedding["<unk>"] = mx.nd.zeros(model.weight.shape[1])
if idx_to_token:
with utils.print_time(
"compute vectors for {} known words.".format(len(idx_to_token))
):
embedding[idx_to_token] = model[idx_to_token]
else:
embedding = nlp.embedding.TokenEmbedding.from_file(args.embedding_path)
return embedding
|
https://github.com/dmlc/gluon-nlp/issues/905
|
Traceback (most recent call last):
File "evaluate_pretrained.py", line 216, in <module>
vocab.set_embedding(token_embedding_)
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/vocab/vocab.py", line 412, in set_embedding
new_idx_to_vec[1:, col_start:col_end] = embs[self._idx_to_token[1:]]
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/embedding/token_embedding.py", line 637, in __getitem__
indices = [self._token_to_idx[token] for token in tokens]
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/embedding/token_embedding.py", line 637, in <listcomp>
indices = [self._token_to_idx[token] for token in tokens]
KeyError: '<pad>'
|
KeyError
|
def enforce_max_size(token_embedding, size):
assert token_embedding.idx_to_vec is not None
if size and len(token_embedding.idx_to_token) > size:
assert size > 0
size = size + 1 if token_embedding.unknown_token is not None else size
token_embedding = nlp.embedding.TokenEmbedding(
unknown_token=token_embedding.unknown_token,
idx_to_token=token_embedding._idx_to_token[:size],
idx_to_vec=token_embedding._idx_to_vec[:size],
unknown_lookup=token_embedding.unknown_lookup,
)
return token_embedding
|
def enforce_max_size(token_embedding, size):
assert token_embedding.idx_to_vec is not None
if size and len(token_embedding.idx_to_token) > size:
assert size > 0
size = size + 1 if token_embedding.unknown_token is not None else size
token_embedding._idx_to_token = token_embedding._idx_to_token[:size]
token_embedding._idx_to_vec = token_embedding._idx_to_vec[:size]
token_embedding._token_to_idx = {
token: idx for idx, token in enumerate(token_embedding._idx_to_token)
}
|
https://github.com/dmlc/gluon-nlp/issues/905
|
Traceback (most recent call last):
File "evaluate_pretrained.py", line 216, in <module>
vocab.set_embedding(token_embedding_)
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/vocab/vocab.py", line 412, in set_embedding
new_idx_to_vec[1:, col_start:col_end] = embs[self._idx_to_token[1:]]
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/embedding/token_embedding.py", line 637, in __getitem__
indices = [self._token_to_idx[token] for token in tokens]
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/embedding/token_embedding.py", line 637, in <listcomp>
indices = [self._token_to_idx[token] for token in tokens]
KeyError: '<pad>'
|
KeyError
|
def set_embedding(self, *embeddings):
"""Attaches one or more embeddings to the indexed text tokens.
Parameters
----------
embeddings : None or tuple of :class:`gluonnlp.embedding.TokenEmbedding` instances
The embedding to be attached to the indexed tokens. If a tuple of multiple embeddings
are provided, their embedding vectors will be concatenated for the same token.
"""
if len(embeddings) == 1 and embeddings[0] is None:
self._embedding = None
return
for embs in embeddings:
assert isinstance(embs, emb.TokenEmbedding), (
"The argument `embeddings` must be an instance or a list of instances of "
"`gluonnlp.embedding.TokenEmbedding`."
)
assert embs.idx_to_vec is not None, (
"For all specified `embeddings`, `embeddings.idx_to_vec` must be initialized. "
"Use eg. `emb[emb.unknown_token] = nd.zeros(emsize)` to initialize, "
"where `emsize` is the desired embedding dimensionality."
)
assert all([embs.unknown_token for embs in embeddings]) or all(
[not embs.unknown_token for embs in embeddings]
), "Either all or none of the TokenEmbeddings must have an unknown_token set."
new_vec_len = sum(embs.idx_to_vec.shape[1] for embs in embeddings)
# TODO(leezu): Remove once np shape is used by default
assert len(self), "Empty vocab not yet supported"
new_idx_to_vec = nd.zeros(shape=(len(self), new_vec_len))
col_start = 0
# Concatenate all the embedding vectors in embedding.
for embs in embeddings:
if embs and embs.idx_to_vec is not None:
col_end = col_start + embs.idx_to_vec.shape[1]
# Cancatenate vectors of the unknown token.
new_idx_to_vec[0, col_start:col_end] = embs.idx_to_vec[0]
new_idx_to_vec[1:, col_start:col_end] = embs[self._idx_to_token[1:]]
col_start = col_end
self._embedding = emb.TokenEmbedding(
self.unknown_token,
init_unknown_vec=None,
allow_extend=False,
idx_to_token=self.idx_to_token,
idx_to_vec=new_idx_to_vec,
)
|
def set_embedding(self, *embeddings):
"""Attaches one or more embeddings to the indexed text tokens.
Parameters
----------
embeddings : None or tuple of :class:`gluonnlp.embedding.TokenEmbedding` instances
The embedding to be attached to the indexed tokens. If a tuple of multiple embeddings
are provided, their embedding vectors will be concatenated for the same token.
"""
if len(embeddings) == 1 and embeddings[0] is None:
self._embedding = None
return
for embs in embeddings:
assert isinstance(embs, emb.TokenEmbedding), (
"The argument `embeddings` must be an instance or a list of instances of "
"`gluonnlp.embedding.TokenEmbedding`."
)
assert embs.idx_to_vec is not None, (
"For all specified `embeddings`, `embeddings.idx_to_vec` must be initialized. "
"Use eg. `emb[emb.unknown_token] = nd.zeros(emsize)` to initialize, "
"where `emsize` is the desired embedding dimensionality."
)
assert all([embs.unknown_token for embs in embeddings]) or all(
[not embs.unknown_token for embs in embeddings]
), "Either all or none of the TokenEmbeddings must have an unknown_token set."
new_vec_len = sum(embs.idx_to_vec.shape[1] for embs in embeddings)
new_idx_to_vec = nd.zeros(shape=(len(self), new_vec_len))
col_start = 0
# Concatenate all the embedding vectors in embedding.
for embs in embeddings:
if embs and embs.idx_to_vec is not None:
col_end = col_start + embs.idx_to_vec.shape[1]
# Cancatenate vectors of the unknown token.
new_idx_to_vec[0, col_start:col_end] = embs.idx_to_vec[0]
new_idx_to_vec[1:, col_start:col_end] = embs[self._idx_to_token[1:]]
col_start = col_end
self._embedding = emb.TokenEmbedding(
self.unknown_token,
init_unknown_vec=None,
allow_extend=False,
idx_to_token=self.idx_to_token,
idx_to_vec=new_idx_to_vec,
)
|
https://github.com/dmlc/gluon-nlp/issues/905
|
Traceback (most recent call last):
File "evaluate_pretrained.py", line 216, in <module>
vocab.set_embedding(token_embedding_)
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/vocab/vocab.py", line 412, in set_embedding
new_idx_to_vec[1:, col_start:col_end] = embs[self._idx_to_token[1:]]
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/embedding/token_embedding.py", line 637, in __getitem__
indices = [self._token_to_idx[token] for token in tokens]
File "/Users/siyuanl/private/gluon-nlp/src/gluonnlp/embedding/token_embedding.py", line 637, in <listcomp>
indices = [self._token_to_idx[token] for token in tokens]
KeyError: '<pad>'
|
KeyError
|
def _tokenize_mteval_13a(segment):
r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenzied
Returns
-------
The tokenized string
"""
norm = segment.rstrip()
norm = norm.replace("<skipped>", "")
norm = norm.replace("-\n", "")
norm = norm.replace("\n", " ")
norm = norm.replace(""", '"')
norm = norm.replace("&", "&")
norm = norm.replace("<", "<")
norm = norm.replace(">", ">")
norm = " {} ".format(norm)
norm = re.sub(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", " \\1 ", norm)
norm = re.sub(r"([^0-9])([\.,])", "\\1 \\2 ", norm)
norm = re.sub(r"([\.,])([^0-9])", " \\1 \\2", norm)
norm = re.sub(r"([0-9])(-)", "\\1 \\2 ", norm)
norm = re.sub(r"\s+", " ", norm)
norm = re.sub(r"^\s+", "", norm)
norm = re.sub(r"\s+$", "", norm)
return norm
|
def _tokenize_mteval_13a(segment):
r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenzied
Returns
-------
The tokenized string
"""
norm = segment.rstrip()
norm = norm.replace("<skipped>", "")
norm = norm.replace("-\n", "")
norm = norm.replace("\n", " ")
norm = norm.replace(""", '"')
norm = norm.replace("&", "&")
norm = norm.replace("<", "<")
norm = norm.replace(">", ">")
norm = " {} ".format(norm)
norm = re.sub(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", " \\1 ", norm)
norm = re.sub(r"([^0-9])([\.,])", "\\1 \\2 ", norm)
norm = re.sub(r"([\.,])([^0-9])", " \\1 \\2", norm)
norm = re.sub(r"([0-9])(-)", "\\1 \\2 ", norm)
norm = re.sub(r"\s+", " ", norm)
norm = re.sub(r"^\s+", "", norm)
norm = re.sub(r"\s+$", "", norm)
return norm
|
https://github.com/dmlc/gluon-nlp/issues/349
|
2018-09-26 05:38:28,761 - root - Namespace(average_checkpoint=False, average_start=5, batch_size=2700, beam_size=4, bleu='13a', bucket_ratio=0.0, bucket_scheme='exp', dataset='WMT2$
14BPE', dropout=0.1, epochs=30, epsilon=0.1, full=False, gpus='0,1,2,3,4,5,6,7', hidden_size=2048, log_interval=10, lp_alpha=0.6, lp_k=5, lr=2.0, magnitude=3.0, num_accumulated=16,
num_averages=5, num_buckets=20, num_heads=8, num_layers=6, num_units=512, optimizer='adam', save_dir='transformer_en_de_u512', scaled=True, src_lang='en', src_max_len=-1, test_batc$
_size=256, tgt_lang='de', tgt_max_len=-1, warmup_steps=4000.0)
All Logs will be saved to transformer_en_de_u512/train_transformer.log
Load cached data from /home/ubuntu/transformer/scripts/nmt/cached/WMT2014BPE_en_de_-1_-1_train.npz
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/format.py", line 650, in read_array
array = pickle.load(fp, **pickle_kwargs)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xfa in position 8: ordinal not in range(128)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "train_transformer.py", line 304, in <module>
= load_translation_data(dataset=args.dataset, src_lang=args.src_lang, tgt_lang=args.tgt_lang)
File "train_transformer.py", line 260, in load_translation_data
data_train_processed = load_cached_dataset(common_prefix + '_train')
File "train_transformer.py", line 167, in load_cached_dataset
return ArrayDataset(np.array(dat['src_data']), np.array(dat['tgt_data']))
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/npyio.py", line 235, in __getitem__
pickle_kwargs=self.pickle_kwargs)
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/format.py", line 656, in read_array
"to numpy.load" % (err,))
UnicodeError: Unpickling a python object failed: UnicodeDecodeError('ascii', b'\x07 \x00\x00ls\x00\x00\xfab\x00\x00k]\x00\x00`p\x00\x00fi\x00\x00\x03\x00\x00\x00', 8, 9, 'ordinal n$
t in range(128)')
You may need to pass the encoding= option to numpy.load
|
UnicodeDecodeError
|
def compute_bleu(
reference_corpus_list,
translation_corpus,
tokenized=True,
tokenizer="13a",
max_n=4,
smooth=False,
lower_case=False,
bpe=False,
split_compound_word=False,
):
r"""Compute bleu score of translation against references.
Parameters
----------
reference_corpus_list: list of list(list(str)) or list of list(str)
list of list(list(str)): tokenzied references
list of list(str): plain text
List of references for each translation.
translation_corpus: list(list(str)) or list(str)
list(list(str)): tokenzied translation
list(str): plain text
Translations to score.
tokenized: bool, default True
Whether the inputs has been tokenized.
tokenizer: str or None, default '13a'
'13a': follow the tokenizer in mteval-v13a.pl
'intl': follow the international tokenzier in mteval-v14.pl
None: identity mapping on the string.
This option is ignored if tokenized is True
max_n: int, default 4
Maximum n-gram order to use when computing BLEU score.
smooth: bool, default False
Whether or not to compute smoothed bleu score.
lower_case: bool, default False
Whether or not to use lower case of tokens
split_compound_word: bool, default False
Whether or not to split compound words
"rich-text format" --> rich ##AT##-##AT## text format.
bpe: bool, default False
Whether or not the inputs are in BPE format
Returns
-------
5-Tuple with the BLEU score, n-gram precisions, brevity penalty,
reference length, and translation length
"""
precision_numerators = [0 for _ in range(max_n)]
precision_denominators = [0 for _ in range(max_n)]
ref_length, trans_length = 0, 0
for references in reference_corpus_list:
assert len(references) == len(translation_corpus), (
"The number of translations and their references do not match"
)
if tokenized:
assert isinstance(reference_corpus_list[0][0], LIST_TYPES) and isinstance(
translation_corpus[0], LIST_TYPES
), (
"references and translation should have format of list of list(list(str)) "
"and list(list(str)), respectively, when toknized is True."
)
else:
assert isinstance(reference_corpus_list[0][0], six.string_types) and isinstance(
translation_corpus[0], six.string_types
), (
"references and translation should have format of list(list(str)) "
"and list(str), respectively, when toknized is False."
)
for references, translation in zip(zip(*reference_corpus_list), translation_corpus):
if not tokenized:
references = [
TOKENIZERS[tokenizer](reference).split() for reference in references
]
translation = TOKENIZERS[tokenizer](translation).split()
if bpe:
references = [_bpe_to_words(reference) for reference in references]
translation = _bpe_to_words(translation)
if split_compound_word:
references = [_split_compound_word(reference) for reference in references]
translation = _split_compound_word(translation)
if lower_case:
references = [[w.lower() for w in reference] for reference in references]
translation = [w.lower() for w in translation]
trans_len = len(translation)
trans_length += trans_len
ref_length += _closest_ref_length(references, trans_len)
for n in range(max_n):
matches, candidates = _compute_precision(references, translation, n + 1)
precision_numerators[n] += matches
precision_denominators[n] += candidates
precision_fractions = [
(precision_numerators[n], precision_denominators[n]) for n in range(max_n)
]
smooth_const = 0
if smooth:
smooth_const = 1
precisions = _smoothing(precision_fractions, smooth_const)
if min(precisions) > 0:
precision_log_average = sum(math.log(p) for p in precisions) / max_n
precision_exp_log_average = math.exp(precision_log_average)
else:
precision_exp_log_average = 0
bp = _brevity_penalty(ref_length, trans_length)
bleu = precision_exp_log_average * bp
return bleu, precisions, bp, ref_length, trans_length
|
def compute_bleu(
reference_corpus_list,
translation_corpus,
tokenized=True,
tokenizer="13a",
max_n=4,
smooth=False,
lower_case=False,
bpe=False,
split_compound_word=False,
):
r"""Compute bleu score of translation against references.
Parameters
----------
reference_corpus_list: list of list(list(str)) or list of list(str)
list of list(list(str)): tokenzied references
list of list(str): plain text
List of references for each translation.
translation_corpus: list(list(str)) or list(str)
list(list(str)): tokenzied translation
list(str): plain text
Translations to score.
tokenized: bool, default True
Whether the inputs has been tokenized.
tokenizer: str or None, default '13a'
'13a': follow the tokenizer in mteval-v13a.pl
'intl': follow the international tokenzier in mteval-v14.pl
None: identity mapping on the string.
This option is ignored if tokenized is True
max_n: int, default 4
Maximum n-gram order to use when computing BLEU score.
smooth: bool, default False
Whether or not to compute smoothed bleu score.
lower_case: bool, default False
Whether or not to use lower case of tokens
split_compound_word: bool, default False
Whether or not to split compound words
"rich-text format" --> rich ##AT##-##AT## text format.
bpe: bool, default False
Whether or not the inputs are in BPE format
Returns
-------
5-Tuple with the BLEU score, n-gram precisions, brevity penalty,
reference length, and translation length
"""
precision_numerators = [0 for _ in range(max_n)]
precision_denominators = [0 for _ in range(max_n)]
ref_length, trans_length = 0, 0
for references in reference_corpus_list:
assert len(references) == len(translation_corpus), (
"The number of translations and their references do not match"
)
if tokenized:
assert isinstance(reference_corpus_list[0][0], LIST_TYPES) and isinstance(
translation_corpus[0], LIST_TYPES
), (
"references and translation should have format of list of list(list(str)) "
"and list(list(str)), respectively, when toknized is True."
)
else:
assert isinstance(reference_corpus_list[0][0], str) and isinstance(
translation_corpus[0], str
), (
"references and translation should have format of list(list(str)) "
"and list(str), respectively, when toknized is False."
)
for references, translation in zip(zip(*reference_corpus_list), translation_corpus):
if not tokenized:
references = [
TOKENIZERS[tokenizer](reference).split() for reference in references
]
translation = TOKENIZERS[tokenizer](translation).split()
if bpe:
references = [_bpe_to_words(reference) for reference in references]
translation = _bpe_to_words(translation)
if split_compound_word:
references = [_split_compound_word(reference) for reference in references]
translation = _split_compound_word(translation)
if lower_case:
references = [list(map(str.lower, reference)) for reference in references]
translation = list(map(str.lower, translation))
trans_len = len(translation)
trans_length += trans_len
ref_length += _closest_ref_length(references, trans_len)
for n in range(max_n):
matches, candidates = _compute_precision(references, translation, n + 1)
precision_numerators[n] += matches
precision_denominators[n] += candidates
precision_fractions = [
(precision_numerators[n], precision_denominators[n]) for n in range(max_n)
]
smooth_const = 0
if smooth:
smooth_const = 1
precisions = _smoothing(precision_fractions, smooth_const)
if min(precisions) > 0:
precision_log_average = sum(math.log(p) for p in precisions) / max_n
precision_exp_log_average = math.exp(precision_log_average)
else:
precision_exp_log_average = 0
bp = _brevity_penalty(ref_length, trans_length)
bleu = precision_exp_log_average * bp
return bleu, precisions, bp, ref_length, trans_length
|
https://github.com/dmlc/gluon-nlp/issues/349
|
2018-09-26 05:38:28,761 - root - Namespace(average_checkpoint=False, average_start=5, batch_size=2700, beam_size=4, bleu='13a', bucket_ratio=0.0, bucket_scheme='exp', dataset='WMT2$
14BPE', dropout=0.1, epochs=30, epsilon=0.1, full=False, gpus='0,1,2,3,4,5,6,7', hidden_size=2048, log_interval=10, lp_alpha=0.6, lp_k=5, lr=2.0, magnitude=3.0, num_accumulated=16,
num_averages=5, num_buckets=20, num_heads=8, num_layers=6, num_units=512, optimizer='adam', save_dir='transformer_en_de_u512', scaled=True, src_lang='en', src_max_len=-1, test_batc$
_size=256, tgt_lang='de', tgt_max_len=-1, warmup_steps=4000.0)
All Logs will be saved to transformer_en_de_u512/train_transformer.log
Load cached data from /home/ubuntu/transformer/scripts/nmt/cached/WMT2014BPE_en_de_-1_-1_train.npz
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/format.py", line 650, in read_array
array = pickle.load(fp, **pickle_kwargs)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xfa in position 8: ordinal not in range(128)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "train_transformer.py", line 304, in <module>
= load_translation_data(dataset=args.dataset, src_lang=args.src_lang, tgt_lang=args.tgt_lang)
File "train_transformer.py", line 260, in load_translation_data
data_train_processed = load_cached_dataset(common_prefix + '_train')
File "train_transformer.py", line 167, in load_cached_dataset
return ArrayDataset(np.array(dat['src_data']), np.array(dat['tgt_data']))
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/npyio.py", line 235, in __getitem__
pickle_kwargs=self.pickle_kwargs)
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/format.py", line 656, in read_array
"to numpy.load" % (err,))
UnicodeError: Unpickling a python object failed: UnicodeDecodeError('ascii', b'\x07 \x00\x00ls\x00\x00\xfab\x00\x00k]\x00\x00`p\x00\x00fi\x00\x00\x03\x00\x00\x00', 8, 9, 'ordinal n$
t in range(128)')
You may need to pass the encoding= option to numpy.load
|
UnicodeDecodeError
|
def load_cached_dataset(prefix):
cached_file_path = os.path.join(_C.CACHE_PATH, prefix + ".npz")
if os.path.exists(cached_file_path):
print("Load cached data from {}".format(cached_file_path))
dat = np.load(cached_file_path, encoding="latin1")
return ArrayDataset(np.array(dat["src_data"]), np.array(dat["tgt_data"]))
else:
return None
|
def load_cached_dataset(prefix):
cached_file_path = os.path.join(_C.CACHE_PATH, prefix + ".npz")
if os.path.exists(cached_file_path):
print("Load cached data from {}".format(cached_file_path))
dat = np.load(cached_file_path)
return ArrayDataset(np.array(dat["src_data"]), np.array(dat["tgt_data"]))
else:
return None
|
https://github.com/dmlc/gluon-nlp/issues/349
|
2018-09-26 05:38:28,761 - root - Namespace(average_checkpoint=False, average_start=5, batch_size=2700, beam_size=4, bleu='13a', bucket_ratio=0.0, bucket_scheme='exp', dataset='WMT2$
14BPE', dropout=0.1, epochs=30, epsilon=0.1, full=False, gpus='0,1,2,3,4,5,6,7', hidden_size=2048, log_interval=10, lp_alpha=0.6, lp_k=5, lr=2.0, magnitude=3.0, num_accumulated=16,
num_averages=5, num_buckets=20, num_heads=8, num_layers=6, num_units=512, optimizer='adam', save_dir='transformer_en_de_u512', scaled=True, src_lang='en', src_max_len=-1, test_batc$
_size=256, tgt_lang='de', tgt_max_len=-1, warmup_steps=4000.0)
All Logs will be saved to transformer_en_de_u512/train_transformer.log
Load cached data from /home/ubuntu/transformer/scripts/nmt/cached/WMT2014BPE_en_de_-1_-1_train.npz
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/format.py", line 650, in read_array
array = pickle.load(fp, **pickle_kwargs)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xfa in position 8: ordinal not in range(128)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "train_transformer.py", line 304, in <module>
= load_translation_data(dataset=args.dataset, src_lang=args.src_lang, tgt_lang=args.tgt_lang)
File "train_transformer.py", line 260, in load_translation_data
data_train_processed = load_cached_dataset(common_prefix + '_train')
File "train_transformer.py", line 167, in load_cached_dataset
return ArrayDataset(np.array(dat['src_data']), np.array(dat['tgt_data']))
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/npyio.py", line 235, in __getitem__
pickle_kwargs=self.pickle_kwargs)
File "/usr/local/lib/python3.5/dist-packages/numpy/lib/format.py", line 656, in read_array
"to numpy.load" % (err,))
UnicodeError: Unpickling a python object failed: UnicodeDecodeError('ascii', b'\x07 \x00\x00ls\x00\x00\xfab\x00\x00k]\x00\x00`p\x00\x00fi\x00\x00\x03\x00\x00\x00', 8, 9, 'ordinal n$
t in range(128)')
You may need to pass the encoding= option to numpy.load
|
UnicodeDecodeError
|
def remove_user(self, group_item, user_id):
self._remove_user(group_item, user_id)
try:
users = group_item.users
for user in users:
if user.id == user_id:
users.remove(user)
break
except UnpopulatedPropertyError:
# If we aren't populated, do nothing to the user list
pass
logger.info(
"Removed user (id: {0}) from group (ID: {1})".format(user_id, group_item.id)
)
|
def remove_user(self, group_item, user_id):
self._remove_user(group_item, user_id)
try:
user_set = group_item.users
for user in user_set:
if user.id == user_id:
user_set.remove(user)
break
except UnpopulatedPropertyError:
# If we aren't populated, do nothing to the user list
pass
logger.info(
"Removed user (id: {0}) from group (ID: {1})".format(user_id, group_item.id)
)
|
https://github.com/tableau/server-client-python/issues/162
|
Traceback (most recent call last):
File "create_group.py", line 50, in <module>
main()
File "create_group.py", line 45, in main
server.groups.add_user(group, new_user._id)
File "/Library/Python/2.7/site-packages/tableauserverclient/server/endpoint/groups_endpoint.py", line 70, in add_user
user_set.add(new_user)
AttributeError: 'list' object has no attribute 'add'
|
AttributeError
|
def add_user(self, group_item, user_id):
new_user = self._add_user(group_item, user_id)
try:
users = group_item.users
users.append(new_user)
group_item._set_users(users)
except UnpopulatedPropertyError:
# If we aren't populated, do nothing to the user list
pass
logger.info(
"Added user (id: {0}) to group (ID: {1})".format(user_id, group_item.id)
)
|
def add_user(self, group_item, user_id):
new_user = self._add_user(group_item, user_id)
try:
user_set = group_item.users
user_set.add(new_user)
group_item._set_users(user_set)
except UnpopulatedPropertyError:
# If we aren't populated, do nothing to the user list
pass
logger.info(
"Added user (id: {0}) to group (ID: {1})".format(user_id, group_item.id)
)
|
https://github.com/tableau/server-client-python/issues/162
|
Traceback (most recent call last):
File "create_group.py", line 50, in <module>
main()
File "create_group.py", line 45, in main
server.groups.add_user(group, new_user._id)
File "/Library/Python/2.7/site-packages/tableauserverclient/server/endpoint/groups_endpoint.py", line 70, in add_user
user_set.add(new_user)
AttributeError: 'list' object has no attribute 'add'
|
AttributeError
|
def random_affine_generator(
batch_size: int,
height: int,
width: int,
degrees: torch.Tensor,
translate: Optional[torch.Tensor] = None,
scale: Optional[torch.Tensor] = None,
shear: Optional[torch.Tensor] = None,
same_on_batch: bool = False,
) -> Dict[str, torch.Tensor]:
r"""Get parameters for ``affine`` for a random affine transform.
Args:
batch_size (int): the tensor batch size.
height (int) : height of the image.
width (int): width of the image.
degrees (tensor): Range of degrees to select from like (min, max).
translate (tensor, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tensor, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (tensor, optional): Range of degrees to select from.
Shear is a 2x2 tensor, a x-axis shear in (shear[0][0], shear[0][1]) and y-axis shear in
(shear[1][0], shear[1][1]) will be applied. Will not apply shear by default.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
Returns:
params Dict[str, torch.Tensor]: parameters to be passed for transformation.
"""
_common_param_check(batch_size, same_on_batch)
_joint_range_check(degrees, "degrees")
assert (
isinstance(width, (int,))
and isinstance(height, (int,))
and width > 0
and height > 0
), f"`width` and `height` must be positive integers. Got {width}, {height}."
device, dtype = _extract_device_dtype([degrees, translate, scale, shear])
angle = _adapted_uniform((batch_size,), degrees[0], degrees[1], same_on_batch)
# compute tensor ranges
if scale is not None:
_joint_range_check(cast(torch.Tensor, scale[:2]), "scale")
_scale = (
_adapted_uniform((batch_size,), scale[0], scale[1], same_on_batch)
.unsqueeze(1)
.repeat(1, 2)
)
if len(scale) == 4:
_joint_range_check(cast(torch.Tensor, scale[2:]), "scale_y")
_scale[:, 1] = _adapted_uniform(
(batch_size,), scale[2], scale[3], same_on_batch
)
else:
_scale = torch.ones((batch_size, 2), device=device, dtype=dtype)
if translate is not None:
_joint_range_check(cast(torch.Tensor, translate), "translate")
max_dx: torch.Tensor = translate[0] * width
max_dy: torch.Tensor = translate[1] * height
translations = torch.stack(
[
_adapted_uniform((batch_size,), -max_dx, max_dx, same_on_batch),
_adapted_uniform((batch_size,), -max_dy, max_dy, same_on_batch),
],
dim=-1,
)
else:
translations = torch.zeros((batch_size, 2), device=device, dtype=dtype)
center: torch.Tensor = (
torch.tensor([width, height], device=device, dtype=dtype).view(1, 2) / 2.0 - 0.5
)
center = center.expand(batch_size, -1)
if shear is not None:
_joint_range_check(cast(torch.Tensor, shear)[0], "shear")
_joint_range_check(cast(torch.Tensor, shear)[1], "shear")
sx = _adapted_uniform((batch_size,), shear[0][0], shear[0][1], same_on_batch)
sy = _adapted_uniform((batch_size,), shear[1][0], shear[1][1], same_on_batch)
else:
sx = sy = torch.tensor([0] * batch_size)
return dict(
translations=translations,
center=center,
scale=_scale,
angle=angle,
sx=sx,
sy=sy,
)
|
def random_affine_generator(
batch_size: int,
height: int,
width: int,
degrees: torch.Tensor,
translate: Optional[torch.Tensor] = None,
scale: Optional[torch.Tensor] = None,
shear: Optional[torch.Tensor] = None,
same_on_batch: bool = False,
) -> Dict[str, torch.Tensor]:
r"""Get parameters for ``affine`` for a random affine transform.
Args:
batch_size (int): the tensor batch size.
height (int) : height of the image.
width (int): width of the image.
degrees (tensor): Range of degrees to select from like (min, max).
translate (tensor, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tensor, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (tensor, optional): Range of degrees to select from.
Shear is a 2x2 tensor, a x-axis shear in (shear[0][0], shear[0][1]) and y-axis shear in
(shear[1][0], shear[1][1]) will be applied. Will not apply shear by default.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
Returns:
params Dict[str, torch.Tensor]: parameters to be passed for transformation.
"""
_common_param_check(batch_size, same_on_batch)
_joint_range_check(degrees, "degrees")
assert (
isinstance(width, (int,))
and isinstance(height, (int,))
and width > 0
and height > 0
), f"`width` and `height` must be positive integers. Got {width}, {height}."
device, dtype = _extract_device_dtype([degrees, translate, scale, shear])
angle = _adapted_uniform((batch_size,), degrees[0], degrees[1], same_on_batch)
# compute tensor ranges
if scale is not None:
_joint_range_check(cast(torch.Tensor, scale[:2]), "scale")
_scale = (
_adapted_uniform((batch_size,), scale[0], scale[1], same_on_batch)
.unsqueeze(1)
.repeat(1, 2)
)
if len(_scale) == 4:
_joint_range_check(cast(torch.Tensor, scale[2:]), "scale_y")
_scale[:, 1] = _adapted_uniform(
(batch_size,), scale[2], scale[3], same_on_batch
)
else:
_scale = torch.ones((batch_size, 2), device=device, dtype=dtype)
if translate is not None:
_joint_range_check(cast(torch.Tensor, translate), "translate")
max_dx: torch.Tensor = translate[0] * width
max_dy: torch.Tensor = translate[1] * height
translations = torch.stack(
[
_adapted_uniform((batch_size,), -max_dx, max_dx, same_on_batch),
_adapted_uniform((batch_size,), -max_dy, max_dy, same_on_batch),
],
dim=-1,
)
else:
translations = torch.zeros((batch_size, 2), device=device, dtype=dtype)
center: torch.Tensor = (
torch.tensor([width, height], device=device, dtype=dtype).view(1, 2) / 2.0 - 0.5
)
center = center.expand(batch_size, -1)
if shear is not None:
_joint_range_check(cast(torch.Tensor, shear)[0], "shear")
_joint_range_check(cast(torch.Tensor, shear)[1], "shear")
sx = _adapted_uniform((batch_size,), shear[0][0], shear[0][1], same_on_batch)
sy = _adapted_uniform((batch_size,), shear[1][0], shear[1][1], same_on_batch)
else:
sx = sy = torch.tensor([0] * batch_size)
return dict(
translations=translations,
center=center,
scale=_scale,
angle=angle,
sx=sx,
sy=sy,
)
|
https://github.com/kornia/kornia/issues/785
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-30-cf0a915755df> in <module>
----> 1 out = transform_fcn(x[0])
~/default-env/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/default-env/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
115 def forward(self, input):
116 for module in self:
--> 117 input = module(input)
118 return input
119
~/default-env/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/default-env/lib/python3.8/site-packages/kornia/augmentation/base.py in forward(self, input, params, return_transform)
196 return_transform = self.return_transform
197 if params is None:
--> 198 params = self.__forward_parameters__(batch_shape, self.p, self.p_batch, self.same_on_batch)
199 if 'batch_prob' not in params:
200 params['batch_prob'] = torch.tensor([True] * batch_shape[0])
~/default-env/lib/python3.8/site-packages/kornia/augmentation/base.py in __forward_parameters__(self, batch_shape, p, p_batch, same_on_batch)
92 batch_prob = batch_prob.repeat(batch_shape[0])
93 # selectively param gen
---> 94 return self.__selective_param_gen__(batch_shape, batch_prob)
95
96 def apply_func(self, input: torch.Tensor, params: Dict[str, torch.Tensor],
~/default-env/lib/python3.8/site-packages/kornia/augmentation/base.py in __selective_param_gen__(self, batch_shape, to_apply)
63 def __selective_param_gen__(
64 self, batch_shape: torch.Size, to_apply: torch.Tensor) -> Dict[str, torch.Tensor]:
---> 65 _params = self.generate_parameters(
66 torch.Size((int(to_apply.sum().item()), *batch_shape[1:])))
67 if _params is None:
~/default-env/lib/python3.8/site-packages/kornia/augmentation/augmentation.py in generate_parameters(self, batch_shape)
483
484 def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:
--> 485 return rg.random_affine_generator(
486 batch_shape[0], batch_shape[-2], batch_shape[-1], self.degrees, self.translate, self.scale, self.shear,
487 self.same_on_batch)
~/default-env/lib/python3.8/site-packages/kornia/augmentation/random_generator/random_generator.py in random_affine_generator(batch_size, height, width, degrees, translate, scale, shear, same_on_batch)
173 _scale = _adapted_uniform((batch_size,), scale[0], scale[1], same_on_batch).unsqueeze(1).repeat(1, 2)
174 if len(_scale) == 4:
--> 175 _joint_range_check(cast(torch.Tensor, scale[2:]), "scale_y")
176 _scale[:, 1] = _adapted_uniform((batch_size,), scale[2], scale[3], same_on_batch)
177 else:
~/default-env/lib/python3.8/site-packages/kornia/augmentation/utils/param_validation.py in _joint_range_check(ranged_factor, name, bounds)
45 raise ValueError(f"{name}[0] should be smaller than {name}[1] got {ranged_factor}")
46 else:
---> 47 raise TypeError(
48 f"{name} should be a float number or a tuple with length 2 whose values between {bounds}."
49 f"Got {ranged_factor}.")
TypeError: scale_y should be a float number or a tuple with length 2 whose values between (-inf, inf).Got tensor([]).
|
TypeError
|
def _fetchable_partitions(self):
fetchable = self._subscriptions.fetchable_partitions()
# do not fetch a partition if we have a pending fetch response to process
current = self._next_partition_records
pending = copy.copy(self._completed_fetches)
if current:
fetchable.discard(current.topic_partition)
for fetch in pending:
fetchable.discard(fetch.topic_partition)
return fetchable
|
def _fetchable_partitions(self):
fetchable = self._subscriptions.fetchable_partitions()
if self._next_partition_records:
fetchable.discard(self._next_partition_records.topic_partition)
for fetch in self._completed_fetches:
fetchable.discard(fetch.topic_partition)
return fetchable
|
https://github.com/dpkp/kafka-python/issues/1399
|
Traceback (most recent call last):
[...]
File "[...]/kafka/consumer/group.py", line 603, in poll
records = self._poll_once(remaining, max_records)
File "[...]/kafka/consumer/group.py", line 643, in _poll_once
self._fetcher.send_fetches()
File "[...]/kafka/consumer/fetcher.py", line 131, in send_fetches
for node_id, request in six.iteritems(self._create_fetch_requests()):
File "[...]/kafka/consumer/fetcher.py", line 652, in _create_fetch_requests
for partition in self._fetchable_partitions():
File "[...]/kafka/consumer/fetcher.py", line 636, in _fetchable_partitions
for fetch in self._completed_fetches:
RuntimeError: deque mutated during iteration
|
RuntimeError
|
def _poll(self, timeout, sleep=True):
# select on reads across all connected sockets, blocking up to timeout
assert self.in_flight_request_count() > 0 or self._connecting or sleep
responses = []
processed = set()
start_select = time.time()
ready = self._selector.select(timeout)
end_select = time.time()
if self._sensors:
self._sensors.select_time.record((end_select - start_select) * 1000000000)
for key, events in ready:
if key.fileobj is self._wake_r:
self._clear_wake_fd()
continue
elif not (events & selectors.EVENT_READ):
continue
conn = key.data
processed.add(conn)
if not conn.in_flight_requests:
# if we got an EVENT_READ but there were no in-flight requests, one of
# two things has happened:
#
# 1. The remote end closed the connection (because it died, or because
# a firewall timed out, or whatever)
# 2. The protocol is out of sync.
#
# either way, we can no longer safely use this connection
#
# Do a 1-byte read to check protocol didnt get out of sync, and then close the conn
try:
unexpected_data = key.fileobj.recv(1)
if (
unexpected_data
): # anything other than a 0-byte read means protocol issues
log.warning("Protocol out of sync on %r, closing", conn)
except socket.error:
pass
conn.close()
continue
# Accumulate as many responses as the connection has pending
while conn.in_flight_requests:
response = conn.recv() # Note: conn.recv runs callbacks / errbacks
# Incomplete responses are buffered internally
# while conn.in_flight_requests retains the request
if not response:
break
responses.append(response)
# Check for additional pending SSL bytes
if self.config["security_protocol"] in ("SSL", "SASL_SSL"):
# TODO: optimize
for conn in self._conns.values():
if conn not in processed and conn.connected() and conn._sock.pending():
response = conn.recv()
if response:
responses.append(response)
if self._sensors:
self._sensors.io_time.record((time.time() - end_select) * 1000000000)
return responses
|
def _poll(self, timeout, sleep=True):
# select on reads across all connected sockets, blocking up to timeout
assert self.in_flight_request_count() > 0 or self._connecting or sleep
responses = []
processed = set()
start_select = time.time()
ready = self._selector.select(timeout)
end_select = time.time()
if self._sensors:
self._sensors.select_time.record((end_select - start_select) * 1000000000)
for key, events in ready:
if key.fileobj is self._wake_r:
self._clear_wake_fd()
continue
elif not (events & selectors.EVENT_READ):
continue
conn = key.data
processed.add(conn)
if not conn.in_flight_requests:
# if we got an EVENT_READ but there were no in-flight requests, one of
# two things has happened:
#
# 1. The remote end closed the connection (because it died, or because
# a firewall timed out, or whatever)
# 2. The protocol is out of sync.
#
# either way, we can no longer safely use this connection
#
# Do a 1-byte read to clear the READ flag, and then close the conn
unexpected_data = key.fileobj.recv(1)
if (
unexpected_data
): # anything other than a 0-byte read means protocol issues
log.warning("Protocol out of sync on %r, closing", conn)
conn.close()
continue
# Accumulate as many responses as the connection has pending
while conn.in_flight_requests:
response = conn.recv() # Note: conn.recv runs callbacks / errbacks
# Incomplete responses are buffered internally
# while conn.in_flight_requests retains the request
if not response:
break
responses.append(response)
# Check for additional pending SSL bytes
if self.config["security_protocol"] in ("SSL", "SASL_SSL"):
# TODO: optimize
for conn in self._conns.values():
if conn not in processed and conn.connected() and conn._sock.pending():
response = conn.recv()
if response:
responses.append(response)
if self._sensors:
self._sensors.io_time.record((time.time() - end_select) * 1000000000)
return responses
|
https://github.com/dpkp/kafka-python/issues/791
|
Traceback (most recent call last):
File "python/server.py", line 84, in init
File "/usr/local/lib/python2.7/site-packages/six.py", line 558, in next
return type(self).__next__(self)
File "/usr/local/lib/python2.7/site-packages/kafka/consumer/group.py", line 863, in __next__
File "/usr/local/lib/python2.7/site-packages/kafka/consumer/group.py", line 803, in _message_generator
return next(self._iterator)
self._client.poll(timeout_ms=poll_ms, sleep=True)
File "/usr/local/lib/python2.7/site-packages/kafka/client_async.py", line 439, in poll
unexpected_data = key.fileobj.recv(1)
File "/usr/local/lib/python2.7/site-packages/kafka/client_async.py", line 474, in _poll
responses.extend(self._poll(timeout, sleep=sleep))
socket.error: [Errno 104] Connection reset by peer
Exception AttributeError: "'NoneType' object has no attribute 'info'" in <bound method KafkaProducer.__del__ of <kafka.producer.kafka.KafkaProducer object at 0x7f890ff46950>> ignored
|
socket.error
|
def get_commands_from_metadata(image_layer):
"""Given the image layer object, get the list of command objects that
created the layer. Return an empty list of we can't do that"""
# set up notice origin for the layer
origin_layer = "Layer {}".format(image_layer.layer_index)
# check if there is a key containing the script that created the layer
if image_layer.created_by:
command_line = fltr.get_run_command(image_layer.created_by)
if command_line:
command_list, msg = fltr.filter_install_commands(
general.clean_command(command_line)
)
if msg:
image_layer.origins.add_notice_to_origins(
origin_layer, Notice(msg, "warning")
)
return command_list
image_layer.origins.add_notice_to_origins(
origin_layer, Notice(errors.no_layer_created_by, "warning")
)
return []
|
def get_commands_from_metadata(image_layer):
"""Given the image layer object, get the list of command objects that
created the layer. Return an empty list of we can't do that"""
# set up notice origin for the layer
origin_layer = "Layer {}".format(image_layer.layer_index)
# check if there is a key containing the script that created the layer
if image_layer.created_by:
command_line = fltr.get_run_command(image_layer.created_by)
if command_line:
command_list, msg = fltr.filter_install_commands(
general.clean_command(command_line)
)
if msg:
image_layer.origins.add_notice_to_origins(
origin_layer, Notice(msg, "warning")
)
return command_list
image_layer.origins.add_notice_to_origins(
origin_layer,
Notice(errors.unknown_content.format(files=command_line), "warning"),
)
return []
|
https://github.com/tern-tools/tern/issues/838
|
Traceback (most recent call last):
File "/usr/local/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 192, in main
do_main(args)
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 95, in do_main
crun.execute_image(args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/run.py", line 85, in execute_image
cimage.analyze(full_image, args.redo, args.driver, args.extend)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/image.py", line 74, in analyze
default_analyze(image_obj, redo, driver)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/image.py", line 64, in default_analyze
image_obj, shell, master_list, redo, driver)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/multi_layer.py", line 99, in analyze_subsequent_layers
fresh_analysis(image_obj, curr_layer, shell, driver, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/multi_layer.py", line 52, in fresh_analysis
image_obj.layers[curr_layer])
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/default_common.py", line 119, in get_commands_from_metadata
files=command_line), 'warning'))
UnboundLocalError: local variable 'command_line' referenced before assignment
|
UnboundLocalError
|
def extract_image(args):
"""The image can either be downloaded from a container registry or provided
as an image tarball. Extract the image into a working directory accordingly
Return an image name and tag and an image digest if it exists"""
if args.docker_image:
# extract the docker image
image_attrs = docker_api.dump_docker_image(args.docker_image)
if image_attrs:
# repo name and digest is preferred, but if that doesn't exist
# the repo name and tag will do. If neither exist use repo Id.
if image_attrs["Id"]:
image_string = image_attrs["Id"]
if image_attrs["RepoTags"]:
image_string = image_attrs["RepoTags"][0]
if image_attrs["RepoDigests"]:
image_string = image_attrs["RepoDigests"][0]
return image_string
logger.critical("Cannot extract Docker image")
if args.raw_image:
# for now we assume that the raw image tarball is always
# the product of "docker save", hence it will be in
# the docker style layout
if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
return args.raw_image
logger.critical("Cannot extract raw image")
return None
|
def extract_image(args):
"""The image can either be downloaded from a container registry or provided
as an image tarball. Extract the image into a working directory accordingly
Return an image name and tag and an image digest if it exists"""
if args.docker_image:
# extract the docker image
image_attrs = docker_api.dump_docker_image(args.docker_image)
if image_attrs:
# repo name and digest is preferred, but if that doesn't exist
# the repo name and tag will do
if image_attrs["RepoDigests"]:
image_string = image_attrs["RepoDigests"][0]
if image_attrs["RepoTags"]:
image_string = image_attrs["RepoTags"][0]
return image_string
logger.critical("Cannot extract Docker image")
if args.raw_image:
# for now we assume that the raw image tarball is always
# the product of "docker save", hence it will be in
# the docker style layout
if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
return args.raw_image
logger.critical("Cannot extract raw image")
return None
|
https://github.com/tern-tools/tern/issues/874
|
{ memoryInfo:
'{"rss":39120896,"heapTotal":9961472,"heapUsed":5357888,"external":8962}',
errorCode: 1,
errorSignal: null,
errorMessage:
'Traceback (most recent call last):\n File "/usr/local/bin/tern", line 8, in <module>\n sys.exit(main())\n File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 192, in main\n do_main(args)\n File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 95, in do_main\n crun.execute_image(args)\n File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/run.py", line 73, in execute_image\n image_string = extract_image(args)\n File "/usr/local/lib/python3.7/dist-packages/tern/analyze/default/container/run.py", line 40, in extract_image\n return image_string\nUnboundLocalError: local variable \'image_string\' referenced before assignment\n',
outputString: '',
exception: undefined }
|
exception
|
def execute_image(args):
"""Execution path for container images"""
logger.debug("Starting analysis...")
image_string = extract_image(args)
# If the image has been extracted, load the metadata
if image_string:
full_image = cimage.load_full_image(image_string)
# check if the image was loaded successfully
if full_image.origins.is_empty():
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# Set up for analysis
setup(full_image)
# analyze image
cimage.analyze(full_image, args.redo, args.driver, args.extend)
# report out
report.report_out(args, full_image)
# clean up
teardown(full_image)
else:
# we cannot load the full image
logger.error("Cannot retrieve full image metadata")
if not args.keep_wd:
prep.clean_image_tars(full_image)
|
def execute_image(args):
"""Execution path for container images"""
logger.debug("Starting analysis...")
image_string = extract_image(args)
# If the image has been extracted, load the metadata
if image_string:
full_image = cimage.load_full_image(image_string)
# check if the image was loaded successfully
if full_image.origins.is_empty():
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# Set up for analysis
setup(full_image)
# analyze image
cimage.analyze(full_image, args.redo, args.driver, args.extend)
# report out
report.report_out(args, full_image)
# clean up
teardown(full_image)
else:
# we cannot load the full image
logger.error("Cannot retrieve full image metadata")
# cleanup
if not args.keep_wd:
prep.clean_image_tars(full_image)
|
https://github.com/tern-tools/tern/issues/828
|
$ tern report -o output.txt -i fossid-nginx:2020.2
2020-11-11 08:35:17,611 - DEBUG - __main__ - Starting...
2020-11-11 08:35:17,611 - DEBUG - prep - Setting up...
2020-11-11 08:35:18,060 - DEBUG - run - Starting analysis...
2020-11-11 08:35:18,154 - DEBUG - docker_api - Checking if image "fossid-nginx:2020.2" is available on disk...
2020-11-11 08:35:18,162 - DEBUG - docker_api - Attempting to pull image "fossid-nginx:2020.2"
2020-11-11 08:35:19,691 - WARNING - docker_api - No such image: "fossid-nginx:2020.2"
Traceback (most recent call last):
File "/home/nisha/terndev/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/nisha/terndev/tern/tern/__main__.py", line 192, in main
do_main(args)
File "/home/nisha/terndev/tern/tern/__main__.py", line 95, in do_main
crun.execute_image(args)
File "/home/nisha/terndev/tern/tern/analyze/default/container/run.py", line 73, in execute_image
image_string = extract_image(args)
File "/home/nisha/terndev/tern/tern/analyze/default/container/run.py", line 32, in extract_image
image_attrs = docker_api.dump_docker_image(args.docker_image)
File "/home/nisha/terndev/tern/tern/load/docker_api.py", line 200, in dump_docker_image
if extract_image(image):
File "/home/nisha/terndev/tern/tern/load/docker_api.py", line 74, in extract_image
result = image_obj.save(chunk_size=2097152, named=True)
AttributeError: 'NoneType' object has no attribute 'save'
|
AttributeError
|
def pull_image(image_tag_string, client):
"""Pull an image from a container registry using Docker
Note: this function uses the Docker API to pull from a container
registry and is not responsible for configuring what registry to use"""
logger.debug('Attempting to pull image "%s"', image_tag_string)
try:
image = client.images.pull(image_tag_string)
logger.debug('Image "%s" downloaded', image_tag_string)
return image
except (docker.errors.ImageNotFound, docker.errors.NotFound):
logger.error('No such image: "%s"', image_tag_string)
return None
|
def pull_image(image_tag_string, client):
"""Pull an image from a container registry using Docker
Note: this function uses the Docker API to pull from a container
registry and is not responsible for configuring what registry to use"""
logger.debug('Attempting to pull image "%s"', image_tag_string)
try:
image = client.images.pull(image_tag_string)
logger.debug('Image "%s" downloaded', image_tag_string)
return image
except (docker.errors.ImageNotFound, docker.errors.NotFound):
logger.warning('No such image: "%s"', image_tag_string)
return None
|
https://github.com/tern-tools/tern/issues/828
|
$ tern report -o output.txt -i fossid-nginx:2020.2
2020-11-11 08:35:17,611 - DEBUG - __main__ - Starting...
2020-11-11 08:35:17,611 - DEBUG - prep - Setting up...
2020-11-11 08:35:18,060 - DEBUG - run - Starting analysis...
2020-11-11 08:35:18,154 - DEBUG - docker_api - Checking if image "fossid-nginx:2020.2" is available on disk...
2020-11-11 08:35:18,162 - DEBUG - docker_api - Attempting to pull image "fossid-nginx:2020.2"
2020-11-11 08:35:19,691 - WARNING - docker_api - No such image: "fossid-nginx:2020.2"
Traceback (most recent call last):
File "/home/nisha/terndev/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/nisha/terndev/tern/tern/__main__.py", line 192, in main
do_main(args)
File "/home/nisha/terndev/tern/tern/__main__.py", line 95, in do_main
crun.execute_image(args)
File "/home/nisha/terndev/tern/tern/analyze/default/container/run.py", line 73, in execute_image
image_string = extract_image(args)
File "/home/nisha/terndev/tern/tern/analyze/default/container/run.py", line 32, in extract_image
image_attrs = docker_api.dump_docker_image(args.docker_image)
File "/home/nisha/terndev/tern/tern/load/docker_api.py", line 200, in dump_docker_image
if extract_image(image):
File "/home/nisha/terndev/tern/tern/load/docker_api.py", line 74, in extract_image
result = image_obj.save(chunk_size=2097152, named=True)
AttributeError: 'NoneType' object has no attribute 'save'
|
AttributeError
|
def dump_docker_image(image_tag):
"""Given an image and tag or image and digest, use the Docker API to get
a container image representation into the working directory"""
image_metadata = None
# open up a client first
# if this fails we cannot proceed further so we will exit
client = check_docker_setup()
image = get_docker_image(image_tag, client)
if image:
if extract_image(image):
image_metadata = image.attrs
# now the client can be closed
close_client(client)
return image_metadata
|
def dump_docker_image(image_tag):
"""Given an image and tag or image and digest, use the Docker API to get
a container image representation into the working directory"""
image_metadata = None
# open up a client first
# if this fails we cannot proceed further so we will exit
client = check_docker_setup()
image = get_docker_image(image_tag, client)
# this should return whether the operation succeeded or not
if extract_image(image):
image_metadata = image.attrs
# now the client can be closed
close_client(client)
return image_metadata
|
https://github.com/tern-tools/tern/issues/828
|
$ tern report -o output.txt -i fossid-nginx:2020.2
2020-11-11 08:35:17,611 - DEBUG - __main__ - Starting...
2020-11-11 08:35:17,611 - DEBUG - prep - Setting up...
2020-11-11 08:35:18,060 - DEBUG - run - Starting analysis...
2020-11-11 08:35:18,154 - DEBUG - docker_api - Checking if image "fossid-nginx:2020.2" is available on disk...
2020-11-11 08:35:18,162 - DEBUG - docker_api - Attempting to pull image "fossid-nginx:2020.2"
2020-11-11 08:35:19,691 - WARNING - docker_api - No such image: "fossid-nginx:2020.2"
Traceback (most recent call last):
File "/home/nisha/terndev/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/nisha/terndev/tern/tern/__main__.py", line 192, in main
do_main(args)
File "/home/nisha/terndev/tern/tern/__main__.py", line 95, in do_main
crun.execute_image(args)
File "/home/nisha/terndev/tern/tern/analyze/default/container/run.py", line 73, in execute_image
image_string = extract_image(args)
File "/home/nisha/terndev/tern/tern/analyze/default/container/run.py", line 32, in extract_image
image_attrs = docker_api.dump_docker_image(args.docker_image)
File "/home/nisha/terndev/tern/tern/load/docker_api.py", line 200, in dump_docker_image
if extract_image(image):
File "/home/nisha/terndev/tern/tern/load/docker_api.py", line 74, in extract_image
result = image_obj.save(chunk_size=2097152, named=True)
AttributeError: 'NoneType' object has no attribute 'save'
|
AttributeError
|
def load_full_image(image_tag_string):
"""Create image object from image name and tag and return the object"""
test_image = DockerImage(image_tag_string)
failure_origin = formats.image_load_failure.format(testimage=test_image.repotag)
try:
test_image.load_image()
except (
NameError,
subprocess.CalledProcessError,
IOError,
docker.errors.APIError,
ValueError,
EOFError,
) as error:
logger.warning("Error in loading image: %s", str(error))
test_image.origins.add_notice_to_origins(
failure_origin, Notice(str(error), "error")
)
return test_image
|
def load_full_image(image_tag_string, digest_string):
"""Create image object from image name and tag and return the object"""
test_image = DockerImage(image_tag_string, digest_string)
failure_origin = formats.image_load_failure.format(testimage=test_image.repotag)
try:
test_image.load_image()
except (
NameError,
subprocess.CalledProcessError,
IOError,
docker.errors.APIError,
ValueError,
EOFError,
) as error:
logger.warning("Error in loading image: %s", str(error))
test_image.origins.add_notice_to_origins(
failure_origin, Notice(str(error), "error")
)
return test_image
|
https://github.com/tern-tools/tern/issues/797
|
tevoinea@tevoinea-Virtual-Machine:~/tern$ docker run --privileged --device /dev/fuse -v /var/run/docker.sock:/var/run/docker.sock --rm ternd report -f json -i ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40 > output.json
2020-09-03 21:00:57,783 - DEBUG - __main__ - Starting...
2020-09-03 21:00:57,784 - DEBUG - run - Setting up...
2020-09-03 21:00:57,799 - DEBUG - container - Checking if image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" is available on disk...
2020-09-03 21:00:57,803 - DEBUG - container - Image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" found
2020-09-03 21:00:58,917 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp.tar
2020-09-03 21:00:58,962 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp.tar -C /root/.tern/temp
2020-09-03 21:00:59,086 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar
2020-09-03 21:00:59,099 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar -C /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:00:59,246 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:00:59,610 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:01:07,677 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar
2020-09-03 21:01:07,686 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar -C /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,698 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,702 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,728 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar
2020-09-03 21:01:07,732 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar -C /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,738 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,741 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,766 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar
2020-09-03 21:01:07,769 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar -C /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,772 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,776 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,789 - DEBUG - common - Reading files in filesystem...
2020-09-03 21:01:08,317 - DEBUG - rootfs - Running command: mount -o bind /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents /root/.tern/temp/mergedir
2020-09-03 21:01:08,326 - DEBUG - rootfs - Running command: mount -t proc /proc /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,332 - DEBUG - rootfs - Running command: mount -o bind /sys /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,335 - DEBUG - rootfs - Running command: mount -o bind /dev /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,342 - DEBUG - rootfs - Running command: cp /etc/resolv.conf /root/.tern/temp/mergedir/etc/resolv.conf
2020-09-03 21:01:08,348 - DEBUG - rootfs - Running command: unshare -pf --mount-proc=/root/.tern/temp/mergedir/proc chroot /root/.tern/temp/mergedir /bin/sh -c dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'
2020-09-03 21:01:08,364 - ERROR - rootfs - Command failed. chroot: failed to run command ‘/bin/sh’: Exec format error
2020-09-03 21:01:08,364 - WARNING - rootfs - Error executing command in chroot
2020-09-03 21:01:08,365 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,371 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,374 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,376 - DEBUG - rootfs - Running command: umount -rl /root/.tern/temp/mergedir
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 237, in run_chroot_command
shell, '-c', command_string)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 77, in root_command
1, cmd=full_cmd, output=None, stderr=error.decode())
subprocess.CalledProcessError: Command '['unshare', '-pf', '--mount-proc=/root/.tern/temp/mergedir/proc', 'chroot', '/root/.tern/temp/mergedir', '/bin/sh', '-c', "dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'"]' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 204, in main
do_main(args)
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 105, in do_main
run.execute_docker_image(args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 91, in execute_docker_image
analyze(full_image, args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 70, in analyze
args.driver)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 40, in analyze_docker_image
shell = analyze_first_layer(image_obj, master_list, redo)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 90, in analyze_first_layer
execute_base_layer(image_obj.layers[0], binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 102, in execute_base_layer
common.add_base_packages(base_layer, binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 341, in add_base_packages
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 239, in collate_list_metadata
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 240, in get_pkg_attr_list
snippet_list, shell, package=package_name)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 197, in invoke_in_rootfs
result = rootfs.run_chroot_command(full_cmd, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 242, in run_chroot_command
1, cmd=command_string, output=error.output.decode('utf-8'))
AttributeError: 'NoneType' object has no attribute 'decode'
|
subprocess.CalledProcessError
|
def extract_image(args):
"""The image can either be downloaded from a container registry or provided
as an image tarball. Extract the image into a working directory accordingly
Return an image name and tag and an image digest if it exists"""
if args.docker_image:
# extract the docker image
image_attrs = docker_api.dump_docker_image(args.docker_image)
if image_attrs:
# repo name and digest is preferred, but if that doesn't exist
# the repo name and tag will do
if image_attrs["RepoDigests"]:
image_string = image_attrs["RepoDigests"][0]
if image_attrs["RepoTags"]:
image_string = image_attrs["RepoTags"][0]
return image_string
logger.critical("Cannot extract Docker image")
if args.raw_image:
# for now we assume that the raw image tarball is always
# the product of "docker save", hence it will be in
# the docker style layout
if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
return args.raw_image
logger.critical("Cannot extract raw image")
return None
|
def extract_image(args):
"""The image can either be downloaded from a container registry or provided
as an image tarball. Extract the image into a working directory accordingly
Return an image name and tag and an image digest if it exists"""
if args.docker_image:
# extract the docker image
image_attrs = docker_api.dump_docker_image(args.docker_image)
if image_attrs:
if image_attrs["RepoTags"]:
image_string = image_attrs["RepoTags"][0]
if image_attrs["RepoDigests"]:
image_digest = image_attrs["RepoDigests"][0]
return image_string, image_digest
logger.critical("Cannot extract Docker image")
if args.raw_image:
# for now we assume that the raw image tarball is always
# the product of "docker save", hence it will be in
# the docker style layout
if rootfs.extract_tarfile(args.raw_image, rootfs.get_working_dir()):
return args.raw_image, None
logger.critical("Cannot extract raw image")
return None, None
|
https://github.com/tern-tools/tern/issues/797
|
tevoinea@tevoinea-Virtual-Machine:~/tern$ docker run --privileged --device /dev/fuse -v /var/run/docker.sock:/var/run/docker.sock --rm ternd report -f json -i ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40 > output.json
2020-09-03 21:00:57,783 - DEBUG - __main__ - Starting...
2020-09-03 21:00:57,784 - DEBUG - run - Setting up...
2020-09-03 21:00:57,799 - DEBUG - container - Checking if image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" is available on disk...
2020-09-03 21:00:57,803 - DEBUG - container - Image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" found
2020-09-03 21:00:58,917 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp.tar
2020-09-03 21:00:58,962 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp.tar -C /root/.tern/temp
2020-09-03 21:00:59,086 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar
2020-09-03 21:00:59,099 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar -C /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:00:59,246 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:00:59,610 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:01:07,677 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar
2020-09-03 21:01:07,686 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar -C /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,698 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,702 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,728 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar
2020-09-03 21:01:07,732 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar -C /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,738 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,741 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,766 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar
2020-09-03 21:01:07,769 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar -C /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,772 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,776 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,789 - DEBUG - common - Reading files in filesystem...
2020-09-03 21:01:08,317 - DEBUG - rootfs - Running command: mount -o bind /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents /root/.tern/temp/mergedir
2020-09-03 21:01:08,326 - DEBUG - rootfs - Running command: mount -t proc /proc /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,332 - DEBUG - rootfs - Running command: mount -o bind /sys /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,335 - DEBUG - rootfs - Running command: mount -o bind /dev /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,342 - DEBUG - rootfs - Running command: cp /etc/resolv.conf /root/.tern/temp/mergedir/etc/resolv.conf
2020-09-03 21:01:08,348 - DEBUG - rootfs - Running command: unshare -pf --mount-proc=/root/.tern/temp/mergedir/proc chroot /root/.tern/temp/mergedir /bin/sh -c dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'
2020-09-03 21:01:08,364 - ERROR - rootfs - Command failed. chroot: failed to run command ‘/bin/sh’: Exec format error
2020-09-03 21:01:08,364 - WARNING - rootfs - Error executing command in chroot
2020-09-03 21:01:08,365 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,371 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,374 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,376 - DEBUG - rootfs - Running command: umount -rl /root/.tern/temp/mergedir
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 237, in run_chroot_command
shell, '-c', command_string)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 77, in root_command
1, cmd=full_cmd, output=None, stderr=error.decode())
subprocess.CalledProcessError: Command '['unshare', '-pf', '--mount-proc=/root/.tern/temp/mergedir/proc', 'chroot', '/root/.tern/temp/mergedir', '/bin/sh', '-c', "dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'"]' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 204, in main
do_main(args)
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 105, in do_main
run.execute_docker_image(args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 91, in execute_docker_image
analyze(full_image, args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 70, in analyze
args.driver)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 40, in analyze_docker_image
shell = analyze_first_layer(image_obj, master_list, redo)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 90, in analyze_first_layer
execute_base_layer(image_obj.layers[0], binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 102, in execute_base_layer
common.add_base_packages(base_layer, binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 341, in add_base_packages
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 239, in collate_list_metadata
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 240, in get_pkg_attr_list
snippet_list, shell, package=package_name)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 197, in invoke_in_rootfs
result = rootfs.run_chroot_command(full_cmd, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 242, in run_chroot_command
1, cmd=command_string, output=error.output.decode('utf-8'))
AttributeError: 'NoneType' object has no attribute 'decode'
|
subprocess.CalledProcessError
|
def execute_image(args):
"""Execution path for container images"""
logger.debug("Starting analysis...")
image_string = extract_image(args)
# If the image has been extracted, load the metadata
if image_string:
full_image = cimage.load_full_image(image_string)
# check if the image was loaded successfully
if full_image.origins.is_empty():
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# Set up for analysis
setup(full_image)
# analyze image
cimage.analyze(full_image, args.redo, args.driver, args.extend)
# report out
report.report_out(args, full_image)
# clean up
teardown(full_image)
else:
# we cannot load the full image
logger.error("Cannot retrieve full image metadata")
# cleanup
if not args.keep_wd:
prep.clean_image_tars(full_image)
|
def execute_image(args):
"""Execution path for container images"""
logger.debug("Starting analysis...")
image_string, image_digest = extract_image(args)
# If the image has been extracted, load the metadata
if image_string:
full_image = cimage.load_full_image(image_string, image_digest)
# check if the image was loaded successfully
if full_image.origins.is_empty():
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# Set up for analysis
setup(full_image)
# analyze image
cimage.analyze(full_image, args.redo, args.driver, args.extend)
# report out
report.report_out(args, full_image)
# clean up
teardown(full_image)
else:
# we cannot load the full image
logger.error("Cannot retrieve full image metadata")
# cleanup
if not args.keep_wd:
prep.clean_image_tars(full_image)
|
https://github.com/tern-tools/tern/issues/797
|
tevoinea@tevoinea-Virtual-Machine:~/tern$ docker run --privileged --device /dev/fuse -v /var/run/docker.sock:/var/run/docker.sock --rm ternd report -f json -i ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40 > output.json
2020-09-03 21:00:57,783 - DEBUG - __main__ - Starting...
2020-09-03 21:00:57,784 - DEBUG - run - Setting up...
2020-09-03 21:00:57,799 - DEBUG - container - Checking if image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" is available on disk...
2020-09-03 21:00:57,803 - DEBUG - container - Image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" found
2020-09-03 21:00:58,917 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp.tar
2020-09-03 21:00:58,962 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp.tar -C /root/.tern/temp
2020-09-03 21:00:59,086 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar
2020-09-03 21:00:59,099 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar -C /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:00:59,246 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:00:59,610 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:01:07,677 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar
2020-09-03 21:01:07,686 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar -C /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,698 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,702 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,728 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar
2020-09-03 21:01:07,732 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar -C /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,738 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,741 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,766 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar
2020-09-03 21:01:07,769 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar -C /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,772 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,776 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,789 - DEBUG - common - Reading files in filesystem...
2020-09-03 21:01:08,317 - DEBUG - rootfs - Running command: mount -o bind /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents /root/.tern/temp/mergedir
2020-09-03 21:01:08,326 - DEBUG - rootfs - Running command: mount -t proc /proc /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,332 - DEBUG - rootfs - Running command: mount -o bind /sys /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,335 - DEBUG - rootfs - Running command: mount -o bind /dev /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,342 - DEBUG - rootfs - Running command: cp /etc/resolv.conf /root/.tern/temp/mergedir/etc/resolv.conf
2020-09-03 21:01:08,348 - DEBUG - rootfs - Running command: unshare -pf --mount-proc=/root/.tern/temp/mergedir/proc chroot /root/.tern/temp/mergedir /bin/sh -c dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'
2020-09-03 21:01:08,364 - ERROR - rootfs - Command failed. chroot: failed to run command ‘/bin/sh’: Exec format error
2020-09-03 21:01:08,364 - WARNING - rootfs - Error executing command in chroot
2020-09-03 21:01:08,365 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,371 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,374 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,376 - DEBUG - rootfs - Running command: umount -rl /root/.tern/temp/mergedir
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 237, in run_chroot_command
shell, '-c', command_string)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 77, in root_command
1, cmd=full_cmd, output=None, stderr=error.decode())
subprocess.CalledProcessError: Command '['unshare', '-pf', '--mount-proc=/root/.tern/temp/mergedir/proc', 'chroot', '/root/.tern/temp/mergedir', '/bin/sh', '-c', "dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'"]' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 204, in main
do_main(args)
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 105, in do_main
run.execute_docker_image(args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 91, in execute_docker_image
analyze(full_image, args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 70, in analyze
args.driver)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 40, in analyze_docker_image
shell = analyze_first_layer(image_obj, master_list, redo)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 90, in analyze_first_layer
execute_base_layer(image_obj.layers[0], binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 102, in execute_base_layer
common.add_base_packages(base_layer, binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 341, in add_base_packages
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 239, in collate_list_metadata
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 240, in get_pkg_attr_list
snippet_list, shell, package=package_name)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 197, in invoke_in_rootfs
result = rootfs.run_chroot_command(full_cmd, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 242, in run_chroot_command
1, cmd=command_string, output=error.output.decode('utf-8'))
AttributeError: 'NoneType' object has no attribute 'decode'
|
subprocess.CalledProcessError
|
def full_image_analysis(dfile, redo, driver, keep, extension):
"""This subroutine is executed when a Dockerfile is successfully built"""
image_list = []
# attempt to load the built image metadata
full_image = cimage.load_full_image(dfile)
if full_image.origins.is_empty():
# Add an image origin here
full_image.origins.add_notice_origin(
formats.dockerfile_image.format(dockerfile=dfile)
)
image_list = analyze_full_image(full_image, redo, driver, extension)
else:
# we cannot analyze the full image, but maybe we can
# analyze the base image
logger.error("Cannot retrieve full image metadata")
# cleanup for full images
if not keep:
prep.clean_image_tars(full_image)
return image_list
|
def full_image_analysis(dfile, redo, driver, keep, extension):
"""This subroutine is executed when a Dockerfile is successfully built"""
image_list = []
# attempt to load the built image metadata
full_image = cimage.load_full_image(dfile, "")
if full_image.origins.is_empty():
# Add an image origin here
full_image.origins.add_notice_origin(
formats.dockerfile_image.format(dockerfile=dfile)
)
image_list = analyze_full_image(full_image, redo, driver, extension)
else:
# we cannot analyze the full image, but maybe we can
# analyze the base image
logger.error("Cannot retrieve full image metadata")
# cleanup for full images
if not keep:
prep.clean_image_tars(full_image)
return image_list
|
https://github.com/tern-tools/tern/issues/797
|
tevoinea@tevoinea-Virtual-Machine:~/tern$ docker run --privileged --device /dev/fuse -v /var/run/docker.sock:/var/run/docker.sock --rm ternd report -f json -i ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40 > output.json
2020-09-03 21:00:57,783 - DEBUG - __main__ - Starting...
2020-09-03 21:00:57,784 - DEBUG - run - Setting up...
2020-09-03 21:00:57,799 - DEBUG - container - Checking if image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" is available on disk...
2020-09-03 21:00:57,803 - DEBUG - container - Image "ubuntu@sha256:2b90cad5ded7946db07a28252618b9c8b7f4f103fc39266bcc795719d1362d40" found
2020-09-03 21:00:58,917 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp.tar
2020-09-03 21:00:58,962 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp.tar -C /root/.tern/temp
2020-09-03 21:00:59,086 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar
2020-09-03 21:00:59,099 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/layer.tar -C /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:00:59,246 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:00:59,610 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents
2020-09-03 21:01:07,677 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar
2020-09-03 21:01:07,686 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/layer.tar -C /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,698 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,702 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/49c388ec605a3411c4d5b412e6fb46e734fe3f2c6006dda31983f58aafe05146/contents
2020-09-03 21:01:07,728 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar
2020-09-03 21:01:07,732 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/layer.tar -C /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,738 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,741 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/e44e131d684525597610af42c8db14102ad22ba156ad6ec3f5df068cea515759/contents
2020-09-03 21:01:07,766 - DEBUG - rootfs - Running command: tar -tf /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar
2020-09-03 21:01:07,769 - DEBUG - rootfs - Running command: tar -x --exclude=.wh.* -f /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/layer.tar -C /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,772 - DEBUG - rootfs - Running command: chmod +x /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh
2020-09-03 21:01:07,776 - DEBUG - rootfs - Running command: /usr/local/lib/python3.7/dist-packages/tern/tools/fs_hash.sh /root/.tern/temp/3d3abe9e801a99cc9ca3bb08981d58ccfab161d9b1ba8bcf79cce51c7baf9787/contents
2020-09-03 21:01:07,789 - DEBUG - common - Reading files in filesystem...
2020-09-03 21:01:08,317 - DEBUG - rootfs - Running command: mount -o bind /root/.tern/temp/9db0dcee2c01ab15b8419093c4fcfc585e3a37f0b4b63dda83c96c465f6f6521/contents /root/.tern/temp/mergedir
2020-09-03 21:01:08,326 - DEBUG - rootfs - Running command: mount -t proc /proc /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,332 - DEBUG - rootfs - Running command: mount -o bind /sys /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,335 - DEBUG - rootfs - Running command: mount -o bind /dev /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,342 - DEBUG - rootfs - Running command: cp /etc/resolv.conf /root/.tern/temp/mergedir/etc/resolv.conf
2020-09-03 21:01:08,348 - DEBUG - rootfs - Running command: unshare -pf --mount-proc=/root/.tern/temp/mergedir/proc chroot /root/.tern/temp/mergedir /bin/sh -c dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'
2020-09-03 21:01:08,364 - ERROR - rootfs - Command failed. chroot: failed to run command ‘/bin/sh’: Exec format error
2020-09-03 21:01:08,364 - WARNING - rootfs - Error executing command in chroot
2020-09-03 21:01:08,365 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/proc
2020-09-03 21:01:08,371 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/sys
2020-09-03 21:01:08,374 - DEBUG - rootfs - Running command: umount /root/.tern/temp/mergedir/dev
2020-09-03 21:01:08,376 - DEBUG - rootfs - Running command: umount -rl /root/.tern/temp/mergedir
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 237, in run_chroot_command
shell, '-c', command_string)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 77, in root_command
1, cmd=full_cmd, output=None, stderr=error.decode())
subprocess.CalledProcessError: Command '['unshare', '-pf', '--mount-proc=/root/.tern/temp/mergedir/proc', 'chroot', '/root/.tern/temp/mergedir', '/bin/sh', '-c', "dpkg --get-selections | cut -f1 -d':' | awk '{print $1}'"]' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 204, in main
do_main(args)
File "/usr/local/lib/python3.7/dist-packages/tern/__main__.py", line 105, in do_main
run.execute_docker_image(args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 91, in execute_docker_image
analyze(full_image, args)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/run.py", line 70, in analyze
args.driver)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 40, in analyze_docker_image
shell = analyze_first_layer(image_obj, master_list, redo)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 90, in analyze_first_layer
execute_base_layer(image_obj.layers[0], binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/docker/analyze.py", line 102, in execute_base_layer
common.add_base_packages(base_layer, binary, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 341, in add_base_packages
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/analyze/common.py", line 239, in collate_list_metadata
work_dir, envs)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 240, in get_pkg_attr_list
snippet_list, shell, package=package_name)
File "/usr/local/lib/python3.7/dist-packages/tern/command_lib/command_lib.py", line 197, in invoke_in_rootfs
result = rootfs.run_chroot_command(full_cmd, shell)
File "/usr/local/lib/python3.7/dist-packages/tern/utils/rootfs.py", line 242, in run_chroot_command
1, cmd=command_string, output=error.output.decode('utf-8'))
AttributeError: 'NoneType' object has no attribute 'decode'
|
subprocess.CalledProcessError
|
def get_commands_from_history(image_layer):
"""Given the image layer object and the shell, get the list of command
objects that created the layer"""
# set up notice origin for the layer
origin_layer = "Layer: " + image_layer.fs_hash[:10]
if image_layer.created_by:
instruction = created_to_instruction(image_layer.created_by)
image_layer.origins.add_notice_to_origins(
origin_layer,
Notice(
formats.dockerfile_line.format(dockerfile_instruction=instruction),
"info",
),
)
command_line = instruction.split(" ", 1)[1]
else:
instruction = ""
image_layer.origins.add_notice_to_origins(
origin_layer, Notice(formats.no_created_by, "warning")
)
command_line = instruction
# Image layers are created with the directives RUN, ADD and COPY
# For ADD and COPY instructions, there is no information about the
# packages added
if "ADD" in instruction or "COPY" in instruction:
image_layer.origins.add_notice_to_origins(
origin_layer,
Notice(errors.unknown_content.format(files=command_line), "warning"),
)
# return an empty list as we cannot find any commands
return []
# for RUN instructions we can return a list of commands
command_list, msg = common.filter_install_commands(command_line)
if msg:
image_layer.origins.add_notice_to_origins(origin_layer, Notice(msg, "warning"))
return command_list
|
def get_commands_from_history(image_layer):
"""Given the image layer object and the shell, get the list of command
objects that created the layer"""
# set up notice origin for the layer
origin_layer = "Layer: " + image_layer.fs_hash[:10]
if image_layer.created_by:
instruction = created_to_instruction(image_layer.created_by)
image_layer.origins.add_notice_to_origins(
origin_layer,
Notice(
formats.dockerfile_line.format(dockerfile_instruction=instruction),
"info",
),
)
else:
image_layer.origins.add_notice_to_origins(
origin_layer, Notice(formats.no_created_by, "warning")
)
command_line = instruction.split(" ", 1)[1]
# Image layers are created with the directives RUN, ADD and COPY
# For ADD and COPY instructions, there is no information about the
# packages added
if "ADD" in instruction or "COPY" in instruction:
image_layer.origins.add_notice_to_origins(
origin_layer,
Notice(errors.unknown_content.format(files=command_line), "warning"),
)
# return an empty list as we cannot find any commands
return []
# for RUN instructions we can return a list of commands
command_list, msg = common.filter_install_commands(command_line)
if msg:
image_layer.origins.add_notice_to_origins(origin_layer, Notice(msg, "warning"))
return command_list
|
https://github.com/tern-tools/tern/issues/636
|
/* snip */
2020-04-14 20:17:02,098 - DEBUG - common - Reading files in filesystem...
Traceback (most recent call last):
File "/usr/local/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/dist-packages/tern/__main__.py", line 201, in main
do_main(args)
File "/usr/local/lib/python3.8/dist-packages/tern/__main__.py", line 105, in do_main
run.execute_docker_image(args)
File "/usr/local/lib/python3.8/dist-packages/tern/analyze/docker/run.py", line 90, in execute_docker_image
analyze(full_image, args)
File "/usr/local/lib/python3.8/dist-packages/tern/analyze/docker/run.py", line 69, in analyze
analyze_docker_image(image_obj, args.redo, dfile_lock, dfobj)
File "/usr/local/lib/python3.8/dist-packages/tern/analyze/docker/analyze.py", line 42, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj,
File "/usr/local/lib/python3.8/dist-packages/tern/analyze/docker/analyze.py", line 129, in analyze_subsequent_layers
command_list = dhelper.get_commands_from_history(
File "/usr/local/lib/python3.8/dist-packages/tern/analyze/docker/helpers.py", line 158, in get_commands_from_history
command_line = instruction.split(' ', 1)[1]
UnboundLocalError: local variable 'instruction' referenced before assignment
|
UnboundLocalError
|
def check_git_src(dockerfile_path):
"""Given the src_path and the dockerfile path, return the git
repository name and sha information in the format of string.
Currently we only consider the following situation:
- target_git_project
- dir1
- dir2/dockerfile
So we only use dockerfile_path to find the git repo info."""
# get the path of the folder containing the dockerfile
dockerfile_folder_path = os.path.dirname(os.path.abspath(dockerfile_path))
# locate the top level directory
path_to_toplevel = get_git_toplevel(dockerfile_folder_path)
# get the path of the target folder or file
logger.debug("looking into path: %s for git repo.", path_to_toplevel)
comment_line = ""
if path_to_toplevel:
sha_info = get_git_sha(path_to_toplevel)
# if path_to_toplevel exists, name_info should be the folder name
name_info = os.path.basename(path_to_toplevel)
comment_line = "git project name: " + name_info + ", HEAD sha: " + sha_info
else:
comment_line = "Not a git repository"
return comment_line
|
def check_git_src(dockerfile_path):
"""Given the src_path and the dockerfile path, return the git
repository name and sha information in the format of string.
Currently we only consider the following situation:
- target_git_project
- dir1
- dir2/dockerfile
So we only use dockerfile_path to find the git repo info."""
# get the path of the folder containing the dockerfile
dockerfile_folder_path = os.path.dirname(dockerfile_path)
# locate the top level directory
path_to_toplevel = get_git_toplevel(dockerfile_folder_path)
# get the path of the target folder or file
logger.debug("looking into path: %s for git repo.", path_to_toplevel)
comment_line = ""
if path_to_toplevel:
sha_info = get_git_sha(path_to_toplevel)
# if path_to_toplevel exists, name_info should be the folder name
name_info = os.path.basename(path_to_toplevel)
comment_line = "git project name: " + name_info + ", HEAD sha: " + sha_info
else:
comment_line = "Not a git repository"
return comment_line
|
https://github.com/tern-tools/tern/issues/642
|
Traceback (most recent call last):
File "/home/rjudge/ternenv/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 201, in main
do_main(args)
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 90, in do_main
run.execute_dockerfile(args)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/run.py", line 176, in execute_dockerfile
output = dockerfile.create_locked_dockerfile(dfobj)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 283, in create_locked_dockerfile
expand_add_command(dfobj)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 270, in expand_add_command
dockerfile_path)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 256, in find_git_info
comment_line = common.check_git_src(dockerfile_path)
File "/home/rjudge/ternenv/tern/tern/analyze/common.py", line 536, in check_git_src
path_to_toplevel = get_git_toplevel(dockerfile_folder_path)
File "/home/rjudge/ternenv/tern/tern/analyze/common.py", line 618, in get_git_toplevel
command, stderr=subprocess.DEVNULL, cwd=path)
File "/usr/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/usr/lib/python3.6/subprocess.py", line 423, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1364, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: '': ''
|
FileNotFoundError
|
def get_git_url(dockerfile_path):
"""Given a dockerfile_path, return url of git project which contains
the dockerfile in a form of list."""
# get the path of the folder containing the dockerfile
dockerfile_folder_path = os.path.dirname(os.path.abspath(dockerfile_path))
command = ["git", "remote", "-v"]
try:
output = subprocess.check_output( # nosec
command, stderr=subprocess.DEVNULL, cwd=dockerfile_folder_path
)
if isinstance(output, bytes):
lines = output.decode("utf-8").split("\n")
# pop the last line which is an empty line
lines.pop()
url_list = set()
for line in lines:
extract_url = extract_git_url_from_line(line)
if extract_url:
url_list.add(extract_url)
except (subprocess.CalledProcessError, FileNotFoundError):
logger.debug("Cannot find git repo url, path is %s", dockerfile_folder_path)
return url_list
|
def get_git_url(dockerfile_path):
"""Given a dockerfile_path, return url of git project which contains
the dockerfile in a form of list."""
# get the path of the folder containing the dockerfile
dockerfile_folder_path = os.path.dirname(dockerfile_path)
command = ["git", "remote", "-v"]
try:
output = subprocess.check_output( # nosec
command, stderr=subprocess.DEVNULL, cwd=dockerfile_folder_path
)
if isinstance(output, bytes):
lines = output.decode("utf-8").split("\n")
# pop the last line which is an empty line
lines.pop()
url_list = set()
for line in lines:
extract_url = extract_git_url_from_line(line)
if extract_url:
url_list.add(extract_url)
except subprocess.CalledProcessError:
logger.debug("Cannot find git repo url, path is %s", dockerfile_folder_path)
return url_list
|
https://github.com/tern-tools/tern/issues/642
|
Traceback (most recent call last):
File "/home/rjudge/ternenv/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 201, in main
do_main(args)
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 90, in do_main
run.execute_dockerfile(args)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/run.py", line 176, in execute_dockerfile
output = dockerfile.create_locked_dockerfile(dfobj)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 283, in create_locked_dockerfile
expand_add_command(dfobj)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 270, in expand_add_command
dockerfile_path)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 256, in find_git_info
comment_line = common.check_git_src(dockerfile_path)
File "/home/rjudge/ternenv/tern/tern/analyze/common.py", line 536, in check_git_src
path_to_toplevel = get_git_toplevel(dockerfile_folder_path)
File "/home/rjudge/ternenv/tern/tern/analyze/common.py", line 618, in get_git_toplevel
command, stderr=subprocess.DEVNULL, cwd=path)
File "/usr/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/usr/lib/python3.6/subprocess.py", line 423, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1364, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: '': ''
|
FileNotFoundError
|
def get_git_toplevel(path):
"""Given a path, return the absolute path to the top level directory if it
is in a git repository. Empty string will be returned if not.
Path should be a path to a directory not to a file."""
command = ["git", "rev-parse", "--show-toplevel"]
path_to_toplevel = ""
try:
output = subprocess.check_output( # nosec
command, stderr=subprocess.DEVNULL, cwd=path
)
if isinstance(output, bytes):
path_to_toplevel = output.decode("utf-8").split("\n").pop(0)
except (subprocess.CalledProcessError, FileNotFoundError):
logger.debug("Cannot find git repo toplevel, path is %s", path)
return path_to_toplevel
|
def get_git_toplevel(path):
"""Given a path, return the absolute path to the top level directory if it
is in a git repository. Empty string will be returned if not.
Path should be a path to a directory not to a file."""
command = ["git", "rev-parse", "--show-toplevel"]
path_to_toplevel = ""
try:
output = subprocess.check_output( # nosec
command, stderr=subprocess.DEVNULL, cwd=path
)
if isinstance(output, bytes):
path_to_toplevel = output.decode("utf-8").split("\n").pop(0)
except subprocess.CalledProcessError:
logger.debug("Cannot find git repo toplevel, path is %s", path)
return path_to_toplevel
|
https://github.com/tern-tools/tern/issues/642
|
Traceback (most recent call last):
File "/home/rjudge/ternenv/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 201, in main
do_main(args)
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 90, in do_main
run.execute_dockerfile(args)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/run.py", line 176, in execute_dockerfile
output = dockerfile.create_locked_dockerfile(dfobj)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 283, in create_locked_dockerfile
expand_add_command(dfobj)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 270, in expand_add_command
dockerfile_path)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/dockerfile.py", line 256, in find_git_info
comment_line = common.check_git_src(dockerfile_path)
File "/home/rjudge/ternenv/tern/tern/analyze/common.py", line 536, in check_git_src
path_to_toplevel = get_git_toplevel(dockerfile_folder_path)
File "/home/rjudge/ternenv/tern/tern/analyze/common.py", line 618, in get_git_toplevel
command, stderr=subprocess.DEVNULL, cwd=path)
File "/usr/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/usr/lib/python3.6/subprocess.py", line 423, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1364, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: '': ''
|
FileNotFoundError
|
def close_client():
"""End docker interactions by closing the client. This is meant to be
used after analysis is done"""
try:
client.close()
except (AttributeError, requests.exceptions.ConnectionError):
# it should either already be closed, no socker is in use,
# or docker is not setup -- either way, the socket is closed
pass
|
def close_client():
"""End docker interactions by closing the client. This is meant to be
used after analysis is done"""
try:
client.close()
except requests.exceptions.ConnectionError:
# it should either already be closed or docker is not setup
# either way, the socket is closed
pass
|
https://github.com/tern-tools/tern/issues/628
|
2020-04-08 09:28:59,030 - DEBUG - run - Teardown...
Traceback (most recent call last):
File "/home/rjudge/ternenv/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 201, in main
do_main(args)
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 113, in do_main
run.execute_docker_image(args)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/run.py", line 99, in execute_docker_image
report.teardown()
File "/home/rjudge/ternenv/tern/tern/report/report.py", line 66, in teardown
container.close_client()
File "/home/rjudge/ternenv/tern/tern/analyze/docker/container.py", line 190, in close_client
client.close()
AttributeError: 'NoneType' object has no attribute 'close'
|
AttributeError
|
def pull_image(image_tag_string):
"""Try to pull an image from Dockerhub"""
logger.debug('Attempting to pull image "%s"', image_tag_string)
try:
image = client.images.pull(image_tag_string)
logger.debug('Image "%s" downloaded', image_tag_string)
return image
except (docker.errors.ImageNotFound, docker.errors.NotFound):
logger.warning('No such image: "%s"', image_tag_string)
return None
|
def pull_image(image_tag_string):
"""Try to pull an image from Dockerhub"""
logger.debug('Attempting to pull image "%s"', image_tag_string)
try:
image = client.images.pull(image_tag_string)
logger.debug('Image "%s" downloaded', image_tag_string)
return image
except docker.errors.ImageNotFound:
logger.warning('No such image: "%s"', image_tag_string)
return None
|
https://github.com/tern-tools/tern/issues/610
|
2020-03-26 15:05:54,287 - DEBUG - container - Checking if image "photon:broken" is available on disk...
2020-03-26 15:05:54,290 - DEBUG - container - Attempting to pull image "photon:broken"
Traceback (most recent call last):
File "/home/rjudge/ternenv/lib/python3.6/site-packages/docker/api/client.py", line 261, in _raise_for_status
response.raise_for_status()
File "/home/rjudge/ternenv/lib/python3.6/site-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 404 Client Error: Not Found for url: http+docker://localhost/v1.35/images/create?tag=broken&fromImage=photon
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/rjudge/ternenv/bin/tern", line 10, in <module>
sys.exit(main())
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 201, in main
do_main(args)
File "/home/rjudge/ternenv/tern/tern/__main__.py", line 90, in do_main
run.execute_dockerfile(args)
File "/home/rjudge/ternenv/tern/tern/analyze/docker/run.py", line 150, in execute_dockerfile
base_image = report.load_base_image()
File "/home/rjudge/ternenv/tern/tern/report/report.py", line 95, in load_base_image
if container.pull_image(base_image.repotag) is None:
File "/home/rjudge/ternenv/tern/tern/analyze/docker/container.py", line 91, in pull_image
image = client.images.pull(image_tag_string)
File "/home/rjudge/ternenv/lib/python3.6/site-packages/docker/models/images.py", line 445, in pull
repository, tag=tag, stream=True, **kwargs
File "/home/rjudge/ternenv/lib/python3.6/site-packages/docker/api/image.py", line 415, in pull
self._raise_for_status(response)
File "/home/rjudge/ternenv/lib/python3.6/site-packages/docker/api/client.py", line 263, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/home/rjudge/ternenv/lib/python3.6/site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.NotFound: 404 Client Error: Not Found ("manifest for photon:broken not found: manifest unknown: manifest unknown")
|
requests.exceptions.HTTPError
|
def prep_rootfs(rootfs_dir):
"""Mount required filesystems in the rootfs directory"""
rootfs_path = os.path.abspath(rootfs_dir)
proc_path = os.path.join(rootfs_path, "proc")
sys_path = os.path.join(rootfs_path, "sys")
dev_path = os.path.join(rootfs_path, "dev")
if not os.path.exists(proc_path):
os.mkdir(proc_path)
if not os.path.exists(sys_path):
os.mkdir(sys_path)
if not os.path.exists(dev_path):
os.mkdir(dev_path)
try:
root_command(mount_proc, os.path.join(rootfs_path, "proc"))
root_command(mount_sys, os.path.join(rootfs_path, "sys"))
root_command(mount_dev, os.path.join(rootfs_path, "dev"))
root_command(host_dns, os.path.join(rootfs_path, constants.resolv_path[1:]))
except subprocess.CalledProcessError as error:
logger.error("%s", error.output)
raise
|
def prep_rootfs(rootfs_dir):
"""Mount required filesystems in the rootfs directory"""
rootfs_path = os.path.abspath(rootfs_dir)
try:
root_command(mount_proc, os.path.join(rootfs_path, "proc"))
root_command(mount_sys, os.path.join(rootfs_path, "sys"))
root_command(mount_dev, os.path.join(rootfs_path, "dev"))
root_command(host_dns, os.path.join(rootfs_path, constants.resolv_path[1:]))
except subprocess.CalledProcessError as error:
logger.error("%s", error.output)
raise
|
https://github.com/tern-tools/tern/issues/538
|
2020-02-16 17:00:00,058 - ERROR - rootfs - b'mount: /home/oc37ejuc/.tern/temp/mergedir/proc: mount point does not exist.\n'
Traceback (most recent call last):
File "/home/oc37ejuc/ternenv/bin/tern", line 11, in <module>
sys.exit(main())
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 170, in main
do_main(args)
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 93, in do_main
run.execute_docker_image(args)
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/analyze/docker/analyze.py", line 37, in analyze_docker_image
shell = analyze_first_layer(image_obj, master_list, redo)
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/analyze/docker/analyze.py", line 98, in analyze_first_layer
rootfs.prep_rootfs(target)
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/utils/rootfs.py", line 177, in prep_rootfs
root_command(mount_proc, os.path.join(rootfs_path, 'proc'))
File "/home/oc37ejuc/ternenv/lib/python3.6/site-packages/tern/utils/rootfs.py", line 79, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['sudo', 'mount', '-t', 'proc', '/proc', '/home/oc37ejuc/.tern/temp/mergedir/proc']' returned non-zero exit status 1.
|
subprocess.CalledProcessError
|
def do_main(args):
"""Execute according to subcommands"""
# set bind mount location if working in a container
rootfs.set_mount_dir(args.bind_mount)
# create working directory
create_top_dir()
if args.log_stream:
# set up console logs
global logger
global console
logger.addHandler(console)
logger.debug("Starting...")
if args.clear_cache:
logger.debug("Clearing cache...")
cache.clear()
if hasattr(args, "name") and args.name == "report":
if args.dockerfile:
run.execute_dockerfile(args)
if args.docker_image:
if common.check_tar(args.docker_image):
logger.error("%s", errors.incorrect_raw_option)
else:
run.execute_docker_image(args)
logger.debug("Report completed.")
if args.raw_image:
if not common.check_tar(args.raw_image):
logger.error(
"%s", errors.invalid_raw_image.format(image=args.raw_image)
)
else:
run.execute_docker_image(args)
logger.debug("Report completed.")
logger.debug("Finished")
|
def do_main(args):
"""Execute according to subcommands"""
# create working directory
create_top_dir()
if args.log_stream:
# set up console logs
global logger
global console
logger.addHandler(console)
logger.debug("Starting...")
if args.clear_cache:
logger.debug("Clearing cache...")
cache.clear()
if hasattr(args, "name") and args.name == "report":
if args.dockerfile:
run.execute_dockerfile(args)
if args.docker_image:
if common.check_tar(args.docker_image):
logger.error("%s", errors.incorrect_raw_option)
else:
run.execute_docker_image(args)
logger.debug("Report completed.")
if args.raw_image:
if not common.check_tar(args.raw_image):
logger.error(
"%s", errors.invalid_raw_image.format(image=args.raw_image)
)
else:
run.execute_docker_image(args)
logger.debug("Report completed.")
logger.debug("Finished")
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
prog="Tern",
description="""
Tern is a container image component curation tool. Tern retrieves
information about packages that are installed in a container image.
Learn more at https://github.com/vmware/tern""",
)
parser.add_argument(
"-l",
"--log-stream",
action="store_true",
help="Stream logs to the console; Useful when running in a shell",
)
parser.add_argument(
"-c",
"--clear-cache",
action="store_true",
help="Clear the cache before running",
)
parser.add_argument(
"-k",
"--keep-wd",
action="store_true",
help="Keep the working directory after execution."
" Useful when debugging container images",
)
parser.add_argument(
"-b",
"--bind-mount",
metavar="BIND_DIR",
help="Absolute path to bind mount target. Needed"
" when running from within a container.",
)
parser.add_argument(
"-r",
"--redo",
action="store_true",
help="Repopulate the cache for found layers",
)
# sys.version gives more information than we care to print
py_ver = sys.version.replace("\n", "").split("[")[0]
parser.add_argument(
"-v",
"--version",
action="version",
version="{ver_str}\n python version = {py_v}".format(
ver_str=get_version(), py_v=py_ver
),
)
subparsers = parser.add_subparsers(help="Subcommands")
# subparser for report
parser_report = subparsers.add_parser(
"report",
help="Create a BoM report. Run 'tern report -h' for report format options.",
)
parser_report.add_argument(
"-d",
"--dockerfile",
type=check_file_existence,
help="Dockerfile used to build the Docker image",
)
parser_report.add_argument(
"-i",
"--docker-image",
help="Docker image that exists locally -"
" image:tag"
" The option can be used to pull docker"
" images by digest as well -"
" <repo>@<digest-type>:<digest>",
)
parser_report.add_argument(
"-w",
"--raw-image",
metavar="FILE",
help="Raw container image that exists locally in the form of a tar archive.",
)
parser_report.add_argument(
"-x",
"--extend",
metavar="EXTENSION",
help="Use an extension to analyze a container "
"image. Available extensions: cve-bin-tool",
)
parser_report.add_argument(
"-f",
"--report-format",
metavar="REPORT_FORMAT",
help="Format the report using one of the "
"available formats: "
"spdxtagvalue, json, yaml",
)
parser_report.add_argument(
"-o",
"--output-file",
default=None,
metavar="FILE",
help="Write the report to a file. "
"If no file is given the default file in "
"utils/constants.py will be used",
)
parser_report.set_defaults(name="report")
args = parser.parse_args()
# execute
if sys.version_info < (3, 0):
sys.stderr.write(
"Error running Tern. Please check that python3 is configured as default.\n"
)
else:
do_main(args)
|
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
prog="Tern",
description="""
Tern is a container image component curation tool. Tern retrieves
information about packages that are installed in a container image.
Learn more at https://github.com/vmware/tern""",
)
parser.add_argument(
"-l",
"--log-stream",
action="store_true",
help="Stream logs to the console; Useful when running in a shell",
)
parser.add_argument(
"-c",
"--clear-cache",
action="store_true",
help="Clear the cache before running",
)
parser.add_argument(
"-k",
"--keep-wd",
action="store_true",
help="Keep the working directory after execution."
" Useful when debugging container images",
)
parser.add_argument(
"-b",
"--bind-mount",
action="store_true",
help="Treat working directory as a bind mount."
" Needed when running from within a container",
)
parser.add_argument(
"-r",
"--redo",
action="store_true",
help="Repopulate the cache for found layers",
)
# sys.version gives more information than we care to print
py_ver = sys.version.replace("\n", "").split("[")[0]
parser.add_argument(
"-v",
"--version",
action="version",
version="{ver_str}\n python version = {py_v}".format(
ver_str=get_version(), py_v=py_ver
),
)
subparsers = parser.add_subparsers(help="Subcommands")
# subparser for report
parser_report = subparsers.add_parser(
"report",
help="Create a BoM report. Run 'tern report -h' for report format options.",
)
parser_report.add_argument(
"-d",
"--dockerfile",
type=check_file_existence,
help="Dockerfile used to build the Docker image",
)
parser_report.add_argument(
"-i",
"--docker-image",
help="Docker image that exists locally -"
" image:tag"
" The option can be used to pull docker"
" images by digest as well -"
" <repo>@<digest-type>:<digest>",
)
parser_report.add_argument(
"-w",
"--raw-image",
metavar="FILE",
help="Raw container image that exists locally in the form of a tar archive.",
)
parser_report.add_argument(
"-x",
"--extend",
metavar="EXTENSION",
help="Use an extension to analyze a container "
"image. Available extensions: cve-bin-tool",
)
parser_report.add_argument(
"-f",
"--report-format",
metavar="REPORT_FORMAT",
help="Format the report using one of the "
"available formats: "
"spdxtagvalue, json, yaml",
)
parser_report.add_argument(
"-o",
"--output-file",
default=None,
metavar="FILE",
help="Write the report to a file. "
"If no file is given the default file in "
"utils/constants.py will be used",
)
parser_report.set_defaults(name="report")
args = parser.parse_args()
# execute
if sys.version_info < (3, 0):
sys.stderr.write(
"Error running Tern. Please check that python3 is configured as default.\n"
)
else:
do_main(args)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def execute_docker_image(args):
"""Execution path if given a Docker image"""
logger.debug("Setting up...")
image_string = args.docker_image
if not args.raw_image:
# don't check docker daemon for raw images
container.check_docker_setup()
else:
image_string = args.raw_image
report.setup(image_tag_string=image_string)
# attempt to get built image metadata
full_image = report.load_full_image(image_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# analyze image
analyze(full_image, args)
# generate report
report.report_out(args, full_image)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
if not args.keep_wd:
report.clean_image_tars(full_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir()
|
def execute_docker_image(args):
"""Execution path if given a Docker image"""
logger.debug("Setting up...")
image_string = args.docker_image
if not args.raw_image:
# don't check docker daemon for raw images
container.check_docker_setup()
else:
image_string = args.raw_image
report.setup(image_tag_string=image_string)
# attempt to get built image metadata
full_image = report.load_full_image(image_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# analyze image
analyze(full_image, args)
# generate report
report.report_out(args, full_image)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
if not args.keep_wd:
report.clean_image_tars(full_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir(args.bind_mount)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def execute_dockerfile(args):
"""Execution path if given a dockerfile"""
container.check_docker_setup()
logger.debug("Setting up...")
report.setup(dockerfile=args.dockerfile)
# attempt to build the image
logger.debug("Building Docker image...")
# placeholder to check if we can analyze the full image
completed = True
build, _ = dhelper.is_build()
if build:
# attempt to get built image metadata
image_tag_string = dhelper.get_dockerfile_image_tag()
full_image = report.load_full_image(image_tag_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.dockerfile_image.format(dockerfile=args.dockerfile)
)
# analyze image
analyze(full_image, args, True)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
completed = False
# clean up image
container.remove_image(full_image.repotag)
if not args.keep_wd:
report.clean_image_tars(full_image)
else:
# cannot build the image
logger.warning("Cannot build image")
completed = False
# check if we have analyzed the full image or not
if not completed:
# get the base image
logger.debug("Loading base image...")
base_image = report.load_base_image()
if base_image.origins.is_empty():
# image loading was successful
# add a notice stating failure to build image
base_image.origins.add_notice_to_origins(
args.dockerfile, Notice(formats.image_build_failure, "warning")
)
# analyze image
analyze(base_image, args)
else:
# we cannot load the base image
logger.warning("Cannot retrieve base image metadata")
# run through commands in the Dockerfile
logger.debug("Parsing Dockerfile to generate report...")
stub_image = get_dockerfile_packages()
if not args.keep_wd:
report.clean_image_tars(base_image)
# generate report based on what images were created
if completed:
report.report_out(args, full_image)
else:
report.report_out(args, base_image, stub_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir()
|
def execute_dockerfile(args):
"""Execution path if given a dockerfile"""
container.check_docker_setup()
logger.debug("Setting up...")
report.setup(dockerfile=args.dockerfile)
# attempt to build the image
logger.debug("Building Docker image...")
# placeholder to check if we can analyze the full image
completed = True
build, _ = dhelper.is_build()
if build:
# attempt to get built image metadata
image_tag_string = dhelper.get_dockerfile_image_tag()
full_image = report.load_full_image(image_tag_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.dockerfile_image.format(dockerfile=args.dockerfile)
)
# analyze image
analyze(full_image, args, True)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
completed = False
# clean up image
container.remove_image(full_image.repotag)
if not args.keep_wd:
report.clean_image_tars(full_image)
else:
# cannot build the image
logger.warning("Cannot build image")
completed = False
# check if we have analyzed the full image or not
if not completed:
# get the base image
logger.debug("Loading base image...")
base_image = report.load_base_image()
if base_image.origins.is_empty():
# image loading was successful
# add a notice stating failure to build image
base_image.origins.add_notice_to_origins(
args.dockerfile, Notice(formats.image_build_failure, "warning")
)
# analyze image
analyze(base_image, args)
else:
# we cannot load the base image
logger.warning("Cannot retrieve base image metadata")
# run through commands in the Dockerfile
logger.debug("Parsing Dockerfile to generate report...")
stub_image = get_dockerfile_packages()
if not args.keep_wd:
report.clean_image_tars(base_image)
# generate report based on what images were created
if completed:
report.report_out(args, full_image)
else:
report.report_out(args, base_image, stub_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir(args.bind_mount)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def clean_working_dir():
"""Clean up the working directory
If bind_mount is true then leave the upper level directory"""
path = rootfs.get_working_dir()
if os.path.exists(path):
shutil.rmtree(path)
|
def clean_working_dir(bind_mount):
"""Clean up the working directory
If bind_mount is true then leave the upper level directory"""
path = rootfs.get_working_dir()
if os.path.exists(path):
if bind_mount:
# clean whatever is in temp_folder without removing the folder
inodes = os.listdir(path)
for inode in inodes:
dir_path = os.path.join(path, inode)
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
else:
os.remove(dir_path)
else:
shutil.rmtree(path)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def load():
"""Load the cache"""
global cache
# Do not try to populate the cache if there is no cache available
if not os.path.exists(os.path.join(rootfs.mount_dir, cache_file)):
return
with open(os.path.join(rootfs.mount_dir, cache_file)) as f:
cache = yaml.safe_load(f)
|
def load():
"""Load the cache"""
global cache
# Do not try to populate the cache if there is no cache available
if not os.path.exists(os.path.join(get_top_dir(), cache_file)):
return
with open(os.path.join(get_top_dir(), cache_file)) as f:
cache = yaml.safe_load(f)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def save():
"""Save the cache to the cache file"""
with open(os.path.join(rootfs.mount_dir, cache_file), "w") as f:
yaml.dump(cache, f, default_flow_style=False)
|
def save():
"""Save the cache to the cache file"""
with open(os.path.join(get_top_dir(), cache_file), "w") as f:
yaml.dump(cache, f, default_flow_style=False)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def clear():
"""Empty the cache - don't use unless you really have to"""
global cache
cache = {}
with open(os.path.join(rootfs.mount_dir, cache_file), "w") as f:
yaml.dump(cache, f, default_flow_style=False)
|
def clear():
"""Empty the cache - don't use unless you really have to"""
global cache
cache = {}
with open(os.path.join(get_top_dir(), cache_file), "w") as f:
yaml.dump(cache, f, default_flow_style=False)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def get_working_dir():
"""General purpose utility to return the absolute path of the working
directory"""
return os.path.join(mount_dir, constants.temp_folder)
|
def get_working_dir():
"""General purpose utility to return the absolute path of the working
directory"""
return os.path.join(general.get_top_dir(), constants.temp_folder)
|
https://github.com/tern-tools/tern/issues/498
|
docker build -t ternd .
./docker_run.sh workdir ternd "report -i golang:alpine"
-------------------------OUTPUT-------------------------
Traceback (most recent call last):
File "/usr/bin/tern", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 167, in main
do_main(args)
File "/usr/lib/python3.7/site-packages/tern/__main__.py", line 90, in do_main
run.execute_docker_image(args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 88, in execute_docker_image
analyze(full_image, args)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/run.py", line 67, in analyze
analyze_docker_image(image_obj, args.redo, is_dockerfile)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 39, in analyze_docker_image
analyze_subsequent_layers(image_obj, shell, master_list, redo)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 133, in analyze_subsequent_layers
target = mount_overlay_fs(image_obj, curr_layer)
File "/usr/lib/python3.7/site-packages/tern/analyze/docker/analyze.py", line 178, in mount_overlay_fs
target = rootfs.mount_diff_layers(tar_layers)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 196, in mount_diff_layers
root_command(union_mount, args, merge_dir_path)
File "/usr/lib/python3.7/site-packages/tern/utils/rootfs.py", line 66, in root_command
1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['mount', '-t', 'overlay', 'overlay', '-o', 'lowerdir=/root/.tern/temp/8e635d6264340a45901f63d2a18ea5bc8c680919e07191e4ef276860952d0399/contents,upperdir=/root/.tern/temp/1332c5c5bbaa26b26053d13d13b81dd2f523a2217e01f6edfe38fe107c091859/contents,workdir=/root/.tern/temp/workdir', '/root/.tern/temp/mergedir']' returned non-zero exit status 1.
Traceback (most recent call last):
File "ci/test_files_touched.py", line 130, in <module>
subprocess.check_output(t, shell=True) # nosec
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/opt/circleci/.pyenv/versions/3.6.5/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command './docker_run.sh workdir ternd "report -i golang:alpine"' returned non-zero exit status 1.
Exited with code 1
|
subprocess.CalledProcessError
|
def start_container(image_tag_string):
"""Start the test container in detach state"""
try:
client.containers.run(image_tag_string, name=container, detach=True)
except requests.exceptions.HTTPError:
# container may already be running
pass
try:
remove_container()
client.containers.run(image_tag_string, name=container, detach=True)
except requests.exceptions.HTTPError:
# not sure what the error is now
raise Exception("Cannot remove running container")
|
def start_container(image_tag_string):
"""Start the test container in detach state"""
try:
client.containers.run(image_tag_string, name=container, detach=True)
except HTTPError:
# container may already be running
pass
try:
remove_container()
client.containers.run(image_tag_string, name=container, detach=True)
except HTTPError:
# not sure what the error is now
raise Exception("Cannot remove running container")
|
https://github.com/tern-tools/tern/issues/207
|
(ternenv) browne@ubuntu18:~/ternenv/tern$ tern -l report -f output.txt -i debian:buster
2019-04-05 00:27:08,017 - DEBUG - __main__ - Starting...
2019-04-05 00:27:08,018 - DEBUG - report - Setting up...
2019-04-05 00:27:08,018 - DEBUG - container - Checking if image "debian:buster" is available on disk...
Traceback (most recent call last):
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 354, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1285, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1234, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1026, in _send_output
self.send(msg)
File "/usr/lib/python3.6/http/client.py", line 964, in send
self.connect()
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/transport/unixconn.py", line 42, in connect
sock.connect(self.unix_socket)
FileNotFoundError: [Errno 2] No such file or directory
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 638, in urlopen
_stacktrace=sys.exc_info()[2])
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/packages/six.py", line 685, in reraise
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/packages/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 354, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1285, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1234, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1026, in _send_output
self.send(msg)
File "/usr/lib/python3.6/http/client.py", line 964, in send
self.connect()
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/transport/unixconn.py", line 42, in connect
sock.connect(self.unix_socket)
urllib3.exceptions.ProtocolError: ('Connection aborted.', FileNotFoundError(2, 'No such file or directory'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/browne/ternenv/bin/tern", line 11, in <module>
load_entry_point('tern==0.3.0', 'console_scripts', 'tern')()
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 111, in main
do_main(args)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 54, in do_main
report.execute_docker_image(args)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/report/report.py", line 422, in execute_docker_image
setup(image_tag_string=args.docker_image)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/report/report.py", line 63, in setup
if not container.check_image(image_tag_string):
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/utils/container.py", line 69, in check_image
client.images.get(image_tag_string)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/models/images.py", line 316, in get
return self.prepare_model(self.client.api.inspect_image(name))
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/api/image.py", line 245, in inspect_image
self._get(self._url("/images/{0}/json", image)), True
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/utils/decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/api/client.py", line 225, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/adapters.py", line 498, in send
raise ConnectionError(err, request=request)
(ternenv) browne@ubuntu18:~/ternenv/tern$
|
FileNotFoundError
|
def execute_docker_image(args):
"""Execution path if given a Docker image"""
logger.debug("Setting up...")
image_string = args.docker_image
if not args.raw_image:
# don't check docker daemon for raw images
container.check_docker_setup()
else:
image_string = args.raw_image
report.setup(image_tag_string=image_string)
# attempt to get built image metadata
full_image = report.load_full_image(image_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# analyze image
analyze(full_image, args)
# generate report
report.report_out(args, full_image)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
if not args.keep_wd:
report.clean_image_tars(full_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir(args.bind_mount)
|
def execute_docker_image(args):
"""Execution path if given a Docker image"""
logger.debug("Setting up...")
image_string = args.docker_image
if not args.raw_image:
# don't check docker daemon for raw images
check_docker_daemon()
else:
image_string = args.raw_image
report.setup(image_tag_string=image_string)
# attempt to get built image metadata
full_image = report.load_full_image(image_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=image_string)
)
# analyze image
analyze(full_image, args)
# generate report
report.report_out(args, full_image)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
if not args.keep_wd:
report.clean_image_tars(full_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir(args.bind_mount)
|
https://github.com/tern-tools/tern/issues/207
|
(ternenv) browne@ubuntu18:~/ternenv/tern$ tern -l report -f output.txt -i debian:buster
2019-04-05 00:27:08,017 - DEBUG - __main__ - Starting...
2019-04-05 00:27:08,018 - DEBUG - report - Setting up...
2019-04-05 00:27:08,018 - DEBUG - container - Checking if image "debian:buster" is available on disk...
Traceback (most recent call last):
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 354, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1285, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1234, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1026, in _send_output
self.send(msg)
File "/usr/lib/python3.6/http/client.py", line 964, in send
self.connect()
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/transport/unixconn.py", line 42, in connect
sock.connect(self.unix_socket)
FileNotFoundError: [Errno 2] No such file or directory
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 638, in urlopen
_stacktrace=sys.exc_info()[2])
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/packages/six.py", line 685, in reraise
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/packages/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 354, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1285, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1234, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1026, in _send_output
self.send(msg)
File "/usr/lib/python3.6/http/client.py", line 964, in send
self.connect()
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/transport/unixconn.py", line 42, in connect
sock.connect(self.unix_socket)
urllib3.exceptions.ProtocolError: ('Connection aborted.', FileNotFoundError(2, 'No such file or directory'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/browne/ternenv/bin/tern", line 11, in <module>
load_entry_point('tern==0.3.0', 'console_scripts', 'tern')()
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 111, in main
do_main(args)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 54, in do_main
report.execute_docker_image(args)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/report/report.py", line 422, in execute_docker_image
setup(image_tag_string=args.docker_image)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/report/report.py", line 63, in setup
if not container.check_image(image_tag_string):
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/utils/container.py", line 69, in check_image
client.images.get(image_tag_string)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/models/images.py", line 316, in get
return self.prepare_model(self.client.api.inspect_image(name))
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/api/image.py", line 245, in inspect_image
self._get(self._url("/images/{0}/json", image)), True
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/utils/decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/api/client.py", line 225, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/adapters.py", line 498, in send
raise ConnectionError(err, request=request)
(ternenv) browne@ubuntu18:~/ternenv/tern$
|
FileNotFoundError
|
def execute_dockerfile(args):
"""Execution path if given a dockerfile"""
container.check_docker_setup()
logger.debug("Setting up...")
report.setup(dockerfile=args.dockerfile)
# attempt to build the image
logger.debug("Building Docker image...")
# placeholder to check if we can analyze the full image
completed = True
build, _ = dhelper.is_build()
if build:
# attempt to get built image metadata
image_tag_string = dhelper.get_dockerfile_image_tag()
full_image = report.load_full_image(image_tag_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.dockerfile_image.format(dockerfile=args.dockerfile)
)
# analyze image
analyze(full_image, args, True)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
completed = False
# clean up image
container.remove_image(full_image.repotag)
if not args.keep_wd:
report.clean_image_tars(full_image)
else:
# cannot build the image
logger.warning("Cannot build image")
completed = False
# check if we have analyzed the full image or not
if not completed:
# get the base image
logger.debug("Loading base image...")
base_image = report.load_base_image()
if base_image.origins.is_empty():
# image loading was successful
# add a notice stating failure to build image
base_image.origins.add_notice_to_origins(
args.dockerfile, Notice(formats.image_build_failure, "warning")
)
# analyze image
analyze(base_image, args)
else:
# we cannot load the base image
logger.warning("Cannot retrieve base image metadata")
# run through commands in the Dockerfile
logger.debug("Parsing Dockerfile to generate report...")
stub_image = get_dockerfile_packages()
if not args.keep_wd:
report.clean_image_tars(base_image)
# generate report based on what images were created
if completed:
report.report_out(args, full_image)
else:
report.report_out(args, base_image, stub_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir(args.bind_mount)
|
def execute_dockerfile(args):
"""Execution path if given a dockerfile"""
check_docker_daemon()
logger.debug("Setting up...")
report.setup(dockerfile=args.dockerfile)
# attempt to build the image
logger.debug("Building Docker image...")
# placeholder to check if we can analyze the full image
completed = True
build, _ = dhelper.is_build()
if build:
# attempt to get built image metadata
image_tag_string = dhelper.get_dockerfile_image_tag()
full_image = report.load_full_image(image_tag_string)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.dockerfile_image.format(dockerfile=args.dockerfile)
)
# analyze image
analyze(full_image, args, True)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
completed = False
# clean up image
container.remove_image(full_image.repotag)
if not args.keep_wd:
report.clean_image_tars(full_image)
else:
# cannot build the image
logger.warning("Cannot build image")
completed = False
# check if we have analyzed the full image or not
if not completed:
# get the base image
logger.debug("Loading base image...")
base_image = report.load_base_image()
if base_image.origins.is_empty():
# image loading was successful
# add a notice stating failure to build image
base_image.origins.add_notice_to_origins(
args.dockerfile, Notice(formats.image_build_failure, "warning")
)
# analyze image
analyze(base_image, args)
else:
# we cannot load the base image
logger.warning("Cannot retrieve base image metadata")
# run through commands in the Dockerfile
logger.debug("Parsing Dockerfile to generate report...")
stub_image = get_dockerfile_packages()
if not args.keep_wd:
report.clean_image_tars(base_image)
# generate report based on what images were created
if completed:
report.report_out(args, full_image)
else:
report.report_out(args, base_image, stub_image)
logger.debug("Teardown...")
report.teardown()
if not args.keep_wd:
report.clean_working_dir(args.bind_mount)
|
https://github.com/tern-tools/tern/issues/207
|
(ternenv) browne@ubuntu18:~/ternenv/tern$ tern -l report -f output.txt -i debian:buster
2019-04-05 00:27:08,017 - DEBUG - __main__ - Starting...
2019-04-05 00:27:08,018 - DEBUG - report - Setting up...
2019-04-05 00:27:08,018 - DEBUG - container - Checking if image "debian:buster" is available on disk...
Traceback (most recent call last):
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 354, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1285, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1234, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1026, in _send_output
self.send(msg)
File "/usr/lib/python3.6/http/client.py", line 964, in send
self.connect()
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/transport/unixconn.py", line 42, in connect
sock.connect(self.unix_socket)
FileNotFoundError: [Errno 2] No such file or directory
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 638, in urlopen
_stacktrace=sys.exc_info()[2])
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/packages/six.py", line 685, in reraise
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/packages/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/home/browne/ternenv/lib/python3.6/site-packages/urllib3/connectionpool.py", line 354, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1285, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1234, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "/usr/lib/python3.6/http/client.py", line 1026, in _send_output
self.send(msg)
File "/usr/lib/python3.6/http/client.py", line 964, in send
self.connect()
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/transport/unixconn.py", line 42, in connect
sock.connect(self.unix_socket)
urllib3.exceptions.ProtocolError: ('Connection aborted.', FileNotFoundError(2, 'No such file or directory'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/browne/ternenv/bin/tern", line 11, in <module>
load_entry_point('tern==0.3.0', 'console_scripts', 'tern')()
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 111, in main
do_main(args)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/__main__.py", line 54, in do_main
report.execute_docker_image(args)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/report/report.py", line 422, in execute_docker_image
setup(image_tag_string=args.docker_image)
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/report/report.py", line 63, in setup
if not container.check_image(image_tag_string):
File "/home/browne/ternenv/lib/python3.6/site-packages/tern/utils/container.py", line 69, in check_image
client.images.get(image_tag_string)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/models/images.py", line 316, in get
return self.prepare_model(self.client.api.inspect_image(name))
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/api/image.py", line 245, in inspect_image
self._get(self._url("/images/{0}/json", image)), True
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/utils/decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/docker/api/client.py", line 225, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/home/browne/ternenv/lib/python3.6/site-packages/requests/adapters.py", line 498, in send
raise ConnectionError(err, request=request)
(ternenv) browne@ubuntu18:~/ternenv/tern$
|
FileNotFoundError
|
def root_command(command, *extra):
"""Invoke a shell command as root or using sudo. The command is a
list of shell command words"""
full_cmd = []
sudo = True
if os.getuid() == 0:
sudo = False
if sudo:
full_cmd.append("sudo")
full_cmd.extend(command)
for arg in extra:
full_cmd.append(arg)
# invoke
logger.debug("Running command: %s", " ".join(full_cmd))
pipes = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE, # nosec
stderr=subprocess.PIPE,
)
result, error = pipes.communicate() # nosec
if error:
logger.error("Command failed. %s", error.decode())
raise subprocess.CalledProcessError(1, cmd=full_cmd, output=error) # nosec
return result
|
def root_command(command, *extra):
"""Invoke a shell command as root or using sudo. The command is a
list of shell command words"""
full_cmd = []
sudo = True
if os.getuid() == 0:
sudo = False
if sudo:
full_cmd.append("sudo")
full_cmd.extend(command)
for arg in extra:
full_cmd.append(arg)
# invoke
logger.debug("Running command: %s", " ".join(full_cmd))
pipes = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE, # nosec
stderr=subprocess.PIPE,
)
result, error = pipes.communicate() # nosec
if error:
logger.error("Command failed. %s", error.decode())
raise subprocess.CalledProcessError(1, cmd=full_cmd, output=error) # nosec
else:
return result
|
https://github.com/tern-tools/tern/issues/433
|
$ python
Python 3.7.3 (default, Apr 3 2019, 19:16:38)
[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] on linux
Type "help", "copyright", "credits" or "license" for more information.
from tern.classes.docker_image import DockerImage
d = DockerImage('ubuntu:14.04')
d.load_image()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/nisha/terndev/tern/tern/classes/docker_image.py", line 161, in load_image
layer.gen_fs_hash()
File "/home/nisha/terndev/tern/tern/classes/image_layer.py", line 176, in gen_fs_hash
rootfs.extract_layer_tar(tar_file, fs_dir)
File "/home/nisha/terndev/tern/tern/utils/rootfs.py", line 90, in extract_layer_tar
tar.extractall(directory_path)
File "/usr/lib/python3.7/tarfile.py", line 2002, in extractall
numeric_owner=numeric_owner)
File "/usr/lib/python3.7/tarfile.py", line 2044, in extract
numeric_owner=numeric_owner)
File "/usr/lib/python3.7/tarfile.py", line 2120, in _extract_member
self.makedev(tarinfo, targetpath)
File "/usr/lib/python3.7/tarfile.py", line 2194, in makedev
os.makedev(tarinfo.devmajor, tarinfo.devminor))
PermissionError: [Errno 1] Operation not permitted
|
PermissionError
|
def extract_layer_tar(layer_tar_path, directory_path):
"""Assuming all the metadata for an image has been extracted into the
temp folder, extract the tarfile into the required directory"""
os.makedirs(directory_path)
with open(os.devnull, "w") as test:
result = subprocess.call(
["tar", "-tf", layer_tar_path], stdout=test, stderr=test
)
if not result:
root_command(extract_tar, layer_tar_path, "-C", directory_path)
|
def extract_layer_tar(layer_tar_path, directory_path):
"""Assuming all the metadata for an image has been extracted into the
temp folder, extract the tarfile into the required directory"""
with tarfile.open(layer_tar_path) as tar:
tar.extractall(directory_path)
|
https://github.com/tern-tools/tern/issues/433
|
$ python
Python 3.7.3 (default, Apr 3 2019, 19:16:38)
[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] on linux
Type "help", "copyright", "credits" or "license" for more information.
from tern.classes.docker_image import DockerImage
d = DockerImage('ubuntu:14.04')
d.load_image()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/nisha/terndev/tern/tern/classes/docker_image.py", line 161, in load_image
layer.gen_fs_hash()
File "/home/nisha/terndev/tern/tern/classes/image_layer.py", line 176, in gen_fs_hash
rootfs.extract_layer_tar(tar_file, fs_dir)
File "/home/nisha/terndev/tern/tern/utils/rootfs.py", line 90, in extract_layer_tar
tar.extractall(directory_path)
File "/usr/lib/python3.7/tarfile.py", line 2002, in extractall
numeric_owner=numeric_owner)
File "/usr/lib/python3.7/tarfile.py", line 2044, in extract
numeric_owner=numeric_owner)
File "/usr/lib/python3.7/tarfile.py", line 2120, in _extract_member
self.makedev(tarinfo, targetpath)
File "/usr/lib/python3.7/tarfile.py", line 2194, in makedev
os.makedev(tarinfo.devmajor, tarinfo.devminor))
PermissionError: [Errno 1] Operation not permitted
|
PermissionError
|
def analyze_docker_image(image_obj, redo=False, dockerfile=False):
"""Given a DockerImage object, for each layer, retrieve the packages, first
looking up in cache and if not there then looking up in the command
library. For looking up in command library first mount the filesystem
and then look up the command library for commands to run in chroot"""
# find the layers that are imported
if dockerfile:
docker.set_imported_layers(image_obj)
# add notices for each layer if it is imported
image_setup(image_obj)
shell = ""
# set up empty master list of packages
master_list = []
# find the binary by mounting the base layer
target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
binary = common.get_base_bin(image_obj.layers[0])
# set up a notice origin referring to the base command library listing
origin_command_lib = formats.invoking_base_commands
# set up a notice origin for the first layer
origin_first_layer = "Layer: " + image_obj.layers[0].fs_hash[:10]
# find the shell to invoke commands in
shell, msg = command_lib.get_image_shell(command_lib.get_base_listing(binary))
if not shell:
# add a warning notice for no shell in the command library
logger.warning("No shell listing in command library. Using default shell")
no_shell_message = errors.no_shell_listing.format(
binary=binary, default_shell=constants.shell
)
image_obj.layers[0].origins.add_notice_to_origins(
origin_command_lib, Notice(no_shell_message, "warning")
)
# add a hint notice to add the shell to the command library
add_shell_message = errors.no_listing_for_base_key.format(listing_key="shell")
image_obj.layers[0].origins.add_notice_to_origins(
origin_command_lib, Notice(add_shell_message, "hint")
)
shell = constants.shell
# only extract packages if there is a known binary and the layer is not
# cached
if binary:
if not common.load_from_cache(image_obj.layers[0], redo):
# get the packages of the first layer
rootfs.prep_rootfs(target)
common.add_base_packages(image_obj.layers[0], binary, shell)
# unmount proc, sys and dev
rootfs.undo_mount()
else:
no_base = errors.unrecognized_base.format(
image_name=image_obj.name, image_tag=image_obj.tag
)
logger.warning(no_base)
image_obj.layers[0].origins.add_notice_to_origins(
origin_first_layer, Notice(no_base, "warning")
)
# no binary means there is no shell so set to default shell
logger.warning("Unknown filesystem. Using default shell")
shell = constants.shell
# unmount the first layer
rootfs.unmount_rootfs()
# populate the master list with all packages found in the first layer
for p in image_obj.layers[0].packages:
master_list.append(p)
# get packages for subsequent layers
curr_layer = 1
while curr_layer < len(image_obj.layers):
if not common.load_from_cache(image_obj.layers[curr_layer], redo):
# get commands that created the layer
# for docker images this is retrieved from the image history
command_list = docker.get_commands_from_history(
image_obj.layers[curr_layer]
)
if command_list:
# mount diff layers from 0 till the current layer
mount_overlay_fs(image_obj, curr_layer)
# for each command look up the snippet library
for command in command_list:
pkg_listing = command_lib.get_package_listing(command.name)
if type(pkg_listing) is str:
common.add_base_packages(
image_obj.layers[curr_layer], pkg_listing, shell
)
else:
common.add_snippet_packages(
image_obj.layers[curr_layer], command, pkg_listing, shell
)
if command_list:
rootfs.undo_mount()
rootfs.unmount_rootfs()
# update the master list
common.update_master_list(master_list, image_obj.layers[curr_layer])
curr_layer = curr_layer + 1
common.save_to_cache(image_obj)
|
def analyze_docker_image(image_obj, redo=False, dockerfile=False):
"""Given a DockerImage object, for each layer, retrieve the packages, first
looking up in cache and if not there then looking up in the command
library. For looking up in command library first mount the filesystem
and then look up the command library for commands to run in chroot"""
# find the layers that are imported
if dockerfile:
docker.set_imported_layers(image_obj)
# add notices for each layer if it is imported
image_setup(image_obj)
shell = ""
# set up empty master list of packages
master_list = []
# find the binary by mounting the base layer
target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
binary = common.get_base_bin(image_obj.layers[0])
# set up a notice origin referring to the base command library listing
origin_command_lib = formats.invoking_base_commands
# set up a notice origin for the first layer
origin_first_layer = "Layer: " + image_obj.layers[0].fs_hash[:10]
# find the shell to invoke commands in
shell, msg = command_lib.get_image_shell(command_lib.get_base_listing(binary))
if not shell:
# add a warning notice for no shell in the command library
logger.warning("No shell listing in command library. Using default shell")
no_shell_message = errors.no_shell_listing.format(
binary=binary, default_shell=constants.shell
)
image_obj.layers[0].origins.add_notice_to_origins(
origin_command_lib, Notice(no_shell_message, "warning")
)
# add a hint notice to add the shell to the command library
add_shell_message = errors.no_listing_for_base_key.format(listing_key="shell")
image_obj.layers[0].origins.add_notice_origins(
origin_command_lib, Notice(add_shell_message, "hint")
)
shell = constants.shell
# only extract packages if there is a known binary and the layer is not
# cached
if binary:
if not common.load_from_cache(image_obj.layers[0], redo):
# get the packages of the first layer
rootfs.prep_rootfs(target)
common.add_base_packages(image_obj.layers[0], binary, shell)
# unmount proc, sys and dev
rootfs.undo_mount()
else:
no_base = errors.unrecognized_base.format(
image_name=image_obj.name, image_tag=image_obj.tag
)
logger.warning(no_base)
image_obj.layers[0].origins.add_notice_to_origins(
origin_first_layer, Notice(no_base, "warning")
)
# no binary means there is no shell so set to default shell
logger.warning("Unknown filesystem. Using default shell")
shell = constants.shell
# unmount the first layer
rootfs.unmount_rootfs()
# populate the master list with all packages found in the first layer
for p in image_obj.layers[0].packages:
master_list.append(p)
# get packages for subsequent layers
curr_layer = 1
while curr_layer < len(image_obj.layers):
if not common.load_from_cache(image_obj.layers[curr_layer], redo):
# get commands that created the layer
# for docker images this is retrieved from the image history
command_list = docker.get_commands_from_history(
image_obj.layers[curr_layer]
)
if command_list:
# mount diff layers from 0 till the current layer
mount_overlay_fs(image_obj, curr_layer)
# for each command look up the snippet library
for command in command_list:
pkg_listing = command_lib.get_package_listing(command.name)
if type(pkg_listing) is str:
common.add_base_packages(
image_obj.layers[curr_layer], pkg_listing, shell
)
else:
common.add_snippet_packages(
image_obj.layers[curr_layer], command, pkg_listing, shell
)
if command_list:
rootfs.undo_mount()
rootfs.unmount_rootfs()
# update the master list
common.update_master_list(master_list, image_obj.layers[curr_layer])
curr_layer = curr_layer + 1
common.save_to_cache(image_obj)
|
https://github.com/tern-tools/tern/issues/142
|
2018-12-12 17:09:39,537 - DEBUG - tern - Starting...
2018-12-12 17:09:39,538 - DEBUG - container - Running command: docker ps
2018-12-12 17:09:39,574 - DEBUG - report - Setting up...
2018-12-12 17:09:39,575 - DEBUG - container - Running command: docker images clearlinux/stacks-dlrs-mkl:latest
2018-12-12 17:09:39,630 - DEBUG - container - Running command: docker save clearlinux/stacks-dlrs-mkl:latest
[sudo] password for MYUSER:
2018-12-12 17:13:08,395 - DEBUG - rootfs - Running command: sudo mount -o bind temp/141e37d52b424cc0b1e59202bf981a3d4
4d834374903bc2b91258c4b2b4f8bd9/contents temp/mergedir
2018-12-12 17:13:08,418 - WARNING - command_lib - No listing for key . Consider adding this listing to command_lib/ba
se.yml.
2018-12-12 17:13:08,418 - WARNING - report - No shell listing in command library. Using default shell
Traceback (most recent call last):
File "./tern", line 93, in <module>
main(args)
File "./tern", line 47, in main
report.execute_docker_image(args)
File "/data/workspace/tern/report/report.py", line 407, in execute_docker_image
analyze_docker_image(full_image, args.redo)
File "/data/workspace/tern/report/report.py", line 191, in analyze_docker_image
image_obj.layers[0].origins.add_notice_origins(
AttributeError: 'Origins' object has no attribute 'add_notice_origins'
|
AttributeError
|
def analyze_docker_image(image_obj, dockerfile=False):
"""Given a DockerImage object, for each layer, retrieve the packages, first
looking up in cache and if not there then looking up in the command
library. For looking up in command library first mount the filesystem
and then look up the command library for commands to run in chroot"""
# find the layers that are imported
if dockerfile:
docker.set_imported_layers(image_obj)
# add notices for each layer if it is imported
image_setup(image_obj)
shell = ""
# set up empty master list of package names
master_list = []
# find the binary by mounting the base layer
target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
binary = common.get_base_bin(image_obj.layers[0])
# set up a notice origin referring to the base command library listing
origin_command_lib = formats.invoking_base_commands
# set up a notice origin for the first layer
origin_first_layer = "Layer: " + image_obj.layers[0].fs_hash[:10]
# find the shell to invoke commands in
shell, msg = command_lib.get_image_shell(command_lib.get_base_listing(binary))
if binary and not shell:
# add a warning notice for no shell in the command library
logger.warning("No shell listing in command library. Using default shell")
no_shell_message = errors.no_shell_listing.format(
binary=binary, default_shell=constants.shell
)
image_obj.layers[0].origins.add_notice_to_origins(
origin_command_lib, Notice(no_shell_message, "warning")
)
# add a hint notice to add the shell to the command library
add_shell_message = errors.no_listing_for_base_key.format(listing_key="shell")
image_obj.layers[0].origins.add_notice_origins(
origin_command_lib, Notice(add_shell_message, "hint")
)
shell = constants.shell
# only extract packages if there is a known binary and the layer is not
# cached
if not common.load_from_cache(image_obj.layers[0]):
# get the packages of the first layer
rootfs.prep_rootfs(target)
common.add_base_packages(image_obj.layers[0], binary, shell)
# unmount proc, sys and dev
rootfs.undo_mount()
else:
no_base = errors.unrecognized_base.format(
image_name=image_obj.name, image_tag=image_obj.tag
)
logger.warning(no_base)
image_obj.layers[0].origins.add_notice_to_origins(
origin_first_layer, Notice(no_base, "warning")
)
# no binary means there is no shell so set to default shell
logger.warning("Unknown filesystem. Using default shell")
shell = constants.shell
# unmount the first layer
rootfs.unmount_rootfs()
# populate the master list with all packages found in the first layer
# can't use assignment as that will just point to the image object's layer
for p in image_obj.layers[0].get_package_names():
master_list.append(p)
# get packages for subsequent layers
curr_layer = 1
while curr_layer < len(image_obj.layers):
if not common.load_from_cache(image_obj.layers[curr_layer]):
# get commands that created the layer
# for docker images this is retrieved from the image history
command_list = docker.get_commands_from_history(
image_obj.layers[curr_layer]
)
if command_list:
# mount diff layers from 0 till the current layer
mount_overlay_fs(image_obj, curr_layer)
# for each command look up the snippet library
for command in command_list:
pkg_listing = command_lib.get_package_listing(command.name)
if type(pkg_listing) is str:
common.add_base_packages(
image_obj.layers[curr_layer], pkg_listing, shell
)
else:
common.add_snippet_packages(
image_obj.layers[curr_layer], command, pkg_listing, shell
)
if command_list:
rootfs.undo_mount()
rootfs.unmount_rootfs()
# update the master list
common.update_master_list(master_list, image_obj.layers[curr_layer])
curr_layer = curr_layer + 1
common.save_to_cache(image_obj)
|
def analyze_docker_image(image_obj, dockerfile=False):
"""Given a DockerImage object, for each layer, retrieve the packages, first
looking up in cache and if not there then looking up in the command
library. For looking up in command library first mount the filesystem
and then look up the command library for commands to run in chroot"""
# find the layers that are imported
if dockerfile:
docker.set_imported_layers(image_obj)
# add notices for each layer if it is imported
image_setup(image_obj)
shell = ""
# set up empty master list of package names
master_list = []
# find the binary by mounting the base layer
target = rootfs.mount_base_layer(image_obj.layers[0].tar_file)
binary = common.get_base_bin(image_obj.layers[0])
# set up a notice origin referring to the base command library listing
origin_command_lib = formats.invoking_base_commands
# find the shell to invoke commands in
shell, msg = command_lib.get_image_shell(command_lib.get_base_listing(binary))
if not shell:
# add a warning notice for no shell in the command library
logger.warning("No shell listing in command library. Using default shell")
no_shell_message = errors.no_shell_listing.format(
binary, default_shell=constants.shell
)
image_obj.layers[0].origins.add_notice_to_origins(
origin_command_lib, Notice(no_shell_message, "warning")
)
# add a hint notice to add the shell to the command library
add_shell_message = errors.no_listing_for_base_key.format(listing_key="shell")
image_obj.layers[0].origins.add_notice_origins(
origin_command_lib, Notice(add_shell_message, "hint")
)
shell = constants.shell
# only extract packages if there is a known binary and the layer is not
# cached
if binary:
if not common.load_from_cache(image_obj.layers[0]):
# get the packages of the first layer
rootfs.prep_rootfs(target)
common.add_base_packages(image_obj.layers[0], binary, shell)
# unmount proc, sys and dev
rootfs.undo_mount()
else:
logger.warning(
errors.unrecognized_base.format(
image_name=image_obj.name, image_tag=image_obj.tag
)
)
# unmount the first layer
rootfs.unmount_rootfs()
# populate the master list with all packages found in the first layer
# can't use assignment as that will just point to the image object's layer
for p in image_obj.layers[0].get_package_names():
master_list.append(p)
# get packages for subsequent layers
curr_layer = 1
while curr_layer < len(image_obj.layers):
if not common.load_from_cache(image_obj.layers[curr_layer]):
# get commands that created the layer
# for docker images this is retrieved from the image history
command_list = docker.get_commands_from_history(
image_obj.layers[curr_layer]
)
if command_list:
# mount diff layers from 0 till the current layer
mount_overlay_fs(image_obj, curr_layer)
# for each command look up the snippet library
for command in command_list:
pkg_listing = command_lib.get_package_listing(command.name)
if type(pkg_listing) is str:
common.add_base_packages(
image_obj.layers[curr_layer], pkg_listing, shell
)
else:
common.add_snippet_packages(
image_obj.layers[curr_layer], command, pkg_listing, shell
)
if command_list:
rootfs.undo_mount()
rootfs.unmount_rootfs()
# update the master list
common.update_master_list(master_list, image_obj.layers[curr_layer])
curr_layer = curr_layer + 1
common.save_to_cache(image_obj)
|
https://github.com/tern-tools/tern/issues/121
|
2018-11-07 12:31:41,261 - DEBUG - tern - Starting...
2018-11-07 12:31:41,268 - DEBUG - container - Running command: docker ps
2018-11-07 12:31:41,344 - DEBUG - report - Setting up...
2018-11-07 12:31:45,410 - DEBUG - container - Running command: docker images docker.io/hello-world:latest
2018-11-07 12:31:45,478 - DEBUG - container - Running command: docker save docker.io/hello-world:latest
[sudo] password for nisha:
2018-11-07 12:31:51,953 - DEBUG - rootfs - Running command: sudo mount -o bind temp/f22789aafb439644ab76948424a0c1b64f328cecde82c95edad3813aaadbe077/contents temp/mergedir
2018-11-07 12:31:52,019 - WARNING - command_lib - No listing for key . Consider adding this listing to command_lib/base.yml.
2018-11-07 12:31:52,020 - WARNING - report - No shell listing in command library. Using default shell
Traceback (most recent call last):
File "./tern", line 91, in <module>
main(args)
File "./tern", line 47, in main
report.execute_docker_image(args)
File "/home/nisha/tern3.7/tern/report/report.py", line 393, in execute_docker_image
analyze_docker_image(full_image)
File "/home/nisha/tern3.7/tern/report/report.py", line 176, in analyze_docker_image
binary, default_shell=constants.shell)
KeyError: 'binary'
|
KeyError
|
def execute_docker_image(args):
"""Execution path if given a Docker image"""
check_docker_daemon()
logger.debug("Setting up...")
setup(image_tag_string=args.docker_image)
# attempt to get built image metadata
full_image = load_full_image(args.docker_image)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=args.docker_image)
)
# analyze image
analyze_docker_image(full_image)
# generate report
generate_report(args, full_image)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
if not args.keep_working_dir:
clean_image_tars(full_image)
logger.debug("Teardown...")
teardown()
if not args.keep_working_dir:
clean_working_dir()
|
def execute_docker_image(args):
"""Execution path if given a Docker image"""
check_docker_daemon()
logger.debug("Setting up...")
setup(image_tag_string=args.docker_image)
# attempt to get built image metadata
full_image = load_full_image(args.docker_image)
if full_image.origins.is_empty():
# image loading was successful
# Add an image origin here
full_image.origins.add_notice_origin(
formats.docker_image.format(imagetag=args.docker_image)
)
# analyze image
analyze_docker_image(full_image)
# generate report
generate_report(args, full_image)
else:
# we cannot load the full image
logger.warning("Cannot retrieve full image metadata")
if not args.keep_working_dir:
clean_image_tars(full_image)
logger.debug("Teardown...")
teardown()
if not args.keep_working_dir:
shutil.rmtree(os.path.abspath(constants.temp_folder))
|
https://github.com/tern-tools/tern/issues/120
|
2018-11-07 20:44:35,853 - DEBUG - rootfs - Running command: sudo rm -rf temp/mergedir
Traceback (most recent call last):
File "./tern", line 91, in
main(args)
File "./tern", line 47, in main
report.execute_docker_image(args)
File "/home/armijn/git/tern/report/report.py", line 402, in execute_docker_image
teardown()
File "/home/armijn/git/tern/report/report.py", line 75, in teardown
rootfs.clean_up()
File "/home/armijn/git/tern/utils/rootfs.py", line 163, in clean_up
root_command(remove, mergedir_path)
File "/home/armijn/git/tern/utils/rootfs.py", line 57, in root_command
raise subprocess.CalledProcessError(1, cmd=full_cmd, output=error)
subprocess.CalledProcessError: Command '['sudo', 'rm', '-rf', 'temp/mergedir']' returned non-zero exit status 1.
|
subprocess.CalledProcessError
|
def to_dict(self):
layer_dict = {}
pkg_list = []
for pkg in self.__packages:
pkg_list.append(pkg.to_dict())
layer_dict.update(
{
self.fs_hash: {
"packages": pkg_list,
"tar_file": self.tar_file,
"created_by": self.created_by,
}
}
)
return layer_dict
|
def to_dict(self):
layer_dict = {}
pkg_list = []
for pkg in self.__packages:
pkg_list.append(pkg.to_dict())
layer_dict.update(
{
self.fs_hash: {
"packages": pkg_list,
"tar_file": self.__tar_file,
"created_by": self.__created_by,
}
}
)
return layer_dict
|
https://github.com/tern-tools/tern/issues/96
|
$ python tests/test_class_image_layer.py
E....E
======================================================================
ERROR: testAddNotice (_main_.TestClassImageLayer)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/test_class_image_layer.py", line 48, in testAddNotice
self.layer.add_notice(n)
AttributeError: 'ImageLayer' object has no attribute 'add_notice'
======================================================================
ERROR: testToDict (_main_.TestClassImageLayer)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/test_class_image_layer.py", line 55, in testToDict
self.assertTrue(a_dict['123abc'])
KeyError: '123abc'
----------------------------------------------------------------------
Ran 6 tests in 0.001s
FAILED (errors=2)
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.