after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def forward(self, init_features: Tensor) -> Tensor:
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
|
def forward(self, init_features: Tensor) -> Tensor: # type: ignore[override]
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
|
https://github.com/pytorch/vision/issues/3027
|
Traceback (most recent call last):
File "repro.py", line 7, in <module>
torch.jit.script(model).save('densenet161.pt')
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_script.py", line 911, in script
return torch.jit._recursive.create_script_module(
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 370, in create_script_module
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 426, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_script.py", line 388, in _construct
init_fn(script_module)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 406, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 426, in create_script_module_impl
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_script.py", line 388, in _construct
init_fn(script_module)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 406, in init_fn
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 382, in create_script_module_impl
method_stubs = stubs_fn(nn_module)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 618, in infer_methods_to_compile
stubs.append(make_stub_from_method(nn_module, method))
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 52, in make_stub_from_method
return make_stub(func, method_name)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/_recursive.py", line 37, in make_stub
ast = get_jit_def(func, name, self_name="RecursiveScriptModule")
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/frontend.py", line 259, in get_jit_def
return build_def(ctx, fn_def, type_line, def_name, self_name=self_name)
File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/jit/frontend.py", line 288, in build_def
type_comment_decl = torch._C.parse_type_comment(type_line)
RuntimeError: expected type comment but found 'def' here:
def forward(self, init_features: Tensor) -> Tensor: # type: ignore[override]
~~~ <--- HERE
|
RuntimeError
|
def pad(img, padding, fill=0, padding_mode="constant"):
r"""Pad the given PIL.Image on all sides with the given "pad" value.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If a tuple or list of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple or list of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively. For compatibility reasons
with ``functional_tensor.pad``, if a tuple or list of length 1 is provided, it is interpreted as
a single int.
fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
"""
if not _is_pil_image(img):
raise TypeError("img should be PIL Image. Got {}".format(type(img)))
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, list):
padding = tuple(padding)
if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
raise ValueError(
"Padding must be an int or a 1, 2, or 4 element tuple, not a "
+ "{} element tuple".format(len(padding))
)
if isinstance(padding, tuple) and len(padding) == 1:
# Compatibility with `functional_tensor.pad`
padding = padding[0]
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError(
"Padding mode should be either constant, edge, reflect or symmetric"
)
if padding_mode == "constant":
opts = _parse_fill(fill, img, "2.3.0", name="fill")
if img.mode == "P":
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, **opts)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, **opts)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, tuple) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, tuple) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
if img.mode == "P":
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(
img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode
)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(
img,
((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)),
padding_mode,
)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(
img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode
)
return Image.fromarray(img)
|
def pad(img, padding, fill=0, padding_mode="constant"):
r"""Pad the given PIL.Image on all sides with the given "pad" value.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If a tuple or list of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple or list of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively. For compatibility reasons
with ``functional_tensor.pad``, if a tuple or list of length 1 is provided, it is interpreted as
a single int.
fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
"""
if not _is_pil_image(img):
raise TypeError("img should be PIL Image. Got {}".format(type(img)))
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, list):
padding = tuple(padding)
if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
raise ValueError(
"Padding must be an int or a 1, 2, or 4 element tuple, not a "
+ "{} element tuple".format(len(padding))
)
if isinstance(padding, tuple) and len(padding) == 1:
# Compatibility with `functional_tensor.pad`
padding = padding[0]
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError(
"Padding mode should be either constant, edge, reflect or symmetric"
)
if padding_mode == "constant":
if isinstance(fill, numbers.Number):
fill = (fill,) * len(img.getbands())
if len(fill) != len(img.getbands()):
raise ValueError(
"fill should have the same number of elements "
"as the number of channels in the image "
"({}), got {} instead".format(len(img.getbands()), len(fill))
)
if img.mode == "P":
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, fill=fill)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, fill=fill)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, tuple) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, tuple) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
if img.mode == "P":
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(
img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode
)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(
img,
((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)),
padding_mode,
)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(
img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode
)
return Image.fromarray(img)
|
https://github.com/pytorch/vision/issues/2512
|
TypeError Traceback (most recent call last)
<ipython-input-28-086b6b55847c> in <module>
4 x = PIL.Image.fromarray(t.numpy())
5 f = torchvision.transforms.Pad(2)
----> 6 f(x)
~/anaconda3/lib/python3.7/site-packages/torchvision/transforms/transforms.py in __call__(self, img)
338 PIL Image: Padded image.
339 """
--> 340 return F.pad(img, self.padding, self.fill, self.padding_mode)
341
342 def __repr__(self):
~/anaconda3/lib/python3.7/site-packages/torchvision/transforms/functional.py in pad(img, padding, fill, padding_mode)
405 return image
406
--> 407 return ImageOps.expand(img, border=padding, fill=fill)
408 else:
409 if isinstance(padding, int):
~/anaconda3/lib/python3.7/site-packages/PIL/ImageOps.py in expand(image, border, fill)
360 width = left + image.size[0] + right
361 height = top + image.size[1] + bottom
--> 362 out = Image.new(image.mode, (width, height), _color(fill, image.mode))
363 out.paste(image, (left, top))
364 return out
~/anaconda3/lib/python3.7/site-packages/PIL/Image.py in new(mode, size, color)
2611 im.palette = ImagePalette.ImagePalette()
2612 color = im.palette.getcolor(color)
-> 2613 return im._new(core.fill(mode, size, color))
|
TypeError
|
def _parse_fill(fill, img, min_pil_version, name="fillcolor"):
"""Helper function to get the fill color for rotate, perspective transforms, and pad.
Args:
fill (n-tuple or int or float): Pixel fill value for area outside the transformed
image. If int or float, the value is used for all bands respectively.
Defaults to 0 for all bands.
img (PIL Image): Image to be filled.
min_pil_version (str): The minimum PILLOW version for when the ``fillcolor`` option
was first introduced in the calling function. (e.g. rotate->5.2.0, perspective->5.0.0)
name (str): Name of the ``fillcolor`` option in the output. Defaults to ``"fillcolor"``.
Returns:
dict: kwarg for ``fillcolor``
"""
major_found, minor_found = (int(v) for v in PILLOW_VERSION.split(".")[:2])
major_required, minor_required = (int(v) for v in min_pil_version.split(".")[:2])
if major_found < major_required or (
major_found == major_required and minor_found < minor_required
):
if fill is None:
return {}
else:
msg = (
"The option to fill background area of the transformed image, "
"requires pillow>={}"
)
raise RuntimeError(msg.format(min_pil_version))
num_bands = len(img.getbands())
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_bands > 1:
fill = tuple([fill] * num_bands)
if not isinstance(fill, (int, float)) and len(fill) != num_bands:
msg = (
"The number of elements in 'fill' does not match the number of "
"bands of the image ({} != {})"
)
raise ValueError(msg.format(len(fill), num_bands))
return {name: fill}
|
def _parse_fill(fill, img, min_pil_version):
"""Helper function to get the fill color for rotate and perspective transforms.
Args:
fill (n-tuple or int or float): Pixel fill value for area outside the transformed
image. If int or float, the value is used for all bands respectively.
Defaults to 0 for all bands.
img (PIL Image): Image to be filled.
min_pil_version (str): The minimum PILLOW version for when the ``fillcolor`` option
was first introduced in the calling function. (e.g. rotate->5.2.0, perspective->5.0.0)
Returns:
dict: kwarg for ``fillcolor``
"""
major_found, minor_found = (int(v) for v in PILLOW_VERSION.split(".")[:2])
major_required, minor_required = (int(v) for v in min_pil_version.split(".")[:2])
if major_found < major_required or (
major_found == major_required and minor_found < minor_required
):
if fill is None:
return {}
else:
msg = (
"The option to fill background area of the transformed image, "
"requires pillow>={}"
)
raise RuntimeError(msg.format(min_pil_version))
num_bands = len(img.getbands())
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_bands > 1:
fill = tuple([fill] * num_bands)
if not isinstance(fill, (int, float)) and len(fill) != num_bands:
msg = (
"The number of elements in 'fill' does not match the number of "
"bands of the image ({} != {})"
)
raise ValueError(msg.format(len(fill), num_bands))
return {"fillcolor": fill}
|
https://github.com/pytorch/vision/issues/2512
|
TypeError Traceback (most recent call last)
<ipython-input-28-086b6b55847c> in <module>
4 x = PIL.Image.fromarray(t.numpy())
5 f = torchvision.transforms.Pad(2)
----> 6 f(x)
~/anaconda3/lib/python3.7/site-packages/torchvision/transforms/transforms.py in __call__(self, img)
338 PIL Image: Padded image.
339 """
--> 340 return F.pad(img, self.padding, self.fill, self.padding_mode)
341
342 def __repr__(self):
~/anaconda3/lib/python3.7/site-packages/torchvision/transforms/functional.py in pad(img, padding, fill, padding_mode)
405 return image
406
--> 407 return ImageOps.expand(img, border=padding, fill=fill)
408 else:
409 if isinstance(padding, int):
~/anaconda3/lib/python3.7/site-packages/PIL/ImageOps.py in expand(image, border, fill)
360 width = left + image.size[0] + right
361 height = top + image.size[1] + bottom
--> 362 out = Image.new(image.mode, (width, height), _color(fill, image.mode))
363 out.paste(image, (left, top))
364 return out
~/anaconda3/lib/python3.7/site-packages/PIL/Image.py in new(mode, size, color)
2611 im.palette = ImagePalette.ImagePalette()
2612 color = im.palette.getcolor(color)
-> 2613 return im._new(core.fill(mode, size, color))
|
TypeError
|
def googlenet(pretrained=False, progress=True, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if "transform_input" not in kwargs:
kwargs["transform_input"] = True
if "aux_logits" not in kwargs:
kwargs["aux_logits"] = False
if kwargs["aux_logits"]:
warnings.warn(
"auxiliary heads in the pretrained googlenet model are NOT pretrained, "
"so make sure to train them"
)
original_aux_logits = kwargs["aux_logits"]
kwargs["aux_logits"] = True
kwargs["init_weights"] = False
model = GoogLeNet(**kwargs)
state_dict = load_state_dict_from_url(
model_urls["googlenet"], progress=progress
)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None
model.aux2 = None
return model
return GoogLeNet(**kwargs)
|
def googlenet(pretrained=False, progress=True, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if "transform_input" not in kwargs:
kwargs["transform_input"] = True
if "aux_logits" not in kwargs:
kwargs["aux_logits"] = False
if kwargs["aux_logits"]:
warnings.warn(
"auxiliary heads in the pretrained googlenet model are NOT pretrained, "
"so make sure to train them"
)
original_aux_logits = kwargs["aux_logits"]
kwargs["aux_logits"] = True
kwargs["init_weights"] = False
model = GoogLeNet(**kwargs)
state_dict = load_state_dict_from_url(
model_urls["googlenet"], progress=progress
)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
del model.aux1, model.aux2
return model
return GoogLeNet(**kwargs)
|
https://github.com/pytorch/vision/issues/1936
|
Traceback (most recent call last):
File "quantize_resnet.py", line 181, in <module>
main()
File "quantize_resnet.py", line 138, in main
m = torch.jit.script(model.float().eval())
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/__init__.py", line 1267, in script
return torch.jit._recursive.create_script_module(obj, torch.jit._recursive.infer_methods_to_compile)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 305, in create_script_module
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 352, in create_script_module_impl
create_methods_from_stubs(concrete_type, stubs)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 279, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 578, in compile_unbound_method
create_methods_from_stubs(concrete_type, (stub,))
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 279, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
RuntimeError:
Module 'GoogLeNet' has no attribute 'aux1' :
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torchvision-0.6.0a0+b2e9565-py3.7-linux-x86_64.egg/torchvision/models/googlenet.py", line 156
aux_defined = self.training and self.aux_logits
if aux_defined:
aux1 = self.aux1(x)
~~~~~~~~~ <--- HERE
else:
aux1 = None
'GoogLeNet._forward' is being compiled since it was called from 'GoogLeNet.forward'
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torchvision-0.6.0a0+b2e9565-py3.7-linux-x86_64.egg/torchvision/models/googlenet.py", line 200
# type: (Tensor) -> GoogLeNetOutputs
x = self._transform_input(x)
x, aux1, aux2 = self._forward(x)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
|
RuntimeError
|
def __init__(
self,
num_classes=1000,
aux_logits=True,
transform_input=False,
init_weights=True,
blocks=None,
):
super(GoogLeNet, self).__init__()
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
assert len(blocks) == 3
conv_block = blocks[0]
inception_block = blocks[1]
inception_aux_block = blocks[2]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = conv_block(64, 64, kernel_size=1)
self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = inception_aux_block(512, num_classes)
self.aux2 = inception_aux_block(528, num_classes)
else:
self.aux1 = None
self.aux2 = None
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
self._initialize_weights()
|
def __init__(
self,
num_classes=1000,
aux_logits=True,
transform_input=False,
init_weights=True,
blocks=None,
):
super(GoogLeNet, self).__init__()
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
assert len(blocks) == 3
conv_block = blocks[0]
inception_block = blocks[1]
inception_aux_block = blocks[2]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = conv_block(64, 64, kernel_size=1)
self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = inception_aux_block(512, num_classes)
self.aux2 = inception_aux_block(528, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
self._initialize_weights()
|
https://github.com/pytorch/vision/issues/1936
|
Traceback (most recent call last):
File "quantize_resnet.py", line 181, in <module>
main()
File "quantize_resnet.py", line 138, in main
m = torch.jit.script(model.float().eval())
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/__init__.py", line 1267, in script
return torch.jit._recursive.create_script_module(obj, torch.jit._recursive.infer_methods_to_compile)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 305, in create_script_module
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 352, in create_script_module_impl
create_methods_from_stubs(concrete_type, stubs)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 279, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 578, in compile_unbound_method
create_methods_from_stubs(concrete_type, (stub,))
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 279, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
RuntimeError:
Module 'GoogLeNet' has no attribute 'aux1' :
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torchvision-0.6.0a0+b2e9565-py3.7-linux-x86_64.egg/torchvision/models/googlenet.py", line 156
aux_defined = self.training and self.aux_logits
if aux_defined:
aux1 = self.aux1(x)
~~~~~~~~~ <--- HERE
else:
aux1 = None
'GoogLeNet._forward' is being compiled since it was called from 'GoogLeNet.forward'
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torchvision-0.6.0a0+b2e9565-py3.7-linux-x86_64.egg/torchvision/models/googlenet.py", line 200
# type: (Tensor) -> GoogLeNetOutputs
x = self._transform_input(x)
x, aux1, aux2 = self._forward(x)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
|
RuntimeError
|
def _forward(self, x):
# type: (Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
aux1 = torch.jit.annotate(Optional[Tensor], None)
if self.aux1 is not None:
if self.training:
aux1 = self.aux1(x)
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
aux2 = torch.jit.annotate(Optional[Tensor], None)
if self.aux2 is not None:
if self.training:
aux2 = self.aux2(x)
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
return x, aux2, aux1
|
def _forward(self, x):
# type: (Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
aux_defined = self.training and self.aux_logits
if aux_defined:
aux1 = self.aux1(x)
else:
aux1 = None
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
if aux_defined:
aux2 = self.aux2(x)
else:
aux2 = None
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
return x, aux2, aux1
|
https://github.com/pytorch/vision/issues/1936
|
Traceback (most recent call last):
File "quantize_resnet.py", line 181, in <module>
main()
File "quantize_resnet.py", line 138, in main
m = torch.jit.script(model.float().eval())
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/__init__.py", line 1267, in script
return torch.jit._recursive.create_script_module(obj, torch.jit._recursive.infer_methods_to_compile)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 305, in create_script_module
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 352, in create_script_module_impl
create_methods_from_stubs(concrete_type, stubs)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 279, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 578, in compile_unbound_method
create_methods_from_stubs(concrete_type, (stub,))
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torch/jit/_recursive.py", line 279, in create_methods_from_stubs
concrete_type._create_methods(defs, rcbs, defaults)
RuntimeError:
Module 'GoogLeNet' has no attribute 'aux1' :
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torchvision-0.6.0a0+b2e9565-py3.7-linux-x86_64.egg/torchvision/models/googlenet.py", line 156
aux_defined = self.training and self.aux_logits
if aux_defined:
aux1 = self.aux1(x)
~~~~~~~~~ <--- HERE
else:
aux1 = None
'GoogLeNet._forward' is being compiled since it was called from 'GoogLeNet.forward'
File "/data/users/jerryzh/anaconda3/envs/py3/lib/python3.7/site-packages/torchvision-0.6.0a0+b2e9565-py3.7-linux-x86_64.egg/torchvision/models/googlenet.py", line 200
# type: (Tensor) -> GoogLeNetOutputs
x = self._transform_input(x)
x, aux1, aux2 = self._forward(x)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
|
RuntimeError
|
def set_cell_anchors(self, dtype, device):
# type: (int, Device) -> None # noqa: F821
if self.cell_anchors is not None:
cell_anchors = self.cell_anchors
assert cell_anchors is not None
# suppose that all anchors have the same device
# which is a valid assumption in the current state of the codebase
if cell_anchors[0].device == device:
return
cell_anchors = [
self.generate_anchors(sizes, aspect_ratios, dtype, device)
for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)
]
self.cell_anchors = cell_anchors
|
def set_cell_anchors(self, dtype, device):
# type: (int, Device) -> None # noqa: F821
if self.cell_anchors is not None:
return
cell_anchors = [
self.generate_anchors(sizes, aspect_ratios, dtype, device)
for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)
]
self.cell_anchors = cell_anchors
|
https://github.com/pytorch/vision/issues/1738
|
Traceback (most recent call last):
File "/home/ubrdog/PycharmProjects/FacialDetection/bug_replication.py", line 17, in <module>
out_data2 = model(dummy_data)
File "/home/ubrdog/miniconda3/envs/FacialDetection/lib/python3.7/site-packages/torch/nn/modules/module.py", line 539, in __call__
result = self.forward(*input, **kwargs)
File "/home/ubrdog/miniconda3/envs/FacialDetection/lib/python3.7/site-packages/torchvision-0.5.0a0+61763fa-py3.7-linux-x86_64.egg/torchvision/models/detection/generalized_rcnn.py", line 70, in forward
proposals, proposal_losses = self.rpn(images, features, targets)
File "/home/ubrdog/miniconda3/envs/FacialDetection/lib/python3.7/site-packages/torch/nn/modules/module.py", line 539, in __call__
result = self.forward(*input, **kwargs)
File "/home/ubrdog/miniconda3/envs/FacialDetection/lib/python3.7/site-packages/torchvision-0.5.0a0+61763fa-py3.7-linux-x86_64.egg/torchvision/models/detection/rpn.py", line 472, in forward
proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
File "/home/ubrdog/miniconda3/envs/FacialDetection/lib/python3.7/site-packages/torchvision-0.5.0a0+61763fa-py3.7-linux-x86_64.egg/torchvision/models/detection/_utils.py", line 187, in decode
rel_codes.reshape(box_sum, -1), concat_boxes
File "/home/ubrdog/miniconda3/envs/FacialDetection/lib/python3.7/site-packages/torchvision-0.5.0a0+61763fa-py3.7-linux-x86_64.egg/torchvision/models/detection/_utils.py", line 218, in decode_single
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
RuntimeError: expected device cpu but got device cuda:0
|
RuntimeError
|
def __init__(
self, root, split="train", transform=None, target_transform=None, download=False
):
if split not in self.splits:
raise ValueError(
'Split "{}" not found. Valid splits are: {}'.format(
split,
", ".join(self.splits),
)
)
super(STL10, self).__init__(root)
self.transform = transform
self.target_transform = target_transform
self.split = split # train/test/unlabeled set
if download:
self.download()
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to download it"
)
# now load the picked numpy arrays
if self.split == "train":
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0]
)
elif self.split == "train+unlabeled":
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0]
)
unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
self.data = np.concatenate((self.data, unlabeled_data))
self.labels = np.concatenate(
(self.labels, np.asarray([-1] * unlabeled_data.shape[0]))
)
elif self.split == "unlabeled":
self.data, _ = self.__loadfile(self.train_list[2][0])
self.labels = np.asarray([-1] * self.data.shape[0])
else: # self.split == 'test':
self.data, self.labels = self.__loadfile(
self.test_list[0][0], self.test_list[1][0]
)
class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
if os.path.isfile(class_file):
with open(class_file) as f:
self.classes = f.read().splitlines()
|
def __init__(
self, root, split="train", transform=None, target_transform=None, download=False
):
if split not in self.splits:
raise ValueError(
'Split "{}" not found. Valid splits are: {}'.format(
split,
", ".join(self.splits),
)
)
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.split = split # train/test/unlabeled set
if download:
self.download()
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to download it"
)
# now load the picked numpy arrays
if self.split == "train":
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0]
)
elif self.split == "train+unlabeled":
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0]
)
unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
self.data = np.concatenate((self.data, unlabeled_data))
self.labels = np.concatenate(
(self.labels, np.asarray([-1] * unlabeled_data.shape[0]))
)
elif self.split == "unlabeled":
self.data, _ = self.__loadfile(self.train_list[2][0])
self.labels = np.asarray([-1] * self.data.shape[0])
else: # self.split == 'test':
self.data, self.labels = self.__loadfile(
self.test_list[0][0], self.test_list[1][0]
)
class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
if os.path.isfile(class_file):
with open(class_file) as f:
self.classes = f.read().splitlines()
|
https://github.com/pytorch/vision/issues/968
|
In [1]: from torchvision.datasets import STL10
In [2]: data = STL10("Documents/dataset", download=True)
Files already downloaded and verified
In [3]: print(data)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-dbd883db58b7> in <module>
----> 1 print(data)
~/pythonenvs/bentley/lib/python3.7/site-packages/torchvision/datasets/vision.py in __repr__(self)
38 body.append("Root location: {}".format(self.root))
39 body += self.extra_repr().splitlines()
---> 40 if self.transforms is not None:
41 body += [repr(self.transforms)]
42 lines = [head] + [" " * self._repr_indent + line for line in body]
AttributeError: 'STL10' object has no attribute 'transforms'
|
AttributeError
|
def __repr__(self):
head = "Dataset " + self.__class__.__name__
body = ["Number of datapoints: {}".format(self.__len__())]
if self.root is not None:
body.append("Root location: {}".format(self.root))
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
|
def __repr__(self):
head = "Dataset " + self.__class__.__name__
body = ["Number of datapoints: {}".format(self.__len__())]
if self.root is not None:
body.append("Root location: {}".format(self.root))
body += self.extra_repr().splitlines()
if self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
|
https://github.com/pytorch/vision/issues/968
|
In [1]: from torchvision.datasets import STL10
In [2]: data = STL10("Documents/dataset", download=True)
Files already downloaded and verified
In [3]: print(data)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-dbd883db58b7> in <module>
----> 1 print(data)
~/pythonenvs/bentley/lib/python3.7/site-packages/torchvision/datasets/vision.py in __repr__(self)
38 body.append("Root location: {}".format(self.root))
39 body += self.extra_repr().splitlines()
---> 40 if self.transforms is not None:
41 body += [repr(self.transforms)]
42 lines = [head] + [" " * self._repr_indent + line for line in body]
AttributeError: 'STL10' object has no attribute 'transforms'
|
AttributeError
|
def resolve_host(self):
optional = [
item
for item in self.OPTIONAL_HOSTS
if item in self.config and self.config[item] != "none"
]
for item in list(self.HOSTS) + optional:
host = "HOST_" + item
address = item + "_ADDRESS"
self.config[address] = system.resolve_address(self.config[host])
|
def resolve_host(self):
optional = [
item
for item in self.OPTIONAL_HOSTS
if item in self.config and self.config[item] != "none"
]
for item in list(self.HOSTS) + optional:
host = "HOST_" + item
self.config[host] = system.resolve_address(self.config[host])
|
https://github.com/Mailu/Mailu/issues/884
|
mailu_admin| [2019-01-25 14:15:37,610] ERROR in app: Exception on /internal/auth/email [GET]
mailu_admin| Traceback (most recent call last):
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 493, in connect
mailu_admin| sock = self._connect()
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 520, in _connect
mailu_admin| socket.SOCK_STREAM):
mailu_admin| File "/usr/lib/python3.6/socket.py", line 745, in getaddrinfo
mailu_admin| for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
mailu_admin| socket.gaierror: [Errno -2] Name does not resolve
mailu_admin| During handling of the above exception, another exception occurred:
mailu_admin| Traceback (most recent call last):
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/client.py", line 754, in execute_command
mailu_admin| connection.send_command(*args)
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 619, in send_command
mailu_admin| self.send_packed_command(self.pack_command(*args))
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 594, in send_packed_command
mailu_admin| self.connect()
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 498, in connect
mailu_admin| raise ConnectionError(self._error_message(e))
mailu_admin| redis.exceptions.ConnectionError: Error -2 connecting to redis:6379. Name does not resolve.
|
ConnectionError
|
def init_app(self, app):
self.config.update(app.config)
# get environment variables
self.config.update(
{
key: self.__coerce_value(os.environ.get(key, value))
for key, value in DEFAULT_CONFIG.items()
}
)
self.resolve_host()
# automatically set the sqlalchemy string
if self.config["DB_FLAVOR"]:
template = self.DB_TEMPLATES[self.config["DB_FLAVOR"]]
self.config["SQLALCHEMY_DATABASE_URI"] = template.format(**self.config)
self.config["RATELIMIT_STORAGE_URL"] = "redis://{0}/2".format(
self.config["REDIS_ADDRESS"]
)
self.config["QUOTA_STORAGE_URL"] = "redis://{0}/1".format(
self.config["REDIS_ADDRESS"]
)
# update the app config itself
app.config = self
|
def init_app(self, app):
self.config.update(app.config)
# get environment variables
self.config.update(
{
key: self.__coerce_value(os.environ.get(key, value))
for key, value in DEFAULT_CONFIG.items()
}
)
self.resolve_host()
# automatically set the sqlalchemy string
if self.config["DB_FLAVOR"]:
template = self.DB_TEMPLATES[self.config["DB_FLAVOR"]]
self.config["SQLALCHEMY_DATABASE_URI"] = template.format(**self.config)
# update the app config itself
app.config = self
|
https://github.com/Mailu/Mailu/issues/884
|
mailu_admin| [2019-01-25 14:15:37,610] ERROR in app: Exception on /internal/auth/email [GET]
mailu_admin| Traceback (most recent call last):
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 493, in connect
mailu_admin| sock = self._connect()
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 520, in _connect
mailu_admin| socket.SOCK_STREAM):
mailu_admin| File "/usr/lib/python3.6/socket.py", line 745, in getaddrinfo
mailu_admin| for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
mailu_admin| socket.gaierror: [Errno -2] Name does not resolve
mailu_admin| During handling of the above exception, another exception occurred:
mailu_admin| Traceback (most recent call last):
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/client.py", line 754, in execute_command
mailu_admin| connection.send_command(*args)
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 619, in send_command
mailu_admin| self.send_packed_command(self.pack_command(*args))
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 594, in send_packed_command
mailu_admin| self.connect()
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 498, in connect
mailu_admin| raise ConnectionError(self._error_message(e))
mailu_admin| redis.exceptions.ConnectionError: Error -2 connecting to redis:6379. Name does not resolve.
|
ConnectionError
|
def get_server(protocol, authenticated=False):
if protocol == "imap":
hostname, port = extract_host_port(app.config["IMAP_ADDRESS"], 143)
elif protocol == "pop3":
hostname, port = extract_host_port(app.config["POP3_ADDRESS"], 110)
elif protocol == "smtp":
if authenticated:
hostname, port = extract_host_port(app.config["AUTHSMTP_ADDRESS"], 10025)
else:
hostname, port = extract_host_port(app.config["SMTP_ADDRESS"], 25)
return hostname, port
|
def get_server(protocol, authenticated=False):
if protocol == "imap":
hostname, port = extract_host_port(app.config["HOST_IMAP"], 143)
elif protocol == "pop3":
hostname, port = extract_host_port(app.config["HOST_POP3"], 110)
elif protocol == "smtp":
if authenticated:
hostname, port = extract_host_port(app.config["HOST_AUTHSMTP"], 10025)
else:
hostname, port = extract_host_port(app.config["HOST_SMTP"], 25)
return hostname, port
|
https://github.com/Mailu/Mailu/issues/884
|
mailu_admin| [2019-01-25 14:15:37,610] ERROR in app: Exception on /internal/auth/email [GET]
mailu_admin| Traceback (most recent call last):
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 493, in connect
mailu_admin| sock = self._connect()
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 520, in _connect
mailu_admin| socket.SOCK_STREAM):
mailu_admin| File "/usr/lib/python3.6/socket.py", line 745, in getaddrinfo
mailu_admin| for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
mailu_admin| socket.gaierror: [Errno -2] Name does not resolve
mailu_admin| During handling of the above exception, another exception occurred:
mailu_admin| Traceback (most recent call last):
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/client.py", line 754, in execute_command
mailu_admin| connection.send_command(*args)
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 619, in send_command
mailu_admin| self.send_packed_command(self.pack_command(*args))
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 594, in send_packed_command
mailu_admin| self.connect()
mailu_admin| File "/usr/lib/python3.6/site-packages/redis/connection.py", line 498, in connect
mailu_admin| raise ConnectionError(self._error_message(e))
mailu_admin| redis.exceptions.ConnectionError: Error -2 connecting to redis:6379. Name does not resolve.
|
ConnectionError
|
def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):
"""Create a domain"""
domain = models.Domain.query.get(domain_name)
if not domain:
domain = models.Domain(name=domain_name)
db.session.add(domain)
db.session.commit()
|
def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):
domain = models.Domain.query.get(domain_name)
if not domain:
domain = models.Domain(name=domain_name)
db.session.add(domain)
db.session.commit()
|
https://github.com/Mailu/Mailu/issues/849
|
Traceback (most recent call last):
File "manage.py", line 1, in <module>
from mailu import models
ModuleNotFoundError: No module named 'mailu'
|
ModuleNotFoundError
|
def process_bind_param(self, value, dialect):
try:
localpart, domain_name = value.split("@")
return "{0}@{1}".format(
localpart,
idna.encode(domain_name).decode("ascii"),
)
except ValueError:
pass
|
def process_bind_param(self, value, dialect):
localpart, domain_name = value.split("@")
return "{0}@{1}".format(
localpart,
idna.encode(domain_name).decode("ascii"),
)
|
https://github.com/Mailu/Mailu/issues/585
|
admin_1 | 2018-09-06T03:27:32.130634316Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 667, in _init_compiled
admin_1 | 2018-09-06T03:27:32.130647067Z param.append(processors[key](compiled_params[key]))
admin_1 | 2018-09-06T03:27:32.130651784Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py", line 1160, in process
admin_1 | 2018-09-06T03:27:32.130656339Z return process_param(value, dialect)
admin_1 | 2018-09-06T03:27:32.130660848Z File "/app/mailu/models.py", line 38, in process_bind_param
admin_1 | 2018-09-06T03:27:32.130678553Z localpart, domain_name = value.split('@')
admin_1 | 2018-09-06T03:27:32.130682552Z ValueError: not enough values to unpack (expected 2, got 1)
admin_1 | 2018-09-06T03:27:32.130686500Z
admin_1 | 2018-09-06T03:27:32.130690498Z The above exception was the direct cause of the following exception:
admin_1 | 2018-09-06T03:27:32.130694402Z
admin_1 | 2018-09-06T03:27:32.130698152Z Traceback (most recent call last):
admin_1 | 2018-09-06T03:27:32.130701929Z File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1982, in wsgi_app
admin_1 | 2018-09-06T03:27:32.130706019Z response = self.full_dispatch_request()
admin_1 | 2018-09-06T03:27:32.130728757Z File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1614, in full_dispatch_request
admin_1 | 2018-09-06T03:27:32.130734376Z rv = self.handle_user_exception(e)
admin_1 | 2018-09-06T03:27:32.130738349Z File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1517, in handle_user_exception
admin_1 | 2018-09-06T03:27:32.130742416Z reraise(exc_type, exc_value, tb)
admin_1 | 2018-09-06T03:27:32.130746585Z File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 33, in reraise
admin_1 | 2018-09-06T03:27:32.130750772Z raise value
admin_1 | 2018-09-06T03:27:32.130754541Z File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1612, in full_dispatch_request
admin_1 | 2018-09-06T03:27:32.130758592Z rv = self.dispatch_request()
admin_1 | 2018-09-06T03:27:32.130762451Z File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1598, in dispatch_request
admin_1 | 2018-09-06T03:27:32.130766443Z return self.view_functions[rule.endpoint](**req.view_args)
admin_1 | 2018-09-06T03:27:32.130770363Z File "/usr/local/lib/python3.7/site-packages/flask_limiter/extension.py", line 544, in __inner
admin_1 | 2018-09-06T03:27:32.130774389Z return obj(*a, **k)
admin_1 | 2018-09-06T03:27:32.130778164Z File "/app/mailu/internal/views.py", line 18, in nginx_authentication
admin_1 | 2018-09-06T03:27:32.130782201Z headers = nginx.handle_authentication(flask.request.headers)
admin_1 | 2018-09-06T03:27:32.130785989Z File "/app/mailu/internal/nginx.py", line 40, in handle_authentication
admin_1 | 2018-09-06T03:27:32.130789994Z user = models.User.query.get(user_email)
admin_1 | 2018-09-06T03:27:32.130797166Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 882, in get
admin_1 | 2018-09-06T03:27:32.130801365Z ident, loading.load_on_ident)
admin_1 | 2018-09-06T03:27:32.130805123Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 916, in _get_impl
admin_1 | 2018-09-06T03:27:32.130809141Z return fallback_fn(self, key)
admin_1 | 2018-09-06T03:27:32.130812954Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 232, in load_on_ident
admin_1 | 2018-09-06T03:27:32.130817023Z return q.one()
admin_1 | 2018-09-06T03:27:32.130820651Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2848, in one
admin_1 | 2018-09-06T03:27:32.130828433Z ret = self.one_or_none()
admin_1 | 2018-09-06T03:27:32.130832204Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2818, in one_or_none
admin_1 | 2018-09-06T03:27:32.130836182Z ret = list(self)
admin_1 | 2018-09-06T03:27:32.130839817Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2889, in __iter__
admin_1 | 2018-09-06T03:27:32.130843800Z return self._execute_and_instances(context)
admin_1 | 2018-09-06T03:27:32.130847637Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2912, in _execute_and_instances
admin_1 | 2018-09-06T03:27:32.130851651Z result = conn.execute(querycontext.statement, self._params)
admin_1 | 2018-09-06T03:27:32.130855429Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 948, in execute
admin_1 | 2018-09-06T03:27:32.130859405Z return meth(self, multiparams, params)
admin_1 | 2018-09-06T03:27:32.130863160Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 269, in _execute_on_connection
admin_1 | 2018-09-06T03:27:32.130867285Z return connection._execute_clauseelement(self, multiparams, params)
admin_1 | 2018-09-06T03:27:32.130871120Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1060, in _execute_clauseelement
admin_1 | 2018-09-06T03:27:32.130875220Z compiled_sql, distilled_params
admin_1 | 2018-09-06T03:27:32.130879000Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1132, in _execute_context
admin_1 | 2018-09-06T03:27:32.130883223Z None, None)
admin_1 | 2018-09-06T03:27:32.130886963Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1413, in _handle_dbapi_exception
admin_1 | 2018-09-06T03:27:32.130891179Z exc_info
admin_1 | 2018-09-06T03:27:32.130894835Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
admin_1 | 2018-09-06T03:27:32.130898868Z reraise(type(exception), exception, tb=exc_tb, cause=cause)
admin_1 | 2018-09-06T03:27:32.130902641Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 186, in reraise
admin_1 | 2018-09-06T03:27:32.130906662Z raise value.with_traceback(tb)
admin_1 | 2018-09-06T03:27:32.130910358Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_context
admin_1 | 2018-09-06T03:27:32.130915002Z context = constructor(dialect, self, conn, *args)
admin_1 | 2018-09-06T03:27:32.130918761Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 667, in _init_compiled
admin_1 | 2018-09-06T03:27:32.130922838Z param.append(processors[key](compiled_params[key]))
admin_1 | 2018-09-06T03:27:32.130926576Z File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py", line 1160, in process
admin_1 | 2018-09-06T03:27:32.130930646Z return process_param(value, dialect)
admin_1 | 2018-09-06T03:27:32.130934389Z File "/app/mailu/models.py", line 38, in process_bind_param
admin_1 | 2018-09-06T03:27:32.130938334Z localpart, domain_name = value.split('@')
admin_1 | 2018-09-06T03:27:32.130949412Z sqlalchemy.exc.StatementError: (builtins.ValueError) not enough values to unpack (expected 2, got 1) [SQL: 'SELECT user.created_at AS user_created_at, user.updated_at AS user_updated_at, user.comment AS user_comment, user.localpart AS user_localpart, user.password AS user_password, user.quota_bytes AS user_quota_bytes, user.global_admin AS user_global_admin, user.enabled AS user_enabled, user.enable_imap AS user_enable_imap, user.enable_pop AS user_enable_pop, user.forward_enabled AS user_forward_enabled, user.forward_destination AS user_forward_destination, user.forward_keep AS user_forward_keep, user.reply_enabled AS user_reply_enabled, user.reply_subject AS user_reply_subject, user.reply_body AS user_reply_body, user.reply_enddate AS user_reply_enddate, user.displayed_name AS user_displayed_name, user.spam_enabled AS user_spam_enabled, user.spam_threshold AS user_spam_threshold, user.domain_name AS user_domain_name, user.email AS user_email \nFROM user \nWHERE user.email = ?'] [parameters: [{'%(140175638833808 param)s': 'scan'}]]
|
ValueError
|
def calculateWhatChecker(self, length_text, key):
"""Calculates what threshold / checker to use
If the length of the text is over the maximum sentence length, use the last checker / threshold
Otherwise, traverse the keys backwards until we find a key range that does not fit.
So we traverse backwards and see if the sentence length is between current - 1 and current
In this way, we find the absolute lowest checker / percentage threshold.
We traverse backwards because if the text is longer than the max sentence length, we already know.
In total, the keys are only 5 items long or so. It is not expensive to move backwards, nor is it expensive to move forwards.
Args:
length_text -> The length of the text
key -> What key we want to use. I.E. Phase1 keys, Phase2 keys.
Returns:
what_to_use -> the key of the lowest checker."""
_keys = list(key)
_keys = list(map(int, _keys))
if length_text >= int(_keys[-1]):
what_to_use = list(key)[_keys.index(_keys[-1])]
else:
# this algorithm finds the smallest possible fit for the text
for counter, i in reversed(list(enumerate(_keys))):
# [0, 110, 150]
if i <= length_text:
what_to_use = i
return what_to_use
|
def calculateWhatChecker(self, length_text, key):
"""Calculates what threshold / checker to use
If the length of the text is over the maximum sentence length, use the last checker / threshold
Otherwise, traverse the keys backwards until we find a key range that does not fit.
So we traverse backwards and see if the sentence length is between current - 1 and current
In this way, we find the absolute lowest checker / percentage threshold.
We traverse backwards because if the text is longer than the max sentence length, we already know.
In total, the keys are only 5 items long or so. It is not expensive to move backwards, nor is it expensive to move forwards.
Args:
length_text -> The length of the text
key -> What key we want to use. I.E. Phase1 keys, Phase2 keys.
Returns:
what_to_use -> the key of the lowest checker."""
_keys = list(key)
_keys = list(map(int, _keys))
if length_text >= int(_keys[-1]):
what_to_use = key[_keys[-1]]
else:
# this algorithm finds the smallest possible fit for the text
for counter, i in reversed(list(enumerate(_keys))):
# [0, 110, 150]
if i <= length_text:
what_to_use = i
return what_to_use
|
https://github.com/Ciphey/Ciphey/issues/248
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\lukas\AppData\Local\pypoetry\Cache\virtualenvs\ciphey-xvyT1_eU-py3.7\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\lukas\AppData\Local\pypoetry\Cache\virtualenvs\ciphey-xvyT1_eU-py3.7\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\lukas\AppData\Local\pypoetry\Cache\virtualenvs\ciphey-xvyT1_eU-py3.7\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\lukas\AppData\Local\pypoetry\Cache\virtualenvs\ciphey-xvyT1_eU-py3.7\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\ciphey.py", line 277, in main
result = decrypt(config, kwargs["text"])
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\ciphey.py", line 40, in decrypt
res: Optional[iface.SearchResult] = config.objs["searcher"].search(ctext)
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\basemods\Searchers\ausearch.py", line 214, in search
check_res = self._config().objs["checker"](ctext)
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\iface\_modules.py", line 116, in __call__
return self.check(*args)
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\basemods\Checkers\ezcheck.py", line 19, in check
res = checker.check(text)
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\basemods\Checkers\brandon.py", line 224, in check
length_text, self.thresholds_phase1.keys()
File "C:\Users\lukas\Documents\GitHub\Ciphey\ciphey\basemods\Checkers\brandon.py", line 277, in calculateWhatChecker
what_to_use = key[_keys[-1]]
TypeError: 'dict_keys' object is not subscriptable
|
TypeError
|
def run_script(script_path, cwd="."):
"""Execute a script from a working directory.
:param script_path: Absolute path to the script to run.
:param cwd: The directory to run the script from.
"""
run_thru_shell = sys.platform.startswith("win")
if script_path.endswith(".py"):
script_command = [sys.executable, script_path]
else:
script_command = [script_path]
utils.make_executable(script_path)
try:
proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd)
exit_status = proc.wait()
if exit_status != EXIT_SUCCESS:
raise FailedHookException(
"Hook script failed (exit status: {})".format(exit_status)
)
except OSError as os_error:
if os_error.errno == errno.ENOEXEC:
raise FailedHookException(
"Hook script failed, might be an empty file or missing a shebang"
)
raise FailedHookException("Hook script failed (error: {})".format(os_error))
|
def run_script(script_path, cwd="."):
"""Execute a script from a working directory.
:param script_path: Absolute path to the script to run.
:param cwd: The directory to run the script from.
"""
run_thru_shell = sys.platform.startswith("win")
if script_path.endswith(".py"):
script_command = [sys.executable, script_path]
else:
script_command = [script_path]
utils.make_executable(script_path)
proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd)
exit_status = proc.wait()
if exit_status != EXIT_SUCCESS:
raise FailedHookException("Hook script failed (exit status: %d)" % exit_status)
|
https://github.com/cookiecutter/cookiecutter/issues/632
|
Traceback (most recent call last):
File "/usr/local/bin/cookiecutter", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 696, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 534, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/cli.py", line 100, in main
config_file=user_config
File "/usr/local/lib/python2.7/site-packages/cookiecutter/main.py", line 140, in cookiecutter
output_dir=output_dir
File "/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py", line 273, in generate_files
_run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py", line 232, in _run_hook_from_repo_dir
run_hook(hook_name, project_dir, context)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 116, in run_hook
run_script_with_context(script, project_dir, context)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 101, in run_script_with_context
run_script(temp.name, cwd)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 73, in run_script
cwd=cwd
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py", line 656, in __init__
_cleanup()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py", line 1335, in _execute_child
raise child_exception
OSError: [Errno 8] Exec format error
|
OSError
|
def prompt_for_config(context, no_input=False):
"""
Prompts the user to enter new config, using context as a source for the
field names and sample values.
:param no_input: Prompt the user at command line for manual configuration?
"""
cookiecutter_dict = {}
env = Environment()
for key, raw in iteritems(context["cookiecutter"]):
val = env.from_string(str(raw)).render(cookiecutter=cookiecutter_dict)
if not no_input:
prompt = '{0} (default is "{1}")? '.format(key, val)
new_val = read_response(prompt).strip()
if new_val != "":
val = new_val
cookiecutter_dict[key] = val
return cookiecutter_dict
|
def prompt_for_config(context, no_input=False):
"""
Prompts the user to enter new config, using context as a source for the
field names and sample values.
:param no_input: Prompt the user at command line for manual configuration?
"""
cookiecutter_dict = {}
env = Environment()
for key, raw in iteritems(context["cookiecutter"]):
val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)
if not no_input:
prompt = '{0} (default is "{1}")? '.format(key, val)
new_val = read_response(prompt).strip()
if new_val != "":
val = new_val
cookiecutter_dict[key] = val
return cookiecutter_dict
|
https://github.com/cookiecutter/cookiecutter/issues/368
|
Traceback (most recent call last):
File "/opt/boxen/homebrew/bin/cookiecutter", line 9, in <module>
load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()
File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py", line 169, in main
cookiecutter(args.input_dir, args.checkout, args.no_input)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py", line 100, in cookiecutter
context['cookiecutter'] = prompt_for_config(context, no_input)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/prompt.py", line 29, in prompt_for_config
val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py", line 841, in from_string
return cls.from_code(self, self.compile(source), globals, None)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py", line 542, in compile
source = optimize(source, self)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/optimizer.py", line 27, in optimize
return optimizer.visit(node)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py", line 39, in visit
return self.generic_visit(node, *args, **kwargs)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py", line 59, in generic_visit
for field, old_value in node.iter_fields():
AttributeError: 'int' object has no attribute 'iter_fields'
|
AttributeError
|
def get_likelihood_parameters(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: Optional[int] = 1,
give_mean: Optional[bool] = False,
batch_size: Optional[int] = None,
) -> Dict[str, np.ndarray]:
r"""
Estimates for the parameters of the likelihood :math:`p(x \mid z)`
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
give_mean
Return expected value of parameters or a samples
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(adata=adata, indices=indices, batch_size=batch_size)
dropout_list = []
mean_list = []
dispersion_list = []
for tensors in scdl:
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
compute_loss=False,
)
px_r = generative_outputs["px_r"]
px_rate = generative_outputs["px_rate"]
px_dropout = generative_outputs["px_dropout"]
n_batch = px_rate.size(0) if n_samples == 1 else px_rate.size(1)
px_r = np.array(px_r.cpu())
if len(px_r.shape) == 1:
dispersion_list += [np.repeat(px_r[np.newaxis, :], n_batch, axis=0)]
else:
dispersion_list += [px_r]
mean_list += [np.array(px_rate.cpu())]
dropout_list += [np.array(px_dropout.cpu())]
dropout = np.concatenate(dropout_list)
means = np.concatenate(mean_list)
dispersions = np.concatenate(dispersion_list)
if give_mean and n_samples > 1:
dropout = dropout.mean(0)
means = means.mean(0)
return_dict = {}
return_dict["mean"] = means
if self.module.gene_likelihood == "zinb":
return_dict["dropout"] = dropout
return_dict["dispersions"] = dispersions
if self.module.gene_likelihood == "nb":
return_dict["dispersions"] = dispersions
return return_dict
|
def get_likelihood_parameters(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: Optional[int] = 1,
give_mean: Optional[bool] = False,
batch_size: Optional[int] = None,
) -> Dict[str, np.ndarray]:
r"""
Estimates for the parameters of the likelihood :math:`p(x \mid z)`
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
give_mean
Return expected value of parameters or a samples
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(adata=adata, indices=indices, batch_size=batch_size)
dropout_list = []
mean_list = []
dispersion_list = []
for tensors in scdl:
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
compute_loss=False,
)
px_r = generative_outputs["px_r"]
px_rate = generative_outputs["px_rate"]
px_dropout = generative_outputs["px_dropout"]
n_batch = px_rate.size(0) if n_samples == 1 else px_rate.size(1)
dispersion_list += [
np.repeat(np.array(px_r.cpu())[np.newaxis, :], n_batch, axis=0)
]
mean_list += [np.array(px_rate.cpu())]
dropout_list += [np.array(px_dropout.cpu())]
dropout = np.concatenate(dropout_list)
means = np.concatenate(mean_list)
dispersions = np.concatenate(dispersion_list)
if give_mean and n_samples > 1:
dropout = dropout.mean(0)
means = means.mean(0)
return_dict = {}
return_dict["mean"] = means
if self.module.gene_likelihood == "zinb":
return_dict["dropout"] = dropout
return_dict["dispersions"] = dispersions
if self.module.gene_likelihood == "nb":
return_dict["dispersions"] = dispersions
return return_dict
|
https://github.com/YosefLab/scvi-tools/issues/874
|
509 dropout = np.concatenate(dropout_list)
510 means = np.concatenate(mean_list)
--> 511 dispersions = np.concatenate(dispersion_list)
512 if give_mean and n_samples > 1:
513 dropout = dropout.mean(0)
<__array_function__ internals> in concatenate(*args, **kwargs)
ValueError: all the input array dimensions for the concatenation axis must match exactly, but along dimension 1, the array at index 0 has size 128 and the array at index 24 has size 51```
|
ValueError
|
def __init__(self, adata: Optional[AnnData] = None, use_cuda=False):
if adata is not None:
if "_scvi" not in adata.uns.keys():
raise ValueError(
"Please setup your AnnData with scvi.data.setup_anndata(adata) first"
)
self.adata = adata
self.scvi_setup_dict_ = adata.uns["_scvi"]
self.summary_stats = self.scvi_setup_dict_["summary_stats"]
self._validate_anndata(adata, copy_if_view=False)
self.is_trained_ = False
self.use_cuda = use_cuda and torch.cuda.is_available()
self._model_summary_string = ""
self.train_indices_ = None
self.test_indices_ = None
self.validation_indices_ = None
self.history_ = None
|
def __init__(self, adata: Optional[AnnData] = None, use_cuda=False):
if adata is not None:
if "_scvi" not in adata.uns.keys():
raise ValueError(
"Please setup your AnnData with scvi.data.setup_anndata(adata) first"
)
self.adata = adata
self.scvi_setup_dict_ = adata.uns["_scvi"]
self.summary_stats = self.scvi_setup_dict_["summary_stats"]
self._validate_anndata(adata, copy_if_view=False)
self.is_trained_ = False
self.use_cuda = use_cuda and torch.cuda.is_available()
self._model_summary_string = ""
self.train_indices_ = None
self.test_indices_ = None
self.validation_indices_ = None
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def history(self):
"""Returns computed metrics during training."""
return self.history_
|
def history(self):
"""Returns computed metrics during training."""
if self.is_trained_ is False:
return {}
return self.trainer.history
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def save(
self,
dir_path: str,
overwrite: bool = False,
save_anndata: bool = False,
**anndata_write_kwargs,
):
"""
Save the state of the model.
Neither the trainer optimizer state nor the trainer history are saved.
Model files are not expected to be reproducibly saved and loaded across versions
until we reach version 1.0.
Parameters
----------
dir_path
Path to a directory.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
save_anndata
If True, also saves the anndata
anndata_write_kwargs
Kwargs for anndata write function
"""
# get all the user attributes
user_attributes = self._get_user_attributes()
# only save the public attributes with _ at the very end
user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == "_"}
# save the model state dict and the trainer state dict only
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
"{} already exists. Please provide an unexisting directory for saving.".format(
dir_path
)
)
if save_anndata:
self.adata.write(os.path.join(dir_path, "adata.h5ad"), **anndata_write_kwargs)
model_save_path = os.path.join(dir_path, "model_params.pt")
attr_save_path = os.path.join(dir_path, "attr.pkl")
varnames_save_path = os.path.join(dir_path, "var_names.csv")
var_names = self.adata.var_names.astype(str)
var_names = var_names.to_numpy()
np.savetxt(varnames_save_path, var_names, fmt="%s")
torch.save(self.model.state_dict(), model_save_path)
with open(attr_save_path, "wb") as f:
pickle.dump(user_attributes, f)
|
def save(self, dir_path: str, overwrite: bool = False):
"""
Save the state of the model.
Neither the trainer optimizer state nor the trainer history are saved.
Model files are not expected to be reproducibly saved and loaded across versions
until we reach version 1.0.
Parameters
----------
dir_path
Path to a directory.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
"""
# get all the user attributes
user_attributes = self._get_user_attributes()
# only save the public attributes with _ at the very end
user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == "_"}
# save the model state dict and the trainer state dict only
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
"{} already exists. Please provide an unexisting directory for saving.".format(
dir_path
)
)
torch.save(self.model.state_dict(), os.path.join(dir_path, "model_params.pt"))
with open(os.path.join(dir_path, "attr.pkl"), "wb") as f:
pickle.dump(user_attributes, f)
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def load(
cls,
dir_path: str,
adata: Optional[AnnData] = None,
use_cuda: bool = False,
):
"""
Instantiate a model from the saved output.
Parameters
----------
dir_path
Path to saved outputs.
adata
AnnData organized in the same way as data used to train model.
It is not necessary to run :func:`~scvi.data.setup_anndata`,
as AnnData is validated against the saved `scvi` setup dictionary.
If None, will check for and load anndata saved with the model.
use_cuda
Whether to load model on GPU.
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> vae = SCVI.load(adata, save_path)
>>> vae.get_latent_representation()
"""
model_path = os.path.join(dir_path, "model_params.pt")
setup_dict_path = os.path.join(dir_path, "attr.pkl")
adata_path = os.path.join(dir_path, "adata.h5ad")
varnames_path = os.path.join(dir_path, "var_names.csv")
if os.path.exists(adata_path) and adata is None:
adata = read(adata_path)
elif not os.path.exists(adata_path) and adata is None:
raise ValueError("Save path contains no saved anndata and no adata was passed.")
var_names = np.genfromtxt(varnames_path, delimiter=",", dtype=str)
user_var_names = adata.var_names.astype(str)
if not np.array_equal(var_names, user_var_names):
logger.warning(
"var_names for adata passed in does not match var_names of "
"adata used to train the model. For valid results, the vars "
"need to be the same and in the same order as the adata used to train the model."
)
with open(setup_dict_path, "rb") as handle:
attr_dict = pickle.load(handle)
scvi_setup_dict = attr_dict.pop("scvi_setup_dict_")
transfer_anndata_setup(scvi_setup_dict, adata)
if "init_params_" not in attr_dict.keys():
raise ValueError(
"No init_params_ were saved by the model. Check out the "
"developers guide if creating custom models."
)
# get the parameters for the class init signiture
init_params = attr_dict.pop("init_params_")
# update use_cuda from the saved model
use_cuda = use_cuda and torch.cuda.is_available()
init_params["use_cuda"] = use_cuda
# grab all the parameters execept for kwargs (is a dict)
non_kwargs = {k: v for k, v in init_params.items() if not isinstance(v, dict)}
# expand out kwargs
kwargs = {k: v for k, v in init_params.items() if isinstance(v, dict)}
kwargs = {k: v for (i, j) in kwargs.items() for (k, v) in j.items()}
model = cls(adata, **non_kwargs, **kwargs)
for attr, val in attr_dict.items():
setattr(model, attr, val)
if use_cuda:
model.model.load_state_dict(torch.load(model_path))
model.model.cuda()
else:
device = torch.device("cpu")
model.model.load_state_dict(torch.load(model_path, map_location=device))
model.model.eval()
model._validate_anndata(adata)
return model
|
def load(cls, adata: AnnData, dir_path: str, use_cuda: bool = False):
"""
Instantiate a model from the saved output.
Parameters
----------
adata
AnnData organized in the same way as data used to train model.
It is not necessary to run :func:`~scvi.data.setup_anndata`,
as AnnData is validated against the saved `scvi` setup dictionary.
dir_path
Path to saved outputs.
use_cuda
Whether to load model on GPU.
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> vae = SCVI.load(adata, save_path)
>>> vae.get_latent_representation()
"""
model_path = os.path.join(dir_path, "model_params.pt")
setup_dict_path = os.path.join(dir_path, "attr.pkl")
with open(setup_dict_path, "rb") as handle:
attr_dict = pickle.load(handle)
if "init_params_" not in attr_dict.keys():
raise ValueError(
"No init_params_ were saved by the model. Check out the developers guide if creating custom models."
)
# get the parameters for the class init signiture
init_params = attr_dict.pop("init_params_")
# grab all the parameters execept for kwargs (is a dict)
non_kwargs = {k: v for k, v in init_params.items() if not isinstance(v, dict)}
# expand out kwargs
kwargs = {k: v for k, v in init_params.items() if isinstance(v, dict)}
kwargs = {k: v for (i, j) in kwargs.items() for (k, v) in j.items()}
model = cls(adata, **non_kwargs, **kwargs)
for attr, val in attr_dict.items():
setattr(model, attr, val)
use_cuda = use_cuda and torch.cuda.is_available()
if use_cuda:
model.model.load_state_dict(torch.load(model_path))
model.model.cuda()
else:
device = torch.device("cpu")
model.model.load_state_dict(torch.load(model_path, map_location=device))
model.model.eval()
model._validate_anndata(adata)
return model
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def train(
self,
n_epochs: Optional[int] = None,
train_size: float = 0.9,
test_size: Optional[float] = None,
lr: float = 1e-3,
n_epochs_kl_warmup: int = 400,
n_iter_kl_warmup: Optional[int] = None,
frequency: Optional[int] = None,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Trains the model using amortized variational inference.
Parameters
----------
n_epochs
Number of passes through the dataset.
train_size
Size of training set in the range [0.0, 1.0].
test_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + test_size < 1`, the remaining cells belong to a validation set.
lr
Learning rate for optimization.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
n_iter_kl_warmup
Number of minibatches for scaling term on KL divergence to go from 0 to 1.
To use, set to not `None` and set `n_epochs_kl_warmup` to `None`.
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.UnsupervisedTrainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.UnsupervisedTrainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if self.is_trained_ is False:
self.trainer = UnsupervisedTrainer(
self.model,
self.adata,
train_size=train_size,
test_size=test_size,
n_iter_kl_warmup=n_iter_kl_warmup,
n_epochs_kl_warmup=n_epochs_kl_warmup,
frequency=frequency,
use_cuda=self.use_cuda,
**kwargs,
)
self.train_indices_ = self.trainer.train_set.indices
self.test_indices_ = self.trainer.test_set.indices
self.validation_indices_ = self.trainer.validation_set.indices
self.history_ = self.trainer.history
# for autotune
if "n_epochs" not in train_fun_kwargs:
if n_epochs is None:
n_cells = self.adata.n_obs
n_epochs = np.min([round((20000 / n_cells) * 400), 400])
train_fun_kwargs["n_epochs"] = n_epochs
if "lr" not in train_fun_kwargs:
train_fun_kwargs["lr"] = lr
logger.info("Training for {} epochs".format(n_epochs))
self.trainer.train(**train_fun_kwargs)
self.is_trained_ = True
|
def train(
self,
n_epochs: Optional[int] = None,
train_size: float = 0.9,
test_size: Optional[float] = None,
lr: float = 1e-3,
n_epochs_kl_warmup: int = 400,
n_iter_kl_warmup: Optional[int] = None,
frequency: Optional[int] = None,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Trains the model using amortized variational inference.
Parameters
----------
n_epochs
Number of passes through the dataset.
train_size
Size of training set in the range [0.0, 1.0].
test_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + test_size < 1`, the remaining cells belong to a validation set.
lr
Learning rate for optimization.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
n_iter_kl_warmup
Number of minibatches for scaling term on KL divergence to go from 0 to 1.
To use, set to not `None` and set `n_epochs_kl_warmup` to `None`.
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.UnsupervisedTrainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.UnsupervisedTrainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if self.is_trained_ is False:
self.trainer = UnsupervisedTrainer(
self.model,
self.adata,
train_size=train_size,
test_size=test_size,
n_iter_kl_warmup=n_iter_kl_warmup,
n_epochs_kl_warmup=n_epochs_kl_warmup,
frequency=frequency,
use_cuda=self.use_cuda,
**kwargs,
)
self.train_indices_ = self.trainer.train_set.indices
self.test_indices_ = self.trainer.test_set.indices
self.validation_indices_ = self.trainer.validation_set.indices
# for autotune
if "n_epochs" not in train_fun_kwargs:
if n_epochs is None:
n_cells = self.adata.n_obs
n_epochs = np.min([round((20000 / n_cells) * 400), 400])
train_fun_kwargs["n_epochs"] = n_epochs
if "lr" not in train_fun_kwargs:
train_fun_kwargs["lr"] = lr
logger.info("Training for {} epochs".format(n_epochs))
self.trainer.train(**train_fun_kwargs)
self.is_trained_ = True
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def get_from_registry(adata: anndata.AnnData, key: str) -> np.ndarray:
"""
Returns the object in AnnData associated with the key in ``.uns['_scvi']['data_registry']``.
Parameters
----------
adata
anndata object already setup with `scvi.data.setup_anndata()`
key
key of object to get from ``adata.uns['_scvi]['data_registry']``
Returns
-------
The requested data
Examples
--------
>>> import scvi
>>> adata = scvi.data.cortex()
>>> adata.uns['_scvi']['data_registry']
{'X': ['_X', None],
'batch_indices': ['obs', 'batch'],
'local_l_mean': ['obs', '_scvi_local_l_mean'],
'local_l_var': ['obs', '_scvi_local_l_var'],
'labels': ['obs', 'labels']}
>>> batch = get_from_registry(adata, "batch_indices")
>>> batch
array([[0],
[0],
[0],
...,
[0],
[0],
[0]])
"""
data_loc = adata.uns["_scvi"]["data_registry"][key]
attr_name, attr_key = data_loc["attr_name"], data_loc["attr_key"]
data = getattr(adata, attr_name)
if attr_key != "None":
if isinstance(data, pd.DataFrame):
data = data.loc[:, attr_key]
else:
data = data[attr_key]
if isinstance(data, pd.Series):
data = data.to_numpy().reshape(-1, 1)
return data
|
def get_from_registry(adata: anndata.AnnData, key: str) -> np.ndarray:
"""
Returns the object in AnnData associated with the key in ``.uns['_scvi']['data_registry']``.
Parameters
----------
adata
anndata object already setup with `scvi.data.setup_anndata()`
key
key of object to get from ``adata.uns['_scvi]['data_registry']``
Returns
-------
The requested data
Examples
--------
>>> import scvi
>>> adata = scvi.data.cortex()
>>> adata.uns['_scvi']['data_registry']
{'X': ['_X', None],
'batch_indices': ['obs', 'batch'],
'local_l_mean': ['obs', '_scvi_local_l_mean'],
'local_l_var': ['obs', '_scvi_local_l_var'],
'labels': ['obs', 'labels']}
>>> batch = get_from_registry(adata, "batch_indices")
>>> batch
array([[0],
[0],
[0],
...,
[0],
[0],
[0]])
"""
use_raw = adata.uns["_scvi"]["use_raw"]
data_loc = adata.uns["_scvi"]["data_registry"][key]
attr_name, attr_key = data_loc["attr_name"], data_loc["attr_key"]
if use_raw is True and attr_name in ["X", "var"]:
adata = adata.raw
data = getattr(adata, attr_name)
if attr_key != "None":
if isinstance(data, pd.DataFrame):
data = data.loc[:, attr_key]
else:
data = data[attr_key]
if isinstance(data, pd.Series):
data = data.to_numpy().reshape(-1, 1)
return data
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def setup_anndata(
adata: anndata.AnnData,
batch_key: Optional[str] = None,
labels_key: Optional[str] = None,
layer: Optional[str] = None,
protein_expression_obsm_key: Optional[str] = None,
protein_names_uns_key: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
copy: bool = False,
) -> Optional[anndata.AnnData]:
"""
Sets up :class:`~anndata.AnnData` object for `scvi` models.
A mapping will be created between data fields used by `scvi` to their respective locations in adata.
This method will also compute the log mean and log variance per batch for the library size prior.
None of the data in adata are modified. Only adds fields to adata.
Parameters
----------
adata
AnnData object containing raw counts. Rows represent cells, columns represent features.
batch_key
key in `adata.obs` for batch information. Categories will automatically be converted into integer
categories and saved to `adata.obs['_scvi_batch']`. If `None`, assigns the same batch to all the data.
labels_key
key in `adata.obs` for label information. Categories will automatically be converted into integer
categories and saved to `adata.obs['_scvi_labels']`. If `None`, assigns the same label to all the data.
layer
if not `None`, uses this as the key in `adata.layers` for raw count data.
protein_expression_obsm_key
key in `adata.obsm` for protein expression data, Required for :class:`~scvi.model.TOTALVI`.
protein_names_uns_key
key in `adata.uns` for protein names. If None, will use the column names of `adata.obsm[protein_expression_obsm_key]`
if it is a DataFrame, else will assign sequential names to proteins. Only relevant but not required for :class:`~scvi.model.TOTALVI`.
categorical_covariate_keys
keys in `adata.obs` that correspond to categorical data. Used in some `scvi` models.
continuous_covariate_keys
keys in `adata.obs` that correspond to continuous data. Used in some `scvi` models.
copy
if `True`, a copy of adata is returned.
Returns
-------
If ``copy``, will return :class:`~anndata.AnnData`.
Adds the following fields to adata:
.uns['_scvi']
`scvi` setup dictionary
.obs['_local_l_mean']
per batch library size mean
.obs['_local_l_var']
per batch library size variance
.obs['_scvi_labels']
labels encoded as integers
.obs['_scvi_batch']
batch encoded as integers
Examples
--------
Example setting up a scanpy dataset with random gene data and no batch nor label information
>>> import scanpy as sc
>>> import scvi
>>> import numpy as np
>>> adata = scvi.data.synthetic_iid(run_setup_anndata=False)
>>> adata
AnnData object with n_obs × n_vars = 400 × 100
obs: 'batch', 'labels'
uns: 'protein_names'
obsm: 'protein_expression'
Filter cells and run preprocessing before `setup_anndata`
>>> sc.pp.filter_cells(adata, min_counts = 0)
Since no batch_key nor labels_key was passed, setup_anndata() will assume all cells have the same batch and label
>>> scvi.data.setup_anndata(adata)
INFO No batch_key inputted, assuming all cells are same batch
INFO No label_key inputted, assuming all cells have same label
INFO Using data from adata.X
INFO Computing library size prior per batch
INFO Registered keys:['X', 'batch_indices', 'local_l_mean', 'local_l_var', 'labels']
INFO Successfully registered anndata object containing 400 cells, 100 vars, 1 batches, 1 labels, and 0 proteins. Also registered 0 extra categorical covariates and 0 extra continuous covariates.
Example setting up scanpy dataset with random gene data, batch, and protein expression
>>> adata = scvi.data.synthetic_iid(run_setup_anndata=False)
>>> scvi.data.setup_anndata(adata, batch_key='batch', protein_expression_obsm_key='protein_expression')
INFO Using batches from adata.obs["batch"]
INFO No label_key inputted, assuming all cells have same label
INFO Using data from adata.X
INFO Computing library size prior per batch
INFO Using protein expression from adata.obsm['protein_expression']
INFO Generating sequential protein names
INFO Registered keys:['X', 'batch_indices', 'local_l_mean', 'local_l_var', 'labels', 'protein_expression']
INFO Successfully registered anndata object containing 400 cells, 100 vars, 2 batches, 1 labels, and 100 proteins. Also registered 0 extra categorical covariates and 0 extra continuous covariates.
"""
if copy:
adata = adata.copy()
if adata.is_view:
raise ValueError(
"Please run `adata = adata.copy()` or use the copy option in this function."
)
adata.uns["_scvi"] = {}
adata.uns["_scvi"]["scvi_version"] = scvi.__version__
batch_key = _setup_batch(adata, batch_key)
labels_key = _setup_labels(adata, labels_key)
x_loc, x_key = _setup_x(adata, layer)
local_l_mean_key, local_l_var_key = _setup_library_size(adata, batch_key, layer)
data_registry = {
_CONSTANTS.X_KEY: {"attr_name": x_loc, "attr_key": x_key},
_CONSTANTS.BATCH_KEY: {"attr_name": "obs", "attr_key": batch_key},
_CONSTANTS.LOCAL_L_MEAN_KEY: {"attr_name": "obs", "attr_key": local_l_mean_key},
_CONSTANTS.LOCAL_L_VAR_KEY: {"attr_name": "obs", "attr_key": local_l_var_key},
_CONSTANTS.LABELS_KEY: {"attr_name": "obs", "attr_key": labels_key},
}
if protein_expression_obsm_key is not None:
protein_expression_obsm_key = _setup_protein_expression(
adata, protein_expression_obsm_key, protein_names_uns_key, batch_key
)
data_registry[_CONSTANTS.PROTEIN_EXP_KEY] = {
"attr_name": "obsm",
"attr_key": protein_expression_obsm_key,
}
if categorical_covariate_keys is not None:
cat_loc, cat_key = _setup_extra_categorical_covs(
adata, categorical_covariate_keys
)
data_registry[_CONSTANTS.CAT_COVS_KEY] = {
"attr_name": cat_loc,
"attr_key": cat_key,
}
if continuous_covariate_keys is not None:
cont_loc, cont_key = _setup_extra_continuous_covs(
adata, continuous_covariate_keys
)
data_registry[_CONSTANTS.CONT_COVS_KEY] = {
"attr_name": cont_loc,
"attr_key": cont_key,
}
# add the data_registry to anndata
_register_anndata(adata, data_registry_dict=data_registry)
logger.debug("Registered keys:{}".format(list(data_registry.keys())))
_setup_summary_stats(
adata,
batch_key,
labels_key,
protein_expression_obsm_key,
categorical_covariate_keys,
continuous_covariate_keys,
)
logger.info("Please do not further modify adata until model is trained.")
_verify_and_correct_data_format(adata, data_registry)
if copy:
return adata
|
def setup_anndata(
adata: anndata.AnnData,
batch_key: Optional[str] = None,
labels_key: Optional[str] = None,
use_raw: bool = False,
layer: Optional[str] = None,
protein_expression_obsm_key: Optional[str] = None,
protein_names_uns_key: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
copy: bool = False,
) -> Optional[anndata.AnnData]:
"""
Sets up :class:`~anndata.AnnData` object for `scvi` models.
A mapping will be created between data fields used by `scvi` to their respective locations in adata.
This method will also compute the log mean and log variance per batch for the library size prior.
None of the data in adata are modified. Only adds fields to adata.
Parameters
----------
adata
AnnData object containing raw counts. Rows represent cells, columns represent features.
batch_key
key in `adata.obs` for batch information. Categories will automatically be converted into integer
categories and saved to `adata.obs['_scvi_batch']`. If `None`, assigns the same batch to all the data.
labels_key
key in `adata.obs` for label information. Categories will automatically be converted into integer
categories and saved to `adata.obs['_scvi_labels']`. If `None`, assigns the same label to all the data.
use_raw
Use `.raw` when applicable (e.g., for `X`)
layer
if not `None`, uses this as the key in `adata.layers` for raw count data.
protein_expression_obsm_key
key in `adata.obsm` for protein expression data, Required for :class:`~scvi.model.TOTALVI`.
protein_names_uns_key
key in `adata.uns` for protein names. If None, will use the column names of `adata.obsm[protein_expression_obsm_key]`
if it is a DataFrame, else will assign sequential names to proteins. Only relevant but not required for :class:`~scvi.model.TOTALVI`.
categorical_covariate_keys
keys in `adata.obs` that correspond to categorical data. Used in some `scvi` models.
continuous_covariate_keys
keys in `adata.obs` that correspond to continuous data. Used in some `scvi` models.
copy
if `True`, a copy of adata is returned.
Returns
-------
If ``copy``, will return :class:`~anndata.AnnData`.
Adds the following fields to adata:
.uns['_scvi']
`scvi` setup dictionary
.obs['_local_l_mean']
per batch library size mean
.obs['_local_l_var']
per batch library size variance
.obs['_scvi_labels']
labels encoded as integers
.obs['_scvi_batch']
batch encoded as integers
Examples
--------
Example setting up a scanpy dataset with random gene data and no batch nor label information
>>> import scanpy as sc
>>> import scvi
>>> import numpy as np
>>> adata = scvi.data.synthetic_iid(run_setup_anndata=False)
>>> adata
AnnData object with n_obs × n_vars = 400 × 100
obs: 'batch', 'labels'
uns: 'protein_names'
obsm: 'protein_expression'
Filter cells and run preprocessing before `setup_anndata`
>>> sc.pp.filter_cells(adata, min_counts = 0)
Since no batch_key nor labels_key was passed, setup_anndata() will assume all cells have the same batch and label
>>> scvi.data.setup_anndata(adata)
INFO No batch_key inputted, assuming all cells are same batch
INFO No label_key inputted, assuming all cells have same label
INFO Using data from adata.X
INFO Computing library size prior per batch
INFO Registered keys:['X', 'batch_indices', 'local_l_mean', 'local_l_var', 'labels']
INFO Successfully registered anndata object containing 400 cells, 100 vars, 1 batches, 1 labels, and 0 proteins. Also registered 0 extra categorical covariates and 0 extra continuous covariates.
Example setting up scanpy dataset with random gene data, batch, and protein expression
>>> adata = scvi.data.synthetic_iid(run_setup_anndata=False)
>>> scvi.data.setup_anndata(adata, batch_key='batch', protein_expression_obsm_key='protein_expression')
INFO Using batches from adata.obs["batch"]
INFO No label_key inputted, assuming all cells have same label
INFO Using data from adata.X
INFO Computing library size prior per batch
INFO Using protein expression from adata.obsm['protein_expression']
INFO Generating sequential protein names
INFO Registered keys:['X', 'batch_indices', 'local_l_mean', 'local_l_var', 'labels', 'protein_expression']
INFO Successfully registered anndata object containing 400 cells, 100 vars, 2 batches, 1 labels, and 100 proteins. Also registered 0 extra categorical covariates and 0 extra continuous covariates.
"""
if copy:
adata = adata.copy()
if adata.is_view:
raise ValueError(
"Please run `adata = adata.copy()` or use the copy option in this function."
)
adata.uns["_scvi"] = {}
adata.uns["_scvi"]["scvi_version"] = scvi.__version__
batch_key = _setup_batch(adata, batch_key)
labels_key = _setup_labels(adata, labels_key)
x_loc, x_key = _setup_x(adata, layer, use_raw)
local_l_mean_key, local_l_var_key = _setup_library_size(
adata, batch_key, layer, use_raw
)
adata.uns["_scvi"]["use_raw"] = True if use_raw is True else False
data_registry = {
_CONSTANTS.X_KEY: {"attr_name": x_loc, "attr_key": x_key},
_CONSTANTS.BATCH_KEY: {"attr_name": "obs", "attr_key": batch_key},
_CONSTANTS.LOCAL_L_MEAN_KEY: {"attr_name": "obs", "attr_key": local_l_mean_key},
_CONSTANTS.LOCAL_L_VAR_KEY: {"attr_name": "obs", "attr_key": local_l_var_key},
_CONSTANTS.LABELS_KEY: {"attr_name": "obs", "attr_key": labels_key},
}
if protein_expression_obsm_key is not None:
protein_expression_obsm_key = _setup_protein_expression(
adata, protein_expression_obsm_key, protein_names_uns_key, batch_key
)
data_registry[_CONSTANTS.PROTEIN_EXP_KEY] = {
"attr_name": "obsm",
"attr_key": protein_expression_obsm_key,
}
if categorical_covariate_keys is not None:
cat_loc, cat_key = _setup_extra_categorical_covs(
adata, categorical_covariate_keys
)
data_registry[_CONSTANTS.CAT_COVS_KEY] = {
"attr_name": cat_loc,
"attr_key": cat_key,
}
if continuous_covariate_keys is not None:
cont_loc, cont_key = _setup_extra_continuous_covs(
adata, continuous_covariate_keys
)
data_registry[_CONSTANTS.CONT_COVS_KEY] = {
"attr_name": cont_loc,
"attr_key": cont_key,
}
# add the data_registry to anndata
_register_anndata(adata, data_registry_dict=data_registry)
logger.debug("Registered keys:{}".format(list(data_registry.keys())))
_setup_summary_stats(
adata,
batch_key,
labels_key,
protein_expression_obsm_key,
categorical_covariate_keys,
continuous_covariate_keys,
)
logger.info("Please do not further modify adata until model is trained.")
_verify_and_correct_data_format(adata, data_registry)
if copy:
return adata
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _set_data_in_registry(adata, data, key):
"""
Sets the data associated with key in adata.uns['_scvi']['data_registry'].keys() to data.
Note: This is a dangerous method and will change the underlying data of the user's anndata
Currently used to make the user's anndata C_CONTIGUOUS and csr if it is dense numpy
or sparse respectively.
Parameters
----------
adata
anndata object to change data of
data
data to change to
key
key in adata.uns['_scvi]['data_registry'].keys() associated with the data
"""
data_loc = adata.uns["_scvi"]["data_registry"][key]
attr_name, attr_key = data_loc["attr_name"], data_loc["attr_key"]
if attr_key == "None":
setattr(adata, attr_name, data)
elif attr_key != "None":
attribute = getattr(adata, attr_name)
if isinstance(attribute, pd.DataFrame):
attribute.loc[:, attr_key] = data
else:
attribute[attr_key] = data
setattr(adata, attr_name, attribute)
|
def _set_data_in_registry(adata, data, key):
"""
Sets the data associated with key in adata.uns['_scvi']['data_registry'].keys() to data.
Note: This is a dangerous method and will change the underlying data of the user's anndata
Currently used to make the user's anndata C_CONTIGUOUS and csr if it is dense numpy
or sparse respectively.
Parameters
----------
adata
anndata object to change data of
data
data to change to
key
key in adata.uns['_scvi]['data_registry'].keys() associated with the data
"""
use_raw = adata.uns["_scvi"]["use_raw"]
data_loc = adata.uns["_scvi"]["data_registry"][key]
attr_name, attr_key = data_loc["attr_name"], data_loc["attr_key"]
if use_raw is True and attr_name in ["X", "var"]:
tmp_adata = adata.raw.to_adata()
else:
tmp_adata = adata
if attr_key == "None":
setattr(tmp_adata, attr_name, data)
elif attr_key != "None":
attribute = getattr(tmp_adata, attr_name)
if isinstance(attribute, pd.DataFrame):
attribute.loc[:, attr_key] = data
else:
attribute[attr_key] = data
setattr(tmp_adata, attr_name, attribute)
if use_raw is True and attr_name in ["X", "var"]:
adata.raw = tmp_adata
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def transfer_anndata_setup(
adata_source: Union[anndata.AnnData, dict], adata_target: anndata.AnnData
):
"""
Transfer anndata setup from a source object to a target object.
This handles encoding for categorical data and is useful in the case where an
anndata object has been subsetted and a category is lost.
Parameters
----------
adata_source
AnnData that has been setup with scvi. If `dict`, must be dictionary
from source anndata containing scvi setup parameters.
adata_target
AnnData with equivalent organization as source, but possibly subsetted.
"""
adata_target.uns["_scvi"] = {}
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
data_registry = _scvi_dict["data_registry"]
summary_stats = _scvi_dict["summary_stats"]
# transfer version
adata_target.uns["_scvi"]["scvi_version"] = _scvi_dict["scvi_version"]
x_loc = data_registry[_CONSTANTS.X_KEY]["attr_name"]
if x_loc == "layers":
layer = data_registry[_CONSTANTS.X_KEY]["attr_key"]
else:
layer = None
target_n_vars = adata_target.shape[1]
if target_n_vars != summary_stats["n_vars"]:
raise ValueError(
"Number of vars in adata_target not the same as source. "
+ "Expected: {} Received: {}".format(target_n_vars, summary_stats["n_vars"])
)
# transfer protein_expression
protein_expression_obsm_key = _transfer_protein_expression(_scvi_dict, adata_target)
# transfer batch and labels
categorical_mappings = _scvi_dict["categorical_mappings"]
for key, val in categorical_mappings.items():
original_key = val["original_key"]
if (key == original_key) and (original_key not in adata_target.obs.keys()):
# case where original key and key are equal
# caused when no batch or label key were given
# when anndata_source was setup
logger.info(
".obs[{}] not found in target, assuming every cell is same category".format(
original_key
)
)
adata_target.obs[original_key] = np.zeros(
adata_target.shape[0], dtype=np.int64
)
elif (key != original_key) and (original_key not in adata_target.obs.keys()):
raise KeyError(
'.obs["{}"] was used to setup source, but not found in target.'.format(
original_key
)
)
mapping = val["mapping"]
cat_dtype = CategoricalDtype(categories=mapping)
_make_obs_column_categorical(
adata_target, original_key, key, categorical_dtype=cat_dtype
)
batch_key = "_scvi_batch"
labels_key = "_scvi_labels"
# transfer X
x_loc, x_key = _setup_x(adata_target, layer)
local_l_mean_key, local_l_var_key = _setup_library_size(
adata_target, batch_key, layer
)
target_data_registry = data_registry.copy()
target_data_registry.update(
{_CONSTANTS.X_KEY: {"attr_name": x_loc, "attr_key": x_key}}
)
# transfer extra categorical covs
has_cat_cov = True if _CONSTANTS.CAT_COVS_KEY in data_registry.keys() else False
if has_cat_cov:
source_cat_dict = _scvi_dict["extra_categorical_mappings"]
cat_loc, cat_key = _setup_extra_categorical_covs(
adata_target, list(source_cat_dict.keys()), category_dict=source_cat_dict
)
target_data_registry.update(
{_CONSTANTS.CAT_COVS_KEY: {"attr_name": cat_loc, "attr_key": cat_key}}
)
else:
source_cat_dict = None
# transfer extra continuous covs
has_cont_cov = True if _CONSTANTS.CONT_COVS_KEY in data_registry.keys() else False
if has_cont_cov:
obs_keys_names = _scvi_dict["extra_continuous_keys"]
cont_loc, cont_key = _setup_extra_continuous_covs(
adata_target, list(obs_keys_names)
)
target_data_registry.update(
{_CONSTANTS.CONT_COVS_KEY: {"attr_name": cont_loc, "attr_key": cont_key}}
)
else:
obs_keys_names = None
# add the data_registry to anndata
_register_anndata(adata_target, data_registry_dict=target_data_registry)
logger.info("Registered keys:{}".format(list(target_data_registry.keys())))
_setup_summary_stats(
adata_target,
batch_key,
labels_key,
protein_expression_obsm_key,
source_cat_dict,
obs_keys_names,
)
_verify_and_correct_data_format(adata_target, data_registry)
|
def transfer_anndata_setup(
adata_source: Union[anndata.AnnData, dict], adata_target: anndata.AnnData
):
"""
Transfer anndata setup from a source object to a target object.
This handles encoding for categorical data and is useful in the case where an
anndata object has been subsetted and a category is lost.
Parameters
----------
adata_source
AnnData that has been setup with scvi. If `dict`, must be dictionary
from source anndata containing scvi setup parameters.
adata_target
AnnData with equivalent organization as source, but possibly subsetted.
"""
adata_target.uns["_scvi"] = {}
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
data_registry = _scvi_dict["data_registry"]
summary_stats = _scvi_dict["summary_stats"]
# transfer version
adata_target.uns["_scvi"]["scvi_version"] = _scvi_dict["scvi_version"]
x_loc = data_registry[_CONSTANTS.X_KEY]["attr_name"]
if x_loc == "layers":
layer = data_registry[_CONSTANTS.X_KEY]["attr_key"]
else:
layer = None
if _scvi_dict["use_raw"] is True:
adata_target.uns["_scvi"]["use_raw"] = True
use_raw = True
else:
adata_target.uns["_scvi"]["use_raw"] = False
use_raw = False
target_n_vars = adata_target.shape[1] if not use_raw else adata_target.raw.shape[1]
if target_n_vars != summary_stats["n_vars"]:
raise ValueError(
"Number of vars in adata_target not the same as source. "
+ "Expected: {} Received: {}".format(target_n_vars, summary_stats["n_vars"])
)
# transfer protein_expression
protein_expression_obsm_key = _transfer_protein_expression(_scvi_dict, adata_target)
# transfer batch and labels
categorical_mappings = _scvi_dict["categorical_mappings"]
for key, val in categorical_mappings.items():
original_key = val["original_key"]
if (key == original_key) and (original_key not in adata_target.obs.keys()):
# case where original key and key are equal
# caused when no batch or label key were given
# when anndata_source was setup
logger.info(
".obs[{}] not found in target, assuming every cell is same category".format(
original_key
)
)
adata_target.obs[original_key] = np.zeros(
adata_target.shape[0], dtype=np.int64
)
elif (key != original_key) and (original_key not in adata_target.obs.keys()):
raise KeyError(
'.obs["{}"] was used to setup source, but not found in target.'.format(
original_key
)
)
mapping = val["mapping"]
cat_dtype = CategoricalDtype(categories=mapping)
_make_obs_column_categorical(
adata_target, original_key, key, categorical_dtype=cat_dtype
)
batch_key = "_scvi_batch"
labels_key = "_scvi_labels"
# transfer X
x_loc, x_key = _setup_x(adata_target, layer, use_raw)
local_l_mean_key, local_l_var_key = _setup_library_size(
adata_target, batch_key, layer, use_raw
)
target_data_registry = data_registry.copy()
target_data_registry.update(
{_CONSTANTS.X_KEY: {"attr_name": x_loc, "attr_key": x_key}}
)
# transfer extra categorical covs
has_cat_cov = True if _CONSTANTS.CAT_COVS_KEY in data_registry.keys() else False
if has_cat_cov:
source_cat_dict = _scvi_dict["extra_categorical_mappings"]
cat_loc, cat_key = _setup_extra_categorical_covs(
adata_target, list(source_cat_dict.keys()), category_dict=source_cat_dict
)
target_data_registry.update(
{_CONSTANTS.CAT_COVS_KEY: {"attr_name": cat_loc, "attr_key": cat_key}}
)
else:
source_cat_dict = None
# transfer extra continuous covs
has_cont_cov = True if _CONSTANTS.CONT_COVS_KEY in data_registry.keys() else False
if has_cont_cov:
obs_keys_names = _scvi_dict["extra_continuous_keys"]
cont_loc, cont_key = _setup_extra_continuous_covs(
adata_target, list(obs_keys_names)
)
target_data_registry.update(
{_CONSTANTS.CONT_COVS_KEY: {"attr_name": cont_loc, "attr_key": cont_key}}
)
else:
obs_keys_names = None
# add the data_registry to anndata
_register_anndata(adata_target, data_registry_dict=target_data_registry)
logger.info("Registered keys:{}".format(list(target_data_registry.keys())))
_setup_summary_stats(
adata_target,
batch_key,
labels_key,
protein_expression_obsm_key,
source_cat_dict,
obs_keys_names,
)
_verify_and_correct_data_format(adata_target, data_registry)
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _setup_x(adata, layer):
if layer is not None:
assert layer in adata.layers.keys(), (
"{} is not a valid key in adata.layers".format(layer)
)
logger.info('Using data from adata.layers["{}"]'.format(layer))
x_loc = "layers"
x_key = layer
x = adata.layers[x_key]
else:
logger.info("Using data from adata.X")
x_loc = "X"
x_key = "None"
x = adata.X
if _check_nonnegative_integers(x) is False:
logger_data_loc = (
"adata.X" if layer is None else "adata.layers[{}]".format(layer)
)
warnings.warn(
"{} does not contain unnormalized count data. Are you sure this is what you want?".format(
logger_data_loc
)
)
return x_loc, x_key
|
def _setup_x(adata, layer, use_raw):
if use_raw and layer:
logging.warning("use_raw and layer were both passed in. Defaulting to use_raw.")
# checking layers
if use_raw:
if adata.raw is None:
raise ValueError("use_raw is True but adata.raw is None")
logger.info("Using data from adata.raw.X")
x_loc = "X"
x_key = "None"
x = adata.raw.X
elif layer is not None:
assert layer in adata.layers.keys(), (
"{} is not a valid key in adata.layers".format(layer)
)
logger.info('Using data from adata.layers["{}"]'.format(layer))
x_loc = "layers"
x_key = layer
x = adata.layers[x_key]
else:
logger.info("Using data from adata.X")
x_loc = "X"
x_key = "None"
x = adata.X
if _check_nonnegative_integers(x) is False:
logger_data_loc = (
"adata.X" if layer is None else "adata.layers[{}]".format(layer)
)
warnings.warn(
"{} does not contain unnormalized count data. Are you sure this is what you want?".format(
logger_data_loc
)
)
return x_loc, x_key
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _setup_library_size(adata, batch_key, layer):
# computes the library size per batch
logger.info("Computing library size prior per batch")
local_l_mean_key = "_scvi_local_l_mean"
local_l_var_key = "_scvi_local_l_var"
_compute_library_size_batch(
adata,
batch_key=batch_key,
local_l_mean_key=local_l_mean_key,
local_l_var_key=local_l_var_key,
layer=layer,
)
return local_l_mean_key, local_l_var_key
|
def _setup_library_size(adata, batch_key, layer, use_raw):
# computes the library size per batch
logger.info("Computing library size prior per batch")
local_l_mean_key = "_scvi_local_l_mean"
local_l_var_key = "_scvi_local_l_var"
_compute_library_size_batch(
adata,
batch_key=batch_key,
local_l_mean_key=local_l_mean_key,
local_l_var_key=local_l_var_key,
layer=layer,
use_raw=use_raw,
)
return local_l_mean_key, local_l_var_key
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _setup_summary_stats(
adata,
batch_key,
labels_key,
protein_expression_obsm_key,
categorical_covariate_keys,
continuous_covariate_keys,
):
categorical_mappings = adata.uns["_scvi"]["categorical_mappings"]
n_batch = len(np.unique(categorical_mappings[batch_key]["mapping"]))
n_cells = adata.shape[0]
n_vars = adata.shape[1]
n_labels = len(np.unique(categorical_mappings[labels_key]["mapping"]))
if protein_expression_obsm_key is not None:
n_proteins = adata.obsm[protein_expression_obsm_key].shape[1]
else:
n_proteins = 0
if categorical_covariate_keys is not None:
n_cat_covs = len(categorical_covariate_keys)
else:
n_cat_covs = 0
if continuous_covariate_keys is not None:
n_cont_covs = len(continuous_covariate_keys)
else:
n_cont_covs = 0
summary_stats = {
"n_batch": n_batch,
"n_cells": n_cells,
"n_vars": n_vars,
"n_labels": n_labels,
"n_proteins": n_proteins,
}
adata.uns["_scvi"]["summary_stats"] = summary_stats
logger.info(
"Successfully registered anndata object containing {} cells, {} vars, "
"{} batches, {} labels, and {} proteins. Also registered {} extra categorical "
"covariates and {} extra continuous covariates.".format(
n_cells, n_vars, n_batch, n_labels, n_proteins, n_cat_covs, n_cont_covs
)
)
return summary_stats
|
def _setup_summary_stats(
adata,
batch_key,
labels_key,
protein_expression_obsm_key,
categorical_covariate_keys,
continuous_covariate_keys,
):
categorical_mappings = adata.uns["_scvi"]["categorical_mappings"]
use_raw = adata.uns["_scvi"]["use_raw"]
n_batch = len(np.unique(categorical_mappings[batch_key]["mapping"]))
n_cells = adata.shape[0]
n_vars = adata.shape[1] if not use_raw else adata.raw.shape[1]
n_labels = len(np.unique(categorical_mappings[labels_key]["mapping"]))
if protein_expression_obsm_key is not None:
n_proteins = adata.obsm[protein_expression_obsm_key].shape[1]
else:
n_proteins = 0
if categorical_covariate_keys is not None:
n_cat_covs = len(categorical_covariate_keys)
else:
n_cat_covs = 0
if continuous_covariate_keys is not None:
n_cont_covs = len(continuous_covariate_keys)
else:
n_cont_covs = 0
summary_stats = {
"n_batch": n_batch,
"n_cells": n_cells,
"n_vars": n_vars,
"n_labels": n_labels,
"n_proteins": n_proteins,
}
adata.uns["_scvi"]["summary_stats"] = summary_stats
logger.info(
"Successfully registered anndata object containing {} cells, {} vars, "
"{} batches, {} labels, and {} proteins. Also registered {} extra categorical "
"covariates and {} extra continuous covariates.".format(
n_cells, n_vars, n_batch, n_labels, n_proteins, n_cat_covs, n_cont_covs
)
)
return summary_stats
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key: str = None,
local_l_var_key: str = None,
layer=None,
copy: bool = False,
):
"""
Computes the library size.
Parameters
----------
adata
anndata object containing counts
batch_key
key in obs for batch information
local_l_mean_key
key in obs to save the local log mean
local_l_var_key
key in obs to save the local log variance
layer
if not None, will use this in adata.layers[] for X
copy
if True, returns a copy of the adata
Returns
-------
type
anndata.AnnData if copy was True, else None
"""
if batch_key not in adata.obs_keys():
raise ValueError("batch_key not valid key in obs dataframe")
local_means = np.zeros((adata.shape[0], 1))
local_vars = np.zeros((adata.shape[0], 1))
batch_indices = adata.obs[batch_key]
for i_batch in np.unique(batch_indices):
idx_batch = np.squeeze(batch_indices == i_batch)
if layer is not None:
if layer not in adata.layers.keys():
raise ValueError("layer not a valid key for adata.layers")
data = adata[idx_batch].layers[layer]
else:
data = adata[idx_batch].X
(local_means[idx_batch], local_vars[idx_batch]) = _compute_library_size(data)
if local_l_mean_key is None:
local_l_mean_key = "_scvi_local_l_mean"
if local_l_var_key is None:
local_l_var_key = "_scvi_local_l_var"
if copy:
copy = adata.copy()
copy.obs[local_l_mean_key] = local_means
copy.obs[local_l_var_key] = local_vars
return copy
else:
adata.obs[local_l_mean_key] = local_means
adata.obs[local_l_var_key] = local_vars
|
def _compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key: str = None,
local_l_var_key: str = None,
layer=None,
use_raw=False,
copy: bool = False,
):
"""
Computes the library size.
Parameters
----------
adata
anndata object containing counts
batch_key
key in obs for batch information
local_l_mean_key
key in obs to save the local log mean
local_l_var_key
key in obs to save the local log variance
layer
if not None, will use this in adata.layers[] for X
use_raw
Use ``.raw`` for X
copy
if True, returns a copy of the adata
Returns
-------
type
anndata.AnnData if copy was True, else None
"""
if batch_key not in adata.obs_keys():
raise ValueError("batch_key not valid key in obs dataframe")
local_means = np.zeros((adata.shape[0], 1))
local_vars = np.zeros((adata.shape[0], 1))
batch_indices = adata.obs[batch_key]
for i_batch in np.unique(batch_indices):
idx_batch = np.squeeze(batch_indices == i_batch)
if use_raw:
data = adata[idx_batch].raw.X
elif layer is not None:
if layer not in adata.layers.keys():
raise ValueError("layer not a valid key for adata.layers")
data = adata[idx_batch].layers[layer]
else:
data = adata[idx_batch].X
(local_means[idx_batch], local_vars[idx_batch]) = _compute_library_size(data)
if local_l_mean_key is None:
local_l_mean_key = "_scvi_local_l_mean"
if local_l_var_key is None:
local_l_var_key = "_scvi_local_l_var"
if copy:
copy = adata.copy()
copy.obs[local_l_mean_key] = local_means
copy.obs[local_l_var_key] = local_vars
return copy
else:
adata.obs[local_l_mean_key] = local_means
adata.obs[local_l_var_key] = local_vars
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _check_anndata_setup_equivalence(adata_source, adata_target):
"""Checks if target setup is equivalent to source."""
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
adata = adata_target
stats = _scvi_dict["summary_stats"]
target_n_vars = adata.shape[1]
error_msg = (
"Number of {} in anndata different from initial anndata used for training."
)
if target_n_vars != stats["n_vars"]:
raise ValueError(error_msg.format("vars"))
error_msg = (
"There are more {} categories in the data than were originally registered. "
+ "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
)
self_categoricals = _scvi_dict["categorical_mappings"]
self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
# check if the categories are the same
error_msg = (
"Categorial encoding for {} is not the same between "
+ "the anndata used to train the model and the anndata just passed in. "
+ "Categorical encoding needs to be same elements, same order, and same datatype.\n"
+ "Expected categories: {}. Received categories: {}.\n"
+ "Try running `dataset.transfer_anndata_setup()` or deleting `adata.uns['_scvi']."
)
if not _assert_equal_mapping(self_batch_mapping, adata_batch_mapping):
raise ValueError(
error_msg.format("batch", self_batch_mapping, adata_batch_mapping)
)
self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
if not _assert_equal_mapping(self_labels_mapping, adata_labels_mapping):
raise ValueError(
error_msg.format("label", self_labels_mapping, adata_labels_mapping)
)
# validate any extra categoricals
if "extra_categorical_mappings" in _scvi_dict.keys():
target_extra_cat_maps = adata.uns["_scvi"]["extra_categorical_mappings"]
for key, val in _scvi_dict["extra_categorical_mappings"].items():
target_map = target_extra_cat_maps[key]
if not _assert_equal_mapping(val, target_map):
raise ValueError(error_msg.format(key, val, target_map))
# validate any extra continuous covs
if "extra_continuous_keys" in _scvi_dict.keys():
if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
if not _scvi_dict["extra_continuous_keys"].equals(target_cont_keys):
raise ValueError(
"extra_continous_keys are not the same between source and target"
)
|
def _check_anndata_setup_equivalence(adata_source, adata_target):
"""Checks if target setup is equivalent to source."""
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
adata = adata_target
stats = _scvi_dict["summary_stats"]
use_raw = _scvi_dict["use_raw"]
target_n_vars = adata.shape[1] if not use_raw else adata.raw.shape[1]
error_msg = (
"Number of {} in anndata different from initial anndata used for training."
)
if target_n_vars != stats["n_vars"]:
raise ValueError(error_msg.format("vars"))
error_msg = (
"There are more {} categories in the data than were originally registered. "
+ "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
)
self_categoricals = _scvi_dict["categorical_mappings"]
self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
# check if the categories are the same
error_msg = (
"Categorial encoding for {} is not the same between "
+ "the anndata used to train the model and the anndata just passed in. "
+ "Categorical encoding needs to be same elements, same order, and same datatype.\n"
+ "Expected categories: {}. Received categories: {}.\n"
+ "Try running `dataset.transfer_anndata_setup()` or deleting `adata.uns['_scvi']."
)
if not _assert_equal_mapping(self_batch_mapping, adata_batch_mapping):
raise ValueError(
error_msg.format("batch", self_batch_mapping, adata_batch_mapping)
)
self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
if not _assert_equal_mapping(self_labels_mapping, adata_labels_mapping):
raise ValueError(
error_msg.format("label", self_labels_mapping, adata_labels_mapping)
)
# validate any extra categoricals
if "extra_categorical_mappings" in _scvi_dict.keys():
target_extra_cat_maps = adata.uns["_scvi"]["extra_categorical_mappings"]
for key, val in _scvi_dict["extra_categorical_mappings"].items():
target_map = target_extra_cat_maps[key]
if not _assert_equal_mapping(val, target_map):
raise ValueError(error_msg.format(key, val, target_map))
# validate any extra continuous covs
if "extra_continuous_keys" in _scvi_dict.keys():
if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
if not _scvi_dict["extra_continuous_keys"].equals(target_cont_keys):
raise ValueError(
"extra_continous_keys are not the same between source and target"
)
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _get_var_names_from_setup_anndata(adata):
"""Gets var names by checking if using raw."""
var_names = adata.var_names
return var_names
|
def _get_var_names_from_setup_anndata(adata):
"""Gets var names by checking if using raw."""
var_names = (
adata.var_names
if adata.uns["_scvi"]["use_raw"] is False
else adata.raw.var_names
)
return var_names
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def __init__(
self,
adata_seq: AnnData,
adata_spatial: AnnData,
generative_distributions: List = ["zinb", "nb"],
model_library_size: List = [True, False],
n_latent: int = 10,
use_cuda: bool = True,
**model_kwargs,
):
super(GIMVI, self).__init__(use_cuda=use_cuda)
self.use_cuda = use_cuda and torch.cuda.is_available()
self.adatas = [adata_seq, adata_spatial]
self.scvi_setup_dicts_ = {
"seq": adata_seq.uns["_scvi"],
"spatial": adata_spatial.uns["_scvi"],
}
seq_var_names = _get_var_names_from_setup_anndata(adata_seq)
spatial_var_names = _get_var_names_from_setup_anndata(adata_spatial)
if not set(spatial_var_names) <= set(seq_var_names):
raise ValueError("spatial genes needs to be subset of seq genes")
spatial_gene_loc = [np.argwhere(seq_var_names == g)[0] for g in spatial_var_names]
spatial_gene_loc = np.concatenate(spatial_gene_loc)
gene_mappings = [slice(None), spatial_gene_loc]
sum_stats = [d.uns["_scvi"]["summary_stats"] for d in self.adatas]
n_inputs = [s["n_vars"] for s in sum_stats]
total_genes = adata_seq.uns["_scvi"]["summary_stats"]["n_vars"]
# since we are combining datasets, we need to increment the batch_idx
# of one of the datasets
adata_seq_n_batches = adata_seq.uns["_scvi"]["summary_stats"]["n_batch"]
adata_spatial.obs["_scvi_batch"] += adata_seq_n_batches
n_batches = sum([s["n_batch"] for s in sum_stats])
self.model = JVAE(
n_inputs,
total_genes,
gene_mappings,
generative_distributions,
model_library_size,
n_batch=n_batches,
n_latent=n_latent,
**model_kwargs,
)
self._model_summary_string = "gimVI model with params"
self.init_params_ = self._get_init_params(locals())
|
def __init__(
self,
adata_seq: AnnData,
adata_spatial: AnnData,
generative_distributions: List = ["zinb", "nb"],
model_library_size: List = [True, False],
n_latent: int = 10,
use_cuda: bool = True,
**model_kwargs,
):
super(GIMVI, self).__init__(use_cuda=use_cuda)
self.use_cuda = use_cuda and torch.cuda.is_available()
self.adatas = [adata_seq, adata_spatial]
seq_var_names = _get_var_names_from_setup_anndata(adata_seq)
spatial_var_names = _get_var_names_from_setup_anndata(adata_spatial)
if not set(spatial_var_names) <= set(seq_var_names):
raise ValueError("spatial genes needs to be subset of seq genes")
spatial_gene_loc = [np.argwhere(seq_var_names == g)[0] for g in spatial_var_names]
spatial_gene_loc = np.concatenate(spatial_gene_loc)
gene_mappings = [slice(None), spatial_gene_loc]
sum_stats = [d.uns["_scvi"]["summary_stats"] for d in self.adatas]
n_inputs = [s["n_vars"] for s in sum_stats]
total_genes = adata_seq.uns["_scvi"]["summary_stats"]["n_vars"]
# since we are combining datasets, we need to increment the batch_idx
# of one of the datasets
adata_seq_n_batches = adata_seq.uns["_scvi"]["summary_stats"]["n_batch"]
adata_spatial.obs["_scvi_batch"] += adata_seq_n_batches
n_batches = sum([s["n_batch"] for s in sum_stats])
self.model = JVAE(
n_inputs,
total_genes,
gene_mappings,
generative_distributions,
model_library_size,
n_batch=n_batches,
n_latent=n_latent,
**model_kwargs,
)
self._model_summary_string = "gimVI model with params"
self.init_params_ = self._get_init_params(locals())
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def train(
self,
n_epochs: Optional[int] = 200,
kappa: Optional[int] = 5,
discriminator: Optional[Classifier] = None,
train_size: float = 0.9,
frequency: int = 1,
n_epochs_kl_warmup: int = 400,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Train the model.
Parameters
----------
n_epochs
Number of passes through the dataset.
kappa
Scaling parameter for the discriminator loss.
discriminator
:class:`~scvi.core.modules.Classifier` object.
train_size
Size of training set in the range [0.0, 1.0].
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.trainer.Trainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.trainer.Trainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if discriminator is None:
discriminator = Classifier(self.model.n_latent, 32, 2, 3, logits=True)
self.trainer = JVAETrainer(
self.model,
discriminator,
self.adatas,
train_size,
frequency=frequency,
kappa=kappa,
n_epochs_kl_warmup=n_epochs_kl_warmup,
)
logger.info("Training for {} epochs.".format(n_epochs))
self.trainer.train(n_epochs=n_epochs, **train_fun_kwargs)
self.is_trained_ = True
self.history_ = self.trainer.history
|
def train(
self,
n_epochs: Optional[int] = 200,
kappa: Optional[int] = 5,
discriminator: Optional[Classifier] = None,
train_size: float = 0.9,
frequency: int = 1,
n_epochs_kl_warmup: int = 400,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Train the model.
Parameters
----------
n_epochs
Number of passes through the dataset.
kappa
Scaling parameter for the discriminator loss.
discriminator
:class:`~scvi.core.modules.Classifier` object.
train_size
Size of training set in the range [0.0, 1.0].
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.trainer.Trainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.trainer.Trainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if discriminator is None:
discriminator = Classifier(self.model.n_latent, 32, 2, 3, logits=True)
self.trainer = JVAETrainer(
self.model,
discriminator,
self.adatas,
train_size,
frequency=frequency,
kappa=kappa,
n_epochs_kl_warmup=n_epochs_kl_warmup,
)
logger.info("Training for {} epochs.".format(n_epochs))
self.trainer.train(n_epochs=n_epochs, **train_fun_kwargs)
self.is_trained_ = True
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def load(
cls,
dir_path: str,
adata_seq: Optional[AnnData] = None,
adata_spatial: Optional[AnnData] = None,
use_cuda: bool = False,
):
"""
Instantiate a model from the saved output.
Parameters
----------
adata_seq
AnnData organized in the same way as data used to train model.
It is not necessary to run :func:`~scvi.data.setup_anndata`,
as AnnData is validated against the saved `scvi` setup dictionary.
AnnData must be registered via :func:`~scvi.data.setup_anndata`.
adata_spatial
AnnData organized in the same way as data used to train model.
If None, will check for and load anndata saved with the model.
dir_path
Path to saved outputs.
use_cuda
Whether to load model on GPU.
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> vae = GIMVI.load(adata_seq, adata_spatial, save_path)
>>> vae.get_latent_representation()
"""
model_path = os.path.join(dir_path, "model_params.pt")
setup_dict_path = os.path.join(dir_path, "attr.pkl")
seq_data_path = os.path.join(dir_path, "adata_seq.h5ad")
spatial_data_path = os.path.join(dir_path, "adata_spatial.h5ad")
seq_var_names_path = os.path.join(dir_path, "var_names_seq.csv")
spatial_var_names_path = os.path.join(dir_path, "var_names_spatial.csv")
if adata_seq is None and os.path.exists(seq_data_path):
adata_seq = read(seq_data_path)
elif adata_seq is None and not os.path.exists(seq_data_path):
raise ValueError("Save path contains no saved anndata and no adata was passed.")
if adata_spatial is None and os.path.exists(spatial_data_path):
adata_spatial = read(spatial_data_path)
elif adata_spatial is None and not os.path.exists(spatial_data_path):
raise ValueError("Save path contains no saved anndata and no adata was passed.")
adatas = [adata_seq, adata_spatial]
seq_var_names = np.genfromtxt(seq_var_names_path, delimiter=",", dtype=str)
spatial_var_names = np.genfromtxt(spatial_var_names_path, delimiter=",", dtype=str)
var_names = [seq_var_names, spatial_var_names]
for i, adata in enumerate(adatas):
saved_var_names = var_names[i]
user_var_names = adata.var_names.astype(str)
if not np.array_equal(saved_var_names, user_var_names):
logger.warning(
"var_names for adata passed in does not match var_names of "
"adata used to train the model. For valid results, the vars "
"need to be the same and in the same order as the adata used to train the model."
)
with open(setup_dict_path, "rb") as handle:
attr_dict = pickle.load(handle)
scvi_setup_dicts = attr_dict.pop("scvi_setup_dicts_")
transfer_anndata_setup(scvi_setup_dicts["seq"], adata_seq)
transfer_anndata_setup(scvi_setup_dicts["spatial"], adata_spatial)
# get the parameters for the class init signiture
init_params = attr_dict.pop("init_params_")
# update use_cuda from the saved model
use_cuda = use_cuda and torch.cuda.is_available()
init_params["use_cuda"] = use_cuda
# grab all the parameters execept for kwargs (is a dict)
non_kwargs = {k: v for k, v in init_params.items() if not isinstance(v, dict)}
# expand out kwargs
kwargs = {k: v for k, v in init_params.items() if isinstance(v, dict)}
kwargs = {k: v for (i, j) in kwargs.items() for (k, v) in j.items()}
model = cls(adata_seq, adata_spatial, **non_kwargs, **kwargs)
for attr, val in attr_dict.items():
setattr(model, attr, val)
if use_cuda:
model.model.load_state_dict(torch.load(model_path))
model.model.cuda()
else:
device = torch.device("cpu")
model.model.load_state_dict(torch.load(model_path, map_location=device))
model.model.eval()
return model
|
def load(
cls,
adata_seq: AnnData,
adata_spatial: AnnData,
dir_path: str,
use_cuda: bool = False,
):
"""
Instantiate a model from the saved output.
Parameters
----------
adata_seq
AnnData organized in the same way as data used to train model.
AnnData must be registered via :func:`~scvi.data.setup_anndata`.
adata_spatial
AnnData organized in the same way as data used to train model.
AnnData must be registered via :func:`~scvi.data.setup_anndata`.
dir_path
Path to saved outputs.
use_cuda
Whether to load model on GPU.
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> vae = GIMVI.load(adata_seq, adata_spatial, save_path)
>>> vae.get_latent_representation()
"""
model_path = os.path.join(dir_path, "model_params.pt")
# optimizer_path = os.path.join(dir_path, "optimizer_params.pt")
setup_dict_path = os.path.join(dir_path, "attr.pkl")
with open(setup_dict_path, "rb") as handle:
attr_dict = pickle.load(handle)
# get the parameters for the class init signiture
init_params = attr_dict.pop("init_params_")
# grab all the parameters execept for kwargs (is a dict)
non_kwargs = {k: v for k, v in init_params.items() if not isinstance(v, dict)}
# expand out kwargs
kwargs = {k: v for k, v in init_params.items() if isinstance(v, dict)}
kwargs = {k: v for (i, j) in kwargs.items() for (k, v) in j.items()}
model = cls(adata_seq, adata_spatial, **non_kwargs, **kwargs)
for attr, val in attr_dict.items():
setattr(model, attr, val)
use_cuda = use_cuda and torch.cuda.is_available()
if use_cuda:
model.model.load_state_dict(torch.load(model_path))
model.model.cuda()
else:
device = torch.device("cpu")
model.model.load_state_dict(torch.load(model_path, map_location=device))
model.model.eval()
return model
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def __init__(
self,
adata: AnnData,
unlabeled_category: Union[str, int, float],
pretrained_model: Optional[SCVI] = None,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
dropout_rate: float = 0.1,
dispersion: Literal["gene", "gene-batch", "gene-label", "gene-cell"] = "gene",
gene_likelihood: Literal["zinb", "nb", "poisson"] = "zinb",
use_cuda: bool = True,
**model_kwargs,
):
super(SCANVI, self).__init__(adata, use_cuda=use_cuda)
self.unlabeled_category = unlabeled_category
if pretrained_model is not None:
if pretrained_model.is_trained is False:
raise ValueError("pretrained model has not been trained")
self._base_model = pretrained_model.model
self._is_trained_base = True
else:
self._base_model = VAE(
n_input=self.summary_stats["n_vars"],
n_batch=self.summary_stats["n_batch"],
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
dropout_rate=dropout_rate,
dispersion=dispersion,
gene_likelihood=gene_likelihood,
**model_kwargs,
)
self._is_trained_base = False
self.model = SCANVAE(
n_input=self.summary_stats["n_vars"],
n_batch=self.summary_stats["n_batch"],
n_labels=self.summary_stats["n_labels"],
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
dropout_rate=dropout_rate,
dispersion=dispersion,
gene_likelihood=gene_likelihood,
**model_kwargs,
)
# get indices for labeled and unlabeled cells
key = self.scvi_setup_dict_["data_registry"][_CONSTANTS.LABELS_KEY]["attr_key"]
self._label_mapping = self.scvi_setup_dict_["categorical_mappings"][key]["mapping"]
original_key = self.scvi_setup_dict_["categorical_mappings"][key]["original_key"]
labels = np.asarray(self.adata.obs[original_key]).ravel()
self._code_to_label = {i: l for i, l in enumerate(self._label_mapping)}
self._unlabeled_indices = np.argwhere(labels == self.unlabeled_category).ravel()
self._labeled_indices = np.argwhere(labels != self.unlabeled_category).ravel()
self.unsupervised_history_ = None
self.semisupervised_history_ = None
self._model_summary_string = (
"ScanVI Model with params: \nunlabeled_category: {}, n_hidden: {}, n_latent: {}"
", n_layers: {}, dropout_rate: {}, dispersion: {}, gene_likelihood: {}"
).format(
unlabeled_category,
n_hidden,
n_latent,
n_layers,
dropout_rate,
dispersion,
gene_likelihood,
)
self.init_params_ = self._get_init_params(locals())
|
def __init__(
self,
adata: AnnData,
unlabeled_category: Union[str, int, float],
pretrained_model: Optional[SCVI] = None,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
dropout_rate: float = 0.1,
dispersion: Literal["gene", "gene-batch", "gene-label", "gene-cell"] = "gene",
gene_likelihood: Literal["zinb", "nb", "poisson"] = "zinb",
use_cuda: bool = True,
**model_kwargs,
):
super(SCANVI, self).__init__(adata, use_cuda=use_cuda)
self.unlabeled_category = unlabeled_category
if pretrained_model is not None:
if pretrained_model.is_trained is False:
raise ValueError("pretrained model has not been trained")
self._base_model = pretrained_model.model
self._is_trained_base = True
else:
self._base_model = VAE(
n_input=self.summary_stats["n_vars"],
n_batch=self.summary_stats["n_batch"],
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
dropout_rate=dropout_rate,
dispersion=dispersion,
gene_likelihood=gene_likelihood,
**model_kwargs,
)
self._is_trained_base = False
self.model = SCANVAE(
n_input=self.summary_stats["n_vars"],
n_batch=self.summary_stats["n_batch"],
n_labels=self.summary_stats["n_labels"],
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
dropout_rate=dropout_rate,
dispersion=dispersion,
gene_likelihood=gene_likelihood,
**model_kwargs,
)
# get indices for labeled and unlabeled cells
key = self.scvi_setup_dict_["data_registry"][_CONSTANTS.LABELS_KEY]["attr_key"]
self._label_mapping = self.scvi_setup_dict_["categorical_mappings"][key]["mapping"]
original_key = self.scvi_setup_dict_["categorical_mappings"][key]["original_key"]
labels = np.asarray(self.adata.obs[original_key]).ravel()
self._code_to_label = {i: l for i, l in enumerate(self._label_mapping)}
self._unlabeled_indices = np.argwhere(labels == self.unlabeled_category).ravel()
self._labeled_indices = np.argwhere(labels != self.unlabeled_category).ravel()
self._model_summary_string = (
"ScanVI Model with params: \nunlabeled_category: {}, n_hidden: {}, n_latent: {}"
", n_layers: {}, dropout_rate: {}, dispersion: {}, gene_likelihood: {}"
).format(
unlabeled_category,
n_hidden,
n_latent,
n_layers,
dropout_rate,
dispersion,
gene_likelihood,
)
self.init_params_ = self._get_init_params(locals())
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def history(self):
"""Returns computed metrics during training."""
return {
"unsupervised_trainer_history": self.unsupervised_history_,
"semisupervised_trainer_history": self.semisupervised_history_,
}
|
def history(self):
"""Returns computed metrics during training."""
if self.is_trained_ is False:
return {}
else:
return {
"unsupervised_trainer_history": self._unsupervised_trainer.history,
"semisupervised_trainer_history": self.trainer.history,
}
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def train(
self,
n_epochs_unsupervised: Optional[int] = None,
n_epochs_semisupervised: Optional[int] = None,
train_size: float = 0.9,
test_size: float = None,
lr: float = 1e-3,
n_epochs_kl_warmup: int = 400,
n_iter_kl_warmup: Optional[int] = None,
frequency: Optional[int] = None,
unsupervised_trainer_kwargs: dict = {},
semisupervised_trainer_kwargs: dict = {},
unsupervised_train_kwargs: dict = {},
semisupervised_train_kwargs: dict = {},
):
"""
Train the model.
Parameters
----------
n_epochs_unsupervised
Number of passes through the dataset for unsupervised pre-training.
n_epochs_semisupervised
Number of passes through the dataset for semisupervised training.
train_size
Size of training set in the range [0.0, 1.0].
test_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + test_size < 1`, the remaining cells belong to a validation set.
lr
Learning rate for optimization.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
n_iter_kl_warmup
Number of minibatches for scaling term on KL divergence to go from 0 to 1.
To use, set to not `None` and set `n_epochs_kl_warmup` to `None`.
frequency
Frequency with which metrics are computed on the data for train/test/val sets for both
the unsupervised and semisupervised trainers. If you'd like a different frequency for
the semisupervised trainer, set frequency in semisupervised_train_kwargs.
unsupervised_trainer_kwargs
Other keyword args for :class:`~scvi.core.trainers.UnsupervisedTrainer`.
semisupervised_trainer_kwargs
Other keyword args for :class:`~scvi.core.trainers.SemiSupervisedTrainer`.
semisupervised_train_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.SemiSupervisedTrainer`.
"""
unsupervised_trainer_kwargs = dict(unsupervised_trainer_kwargs)
semisupervised_trainer_kwargs = dict(semisupervised_trainer_kwargs)
unsupervised_train_kwargs = dict(unsupervised_train_kwargs)
semisupervised_train_kwargs = dict(semisupervised_train_kwargs)
if n_epochs_unsupervised is None:
n_epochs_unsupervised = np.min(
[round((20000 / self.adata.shape[0]) * 400), 400]
)
if n_epochs_semisupervised is None:
n_epochs_semisupervised = int(
np.min([10, np.max([2, round(n_epochs_unsupervised / 3.0)])])
)
logger.info(
"Training Unsupervised Trainer for {} epochs.".format(n_epochs_unsupervised)
)
logger.info(
"Training SemiSupervised Trainer for {} epochs.".format(n_epochs_semisupervised)
)
if self._is_trained_base is not True:
self._unsupervised_trainer = UnsupervisedTrainer(
self._base_model,
self.adata,
train_size=train_size,
test_size=test_size,
n_iter_kl_warmup=n_iter_kl_warmup,
n_epochs_kl_warmup=n_epochs_kl_warmup,
frequency=frequency,
use_cuda=self.use_cuda,
**unsupervised_trainer_kwargs,
)
self._unsupervised_trainer.train(
n_epochs=n_epochs_unsupervised, lr=lr, **unsupervised_train_kwargs
)
self.unsupervised_history_ = self._unsupervised_trainer.history
self._is_trained_base = True
self.model.load_state_dict(self._base_model.state_dict(), strict=False)
if "frequency" not in semisupervised_trainer_kwargs and frequency is not None:
semisupervised_trainer_kwargs["frequency"] = frequency
self.trainer = SemiSupervisedTrainer(
self.model,
self.adata,
use_cuda=self.use_cuda,
**semisupervised_trainer_kwargs,
)
self.trainer.unlabelled_set = self.trainer.create_scvi_dl(
indices=self._unlabeled_indices
)
self.trainer.labelled_set = self.trainer.create_scvi_dl(
indices=self._labeled_indices
)
self.semisupervised_history_ = self.trainer.history
self.trainer.train(
n_epochs=n_epochs_semisupervised,
**semisupervised_train_kwargs,
)
self.is_trained_ = True
|
def train(
self,
n_epochs_unsupervised: Optional[int] = None,
n_epochs_semisupervised: Optional[int] = None,
train_size: float = 0.9,
test_size: float = None,
lr: float = 1e-3,
n_epochs_kl_warmup: int = 400,
n_iter_kl_warmup: Optional[int] = None,
frequency: Optional[int] = None,
unsupervised_trainer_kwargs: dict = {},
semisupervised_trainer_kwargs: dict = {},
unsupervised_train_kwargs: dict = {},
semisupervised_train_kwargs: dict = {},
):
"""
Train the model.
Parameters
----------
n_epochs_unsupervised
Number of passes through the dataset for unsupervised pre-training.
n_epochs_semisupervised
Number of passes through the dataset for semisupervised training.
train_size
Size of training set in the range [0.0, 1.0].
test_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + test_size < 1`, the remaining cells belong to a validation set.
lr
Learning rate for optimization.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
n_iter_kl_warmup
Number of minibatches for scaling term on KL divergence to go from 0 to 1.
To use, set to not `None` and set `n_epochs_kl_warmup` to `None`.
frequency
Frequency with which metrics are computed on the data for train/test/val sets for both
the unsupervised and semisupervised trainers. If you'd like a different frequency for
the semisupervised trainer, set frequency in semisupervised_train_kwargs.
unsupervised_trainer_kwargs
Other keyword args for :class:`~scvi.core.trainers.UnsupervisedTrainer`.
semisupervised_trainer_kwargs
Other keyword args for :class:`~scvi.core.trainers.SemiSupervisedTrainer`.
semisupervised_train_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.SemiSupervisedTrainer`.
"""
unsupervised_trainer_kwargs = dict(unsupervised_trainer_kwargs)
semisupervised_trainer_kwargs = dict(semisupervised_trainer_kwargs)
unsupervised_train_kwargs = dict(unsupervised_train_kwargs)
semisupervised_train_kwargs = dict(semisupervised_train_kwargs)
if n_epochs_unsupervised is None:
n_epochs_unsupervised = np.min(
[round((20000 / self.adata.shape[0]) * 400), 400]
)
if n_epochs_semisupervised is None:
n_epochs_semisupervised = int(
np.min([10, np.max([2, round(n_epochs_unsupervised / 3.0)])])
)
logger.info(
"Training Unsupervised Trainer for {} epochs.".format(n_epochs_unsupervised)
)
logger.info(
"Training SemiSupervised Trainer for {} epochs.".format(n_epochs_semisupervised)
)
if self._is_trained_base is not True:
self._unsupervised_trainer = UnsupervisedTrainer(
self._base_model,
self.adata,
train_size=train_size,
test_size=test_size,
n_iter_kl_warmup=n_iter_kl_warmup,
n_epochs_kl_warmup=n_epochs_kl_warmup,
frequency=frequency,
use_cuda=self.use_cuda,
**unsupervised_trainer_kwargs,
)
self._unsupervised_trainer.train(
n_epochs=n_epochs_unsupervised, lr=lr, **unsupervised_train_kwargs
)
self._is_trained_base = True
self.model.load_state_dict(self._base_model.state_dict(), strict=False)
if "frequency" not in semisupervised_trainer_kwargs and frequency is not None:
semisupervised_trainer_kwargs["frequency"] = frequency
self.trainer = SemiSupervisedTrainer(
self.model,
self.adata,
use_cuda=self.use_cuda,
**semisupervised_trainer_kwargs,
)
self.trainer.unlabelled_set = self.trainer.create_scvi_dl(
indices=self._unlabeled_indices
)
self.trainer.labelled_set = self.trainer.create_scvi_dl(
indices=self._labeled_indices
)
self.trainer.train(
n_epochs=n_epochs_semisupervised,
**semisupervised_train_kwargs,
)
self.is_trained_ = True
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def train(
self,
n_epochs: int = 400,
train_size: float = 0.9,
test_size: Optional[float] = None,
lr: float = 4e-3,
n_epochs_kl_warmup: Optional[int] = None,
n_iter_kl_warmup: Union[Literal["auto"], int] = "auto",
batch_size: int = 256,
frequency: Optional[int] = None,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Train the model.
Parameters
----------
n_epochs
Number of passes through the dataset.
train_size
Size of training set in the range [0.0, 1.0].
test_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + test_size < 1`, the remaining cells belong to a validation set.
lr
Learning rate for optimization.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
n_iter_kl_warmup
Number of minibatches for scaling term on KL divergence to go from 0 to 1.
To use, set to not `None` and set `n_epochs_kl_warmup` to `None`.
batch_size
Minibatch size to use during training.
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.TotalTrainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.TotalTrainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if "totalvi_batch_mask" in self.scvi_setup_dict_.keys():
imputation = True
else:
imputation = False
self.trainer = TotalTrainer(
self.model,
self.adata,
train_size=train_size,
test_size=test_size,
n_iter_kl_warmup=n_iter_kl_warmup,
n_epochs_kl_warmup=n_epochs_kl_warmup,
frequency=frequency,
batch_size=batch_size,
use_adversarial_loss=imputation,
use_cuda=self.use_cuda,
**kwargs,
)
# for autotune
if "n_epochs" not in train_fun_kwargs:
train_fun_kwargs["n_epochs"] = n_epochs
if "lr" not in train_fun_kwargs:
train_fun_kwargs["lr"] = lr
logger.info("Training for {} epochs.".format(n_epochs))
self.trainer.train(**train_fun_kwargs)
self.is_trained_ = True
self.train_indices_ = self.trainer.train_set.indices
self.test_indices_ = self.trainer.test_set.indices
self.validation_indices_ = self.trainer.validation_set.indices
self.history_ = self.trainer.history
|
def train(
self,
n_epochs: int = 400,
train_size: float = 0.9,
test_size: Optional[float] = None,
lr: float = 4e-3,
n_epochs_kl_warmup: Optional[int] = None,
n_iter_kl_warmup: Union[Literal["auto"], int] = "auto",
batch_size: int = 256,
frequency: Optional[int] = None,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Train the model.
Parameters
----------
n_epochs
Number of passes through the dataset.
train_size
Size of training set in the range [0.0, 1.0].
test_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + test_size < 1`, the remaining cells belong to a validation set.
lr
Learning rate for optimization.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
n_iter_kl_warmup
Number of minibatches for scaling term on KL divergence to go from 0 to 1.
To use, set to not `None` and set `n_epochs_kl_warmup` to `None`.
batch_size
Minibatch size to use during training.
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.TotalTrainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.TotalTrainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if "totalvi_batch_mask" in self.scvi_setup_dict_.keys():
imputation = True
else:
imputation = False
self.trainer = TotalTrainer(
self.model,
self.adata,
train_size=train_size,
test_size=test_size,
n_iter_kl_warmup=n_iter_kl_warmup,
n_epochs_kl_warmup=n_epochs_kl_warmup,
frequency=frequency,
batch_size=batch_size,
use_adversarial_loss=imputation,
use_cuda=self.use_cuda,
**kwargs,
)
# for autotune
if "n_epochs" not in train_fun_kwargs:
train_fun_kwargs["n_epochs"] = n_epochs
if "lr" not in train_fun_kwargs:
train_fun_kwargs["lr"] = lr
logger.info("Training for {} epochs.".format(n_epochs))
self.trainer.train(**train_fun_kwargs)
self.is_trained_ = True
self.train_indices_ = self.trainer.train_set.indices
self.test_indices_ = self.trainer.test_set.indices
self.validation_indices_ = self.trainer.validation_set.indices
|
https://github.com/YosefLab/scvi-tools/issues/816
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-125-eb1e0fcfe1d1> in <module>
1 de = model.differential_expression(
----> 2 groupby="leiden",
3 )
4 de.head()
~/anaconda3/lib/python3.7/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
179 adata = self._validate_anndata(adata)
180
--> 181 col_names = _get_var_names_from_setup_anndata(adata)
182 model_fn = partial(
183 self.get_normalized_expression,
~/anaconda3/lib/python3.7/site-packages/scvi/model/_utils.py in _get_var_names_from_setup_anndata(adata)
119 adata.var_names
120 if adata.uns["_scvi"]["use_raw"] is False
--> 121 else adata.raw.var_names
122 )
123
AttributeError: 'NoneType' object has no attribute 'var_names'
|
AttributeError
|
def _de_core(
adata,
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
all_stats_fn,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
fdr,
**kwargs,
):
"""Internal function for DE interface."""
if group1 is None and idx1 is None:
group1 = adata.obs[groupby].cat.categories.tolist()
if len(group1) == 1:
raise ValueError(
"Only a single group in the data. Can't run DE on a single group."
)
if isinstance(group1, str):
group1 = [group1]
# make a temp obs key using indices
temp_key = None
if idx1 is not None:
idx1 = np.asarray(idx1).ravel()
g1_key = "one"
obs_col = np.array(["None"] * adata.shape[0], dtype=str)
obs_col[idx1] = g1_key
group2 = None if idx2 is None else "two"
if idx2 is not None:
idx2 = np.asarray(idx2).ravel()
obs_col[idx2] = group2
temp_key = "_scvi_temp_de"
adata.obs[temp_key] = obs_col
groupby = temp_key
group1 = [g1_key]
df_results = []
dc = DifferentialComputation(model_fn, adata)
for g1 in track(
group1,
description="DE...",
):
cell_idx1 = (adata.obs[groupby] == g1).to_numpy().ravel()
if group2 is None:
cell_idx2 = ~cell_idx1
else:
cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
all_info = dc.get_bayes_factors(
cell_idx1,
cell_idx2,
mode=mode,
delta=delta,
batchid1=batchid1,
batchid2=batchid2,
use_observed_batches=not batch_correction,
**kwargs,
)
if all_stats is True:
genes_properties_dict = all_stats_fn(adata, cell_idx1, cell_idx2)
all_info = {**all_info, **genes_properties_dict}
res = pd.DataFrame(all_info, index=col_names)
sort_key = "proba_de" if mode == "change" else "bayes_factor"
res = res.sort_values(by=sort_key, ascending=False)
if mode == "change":
res["is_de_fdr_{}".format(fdr)] = _fdr_de_prediction(
res["proba_de"], fdr=fdr
)
if idx1 is None:
g2 = "Rest" if group2 is None else group2
res["comparison"] = "{} vs {}".format(g1, g2)
df_results.append(res)
if temp_key is not None:
del adata.obs[temp_key]
result = pd.concat(df_results, axis=0)
return result
|
def _de_core(
adata,
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
all_stats_fn,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
fdr,
**kwargs,
):
"""Internal function for DE interface."""
if group1 is None and idx1 is None:
group1 = adata.obs[groupby].cat.categories.tolist()
if isinstance(group1, str):
group1 = [group1]
# make a temp obs key using indices
temp_key = None
if idx1 is not None:
idx1 = np.asarray(idx1).ravel()
g1_key = "one"
obs_col = np.array(["None"] * adata.shape[0], dtype=str)
obs_col[idx1] = g1_key
group2 = None if idx2 is None else "two"
if idx2 is not None:
idx2 = np.asarray(idx2).ravel()
obs_col[idx2] = group2
temp_key = "_scvi_temp_de"
adata.obs[temp_key] = obs_col
groupby = temp_key
group1 = [g1_key]
df_results = []
dc = DifferentialComputation(model_fn, adata)
for g1 in track(
group1,
description="DE...",
):
cell_idx1 = (adata.obs[groupby] == g1).to_numpy().ravel()
if group2 is None:
cell_idx2 = ~cell_idx1
else:
cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
all_info = dc.get_bayes_factors(
cell_idx1,
cell_idx2,
mode=mode,
delta=delta,
batchid1=batchid1,
batchid2=batchid2,
use_observed_batches=not batch_correction,
**kwargs,
)
if all_stats is True:
genes_properties_dict = all_stats_fn(adata, cell_idx1, cell_idx2)
all_info = {**all_info, **genes_properties_dict}
res = pd.DataFrame(all_info, index=col_names)
sort_key = "proba_de" if mode == "change" else "bayes_factor"
res = res.sort_values(by=sort_key, ascending=False)
if mode == "change":
res["is_de_fdr_{}".format(fdr)] = _fdr_de_prediction(
res["proba_de"], fdr=fdr
)
if idx1 is None:
g2 = "Rest" if group2 is None else group2
res["comparison"] = "{} vs {}".format(g1, g2)
df_results.append(res)
if temp_key is not None:
del adata.obs[temp_key]
result = pd.concat(df_results, axis=0)
return result
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def get_normalized_expression(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
gene_list: Optional[Sequence[str]] = None,
library_size: Union[float, Literal["latent"]] = 1,
n_samples: int = 1,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
) -> Union[np.ndarray, pd.DataFrame]:
r"""
Returns the normalized (decoded) gene expression.
This is denoted as :math:`\rho_n` in the scVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used.
- int, then batch transform_batch is used.
gene_list
Return frequencies of expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
library_size
Scale the expression frequencies to a common library size.
This allows gene expression levels to be interpreted on a common scale of relevant
magnitude. If set to `"latent"`, use the latent libary size.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes
gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.
Otherwise, it defaults to `True`.
Returns
-------
If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, genes)`.
Otherwise, shape is `(cells, genes)`. In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.
"""
adata = self._validate_anndata(adata)
scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
transform_batch = _get_batch_code_from_category(adata, transform_batch)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = _get_var_names_from_setup_anndata(adata)
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if n_samples > 1 and return_mean is False:
if return_numpy is False:
logger.warning(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if indices is None:
indices = np.arange(adata.n_obs)
if library_size == "latent":
model_fn = self.model.get_sample_rate
scaling = 1
else:
model_fn = self.model.get_sample_scale
scaling = library_size
exprs = []
for tensors in scdl:
x = tensors[_CONSTANTS.X_KEY]
batch_idx = tensors[_CONSTANTS.BATCH_KEY]
labels = tensors[_CONSTANTS.LABELS_KEY]
per_batch_exprs = []
for batch in transform_batch:
output = model_fn(
x,
batch_index=batch_idx,
y=labels,
n_samples=n_samples,
transform_batch=batch,
)[..., gene_mask]
output *= scaling
output = output.cpu().numpy()
per_batch_exprs.append(output)
per_batch_exprs = np.stack(
per_batch_exprs
) # shape is (len(transform_batch) x batch_size x n_var)
exprs += [per_batch_exprs.mean(0)]
if n_samples > 1:
# The -2 axis correspond to cells.
exprs = np.concatenate(exprs, axis=-2)
else:
exprs = np.concatenate(exprs, axis=0)
if n_samples > 1 and return_mean:
exprs = exprs.mean(0)
if return_numpy is None or return_numpy is False:
return pd.DataFrame(
exprs,
columns=adata.var_names[gene_mask],
index=adata.obs_names[indices],
)
else:
return exprs
|
def get_normalized_expression(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
gene_list: Optional[Sequence[str]] = None,
library_size: Union[float, Literal["latent"]] = 1,
n_samples: int = 1,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
) -> Union[np.ndarray, pd.DataFrame]:
r"""
Returns the normalized (decoded) gene expression.
This is denoted as :math:`\rho_n` in the scVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used.
- int, then batch transform_batch is used.
gene_list
Return frequencies of expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
library_size
Scale the expression frequencies to a common library size.
This allows gene expression levels to be interpreted on a common scale of relevant
magnitude. If set to `"latent"`, use the latent libary size.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes
gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.
Otherwise, it defaults to `True`.
Returns
-------
If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, genes)`.
Otherwise, shape is `(cells, genes)`. In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.
"""
adata = self._validate_anndata(adata)
scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(adata, transform_batch)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = _get_var_names_from_setup_anndata(adata)
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if n_samples > 1 and return_mean is False:
if return_numpy is False:
logger.warning(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if indices is None:
indices = np.arange(adata.n_obs)
if library_size == "latent":
model_fn = self.model.get_sample_rate
scaling = 1
else:
model_fn = self.model.get_sample_scale
scaling = library_size
exprs = []
for tensors in scdl:
x = tensors[_CONSTANTS.X_KEY]
batch_idx = tensors[_CONSTANTS.BATCH_KEY]
labels = tensors[_CONSTANTS.LABELS_KEY]
per_batch_exprs = []
for batch in transform_batch:
output = model_fn(
x,
batch_index=batch_idx,
y=labels,
n_samples=n_samples,
transform_batch=batch,
)[..., gene_mask]
output *= scaling
output = output.cpu().numpy()
per_batch_exprs.append(output)
per_batch_exprs = np.stack(
per_batch_exprs
) # shape is (len(transform_batch) x batch_size x n_var)
exprs += [per_batch_exprs.mean(0)]
if n_samples > 1:
# The -2 axis correspond to cells.
exprs = np.concatenate(exprs, axis=-2)
else:
exprs = np.concatenate(exprs, axis=0)
if n_samples > 1 and return_mean:
exprs = exprs.mean(0)
if return_numpy is None or return_numpy is False:
return pd.DataFrame(
exprs,
columns=adata.var_names[gene_mask],
index=adata.obs_names[indices],
)
else:
return exprs
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def get_feature_correlation_matrix(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 10,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
correlation_type: Literal["spearman", "pearson"] = "spearman",
) -> pd.DataFrame:
"""
Generate gene-gene correlation matrix using scvi uncertainty and expression.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution.
transform_batch
Batches to condition on.
If transform_batch is:
- None, then real observed batch is used.
- int, then batch transform_batch is used.
- list of int, then values are averaged over provided batches.
correlation_type
One of "pearson", "spearman".
Returns
-------
Gene-gene correlation matrix
"""
from scipy.stats import spearmanr
adata = self._validate_anndata(adata)
transform_batch = _get_batch_code_from_category(adata, transform_batch)
corr_mats = []
for b in transform_batch:
denoised_data = self._get_denoised_samples(
adata=adata,
indices=indices,
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if correlation_type == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
elif correlation_type == "spearman":
corr_matrix, _ = spearmanr(flattened)
else:
raise ValueError(
"Unknown correlation type. Choose one of 'spearman', 'pearson'."
)
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
var_names = _get_var_names_from_setup_anndata(adata)
return pd.DataFrame(corr_matrix, index=var_names, columns=var_names)
|
def get_feature_correlation_matrix(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 10,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
correlation_type: Literal["spearman", "pearson"] = "spearman",
) -> pd.DataFrame:
"""
Generate gene-gene correlation matrix using scvi uncertainty and expression.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution.
transform_batch
Batches to condition on.
If transform_batch is:
- None, then real observed batch is used.
- int, then batch transform_batch is used.
- list of int, then values are averaged over provided batches.
correlation_type
One of "pearson", "spearman".
Returns
-------
Gene-gene correlation matrix
"""
from scipy.stats import spearmanr
adata = self._validate_anndata(adata)
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(adata, transform_batch)
corr_mats = []
for b in transform_batch:
denoised_data = self._get_denoised_samples(
adata=adata,
indices=indices,
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if correlation_type == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
elif correlation_type == "spearman":
corr_matrix, _ = spearmanr(flattened)
else:
raise ValueError(
"Unknown correlation type. Choose one of 'spearman', 'pearson'."
)
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
var_names = _get_var_names_from_setup_anndata(adata)
return pd.DataFrame(corr_matrix, index=var_names, columns=var_names)
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def get_bayes_factors(
self,
idx1: Union[List[bool], np.ndarray],
idx2: Union[List[bool], np.ndarray],
mode: Literal["vanilla", "change"] = "vanilla",
batchid1: Optional[Sequence[Union[Number, str]]] = None,
batchid2: Optional[Sequence[Union[Number, str]]] = None,
use_observed_batches: Optional[bool] = False,
n_samples: int = 5000,
use_permutation: bool = False,
m_permutation: int = 10000,
change_fn: Optional[Union[str, Callable]] = None,
m1_domain_fn: Optional[Callable] = None,
delta: Optional[float] = 0.5,
cred_interval_lvls: Optional[Union[List[float], np.ndarray]] = None,
) -> Dict[str, np.ndarray]:
r"""
A unified method for differential expression inference.
Two modes coexist:
- the `"vanilla"` mode follows protocol described in [Lopez18]_ and [Xu19]_
In this case, we perform hypothesis testing based on the hypotheses
.. math::
M_1: h_1 > h_2 ~\text{and}~ M_2: h_1 \leq h_2.
DE can then be based on the study of the Bayes factors
.. math::
\log p(M_1 | x_1, x_2) / p(M_2 | x_1, x_2).
- the `"change"` mode (described in [Boyeau19]_)
This mode consists of estimating an effect size random variable (e.g., log fold-change) and
performing Bayesian hypothesis testing on this variable.
The `change_fn` function computes the effect size variable :math:`r` based on two inputs
corresponding to the posterior quantities (e.g., normalized expression) in both populations.
Hypotheses:
.. math::
M_1: r \in R_1 ~\text{(effect size r in region inducing differential expression)}
.. math::
M_2: r \notin R_1 ~\text{(no differential expression)}
To characterize the region :math:`R_1`, which induces DE, the user has two choices.
1. A common case is when the region :math:`[-\delta, \delta]` does not induce differential
expression. If the user specifies a threshold delta, we suppose that :math:`R_1 = \mathbb{R} \setminus [-\delta, \delta]`
2. Specify an specific indicator function:
.. math::
f: \mathbb{R} \mapsto \{0, 1\} ~\text{s.t.}~ r \in R_1 ~\text{iff.}~ f(r) = 1.
Decision-making can then be based on the estimates of
.. math::
p(M_1 \mid x_1, x_2).
Both modes require to sample the posterior distributions.
To that purpose, we sample the posterior in the following way:
1. The posterior is sampled `n_samples` times for each subpopulation.
2. For computational efficiency (posterior sampling is quite expensive), instead of
comparing the obtained samples element-wise, we can permute posterior samples.
Remember that computing the Bayes Factor requires sampling :math:`q(z_A \mid x_A)` and :math:`q(z_B \mid x_B)`.
Currently, the code covers several batch handling configurations:
1. If ``use_observed_batches=True``, then batch are considered as observations
and cells' normalized means are conditioned on real batch observations.
2. If case (cell group 1) and control (cell group 2) are conditioned on the same
batch ids. This requires ``set(batchid1) == set(batchid2)`` or ``batchid1 == batchid2 === None``.
3. If case and control are conditioned on different batch ids that do not intersect
i.e., ``set(batchid1) != set(batchid2)`` and ``len(set(batchid1).intersection(set(batchid2))) == 0``.
This function does not cover other cases yet and will warn users in such cases.
Parameters
----------
mode
one of ["vanilla", "change"]
idx1
bool array masking subpopulation cells 1. Should be True where cell is
from associated population
idx2
bool array masking subpopulation cells 2. Should be True where cell is
from associated population
batchid1
List of batch ids for which you want to perform DE Analysis for
subpopulation 1. By default, all ids are taken into account
batchid2
List of batch ids for which you want to perform DE Analysis for
subpopulation 2. By default, all ids are taken into account
use_observed_batches
Whether posterior values are conditioned on observed
batches
n_samples
Number of posterior samples
use_permutation
Activates step 2 described above.
Simply formulated, pairs obtained from posterior sampling
will be randomly permuted so that the number of pairs used
to compute Bayes Factors becomes `m_permutation`.
m_permutation
Number of times we will "mix" posterior samples in step 2.
Only makes sense when `use_permutation=True`
change_fn
function computing effect size based on both posterior values
m1_domain_fn
custom indicator function of effect size regions
inducing differential expression
delta
specific case of region inducing differential expression.
In this case, we suppose that :math:`R \setminus [-\delta, \delta]` does not induce differential expression
(LFC case)
cred_interval_lvls
List of credible interval levels to compute for the posterior
LFC distribution
Returns
-------
Differential expression properties
"""
# if not np.array_equal(self.indices, np.arange(len(self.dataset))):
# logger.warning(
# "Differential expression requires a Posterior object created with all indices."
# )
eps = 1e-8 # used for numerical stability
# Normalized means sampling for both populations
scales_batches_1 = self.scale_sampler(
selection=idx1,
batchid=batchid1,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
)
scales_batches_2 = self.scale_sampler(
selection=idx2,
batchid=batchid2,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
)
px_scale_mean1 = scales_batches_1["scale"].mean(axis=0)
px_scale_mean2 = scales_batches_2["scale"].mean(axis=0)
# Sampling pairs
# The objective of code section below is to ensure than the samples of normalized
# means we consider are conditioned on the same batch id
batchid1_vals = np.unique(scales_batches_1["batch"])
batchid2_vals = np.unique(scales_batches_2["batch"])
create_pairs_from_same_batches = (
set(batchid1_vals) == set(batchid2_vals)
) and not use_observed_batches
if create_pairs_from_same_batches:
# First case: same batch normalization in two groups
logger.debug("Same batches in both cell groups")
n_batches = len(set(batchid1_vals))
n_samples_per_batch = (
m_permutation // n_batches if m_permutation is not None else None
)
scales_1 = []
scales_2 = []
for batch_val in set(batchid1_vals):
# Select scale samples that originate from the same batch id
scales_1_batch = scales_batches_1["scale"][
scales_batches_1["batch"] == batch_val
]
scales_2_batch = scales_batches_2["scale"][
scales_batches_2["batch"] == batch_val
]
# Create more pairs
scales_1_local, scales_2_local = pairs_sampler(
scales_1_batch,
scales_2_batch,
use_permutation=use_permutation,
m_permutation=n_samples_per_batch,
)
scales_1.append(scales_1_local)
scales_2.append(scales_2_local)
scales_1 = np.concatenate(scales_1, axis=0)
scales_2 = np.concatenate(scales_2, axis=0)
else:
logger.debug("Ignoring batch conditionings to compare means")
if len(set(batchid1_vals).intersection(set(batchid2_vals))) >= 1:
warnings.warn(
"Batchids of cells groups 1 and 2 are different but have an non-null "
"intersection. Specific handling of such situations is not implemented "
"yet and batch correction is not trustworthy."
)
scales_1, scales_2 = pairs_sampler(
scales_batches_1["scale"],
scales_batches_2["scale"],
use_permutation=use_permutation,
m_permutation=m_permutation,
)
# Core of function: hypotheses testing based on the posterior samples we obtained above
if mode == "vanilla":
logger.debug("Differential expression using vanilla mode")
proba_m1 = np.mean(scales_1 > scales_2, 0)
proba_m2 = 1.0 - proba_m1
res = dict(
proba_m1=proba_m1,
proba_m2=proba_m2,
bayes_factor=np.log(proba_m1 + eps) - np.log(proba_m2 + eps),
scale1=px_scale_mean1,
scale2=px_scale_mean2,
)
elif mode == "change":
logger.debug("Differential expression using change mode")
# step 1: Construct the change function
def lfc(x, y):
return np.log2(x) - np.log2(y)
if change_fn == "log-fold" or change_fn is None:
change_fn = lfc
elif not callable(change_fn):
raise ValueError("'change_fn' attribute not understood")
# step2: Construct the DE area function
if m1_domain_fn is None:
delta = delta if delta is not None else 0.5
def m1_domain_fn(samples):
return np.abs(samples) >= delta
change_fn_specs = inspect.getfullargspec(change_fn)
domain_fn_specs = inspect.getfullargspec(m1_domain_fn)
if (len(change_fn_specs.args) != 2) | (len(domain_fn_specs.args) != 1):
raise ValueError(
"change_fn should take exactly two parameters as inputs; m1_domain_fn one parameter."
)
try:
change_distribution = change_fn(scales_1, scales_2)
is_de = m1_domain_fn(change_distribution)
except TypeError:
raise TypeError(
"change_fn or m1_domain_fn have has wrong properties."
"Please ensure that these functions have the right signatures and"
"outputs and that they can process numpy arrays"
)
proba_m1 = np.mean(is_de, 0)
change_distribution_props = describe_continuous_distrib(
samples=change_distribution,
credible_intervals_levels=cred_interval_lvls,
)
change_distribution_props = {
"lfc_" + key: val for (key, val) in change_distribution_props.items()
}
res = dict(
proba_de=proba_m1,
proba_not_de=1.0 - proba_m1,
bayes_factor=np.log(proba_m1 + eps) - np.log(1.0 - proba_m1 + eps),
scale1=px_scale_mean1,
scale2=px_scale_mean2,
**change_distribution_props,
)
else:
raise NotImplementedError("Mode {mode} not recognized".format(mode=mode))
return res
|
def get_bayes_factors(
self,
idx1: Union[List[bool], np.ndarray],
idx2: Union[List[bool], np.ndarray],
mode: Literal["vanilla", "change"] = "vanilla",
batchid1: Optional[Union[List[int], np.ndarray]] = None,
batchid2: Optional[Union[List[int], np.ndarray]] = None,
use_observed_batches: Optional[bool] = False,
n_samples: int = 5000,
use_permutation: bool = False,
m_permutation: int = 10000,
change_fn: Optional[Union[str, Callable]] = None,
m1_domain_fn: Optional[Callable] = None,
delta: Optional[float] = 0.5,
cred_interval_lvls: Optional[Union[List[float], np.ndarray]] = None,
) -> Dict[str, np.ndarray]:
r"""
A unified method for differential expression inference.
Two modes coexist:
- the `"vanilla"` mode follows protocol described in [Lopez18]_ and [Xu19]_
In this case, we perform hypothesis testing based on the hypotheses
.. math::
M_1: h_1 > h_2 ~\text{and}~ M_2: h_1 \leq h_2.
DE can then be based on the study of the Bayes factors
.. math::
\log p(M_1 | x_1, x_2) / p(M_2 | x_1, x_2).
- the `"change"` mode (described in [Boyeau19]_)
This mode consists of estimating an effect size random variable (e.g., log fold-change) and
performing Bayesian hypothesis testing on this variable.
The `change_fn` function computes the effect size variable :math:`r` based on two inputs
corresponding to the posterior quantities (e.g., normalized expression) in both populations.
Hypotheses:
.. math::
M_1: r \in R_1 ~\text{(effect size r in region inducing differential expression)}
.. math::
M_2: r \notin R_1 ~\text{(no differential expression)}
To characterize the region :math:`R_1`, which induces DE, the user has two choices.
1. A common case is when the region :math:`[-\delta, \delta]` does not induce differential
expression. If the user specifies a threshold delta, we suppose that :math:`R_1 = \mathbb{R} \setminus [-\delta, \delta]`
2. Specify an specific indicator function:
.. math::
f: \mathbb{R} \mapsto \{0, 1\} ~\text{s.t.}~ r \in R_1 ~\text{iff.}~ f(r) = 1.
Decision-making can then be based on the estimates of
.. math::
p(M_1 \mid x_1, x_2).
Both modes require to sample the posterior distributions.
To that purpose, we sample the posterior in the following way:
1. The posterior is sampled `n_samples` times for each subpopulation.
2. For computational efficiency (posterior sampling is quite expensive), instead of
comparing the obtained samples element-wise, we can permute posterior samples.
Remember that computing the Bayes Factor requires sampling :math:`q(z_A \mid x_A)` and :math:`q(z_B \mid x_B)`.
Currently, the code covers several batch handling configurations:
1. If ``use_observed_batches=True``, then batch are considered as observations
and cells' normalized means are conditioned on real batch observations.
2. If case (cell group 1) and control (cell group 2) are conditioned on the same
batch ids. This requires ``set(batchid1) == set(batchid2)`` or ``batchid1 == batchid2 === None``.
3. If case and control are conditioned on different batch ids that do not intersect
i.e., ``set(batchid1) != set(batchid2)`` and ``len(set(batchid1).intersection(set(batchid2))) == 0``.
This function does not cover other cases yet and will warn users in such cases.
Parameters
----------
mode
one of ["vanilla", "change"]
idx1
bool array masking subpopulation cells 1. Should be True where cell is
from associated population
idx2
bool array masking subpopulation cells 2. Should be True where cell is
from associated population
batchid1
List of batch ids for which you want to perform DE Analysis for
subpopulation 1. By default, all ids are taken into account
batchid2
List of batch ids for which you want to perform DE Analysis for
subpopulation 2. By default, all ids are taken into account
use_observed_batches
Whether posterior values are conditioned on observed
batches
n_samples
Number of posterior samples
use_permutation
Activates step 2 described above.
Simply formulated, pairs obtained from posterior sampling
will be randomly permuted so that the number of pairs used
to compute Bayes Factors becomes `m_permutation`.
m_permutation
Number of times we will "mix" posterior samples in step 2.
Only makes sense when `use_permutation=True`
change_fn
function computing effect size based on both posterior values
m1_domain_fn
custom indicator function of effect size regions
inducing differential expression
delta
specific case of region inducing differential expression.
In this case, we suppose that :math:`R \setminus [-\delta, \delta]` does not induce differential expression
(LFC case)
cred_interval_lvls
List of credible interval levels to compute for the posterior
LFC distribution
Returns
-------
Differential expression properties
"""
# if not np.array_equal(self.indices, np.arange(len(self.dataset))):
# logger.warning(
# "Differential expression requires a Posterior object created with all indices."
# )
eps = 1e-8 # used for numerical stability
# Normalized means sampling for both populations
scales_batches_1 = self.scale_sampler(
selection=idx1,
batchid=batchid1,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
)
scales_batches_2 = self.scale_sampler(
selection=idx2,
batchid=batchid2,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
)
px_scale_mean1 = scales_batches_1["scale"].mean(axis=0)
px_scale_mean2 = scales_batches_2["scale"].mean(axis=0)
# Sampling pairs
# The objective of code section below is to ensure than the samples of normalized
# means we consider are conditioned on the same batch id
batchid1_vals = np.unique(scales_batches_1["batch"])
batchid2_vals = np.unique(scales_batches_2["batch"])
create_pairs_from_same_batches = (
set(batchid1_vals) == set(batchid2_vals)
) and not use_observed_batches
if create_pairs_from_same_batches:
# First case: same batch normalization in two groups
logger.debug("Same batches in both cell groups")
n_batches = len(set(batchid1_vals))
n_samples_per_batch = (
m_permutation // n_batches if m_permutation is not None else None
)
scales_1 = []
scales_2 = []
for batch_val in set(batchid1_vals):
# Select scale samples that originate from the same batch id
scales_1_batch = scales_batches_1["scale"][
scales_batches_1["batch"] == batch_val
]
scales_2_batch = scales_batches_2["scale"][
scales_batches_2["batch"] == batch_val
]
# Create more pairs
scales_1_local, scales_2_local = pairs_sampler(
scales_1_batch,
scales_2_batch,
use_permutation=use_permutation,
m_permutation=n_samples_per_batch,
)
scales_1.append(scales_1_local)
scales_2.append(scales_2_local)
scales_1 = np.concatenate(scales_1, axis=0)
scales_2 = np.concatenate(scales_2, axis=0)
else:
logger.debug("Ignoring batch conditionings to compare means")
if len(set(batchid1_vals).intersection(set(batchid2_vals))) >= 1:
warnings.warn(
"Batchids of cells groups 1 and 2 are different but have an non-null "
"intersection. Specific handling of such situations is not implemented "
"yet and batch correction is not trustworthy."
)
scales_1, scales_2 = pairs_sampler(
scales_batches_1["scale"],
scales_batches_2["scale"],
use_permutation=use_permutation,
m_permutation=m_permutation,
)
# Core of function: hypotheses testing based on the posterior samples we obtained above
if mode == "vanilla":
logger.debug("Differential expression using vanilla mode")
proba_m1 = np.mean(scales_1 > scales_2, 0)
proba_m2 = 1.0 - proba_m1
res = dict(
proba_m1=proba_m1,
proba_m2=proba_m2,
bayes_factor=np.log(proba_m1 + eps) - np.log(proba_m2 + eps),
scale1=px_scale_mean1,
scale2=px_scale_mean2,
)
elif mode == "change":
logger.debug("Differential expression using change mode")
# step 1: Construct the change function
def lfc(x, y):
return np.log2(x) - np.log2(y)
if change_fn == "log-fold" or change_fn is None:
change_fn = lfc
elif not callable(change_fn):
raise ValueError("'change_fn' attribute not understood")
# step2: Construct the DE area function
if m1_domain_fn is None:
delta = delta if delta is not None else 0.5
def m1_domain_fn(samples):
return np.abs(samples) >= delta
change_fn_specs = inspect.getfullargspec(change_fn)
domain_fn_specs = inspect.getfullargspec(m1_domain_fn)
if (len(change_fn_specs.args) != 2) | (len(domain_fn_specs.args) != 1):
raise ValueError(
"change_fn should take exactly two parameters as inputs; m1_domain_fn one parameter."
)
try:
change_distribution = change_fn(scales_1, scales_2)
is_de = m1_domain_fn(change_distribution)
except TypeError:
raise TypeError(
"change_fn or m1_domain_fn have has wrong properties."
"Please ensure that these functions have the right signatures and"
"outputs and that they can process numpy arrays"
)
proba_m1 = np.mean(is_de, 0)
change_distribution_props = describe_continuous_distrib(
samples=change_distribution,
credible_intervals_levels=cred_interval_lvls,
)
change_distribution_props = {
"lfc_" + key: val for (key, val) in change_distribution_props.items()
}
res = dict(
proba_de=proba_m1,
proba_not_de=1.0 - proba_m1,
bayes_factor=np.log(proba_m1 + eps) - np.log(1.0 - proba_m1 + eps),
scale1=px_scale_mean1,
scale2=px_scale_mean2,
**change_distribution_props,
)
else:
raise NotImplementedError("Mode {mode} not recognized".format(mode=mode))
return res
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def scale_sampler(
self,
selection: Union[List[bool], np.ndarray],
n_samples: Optional[int] = 5000,
n_samples_per_cell: Optional[int] = None,
batchid: Optional[Sequence[Union[Number, str]]] = None,
use_observed_batches: Optional[bool] = False,
give_mean: Optional[bool] = False,
) -> dict:
"""
Samples the posterior scale using the variational posterior distribution.
Parameters
----------
selection
Mask or list of cell ids to select
n_samples
Number of samples in total per batch (fill either `n_samples_total`
or `n_samples_per_cell`)
n_samples_per_cell
Number of time we sample from each observation per batch
(fill either `n_samples_total` or `n_samples_per_cell`)
batchid
Biological batch for which to sample from.
Default (None) sample from all batches
use_observed_batches
Whether normalized means are conditioned on observed
batches or if observed batches are to be used
give_mean
Return mean of values
Returns
-------
type
Dictionary containing:
`scale`
Posterior aggregated scale samples of shape (n_samples, n_vars)
where n_samples correspond to either:
- n_bio_batches * n_cells * n_samples_per_cell
or
- n_samples_total
`batch`
associated batch ids
"""
# Get overall number of desired samples and desired batches
if batchid is None and not use_observed_batches:
categorical_mappings = self.adata.uns["_scvi"]["categorical_mappings"]
batchid = categorical_mappings["_scvi_batch"]["mapping"]
if use_observed_batches:
if batchid is not None:
raise ValueError("Unconsistent batch policy")
batchid = [None]
if n_samples is None and n_samples_per_cell is None:
n_samples = 5000
elif n_samples_per_cell is not None and n_samples is None:
n_samples = n_samples_per_cell * len(selection)
if (n_samples_per_cell is not None) and (n_samples is not None):
warnings.warn(
"n_samples and n_samples_per_cell were provided. Ignoring n_samples_per_cell"
)
n_samples = int(n_samples / len(batchid))
if n_samples == 0:
warnings.warn("very small sample size, please consider increasing `n_samples`")
n_samples = 2
# Selection of desired cells for sampling
if selection is None:
raise ValueError("selections should be a list of cell subsets indices")
selection = np.asarray(selection)
if selection.dtype is np.dtype("bool"):
if len(selection) < self.adata.shape[0]:
raise ValueError("Mask must be same length as adata.")
selection = np.asarray(np.where(selection)[0].ravel())
# Sampling loop
px_scales = []
batch_ids = []
for batch_idx in batchid:
idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
px_scales.append(
self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
)
batch_idx = batch_idx if batch_idx is not None else np.nan
batch_ids.append([batch_idx] * px_scales[-1].shape[0])
px_scales = np.concatenate(px_scales)
batch_ids = np.concatenate(batch_ids).reshape(-1)
if px_scales.shape[0] != batch_ids.shape[0]:
raise ValueError("sampled scales and batches have inconsistent shapes")
if give_mean:
px_scales = px_scales.mean(0)
return dict(scale=px_scales, batch=batch_ids)
|
def scale_sampler(
self,
selection: Union[List[bool], np.ndarray],
n_samples: Optional[int] = 5000,
n_samples_per_cell: Optional[int] = None,
batchid: Optional[Union[List[int], np.ndarray]] = None,
use_observed_batches: Optional[bool] = False,
give_mean: Optional[bool] = False,
) -> dict:
"""
Samples the posterior scale using the variational posterior distribution.
Parameters
----------
selection
Mask or list of cell ids to select
n_samples
Number of samples in total per batch (fill either `n_samples_total`
or `n_samples_per_cell`)
n_samples_per_cell
Number of time we sample from each observation per batch
(fill either `n_samples_total` or `n_samples_per_cell`)
batchid
Biological batch for which to sample from.
Default (None) sample from all batches
use_observed_batches
Whether normalized means are conditioned on observed
batches or if observed batches are to be used
give_mean
Return mean of values
Returns
-------
type
Dictionary containing:
`scale`
Posterior aggregated scale samples of shape (n_samples, n_vars)
where n_samples correspond to either:
- n_bio_batches * n_cells * n_samples_per_cell
or
- n_samples_total
`batch`
associated batch ids
"""
# Get overall number of desired samples and desired batches
if batchid is None and not use_observed_batches:
# TODO determine if we iterate over all categorical batches from train dataset
# or just the batches in adata
batchid = np.unique(get_from_registry(self.adata, key=_CONSTANTS.BATCH_KEY))
if use_observed_batches:
if batchid is not None:
raise ValueError("Unconsistent batch policy")
batchid = [None]
if n_samples is None and n_samples_per_cell is None:
n_samples = 5000
elif n_samples_per_cell is not None and n_samples is None:
n_samples = n_samples_per_cell * len(selection)
if (n_samples_per_cell is not None) and (n_samples is not None):
warnings.warn(
"n_samples and n_samples_per_cell were provided. Ignoring n_samples_per_cell"
)
n_samples = int(n_samples / len(batchid))
if n_samples == 0:
warnings.warn("very small sample size, please consider increasing `n_samples`")
n_samples = 2
# Selection of desired cells for sampling
if selection is None:
raise ValueError("selections should be a list of cell subsets indices")
selection = np.asarray(selection)
if selection.dtype is np.dtype("bool"):
if len(selection) < self.adata.shape[0]:
raise ValueError("Mask must be same length as adata.")
selection = np.asarray(np.where(selection)[0].ravel())
# Sampling loop
px_scales = []
batch_ids = []
for batch_idx in batchid:
idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
px_scales.append(
self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
)
batch_idx = batch_idx if batch_idx is not None else np.nan
batch_ids.append([batch_idx] * px_scales[-1].shape[0])
px_scales = np.concatenate(px_scales)
batch_ids = np.concatenate(batch_ids).reshape(-1)
if px_scales.shape[0] != batch_ids.shape[0]:
raise ValueError("sampled scales and batches have inconsistent shapes")
if give_mean:
px_scales = px_scales.mean(0)
return dict(scale=px_scales, batch=batch_ids)
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def _generate_synthetic(
batch_size: int = 128,
n_genes: int = 100,
n_proteins: int = 100,
n_batches: int = 2,
n_labels: int = 3,
run_setup_anndata: bool = True,
) -> AnnData:
data = np.random.negative_binomial(5, 0.3, size=(batch_size * n_batches, n_genes))
mask = np.random.binomial(n=1, p=0.7, size=(batch_size * n_batches, n_genes))
data = data * mask # We put the batch index first
labels = np.random.randint(0, n_labels, size=(batch_size * n_batches,))
labels = np.array(["label_%d" % i for i in labels])
batch = []
for i in range(n_batches):
batch += ["batch_{}".format(i)] * batch_size
adata = AnnData(data)
adata.obs["batch"] = pd.Categorical(batch)
adata.obs["labels"] = pd.Categorical(labels)
# Protein measurements
p_data = np.random.negative_binomial(5, 0.3, size=(adata.shape[0], n_proteins))
adata.obsm["protein_expression"] = p_data
adata.uns["protein_names"] = np.arange(n_proteins).astype(str)
if run_setup_anndata:
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
)
return adata
|
def _generate_synthetic(
batch_size: int = 200,
n_genes: int = 100,
n_proteins: int = 100,
n_batches: int = 2,
n_labels: int = 3,
run_setup_anndata: bool = True,
) -> AnnData:
data = np.random.negative_binomial(5, 0.3, size=(batch_size * n_batches, n_genes))
mask = np.random.binomial(n=1, p=0.7, size=(batch_size * n_batches, n_genes))
data = data * mask # We put the batch index first
labels = np.random.randint(0, n_labels, size=(batch_size * n_batches,))
labels = np.array(["undefined_%d" % i for i in labels])
batch = []
for i in range(n_batches):
batch += [i] * batch_size
adata = AnnData(data)
adata.obs["batch"] = pd.Categorical(batch)
adata.obs["labels"] = pd.Categorical(labels)
# Protein measurements
p_data = np.random.negative_binomial(5, 0.3, size=(adata.shape[0], n_proteins))
adata.obsm["protein_expression"] = p_data
adata.uns["protein_names"] = np.arange(n_proteins).astype(str)
if run_setup_anndata:
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
)
return adata
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def _get_batch_code_from_category(
adata: anndata.AnnData, category: Sequence[Union[Number, str]]
):
if not isinstance(category, IterableClass) or isinstance(category, str):
category = [category]
categorical_mappings = adata.uns["_scvi"]["categorical_mappings"]
batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
batch_code = []
for cat in category:
if cat is None:
batch_code.append(None)
elif cat not in batch_mappings:
raise ValueError('"{}" not a valid batch category.'.format(cat))
else:
batch_loc = np.where(batch_mappings == cat)[0][0]
batch_code.append(batch_loc)
return batch_code
|
def _get_batch_code_from_category(
adata: anndata.AnnData, category: Sequence[Union[int, str]]
):
categorical_mappings = adata.uns["_scvi"]["categorical_mappings"]
batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
batch_code = []
for cat in category:
if cat is None:
batch_code.append(None)
elif cat not in batch_mappings:
raise ValueError('"{}" not a valid batch category.'.format(cat))
else:
batch_loc = np.where(batch_mappings == cat)[0][0]
batch_code.append(batch_loc)
return batch_code
|
https://github.com/YosefLab/scvi-tools/issues/823
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-143-b1df721788f5> in <module>
2 for tissue in np.unique(adata.obs['tissue']):
3 sub_adata = adata[adata.obs['tissue']==tissue]
----> 4 de_celltype[tissue] = vae.differential_expression(sub_adata, groupby = 'Propagated.Annotation', batch_correction=True)
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
187 batch_size=batch_size,
188 )
--> 189 result = _de_core(
190 adata,
191 model_fn,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
59 cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
60
---> 61 all_info = dc.get_bayes_factors(
62 cell_idx1,
63 cell_idx2,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in get_bayes_factors(self, idx1, idx2, mode, batchid1, batchid2, use_observed_batches, n_samples, use_permutation, m_permutation, change_fn, m1_domain_fn, delta, cred_interval_lvls)
168 eps = 1e-8 # used for numerical stability
169 # Normalized means sampling for both populations
--> 170 scales_batches_1 = self.scale_sampler(
171 selection=idx1,
172 batchid=batchid1,
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/utils/differential.py in scale_sampler(self, selection, n_samples, n_samples_per_cell, batchid, use_observed_batches, give_mean)
392 idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
393 px_scales.append(
--> 394 self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
395 )
396 batch_idx = batch_idx if batch_idx is not None else np.nan
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/torch/autograd/grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/core/models/rnamixin.py in get_normalized_expression(self, adata, indices, transform_batch, gene_list, library_size, n_samples, batch_size, return_mean, return_numpy)
82 scdl = self._make_scvi_dl(adata=adata, indices=indices, batch_size=batch_size)
83 if transform_batch is not None:
---> 84 transform_batch = _get_batch_code_from_category(adata, transform_batch)
85
86 if gene_list is None:
/data/yosef2/users/chenling/miniconda3/envs/scvi-tools/lib/python3.8/site-packages/scvi/model/_utils.py in _get_batch_code_from_category(adata, category)
131 batch_mappings = categorical_mappings["_scvi_batch"]["mapping"]
132 if category not in batch_mappings:
--> 133 raise ValueError('"{}" not a valid batch category.'.format(category))
134 return np.where(batch_mappings == category)[0][0]
ValueError: "0" not a valid batch category.```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION
<!-- Relevant screenshots -->
|
ValueError
|
def _de_core(
adata,
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
all_stats_fn,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
**kwargs,
):
"""Internal function for DE interface."""
if group1 is None and idx1 is None:
group1 = adata.obs[groupby].cat.categories.tolist()
if isinstance(group1, str):
group1 = [group1]
# make a temp obs key using indices
temp_key = None
if idx1 is not None:
idx1 = np.asarray(idx1).ravel()
g1_key = "one"
obs_col = np.array(["None"] * adata.shape[0], dtype=str)
obs_col[idx1] = g1_key
group2 = None if idx2 is None else "two"
if idx2 is not None:
idx2 = np.asarray(idx2).ravel()
obs_col[idx2] = group2
temp_key = "_scvi_temp_de"
adata.obs[temp_key] = obs_col
groupby = temp_key
group1 = [g1_key]
df_results = []
dc = DifferentialComputation(model_fn, adata)
for g1 in track(
group1,
description="DE...",
):
cell_idx1 = (adata.obs[groupby] == g1).to_numpy().ravel()
if group2 is None:
cell_idx2 = ~cell_idx1
else:
cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()
all_info = dc.get_bayes_factors(
cell_idx1,
cell_idx2,
mode=mode,
delta=delta,
batchid1=batchid1,
batchid2=batchid2,
use_observed_batches=not batch_correction,
**kwargs,
)
if all_stats is True:
genes_properties_dict = all_stats_fn(adata, cell_idx1, cell_idx2)
all_info = {**all_info, **genes_properties_dict}
res = pd.DataFrame(all_info, index=col_names)
sort_key = "proba_de" if mode == "change" else "bayes_factor"
res = res.sort_values(by=sort_key, ascending=False)
if idx1 is None:
g2 = "Rest" if group2 is None else group2
res["comparison"] = "{} vs {}".format(g1, g2)
df_results.append(res)
if temp_key is not None:
del adata.obs[temp_key]
result = pd.concat(df_results, axis=0)
return result
|
def _de_core(
adata,
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
all_stats_fn,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
**kwargs,
):
"""Internal function for DE interface."""
if group1 is None and idx1 is None:
group1 = adata.obs[groupby].cat.categories.tolist()
if isinstance(group1, str):
group1 = [group1]
# make a temp obs key using indices
temp_key = None
if idx1 is not None:
idx1 = np.asarray(idx1).ravel()
g1_key = "one"
obs_col = np.array(["None"] * adata.shape[0], dtype=str)
obs_col[idx1] = g1_key
group2 = None if idx2 is None else "two"
if idx2 is not None:
idx2 = np.asarray(idx2).ravel()
obs_col[idx2] = group2
temp_key = "_scvi_temp_de"
adata.obs[temp_key] = obs_col
groupby = temp_key
group1 = [g1_key]
df_results = []
dc = DifferentialComputation(model_fn, adata)
for g1 in track(
group1,
description="DE...",
):
cell_idx1 = (adata.obs[groupby] == g1).ravel()
if group2 is None:
cell_idx2 = ~cell_idx1
else:
cell_idx2 = adata.obs[groupby] == group2
all_info = dc.get_bayes_factors(
cell_idx1,
cell_idx2,
mode=mode,
delta=delta,
batchid1=batchid1,
batchid2=batchid2,
use_observed_batches=not batch_correction,
**kwargs,
)
if all_stats is True:
genes_properties_dict = all_stats_fn(adata, cell_idx1, cell_idx2)
all_info = {**all_info, **genes_properties_dict}
res = pd.DataFrame(all_info, index=col_names)
sort_key = "proba_de" if mode == "change" else "bayes_factor"
res = res.sort_values(by=sort_key, ascending=False)
if idx1 is None:
g2 = "Rest" if group2 is None else group2
res["comparison"] = "{} vs {}".format(g1, g2)
df_results.append(res)
if temp_key is not None:
del adata.obs[temp_key]
result = pd.concat(df_results, axis=0)
return result
|
https://github.com/YosefLab/scvi-tools/issues/783
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-17-d7b37e40fc13> in <module>()
5 idx1=idx1,
6 idx2=idx2,
----> 7 mode='change',
8 )
7 frames
/usr/local/lib/python3.6/dist-packages/scvi/core/models/rnamixin.py in differential_expression(self, adata, groupby, group1, group2, idx1, idx2, mode, delta, batch_size, all_stats, batch_correction, batchid1, batchid2, **kwargs)
202 delta,
203 batch_correction,
--> 204 **kwargs,
205 )
206
/usr/local/lib/python3.6/dist-packages/scvi/core/models/_utils.py in _de_core(adata, model_fn, groupby, group1, group2, idx1, idx2, all_stats, all_stats_fn, col_names, mode, batchid1, batchid2, delta, batch_correction, **kwargs)
70
71 if all_stats is True:
---> 72 genes_properties_dict = all_stats_fn(adata, cell_idx1, cell_idx2)
73 all_info = {**all_info, **genes_properties_dict}
74
/usr/local/lib/python3.6/dist-packages/scvi/model/_utils.py in scrna_raw_counts_properties(adata, idx1, idx2)
35 data = get_from_registry(adata, _CONSTANTS.X_KEY)
36 data1 = data[idx1]
---> 37 data2 = data[idx2]
38 mean1 = np.asarray((data1).mean(axis=0)).ravel()
39 mean2 = np.asarray((data2).mean(axis=0)).ravel()
/usr/local/lib/python3.6/dist-packages/scipy/sparse/_index.py in __getitem__(self, key)
57 return self._get_arrayXint(row, col)
58 elif isinstance(col, slice):
---> 59 return self._get_arrayXslice(row, col)
60 else: # row.ndim == 2
61 if isinstance(col, INT_TYPES):
/usr/local/lib/python3.6/dist-packages/scipy/sparse/csr.py in _get_arrayXslice(self, row, col)
323 col = np.arange(*col.indices(self.shape[1]))
324 return self._get_arrayXarray(row, col)
--> 325 return self._major_index_fancy(row)._get_submatrix(minor=col)
326
327
/usr/local/lib/python3.6/dist-packages/scipy/sparse/compressed.py in _major_index_fancy(self, idx)
688 idx_dtype = self.indices.dtype
689 res_indptr = np.zeros(M+1, dtype=idx_dtype)
--> 690 np.cumsum(row_nnz[idx], out=res_indptr[1:])
691
692 nnz = res_indptr[-1]
<__array_function__ internals> in cumsum(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py in cumsum(a, axis, dtype, out)
2468
2469 """
-> 2470 return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
2471
2472
/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
59
60 try:
---> 61 return bound(*args, **kwds)
62 except TypeError:
63 # A TypeError occurs if the object does have such a method in its
ValueError: provided out is the wrong size for the reduction
|
ValueError
|
def _setup_extra_categorical_covs(
adata: anndata.AnnData,
categorical_covariate_keys: List[str],
category_dict: Dict[str, List[str]] = None,
):
"""
Setup obsm df for extra categorical covariates.
Parameters
----------
adata
AnnData to setup
categorical_covariate_keys
List of keys in adata.obs with categorical data
category_dict
Optional dictionary with keys being keys of categorical data in obs
and values being precomputed categories for each obs vector
"""
for key in categorical_covariate_keys:
_assert_key_in_obs(adata, key)
cat_loc = "obsm"
cat_key = "_scvi_extra_categoricals"
one_hots = []
categories = {}
for key in categorical_covariate_keys:
cat = adata.obs[key]
if category_dict is not None:
possible_cats = category_dict[key]
cat = cat.astype(CategoricalDtype(categories=possible_cats))
else:
categories[key] = cat.astype("category").cat.categories.to_numpy(copy=True)
one_hot_rep = pd.get_dummies(cat, prefix=key)
one_hots.append(one_hot_rep)
adata.obsm[cat_key] = pd.concat(one_hots, axis=1)
store_cats = categories if category_dict is None else category_dict
adata.uns["_scvi"]["extra_categorical_mappings"] = store_cats
return cat_loc, cat_key
|
def _setup_extra_categorical_covs(
adata: anndata.AnnData,
categorical_covariate_keys: List[str],
category_dict: Dict[str, List[str]] = None,
):
"""
Setup obsm df for extra categorical covariates.
Parameters
----------
adata
AnnData to setup
categorical_covariate_keys
List of keys in adata.obs with categorical data
category_dict
Optional dictionary with keys being keys of categorical data in obs
and values being precomputed categories for each obs vector
"""
for key in categorical_covariate_keys:
_assert_key_in_obs(adata, key)
cat_loc = "obsm"
cat_key = "_scvi_extra_categoricals"
one_hots = []
categories = {}
for key in categorical_covariate_keys:
cat = adata.obs[key]
if category_dict is not None:
possible_cats = category_dict[key]
cat = cat.astype(CategoricalDtype(categories=possible_cats))
else:
categories[key] = cat.astype("category").cat.categories
one_hot_rep = pd.get_dummies(cat, prefix=key)
one_hots.append(one_hot_rep)
adata.obsm[cat_key] = pd.concat(one_hots, axis=1)
store_cats = categories if category_dict is None else category_dict
adata.uns["_scvi"]["extra_categorical_mappings"] = store_cats
return cat_loc, cat_key
|
https://github.com/YosefLab/scvi-tools/issues/792
|
NotImplementedError: Failed to write value for uns/_scvi/categorical_mappings/_scvi_batch/mapping, since a writer for type <class 'pandas.core.indexes.base.Index'> has not been implemented yet.
Above error raised while writing key 'uns/_scvi/categorical_mappings/_scvi_batch/mapping' of <class 'h5py._hl.files.File'> from /.
|
NotImplementedError
|
def _make_obs_column_categorical(
adata, column_key, alternate_column_key, categorical_dtype=None
):
"""
Makes the data in column_key in obs all categorical.
If adata.obs[column_key] is not categorical, will categorize
and save to .obs[alternate_column_key]
"""
if categorical_dtype is None:
categorical_obs = adata.obs[column_key].astype("category")
else:
categorical_obs = adata.obs[column_key].astype(categorical_dtype)
# put codes in .obs[alternate_column_key]
codes = categorical_obs.cat.codes
mapping = categorical_obs.cat.categories.to_numpy(copy=True)
if -1 in np.unique(codes):
received_categories = adata.obs[column_key].astype("category").cat.categories
raise ValueError(
'Making .obs["{}"] categorical failed. Expected categories: {}. '
"Received categories: {}. ".format(column_key, mapping, received_categories)
)
adata.obs[alternate_column_key] = codes
# store categorical mappings
store_dict = {
alternate_column_key: {"original_key": column_key, "mapping": mapping}
}
if "categorical_mappings" not in adata.uns["_scvi"].keys():
adata.uns["_scvi"].update({"categorical_mappings": store_dict})
else:
adata.uns["_scvi"]["categorical_mappings"].update(store_dict)
# make sure each category contains enough cells
unique, counts = np.unique(adata.obs[alternate_column_key], return_counts=True)
if np.min(counts) < 3:
category = unique[np.argmin(counts)]
warnings.warn(
"Category {} in adata.obs['{}'] has fewer than 3 cells. SCVI may not train properly.".format(
category, alternate_column_key
)
)
# possible check for continuous?
if len(unique) > (adata.shape[0] / 3):
warnings.warn(
"Is adata.obs['{}'] continuous? SCVI doesn't support continuous obs yet."
)
return alternate_column_key
|
def _make_obs_column_categorical(
adata, column_key, alternate_column_key, categorical_dtype=None
):
"""
Makes the data in column_key in obs all categorical.
If adata.obs[column_key] is not categorical, will categorize
and save to .obs[alternate_column_key]
"""
if categorical_dtype is None:
categorical_obs = adata.obs[column_key].astype("category")
else:
categorical_obs = adata.obs[column_key].astype(categorical_dtype)
# put codes in .obs[alternate_column_key]
codes = categorical_obs.cat.codes
mapping = categorical_obs.cat.categories
if -1 in np.unique(codes):
received_categories = adata.obs[column_key].astype("category").cat.categories
raise ValueError(
'Making .obs["{}"] categorical failed. Expected categories: {}. '
"Received categories: {}. ".format(column_key, mapping, received_categories)
)
adata.obs[alternate_column_key] = codes
# store categorical mappings
store_dict = {
alternate_column_key: {"original_key": column_key, "mapping": mapping}
}
if "categorical_mappings" not in adata.uns["_scvi"].keys():
adata.uns["_scvi"].update({"categorical_mappings": store_dict})
else:
adata.uns["_scvi"]["categorical_mappings"].update(store_dict)
# make sure each category contains enough cells
unique, counts = np.unique(adata.obs[alternate_column_key], return_counts=True)
if np.min(counts) < 3:
category = unique[np.argmin(counts)]
warnings.warn(
"Category {} in adata.obs['{}'] has fewer than 3 cells. SCVI may not train properly.".format(
category, alternate_column_key
)
)
# possible check for continuous?
if len(unique) > (adata.shape[0] / 3):
warnings.warn(
"Is adata.obs['{}'] continuous? SCVI doesn't support continuous obs yet."
)
return alternate_column_key
|
https://github.com/YosefLab/scvi-tools/issues/792
|
NotImplementedError: Failed to write value for uns/_scvi/categorical_mappings/_scvi_batch/mapping, since a writer for type <class 'pandas.core.indexes.base.Index'> has not been implemented yet.
Above error raised while writing key 'uns/_scvi/categorical_mappings/_scvi_batch/mapping' of <class 'h5py._hl.files.File'> from /.
|
NotImplementedError
|
def _check_anndata_setup_equivalence(adata_source, adata_target):
"""Checks if target setup is equivalent to source."""
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
adata = adata_target
stats = _scvi_dict["summary_stats"]
use_raw = _scvi_dict["use_raw"]
target_n_vars = adata.shape[1] if not use_raw else adata.raw.shape[1]
error_msg = (
"Number of {} in anndata different from initial anndata used for training."
)
if target_n_vars != stats["n_genes"]:
raise ValueError(error_msg.format("genes"))
error_msg = (
"There are more {} categories in the data than were originally registered. "
+ "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
)
self_categoricals = _scvi_dict["categorical_mappings"]
self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
# check if the categories are the same
error_msg = (
"Categorial encoding for {} is not the same between "
+ "the anndata used to train the model and the anndata just passed in. "
+ "Categorical encoding needs to be same elements, same order, and same datatype.\n"
+ "Expected categories: {}. Received categories: {}.\n"
+ "Try running `dataset.transfer_anndata_setup()` or deleting `adata.uns['_scvi']."
)
if not _assert_equal_mapping(self_batch_mapping, adata_batch_mapping):
raise ValueError(
error_msg.format("batch", self_batch_mapping, adata_batch_mapping)
)
self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
if not _assert_equal_mapping(self_labels_mapping, adata_labels_mapping):
raise ValueError(
error_msg.format("label", self_labels_mapping, adata_labels_mapping)
)
# validate any extra categoricals
if "extra_categorical_mappings" in _scvi_dict.keys():
target_extra_cat_maps = adata.uns["_scvi"]["extra_categorical_mappings"]
for key, val in _scvi_dict["extra_categorical_mappings"].items():
target_map = target_extra_cat_maps[key]
if not _assert_equal_mapping(val, target_map):
raise ValueError(error_msg.format(key, val, target_map))
# validate any extra continuous covs
if "extra_continuous_keys" in _scvi_dict.keys():
if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
if not _scvi_dict["extra_continuous_keys"].equals(target_cont_keys):
raise ValueError(
"extra_continous_keys are not the same between source and target"
)
|
def _check_anndata_setup_equivalence(adata_source, adata_target):
"""Checks if target setup is equivalent to source."""
if isinstance(adata_source, anndata.AnnData):
_scvi_dict = adata_source.uns["_scvi"]
else:
_scvi_dict = adata_source
adata = adata_target
stats = _scvi_dict["summary_stats"]
use_raw = _scvi_dict["use_raw"]
target_n_vars = adata.shape[1] if not use_raw else adata.raw.shape[1]
error_msg = (
"Number of {} in anndata different from initial anndata used for training."
)
if target_n_vars != stats["n_genes"]:
raise ValueError(error_msg.format("genes"))
error_msg = (
"There are more {} categories in the data than were originally registered. "
+ "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
)
self_categoricals = _scvi_dict["categorical_mappings"]
self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
# check if the categories are the same
error_msg = (
"Categorial encoding for {} is not the same between "
+ "the anndata used to train the model and the anndata just passed in. "
+ "Categorical encoding needs to be same elements, same order, and same datatype.\n"
+ "Expected categories: {}. Received categories: {}.\n"
+ "Try running `dataset.transfer_anndata_setup()` or deleting `adata.uns['_scvi']."
)
if np.sum(self_batch_mapping == adata_batch_mapping) != len(self_batch_mapping):
raise ValueError(
error_msg.format("batch", self_batch_mapping, adata_batch_mapping)
)
self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
if np.sum(self_labels_mapping == adata_labels_mapping) != len(self_labels_mapping):
raise ValueError(
error_msg.format("label", self_labels_mapping, adata_labels_mapping)
)
# validate any extra categoricals
if "extra_categorical_mappings" in _scvi_dict.keys():
target_extra_cat_maps = adata.uns["_scvi"]["extra_categorical_mappings"]
for key, val in _scvi_dict["extra_categorical_mappings"].items():
target_map = target_extra_cat_maps[key]
if np.sum(val == target_map) != len(val):
raise ValueError(error_msg.format(key, val, target_map))
# validate any extra continuous covs
if "extra_continuous_keys" in _scvi_dict.keys():
if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
if not _scvi_dict["extra_continuous_keys"].equals(target_cont_keys):
raise ValueError(
"extra_continous_keys are not the same between source and target"
)
|
https://github.com/YosefLab/scvi-tools/issues/792
|
NotImplementedError: Failed to write value for uns/_scvi/categorical_mappings/_scvi_batch/mapping, since a writer for type <class 'pandas.core.indexes.base.Index'> has not been implemented yet.
Above error raised while writing key 'uns/_scvi/categorical_mappings/_scvi_batch/mapping' of <class 'h5py._hl.files.File'> from /.
|
NotImplementedError
|
def _download(url: str, save_path: str, filename: str):
"""Writes data from url to file."""
if os.path.exists(os.path.join(save_path, filename)):
logger.info("File %s already downloaded" % (os.path.join(save_path, filename)))
return
try:
r = urllib.request.urlopen(url)
except HTTPError:
req = urllib.request.Request(url, headers={"User-Agent": "Magic Browser"})
r = urllib.request.urlopen(req)
logger.info("Downloading file at %s" % os.path.join(save_path, filename))
def read_iter(file, block_size=1000):
"""Given a file 'file', returns an iterator that returns bytes of
size 'blocksize' from the file, using read().
"""
while True:
block = file.read(block_size)
if not block:
break
yield block
# Create the path to save the data
if not os.path.exists(save_path):
os.makedirs(save_path)
with open(os.path.join(save_path, filename), "wb") as f:
for data in read_iter(r):
f.write(data)
|
def _download(url: str, save_path: str, filename: str):
"""Writes data from url to file."""
if os.path.exists(os.path.join(save_path, filename)):
logger.info("File %s already downloaded" % (os.path.join(save_path, filename)))
return
r = urllib.request.urlopen(url)
logger.info("Downloading file at %s" % os.path.join(save_path, filename))
def read_iter(file, block_size=1000):
"""Given a file 'file', returns an iterator that returns bytes of
size 'blocksize' from the file, using read().
"""
while True:
block = file.read(block_size)
if not block:
break
yield block
# Create the path to save the data
if not os.path.exists(save_path):
os.makedirs(save_path)
with open(os.path.join(save_path, filename), "wb") as f:
for data in read_iter(r):
f.write(data)
|
https://github.com/YosefLab/scvi-tools/issues/706
|
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
<ipython-input-9-f56b0a5f9d5d> in <module>
2 get_ipython().run_line_magic('autoreload', '2')
3 from scvi.dataset import PbmcDataset
----> 4 d = PbmcDataset(save_path = 'test_asdffolder')
~/scVI/scvi/dataset/pbmc.py in __init__(self, save_path, save_path_10X, remove_extracted_data, delayed_populating)
54 filenames=["gene_info_pbmc.csv", "pbmc_metadata.pickle"],
55 save_path=save_path,
---> 56 delayed_populating=delayed_populating,
57 )
58 # this downloads the necessary file for a future call to populate
~/scVI/scvi/dataset/dataset.py in __init__(self, urls, filenames, save_path, delayed_populating)
2017 self.download()
2018 if not delayed_populating:
-> 2019 self.populate()
2020
2021 def download(self):
~/scVI/scvi/dataset/pbmc.py in populate(self)
83 save_path=self.save_path_10X,
84 remove_extracted_data=self.remove_extracted_data,
---> 85 measurement_names_column=0,
86 ),
87 Dataset10X(
~/scVI/scvi/dataset/dataset10X.py in __init__(self, dataset_name, filename, save_path, url, type, dense, measurement_names_column, remove_extracted_data, delayed_populating)
154 filenames=filename,
155 save_path=save_path,
--> 156 delayed_populating=delayed_populating,
157 )
158
~/scVI/scvi/dataset/dataset.py in __init__(self, urls, filenames, save_path, delayed_populating)
2015
2016 self.save_path = os.path.abspath(save_path)
-> 2017 self.download()
2018 if not delayed_populating:
2019 self.populate()
~/scVI/scvi/dataset/dataset.py in download(self)
2021 def download(self):
2022 for url, download_name in zip(self.urls, self.filenames):
-> 2023 _download(url, self.save_path, download_name)
2024
2025 @abstractmethod
~/scVI/scvi/dataset/dataset.py in _download(url, save_path, filename)
2041 # req = urllib.request.Request(url, headers={"User-Agent": "Magic Browser"})
2042 # r = urllib.request.urlopen(req)
-> 2043 r = urllib.request.urlopen(url)
2044 logger.info("Downloading file at %s" % os.path.join(save_path, filename))
2045
~/anaconda3/envs/scvi/lib/python3.6/urllib/request.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
221 else:
222 opener = _opener
--> 223 return opener.open(url, data, timeout)
224
225 def install_opener(opener):
~/anaconda3/envs/scvi/lib/python3.6/urllib/request.py in open(self, fullurl, data, timeout)
530 for processor in self.process_response.get(protocol, []):
531 meth = getattr(processor, meth_name)
--> 532 response = meth(req, response)
533
534 return response
~/anaconda3/envs/scvi/lib/python3.6/urllib/request.py in http_response(self, request, response)
640 if not (200 <= code < 300):
641 response = self.parent.error(
--> 642 'http', request, response, code, msg, hdrs)
643
644 return response
~/anaconda3/envs/scvi/lib/python3.6/urllib/request.py in error(self, proto, *args)
568 if http_err:
569 args = (dict, 'default', 'http_error_default') + orig_args
--> 570 return self._call_chain(*args)
571
572 # XXX probably also want an abstract factory that knows when it makes
~/anaconda3/envs/scvi/lib/python3.6/urllib/request.py in _call_chain(self, chain, kind, meth_name, *args)
502 for handler in handlers:
503 func = getattr(handler, meth_name)
--> 504 result = func(*args)
505 if result is not None:
506 return result
~/anaconda3/envs/scvi/lib/python3.6/urllib/request.py in http_error_default(self, req, fp, code, msg, hdrs)
648 class HTTPDefaultErrorHandler(BaseHandler):
649 def http_error_default(self, req, fp, code, msg, hdrs):
--> 650 raise HTTPError(req.full_url, code, msg, hdrs, fp)
651
652 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 403: Forbidden```
#### Versions:
<!-- Output of scvi.__version__ -->
VERSION 0.6.6
|
HTTPError
|
def __init__(self):
# registers
self.dataset_versions = set()
self.gene_attribute_names = set()
self.cell_attribute_names = set()
self.cell_categorical_attribute_names = set()
self.attribute_mappings = defaultdict(list)
self.cell_measurements_col_mappings = dict()
# initialize attributes
self._X = None
self._batch_indices = None
self._labels = None
self.n_batches = None
self.n_labels = None
self.gene_names = None
self.cell_types = None
self.local_means = None
self.local_vars = None
self._norm_X = None
self._corrupted_X = None
# attributes that should not be set by initialization methods
self.protected_attributes = ["X", "_X"]
|
def __init__(self):
# registers
self.dataset_versions = set()
self.gene_attribute_names = set()
self.cell_attribute_names = set()
self.cell_categorical_attribute_names = set()
self.attribute_mappings = defaultdict(list)
self.cell_measurements_col_mappings = dict()
# initialize attributes
self._X = None
self._batch_indices = None
self._labels = None
self.n_batches = None
self.n_labels = None
self.gene_names = None
self.cell_types = None
self.local_means = None
self.local_vars = None
self._norm_X = None
self._corrupted_X = None
# attributes that should not be set by initialization methods
self.protected_attributes = ["X"]
|
https://github.com/YosefLab/scvi-tools/issues/704
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-89-02337f1f2d24> in <module>
1 adata2.obs = adata2.obs[['n_genes']]
2 adata2.obs.columns = ['_X']
----> 3 scvi_data = AnnDatasetFromAnnData(adata2)
~/miniconda3/envs/solo-sc/lib/python3.7/site-packages/scvi/dataset/anndataset.py in __init__(self, ad, batch_label, ctype_label, class_label, use_raw, cell_measurements_col_mappings)
91 cell_types=cell_types,
92 cell_attributes_dict=obs,
---> 93 gene_attributes_dict=var,
94 )
95 self.filter_cells_by_count()
~/miniconda3/envs/solo-sc/lib/python3.7/site-packages/scvi/dataset/dataset.py in populate_from_data(self, X, Ys, batch_indices, labels, gene_names, cell_types, cell_attributes_dict, gene_attributes_dict, remap_attributes)
200 if gene_attributes_dict:
201 for attribute_name, attribute_value in gene_attributes_dict.items():
--> 202 self.initialize_gene_attribute(attribute_name, attribute_value)
203
204 if remap_attributes:
~/miniconda3/envs/solo-sc/lib/python3.7/site-packages/scvi/dataset/dataset.py in initialize_gene_attribute(self, attribute_name, attribute)
779 )
780 attribute_name = valid_attribute_name
--> 781 if not self.nb_genes == len(attribute):
782 raise ValueError(
783 "Number of genes ({n_genes}) and length of gene attribute ({n_attr}) mismatch".format(
~/miniconda3/envs/solo-sc/lib/python3.7/site-packages/scvi/dataset/dataset.py in nb_genes(self)
641 @property
642 def nb_genes(self) -> int:
--> 643 return self.X.shape[1]
644
645 @property
IndexError: tuple index out of range
|
IndexError
|
def __init__(
self,
filename_or_anndata: Union[str, anndata.AnnData],
save_path: str = "data/",
url: str = None,
new_n_genes: bool = False,
subset_genes: List[int] = None,
):
if type(filename_or_anndata) == str:
self.download_name = filename_or_anndata
self.save_path = save_path
self.url = url
data, gene_names, batch_indices, cell_types, labels = (
self.download_and_preprocess()
)
elif isinstance(filename_or_anndata, anndata.AnnData):
ad = filename_or_anndata
data, gene_names, batch_indices, cell_types, labels = (
self.extract_data_from_anndata(ad)
)
else:
raise Exception(
"Please provide a filename of an AnnData file or an already loaded AnnData object"
)
X, local_means, local_vars, batch_indices_, labels = (
GeneExpressionDataset.get_attributes_from_matrix(data, labels=labels)
)
batch_indices = batch_indices if batch_indices is not None else batch_indices_
super().__init__(
X,
local_means,
local_vars,
batch_indices,
labels,
gene_names=gene_names,
cell_types=cell_types,
)
self.subsample_genes(new_n_genes=new_n_genes, subset_genes=subset_genes)
|
def __init__(
self,
filename_or_anndata,
save_path="data/",
url=None,
new_n_genes=False,
subset_genes=None,
):
""" """
if type(filename_or_anndata) == str:
self.download_name = filename_or_anndata
self.save_path = save_path
self.url = url
data, gene_names, batch_indices, cell_types, labels = (
self.download_and_preprocess()
)
elif isinstance(filename_or_anndata, anndata.AnnData):
ad = filename_or_anndata
data, gene_names, batch_indices, cell_types, labels = (
self.extract_data_from_anndata(ad)
)
else:
raise Exception(
"Please provide a filename of an AnnData file or an already loaded AnnData object"
)
X, local_means, local_vars, batch_indices_, labels = (
GeneExpressionDataset.get_attributes_from_matrix(data, labels=labels)
)
batch_indices = batch_indices if batch_indices is not None else batch_indices_
super().__init__(
X,
local_means,
local_vars,
batch_indices,
labels,
gene_names=gene_names,
cell_types=cell_types,
)
self.subsample_genes(new_n_genes=new_n_genes, subset_genes=subset_genes)
|
https://github.com/YosefLab/scvi-tools/issues/288
|
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-3-138de299bfb8> in <module>
1 # load
2 dataset = AnnDataset('data/dataset.h5ad',
----> 3 save_path=path)
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in __init__(self, filename, save_path, url, new_n_genes, subset_genes)
36 self.url = url
37
---> 38 data, gene_names = self.download_and_preprocess()
39
40 super().__init__(*GeneExpressionDataset.get_attributes_from_matrix(data),
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/dataset.py in download_and_preprocess(self)
61 def download_and_preprocess(self):
62 self.download()
---> 63 return self.preprocess()
64
65 def collate_fn(self, batch):
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in preprocess(self)
52 data = ad.X.copy() # Dense
53 else:
---> 54 data = ad.X.toarray() # Sparse
55 select = data.sum(axis=1) > 0 # Take out cells that doesn't express any gene
56 data = data[select, :]
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/compressed.py in toarray(self, order, out)
960 if out is None and order is None:
961 order = self._swap('cf')[0]
--> 962 out = self._process_toarray_args(order, out)
963 if not (out.flags.c_contiguous or out.flags.f_contiguous):
964 raise ValueError('Output array must be C or F contiguous')
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/base.py in _process_toarray_args(self, order, out)
1185 return out
1186 else:
-> 1187 return np.zeros(self.shape, dtype=self.dtype, order=order)
1188
1189
MemoryError:
|
MemoryError
|
def extract_data_from_anndata(self, ad: anndata.AnnData):
data, gene_names, batch_indices, cell_types, labels = None, None, None, None, None
self.obs = (
ad.obs
) # provide access to observation annotations from the underlying AnnData object.
# treat all possible cases according to anndata doc
if isinstance(ad.X, np.ndarray):
data = ad.X.copy()
if isinstance(ad.X, pd.DataFrame):
data = ad.X.values
if isinstance(ad.X, csr_matrix):
# keep sparsity above 1 Gb in dense form
if reduce(operator.mul, ad.X.shape) * ad.X.dtype.itemsize < 1e9:
data = ad.X.toarray()
else:
data = ad.X.copy()
gene_names = np.array(ad.var.index.values, dtype=str)
if "batch_indices" in self.obs.columns:
batch_indices = self.obs["batch_indices"].values
if "cell_types" in self.obs.columns:
cell_types = self.obs["cell_types"]
cell_types = cell_types.drop_duplicates().values.astype("str")
if "labels" in self.obs.columns:
labels = self.obs["labels"]
elif "cell_types" in self.obs.columns:
labels = self.obs["cell_types"].rank(method="dense").values.astype("int")
return data, gene_names, batch_indices, cell_types, labels
|
def extract_data_from_anndata(self, ad: anndata.AnnData):
data, gene_names, batch_indices, cell_types, labels = None, None, None, None, None
self.obs = (
ad.obs
) # provide access to observation annotations from the underlying AnnData object.
if isinstance(ad.X, np.ndarray):
data = ad.X.copy() # Dense
else:
data = ad.X.toarray() # Sparse
select = data.sum(axis=1) > 0 # Take out cells that doesn't express any gene
data = data[select, :]
gene_names = np.array(ad.var.index.values, dtype=str)
if "batch_indices" in self.obs.columns:
batch_indices = self.obs["batch_indices"].values
if "cell_types" in self.obs.columns:
cell_types = self.obs["cell_types"]
cell_types = cell_types.drop_duplicates().values.astype("str")
if "labels" in self.obs.columns:
labels = self.obs["labels"]
elif "cell_types" in self.obs.columns:
labels = self.obs["cell_types"].rank(method="dense").values.astype("int")
return data, gene_names, batch_indices, cell_types, labels
|
https://github.com/YosefLab/scvi-tools/issues/288
|
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-3-138de299bfb8> in <module>
1 # load
2 dataset = AnnDataset('data/dataset.h5ad',
----> 3 save_path=path)
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in __init__(self, filename, save_path, url, new_n_genes, subset_genes)
36 self.url = url
37
---> 38 data, gene_names = self.download_and_preprocess()
39
40 super().__init__(*GeneExpressionDataset.get_attributes_from_matrix(data),
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/dataset.py in download_and_preprocess(self)
61 def download_and_preprocess(self):
62 self.download()
---> 63 return self.preprocess()
64
65 def collate_fn(self, batch):
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in preprocess(self)
52 data = ad.X.copy() # Dense
53 else:
---> 54 data = ad.X.toarray() # Sparse
55 select = data.sum(axis=1) > 0 # Take out cells that doesn't express any gene
56 data = data[select, :]
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/compressed.py in toarray(self, order, out)
960 if out is None and order is None:
961 order = self._swap('cf')[0]
--> 962 out = self._process_toarray_args(order, out)
963 if not (out.flags.c_contiguous or out.flags.f_contiguous):
964 raise ValueError('Output array must be C or F contiguous')
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/base.py in _process_toarray_args(self, order, out)
1185 return out
1186 else:
-> 1187 return np.zeros(self.shape, dtype=self.dtype, order=order)
1188
1189
MemoryError:
|
MemoryError
|
def update_cells(self, subset_cells):
new_n_cells = (
len(subset_cells)
if subset_cells.dtype is not np.dtype("bool")
else subset_cells.sum()
)
print("Downsampling from %i to %i cells" % (len(self), new_n_cells))
for attr_name in [
"_X",
"labels",
"batch_indices",
"local_means",
"local_vars",
"x_coord",
"y_coord",
]:
if getattr(self, attr_name) is not None:
setattr(self, attr_name, getattr(self, attr_name)[subset_cells])
self.library_size_batch()
|
def update_cells(self, subset_cells):
new_n_cells = (
len(subset_cells)
if subset_cells.dtype is not np.dtype("bool")
else subset_cells.sum()
)
print("Downsampling from %i to %i cells" % (len(self), new_n_cells))
for attr_name in ["_X", "labels", "batch_indices", "local_means", "local_vars"]:
setattr(self, attr_name, getattr(self, attr_name)[subset_cells])
self.library_size_batch()
|
https://github.com/YosefLab/scvi-tools/issues/288
|
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-3-138de299bfb8> in <module>
1 # load
2 dataset = AnnDataset('data/dataset.h5ad',
----> 3 save_path=path)
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in __init__(self, filename, save_path, url, new_n_genes, subset_genes)
36 self.url = url
37
---> 38 data, gene_names = self.download_and_preprocess()
39
40 super().__init__(*GeneExpressionDataset.get_attributes_from_matrix(data),
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/dataset.py in download_and_preprocess(self)
61 def download_and_preprocess(self):
62 self.download()
---> 63 return self.preprocess()
64
65 def collate_fn(self, batch):
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in preprocess(self)
52 data = ad.X.copy() # Dense
53 else:
---> 54 data = ad.X.toarray() # Sparse
55 select = data.sum(axis=1) > 0 # Take out cells that doesn't express any gene
56 data = data[select, :]
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/compressed.py in toarray(self, order, out)
960 if out is None and order is None:
961 order = self._swap('cf')[0]
--> 962 out = self._process_toarray_args(order, out)
963 if not (out.flags.c_contiguous or out.flags.f_contiguous):
964 raise ValueError('Output array must be C or F contiguous')
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/base.py in _process_toarray_args(self, order, out)
1185 return out
1186 else:
-> 1187 return np.zeros(self.shape, dtype=self.dtype, order=order)
1188
1189
MemoryError:
|
MemoryError
|
def get_attributes_from_matrix(X, batch_indices=0, labels=None):
ne_cells = X.sum(axis=1) > 0
to_keep = np.where(ne_cells)[0]
if not ne_cells.all():
X = X[to_keep]
removed_idx = np.where(~ne_cells)[0]
print(
"Cells with zero expression in all genes considered were removed, the indices of the removed cells "
"in the expression matrix were:"
)
print(removed_idx)
local_mean, local_var = GeneExpressionDataset.library_size(X)
batch_indices = (
batch_indices * np.ones((X.shape[0], 1))
if type(batch_indices) is int
else batch_indices[to_keep]
)
labels = (
labels[to_keep].reshape(-1, 1)
if labels is not None
else np.zeros_like(batch_indices)
)
return X, local_mean, local_var, batch_indices, labels
|
def get_attributes_from_matrix(X, batch_indices=0, labels=None):
ne_cells = X.sum(axis=1) > 0
to_keep = np.where(ne_cells)
if not ne_cells.all():
X = X[to_keep]
removed_idx = np.where(~ne_cells)[0]
print(
"Cells with zero expression in all genes considered were removed, the indices of the removed cells "
"in the expression matrix were:"
)
print(removed_idx)
local_mean, local_var = GeneExpressionDataset.library_size(X)
batch_indices = (
batch_indices * np.ones((X.shape[0], 1))
if type(batch_indices) is int
else batch_indices[to_keep]
)
labels = (
labels[to_keep].reshape(-1, 1)
if labels is not None
else np.zeros_like(batch_indices)
)
return X, local_mean, local_var, batch_indices, labels
|
https://github.com/YosefLab/scvi-tools/issues/288
|
---------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-3-138de299bfb8> in <module>
1 # load
2 dataset = AnnDataset('data/dataset.h5ad',
----> 3 save_path=path)
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in __init__(self, filename, save_path, url, new_n_genes, subset_genes)
36 self.url = url
37
---> 38 data, gene_names = self.download_and_preprocess()
39
40 super().__init__(*GeneExpressionDataset.get_attributes_from_matrix(data),
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/dataset.py in download_and_preprocess(self)
61 def download_and_preprocess(self):
62 self.download()
---> 63 return self.preprocess()
64
65 def collate_fn(self, batch):
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scvi/dataset/anndata.py in preprocess(self)
52 data = ad.X.copy() # Dense
53 else:
---> 54 data = ad.X.toarray() # Sparse
55 select = data.sum(axis=1) > 0 # Take out cells that doesn't express any gene
56 data = data[select, :]
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/compressed.py in toarray(self, order, out)
960 if out is None and order is None:
961 order = self._swap('cf')[0]
--> 962 out = self._process_toarray_args(order, out)
963 if not (out.flags.c_contiguous or out.flags.f_contiguous):
964 raise ValueError('Output array must be C or F contiguous')
~/anaconda3/envs/pyro/lib/python3.6/site-packages/scipy/sparse/base.py in _process_toarray_args(self, order, out)
1185 return out
1186 else:
-> 1187 return np.zeros(self.shape, dtype=self.dtype, order=order)
1188
1189
MemoryError:
|
MemoryError
|
def download(datapath):
model_name = "pretrained_transformers"
mdir = os.path.join(get_model_dir(datapath), model_name)
version = "v3.0"
if not built(mdir, version):
opt = {"datapath": datapath}
fnames = ["pretrained_transformers.tgz"]
download_models(opt, fnames, model_name, version=version, use_model_type=False)
|
def download(datapath):
model_name = "pretrained_transformers"
mdir = os.path.join(get_model_dir(datapath), model_name)
version = "v2.0"
if not built(mdir, version):
opt = {"datapath": datapath}
fnames = ["pretrained_transformers.tgz"]
download_models(opt, fnames, model_name, version=version, use_model_type=False)
|
https://github.com/facebookresearch/ParlAI/issues/1912
|
model = torch.load('/Downloads/polyranker/model', map_location=lambda cpu, _: cpu)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/edinan/anaconda3/lib/python3.6/site-packages/torch/serialization.py", line 368, in load
return _load(f, map_location, pickle_module)
File "/Users/edinan/anaconda3/lib/python3.6/site-packages/torch/serialization.py", line 542, in _load
result = unpickler.load()
ModuleNotFoundError: No module named 'apex'
|
ModuleNotFoundError
|
def main():
# Get command line arguments
argparser = ParlaiParser()
DictionaryAgent.add_cmdline_args(argparser)
ParsedRemoteAgent.add_cmdline_args(argparser)
argparser.add_argument("--num-examples", default=1000, type=int)
argparser.add_argument("--num-its", default=100, type=int)
argparser.add_argument("--dict-max-exs", default=10000, type=int)
parlai_home = os.environ["PARLAI_HOME"]
if "--remote-cmd" not in sys.argv:
if os.system("which luajit") != 0:
raise RuntimeError(
"Could not detect torch luajit installed: "
+ "please install torch from http://torch.ch "
+ "or manually set --remote-cmd for this example."
)
sys.argv.append("--remote-cmd")
sys.argv.append(
"luajit {}/parlai/agents/".format(parlai_home)
+ "memnn_luatorch_cpu/memnn_zmq_parsed.lua"
)
if "--remote-args" not in sys.argv:
sys.argv.append("--remote-args")
sys.argv.append(
"{}/examples/".format(parlai_home) + "memnn_luatorch_cpu/params_default.lua"
)
opt = argparser.parse_args()
# set up dictionary
print("Setting up dictionary.")
dictionary = DictionaryAgent(opt)
if not opt.get("dict_file"):
# build dictionary since we didn't load it
ordered_opt = copy.deepcopy(opt)
ordered_opt["datatype"] = "train:ordered"
ordered_opt["numthreads"] = 1
world_dict = create_task(ordered_opt, dictionary)
print("Dictionary building on training data.")
cnt = 0
# pass examples to dictionary
while not world_dict.epoch_done():
cnt += 1
if cnt > opt["dict_max_exs"] and opt["dict_max_exs"] > 0:
print("Processed {} exs, moving on.".format(opt["dict_max_exs"]))
# don't wait too long...
break
world_dict.parley()
# we need to save the dictionary to load it in memnn (sort it by freq)
dictionary.sort()
dictionary.save("/tmp/dict.txt", sort=True)
print("Dictionary ready, moving on to training.")
opt["datatype"] = "train"
agent = ParsedRemoteAgent(opt, {"dictionary_shared": dictionary.share()})
world_train = create_task(opt, agent)
valid_opt = copy.deepcopy(opt)
valid_opt["datatype"] = "valid"
valid_opt["numthreads"] = (
1 # switch to 1 thread, the memnn code will handle it better
)
world_valid = create_task(valid_opt, agent)
start = time.time()
with world_train:
for _ in range(opt["num_its"]):
print("[ training ]")
for _ in range(opt["num_examples"] * opt.get("numthreads", 1)):
world_train.parley()
print("[ validating ]")
world_valid.reset()
while not world_valid.epoch_done(): # check valid accuracy
world_valid.parley()
print("[ validation summary. ]")
report_valid = world_valid.report()
print(report_valid)
if report_valid["accuracy"] > 0.95:
break
# show some example dialogs after training:
world_valid = create_task(valid_opt, agent)
for _k in range(3):
world_valid.parley()
print(world_valid.display())
print("finished in {} s".format(round(time.time() - start, 2)))
|
def main():
# Get command line arguments
argparser = ParlaiParser()
DictionaryAgent.add_cmdline_args(argparser)
ParsedRemoteAgent.add_cmdline_args(argparser)
argparser.add_argument("--num-examples", default=1000, type=int)
argparser.add_argument("--num-its", default=100, type=int)
argparser.add_argument("--dict-max-exs", default=10000, type=int)
parlai_home = os.environ["PARLAI_HOME"]
if "--remote-cmd" not in sys.argv:
if os.system("which luajit") != 0:
raise RuntimeError(
"Could not detect torch luajit installed: "
+ "please install torch from http://torch.ch "
+ "or manually set --remote-cmd for this example."
)
sys.argv.append("--remote-cmd")
sys.argv.append(
"luajit {}/parlai/agents/".format(parlai_home)
+ "memnn_luatorch_cpu/memnn_zmq_parsed.lua"
)
if "--remote-args" not in sys.argv:
sys.argv.append("--remote-args")
sys.argv.append(
"{}/examples/".format(parlai_home) + "memnn_luatorch_cpu/params_default.lua"
)
opt = argparser.parse_args()
# set up dictionary
print("Setting up dictionary.")
dictionary = DictionaryAgent(opt)
if not opt.get("dict_file"):
# build dictionary since we didn't load it
ordered_opt = copy.deepcopy(opt)
ordered_opt["datatype"] = "train:ordered"
ordered_opt["numthreads"] = 1
world_dict = create_task(ordered_opt, dictionary)
print("Dictionary building on training data.")
cnt = 0
# pass examples to dictionary
while not world_dict.epoch_done():
cnt += 1
if cnt > opt["dict_max_exs"] and opt["dict_max_exs"] > 0:
print("Processed {} exs, moving on.".format(opt["dict_max_exs"]))
# don't wait too long...
break
world_dict.parley()
# we need to save the dictionary to load it in memnn (sort it by freq)
dictionary.sort()
dictionary.save("/tmp/dict.txt", sort=True)
print("Dictionary ready, moving on to training.")
opt["datatype"] = "train"
agent = ParsedRemoteAgent(opt, {"dictionary_shared": dictionary.share()})
world_train = create_task(opt, agent)
opt["datatype"] = "valid"
world_valid = create_task(opt, agent)
start = time.time()
with world_train:
for _ in range(opt["num_its"]):
print("[ training ]")
for _ in range(opt["num_examples"] * opt.get("numthreads", 1)):
world_train.parley()
world_train.synchronize()
print("[ validating ]")
world_valid.reset()
while not world_valid.epoch_done(): # check valid accuracy
world_valid.parley()
print("[ validation summary. ]")
report_valid = world_valid.report()
print(report_valid)
if report_valid["accuracy"] > 0.95:
break
# show some example dialogs after training:
world_valid = create_task(opt, agent)
for _k in range(3):
world_valid.parley()
print(world_valid.display())
print("finished in {} s".format(round(time.time() - start, 2)))
|
https://github.com/facebookresearch/ParlAI/issues/510
|
[ training ]
lua thread bound to tcp://*:5557
lua thread bound to tcp://*:5558
lua thread bound to tcp://*:5559
lua thread bound to tcp://*:5560
lua thread bound to tcp://*:5561
lua thread bound to tcp://*:5562
lua thread bound to tcp://*:5563
[ exs: 1577 | time: 1s | mean_rank: 5.70 | resp_loss: 0.82 | rank_loss: 0.16 ]
[ exs: 4136 | time: 2s | mean_rank: 3.37 | resp_loss: 0.81 | rank_loss: 0.10 ]
[ exs: 6639 | time: 3s | mean_rank: 3.42 | resp_loss: 0.82 | rank_loss: 0.10 ]
[synchronizing]
Traceback (most recent call last):
File "memnn_luatorch_cpu/full_task_train.py", line 117, in <module>
main()
File "memnn_luatorch_cpu/full_task_train.py", line 95, in main
world_train.synchronize()
AttributeError: 'HogwildWorld' object has no attribute 'synchronize'
|
AttributeError
|
def get_delegated_roles_metadata_filenames(
metadata_directory, consistent_snapshot, storage_backend=None
):
"""
Return a dictionary containing all filenames in 'metadata_directory'
except the top-level roles.
If multiple versions of a file exist because of a consistent snapshot,
only the file with biggest version prefix is included.
"""
filenames = {}
metadata_files = sorted(
storage_backend.list_folder(metadata_directory), reverse=True
)
# Iterate over role metadata files, sorted by their version-number prefix, with
# more recent versions first, and only add the most recent version of any
# (non top-level) metadata to the list of returned filenames. Note that there
# should only be one version of each file, if consistent_snapshot is False.
for metadata_role in metadata_files:
metadata_path = os.path.join(metadata_directory, metadata_role)
# Strip the version number if 'consistent_snapshot' is True,
# or if 'metadata_role' is Root.
# Example: '10.django.json' --> 'django.json'
consistent = metadata_role.endswith("root.json") or consistent_snapshot == True
metadata_name, junk = _strip_version_number(metadata_role, consistent)
if metadata_name.endswith(METADATA_EXTENSION):
extension_length = len(METADATA_EXTENSION)
metadata_name = metadata_name[:-extension_length]
else:
logger.debug(
"Skipping file with unsupported metadata"
" extension: " + repr(metadata_path)
)
continue
# Skip top-level roles, only interested in delegated roles.
if metadata_name in tuf.roledb.TOP_LEVEL_ROLES:
continue
# Prevent reloading duplicate versions if consistent_snapshot is True
if metadata_name not in filenames:
filenames[metadata_name] = metadata_path
return filenames
|
def get_delegated_roles_metadata_filenames(
metadata_directory, consistent_snapshot, storage_backend=None
):
"""
Return a dictionary containing all filenames in 'metadata_directory'
except the top-level roles.
If multiple versions of a file exist because of a consistent snapshot,
only the file with biggest version prefix is included.
"""
filenames = {}
metadata_files = sorted(
storage_backend.list_folder(metadata_directory), reverse=True
)
# Iterate over role metadata files, sorted by their version-number prefix, with
# more recent versions first, and only add the most recent version of any
# (non top-level) metadata to the list of returned filenames. Note that there
# should only be one version of each file, if consistent_snapshot is False.
for metadata_role in metadata_files:
metadata_path = os.path.join(metadata_directory, metadata_role)
# Strip the version number if 'consistent_snapshot' is True,
# or if 'metadata_role' is Root.
# Example: '10.django.json' --> 'django.json'
consistent_snapshot = (
metadata_role.endswith("root.json") or consistent_snapshot == True
)
metadata_name, junk = _strip_version_number(metadata_role, consistent_snapshot)
if metadata_name.endswith(METADATA_EXTENSION):
extension_length = len(METADATA_EXTENSION)
metadata_name = metadata_name[:-extension_length]
else:
logger.debug(
"Skipping file with unsupported metadata"
" extension: " + repr(metadata_path)
)
continue
# Skip top-level roles, only interested in delegated roles.
if metadata_name in tuf.roledb.TOP_LEVEL_ROLES:
continue
# Prevent reloading duplicate versions if consistent_snapshot is True
if metadata_name not in filenames:
filenames[metadata_name] = metadata_path
return filenames
|
https://github.com/theupdateframework/tuf/issues/1069
|
repository=load_repository("repository")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jku/src/tuf/tuf/repository_tool.py", line 3038, in load_repository
metadata_path = delegated_roles_filenames[rolename]
KeyError: '0'
|
KeyError
|
def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True):
"""
<Purpose>
Given the url and length of the desired file, this function opens a
connection to 'url' and downloads the file while ensuring its length
matches 'required_length' if 'STRICT_REQUIRED_LENGH' is True (If False,
the file's length is not checked and a slow retrieval exception is raised
if the downloaded rate falls below the acceptable rate).
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file.
STRICT_REQUIRED_LENGTH:
A Boolean indicator used to signal whether we should perform strict
checking of required_length. True by default. We explicitly set this to
False when we know that we want to turn this off for downloading the
timestamp metadata, which has no signed required_length.
<Side Effects>
A file object is created on disk to store the contents of 'url'.
<Exceptions>
tuf.exceptions.DownloadLengthMismatchError, if there was a
mismatch of observed vs expected lengths while downloading the file.
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A file object that points to the contents of 'url'.
"""
# Do all of the arguments have the appropriate format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.URL_SCHEMA.check_match(url)
tuf.formats.LENGTH_SCHEMA.check_match(required_length)
# 'url.replace('\\', '/')' is needed for compatibility with Windows-based
# systems, because they might use back-slashes in place of forward-slashes.
# This converts it to the common format. unquote() replaces %xx escapes in a
# url with their single-character equivalent. A back-slash may be encoded as
# %5c in the url, which should also be replaced with a forward slash.
url = six.moves.urllib.parse.unquote(url).replace("\\", "/")
logger.info("Downloading: " + repr(url))
# This is the temporary file that we will return to contain the contents of
# the downloaded file.
temp_file = tempfile.TemporaryFile()
try:
# Use a different requests.Session per schema+hostname combination, to
# reuse connections while minimizing subtle security issues.
parsed_url = six.moves.urllib.parse.urlparse(url)
if not parsed_url.scheme or not parsed_url.hostname:
raise tuf.exceptions.URLParsingError(
"Could not get scheme and hostname from URL: " + url
)
session_index = parsed_url.scheme + "+" + parsed_url.hostname
logger.debug("url: " + url)
logger.debug("session index: " + session_index)
session = _sessions.get(session_index)
if not session:
session = requests.Session()
_sessions[session_index] = session
# Attach some default headers to every Session.
requests_user_agent = session.headers["User-Agent"]
# Follows the RFC: https://tools.ietf.org/html/rfc7231#section-5.5.3
tuf_user_agent = "tuf/" + tuf.__version__ + " " + requests_user_agent
session.headers.update(
{
# Tell the server not to compress or modify anything.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#Directives
"Accept-Encoding": "identity",
# The TUF user agent.
"User-Agent": tuf_user_agent,
}
)
logger.debug("Made new session for " + session_index)
else:
logger.debug("Reusing session for " + session_index)
# Get the requests.Response object for this URL.
#
# Always stream to control how requests are downloaded:
# http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow
#
# We will always manually close Responses, so no need for a context
# manager.
#
# Always set the timeout. This timeout value is interpreted by requests as:
# - connect timeout (max delay before first byte is received)
# - read (gap) timeout (max delay between bytes received)
# These are NOT overall/total, wall-clock timeouts for any single read.
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
response = session.get(url, stream=True, timeout=tuf.settings.SOCKET_TIMEOUT)
# Check response status.
response.raise_for_status()
# Download the contents of the URL, up to the required length, to a
# temporary file, and get the total number of downloaded bytes.
total_downloaded, average_download_speed = _download_fixed_amount_of_data(
response, temp_file, required_length
)
# Does the total number of downloaded bytes match the required length?
_check_downloaded_length(
total_downloaded,
required_length,
STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH,
average_download_speed=average_download_speed,
)
except Exception:
# Close 'temp_file'. Any written data is lost.
temp_file.close()
logger.exception("Could not download URL: " + repr(url))
raise
else:
return temp_file
|
def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True):
"""
<Purpose>
Given the url and length of the desired file, this function opens a
connection to 'url' and downloads the file while ensuring its length
matches 'required_length' if 'STRICT_REQUIRED_LENGH' is True (If False,
the file's length is not checked and a slow retrieval exception is raised
if the downloaded rate falls below the acceptable rate).
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file.
STRICT_REQUIRED_LENGTH:
A Boolean indicator used to signal whether we should perform strict
checking of required_length. True by default. We explicitly set this to
False when we know that we want to turn this off for downloading the
timestamp metadata, which has no signed required_length.
<Side Effects>
A file object is created on disk to store the contents of 'url'.
<Exceptions>
tuf.exceptions.DownloadLengthMismatchError, if there was a
mismatch of observed vs expected lengths while downloading the file.
securesystemslib.exceptions.FormatError, if any of the arguments are
improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A file object that points to the contents of 'url'.
"""
# Do all of the arguments have the appropriate format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.URL_SCHEMA.check_match(url)
tuf.formats.LENGTH_SCHEMA.check_match(required_length)
# 'url.replace('\\', '/')' is needed for compatibility with Windows-based
# systems, because they might use back-slashes in place of forward-slashes.
# This converts it to the common format. unquote() replaces %xx escapes in a
# url with their single-character equivalent. A back-slash may be encoded as
# %5c in the url, which should also be replaced with a forward slash.
url = six.moves.urllib.parse.unquote(url).replace("\\", "/")
logger.info("Downloading: " + repr(url))
# This is the temporary file that we will return to contain the contents of
# the downloaded file.
temp_file = tempfile.TemporaryFile()
try:
# Use a different requests.Session per schema+hostname combination, to
# reuse connections while minimizing subtle security issues.
parsed_url = six.moves.urllib.parse.urlparse(url)
if not parsed_url.scheme or not parsed_url.hostname:
raise tuf.exceptions.URLParsingError(
"Could not get scheme and hostname from URL: " + url
)
session_index = parsed_url.scheme + "+" + parsed_url.hostname
logger.debug("url: " + url)
logger.debug("session index: " + session_index)
session = _sessions.get(session_index)
if not session:
session = requests.Session()
_sessions[session_index] = session
# Attach some default headers to every Session.
requests_user_agent = session.headers["User-Agent"]
# Follows the RFC: https://tools.ietf.org/html/rfc7231#section-5.5.3
tuf_user_agent = "tuf/" + tuf.__version__ + " " + requests_user_agent
session.headers.update(
{
# Tell the server not to compress or modify anything.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#Directives
"Accept-Encoding": "identity",
# The TUF user agent.
"User-Agent": tuf_user_agent,
}
)
logger.debug("Made new session for " + session_index)
else:
logger.debug("Reusing session for " + session_index)
# Get the requests.Response object for this URL.
#
# Always stream to control how requests are downloaded:
# http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow
#
# We will always manually close Responses, so no need for a context
# manager.
#
# Always set the timeout. This timeout value is interpreted by requests as:
# - connect timeout (max delay before first byte is received)
# - read (gap) timeout (max delay between bytes received)
# These are NOT overall/total, wall-clock timeouts for any single read.
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
response = session.get(url, stream=True, timeout=tuf.settings.SOCKET_TIMEOUT)
# Check response status.
response.raise_for_status()
# We ask the server about how big it thinks this file should be.
reported_length = _get_content_length(response)
# Then, we check whether the required length matches the reported length.
_check_content_length(reported_length, required_length, STRICT_REQUIRED_LENGTH)
# Download the contents of the URL, up to the required length, to a
# temporary file, and get the total number of downloaded bytes.
total_downloaded, average_download_speed = _download_fixed_amount_of_data(
response, temp_file, required_length
)
# Does the total number of downloaded bytes match the required length?
_check_downloaded_length(
total_downloaded,
required_length,
STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH,
average_download_speed=average_download_speed,
)
except Exception:
# Close 'temp_file'. Any written data is lost.
temp_file.close()
logger.exception("Could not download URL: " + repr(url))
raise
else:
return temp_file
|
https://github.com/theupdateframework/tuf/issues/1068
|
<snip...>
http://127.0.0.1:80 "GET /tuf/2.bins.json HTTP/1.1" 200 None
ERROR: Could not get content length about <Response [200]> from server: int() can't convert non-string with explicit base
Traceback (most recent call last):
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/download.py", line 432, in _get_content_length
reported_length = int(reported_length, 10)
TypeError: int() can't convert non-string with explicit base
The server reported a length of None bytes.
ERROR: Could not download URL: 'http://127.0.0.1/tuf/2.bins.json'
Traceback (most recent call last):
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/download.py", line 270, in _download_file
_check_content_length(reported_length, required_length,
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/download.py", line 479, in _check_content_length
if reported_length < required_length:
TypeError: '<' not supported between instances of 'NoneType' and 'int'
ERROR: Update failed from http://127.0.0.1/tuf/2.bins.json.
Traceback (most recent call last):
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 1506, in _get_metadata_file
file_object = tuf.download.unsafe_download(file_mirror,
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/download.py", line 150, in unsafe_download
return _download_file(url, required_length, STRICT_REQUIRED_LENGTH=False)
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/download.py", line 270, in _download_file
_check_content_length(reported_length, required_length,
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/download.py", line 479, in _check_content_length
if reported_length < required_length:
TypeError: '<' not supported between instances of 'NoneType' and 'int'
ERROR: Failed to update '2.bins.json' from all mirrors: {'http://127.0.0.1/tuf/2.bins.json': TypeError("'<' not supported between instances of 'NoneType' and 'int'")}
ERROR: Metadata for 'bins' cannot be updated.
ERROR: Exception:
Traceback (most recent call last):
File "/home/jku/src/pip/src/pip/_internal/cli/base_command.py", line 208, in _main
status = self.run(options, args)
File "/home/jku/src/pip/src/pip/_internal/cli/req_command.py", line 184, in wrapper
return func(self, options, args)
File "/home/jku/src/pip/src/pip/_internal/commands/install.py", line 327, in run
requirement_set = resolver.resolve(
File "/home/jku/src/pip/src/pip/_internal/resolution/legacy/resolver.py", line 180, in resolve
discovered_reqs.extend(self._resolve_one(requirement_set, req))
File "/home/jku/src/pip/src/pip/_internal/resolution/legacy/resolver.py", line 385, in _resolve_one
abstract_dist = self._get_abstract_dist_for(req_to_install)
File "/home/jku/src/pip/src/pip/_internal/resolution/legacy/resolver.py", line 337, in _get_abstract_dist_for
abstract_dist = self.preparer.prepare_linked_requirement(req)
File "/home/jku/src/pip/src/pip/_internal/operations/prepare.py", line 451, in prepare_linked_requirement
local_file = unpack_url(
File "/home/jku/src/pip/src/pip/_internal/operations/prepare.py", line 255, in unpack_url
file = get_http_url(
File "/home/jku/src/pip/src/pip/_internal/operations/prepare.py", line 129, in get_http_url
from_path, content_type = downloader.download(link, temp_dir.path)
File "/home/jku/src/pip/src/pip/_internal/network/download.py", line 255, in download
target = self._updater.get_one_valid_targetinfo(path)
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 2727, in get_one_valid_targetinfo
target = self._preorder_depth_first_walk(target_filepath)
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 2801, in _preorder_depth_first_walk
self._refresh_targets_metadata(role_name,
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 2525, in _refresh_targets_metadata
self._update_metadata_if_changed(rolename)
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 1951, in _update_metadata_if_changed
self._update_metadata(metadata_role, upperbound_filelength,
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 1785, in _update_metadata
self._get_metadata_file(metadata_role, remote_filename,
File "/home/jku/src/pip/venv/lib/python3.8/site-packages/tuf/client/updater.py", line 1602, in _get_metadata_file
raise tuf.exceptions.NoWorkingMirrorError(file_mirror_errors)
tuf.exceptions.NoWorkingMirrorError: No working mirror was found:
'127.0.0.1': TypeError("'<' not supported between instances of 'NoneType' and 'int'")
|
TypeError
|
def _read(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a Modin DataFrame.
Modin only supports pyarrow engine for now.
Parameters
----------
path: str
The filepath of the parquet file in local filesystem or hdfs.
engine: 'pyarrow'
Parquet library to use
columns: list or None
If not None, only these columns will be read from the file.
kwargs: dict
Keyword arguments.
Returns
-------
PandasQueryCompiler
A new Query Compiler.
Notes
-----
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile, ParquetDataset
from modin.pandas.io import PQ_INDEX_REGEX
if isinstance(path, str) and os.path.isdir(path):
partitioned_columns = set()
directory = True
# We do a tree walk of the path directory because partitioned
# parquet directories have a unique column at each directory level.
# Thus, we can use os.walk(), which does a dfs search, to walk
# through the different columns that the data is partitioned on
for root, dir_names, files in os.walk(path):
if dir_names:
partitioned_columns.add(dir_names[0].split("=")[0])
if files:
# Metadata files, git files, .DSStore
if files[0][0] == ".":
continue
break
partitioned_columns = list(partitioned_columns)
if len(partitioned_columns):
ErrorMessage.default_to_pandas("Mixed Partitioning Columns in Parquet")
return cls.single_worker_read(
path, engine=engine, columns=columns, **kwargs
)
else:
directory = False
if not columns:
import s3fs
if directory:
# Path of the sample file that we will read to get the remaining columns
pd = ParquetDataset(path)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, str) and path.startswith("hdfs://"):
import fsspec.core
fs, path = fsspec.core.url_to_fs(path)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, s3fs.S3File) or (
isinstance(path, str) and path.startswith("s3://")
):
from botocore.exceptions import NoCredentialsError
if isinstance(path, s3fs.S3File):
bucket_path = path.url().split(".s3.amazonaws.com")
path = "s3://" + bucket_path[0].split("://")[1] + bucket_path[1]
try:
fs = s3fs.S3FileSystem()
pd = ParquetDataset(path, filesystem=fs)
except NoCredentialsError:
fs = s3fs.S3FileSystem(anon=True)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
else:
meta = ParquetFile(path).metadata
column_names = meta.schema.names
if meta is not None and meta.metadata is not None:
pandas_metadata = meta.metadata.get(b"pandas", None)
if pandas_metadata is not None:
import json
# This is how we convert the metadata from pyarrow to a python
# dictionary, from which we then get the index columns.
# We use these to filter out from the columns in the metadata since
# the pyarrow storage has no concept of row labels/index.
# This ensures that our metadata lines up with the partitions without
# extra communication steps once we have done all the remote
# computation.
index_columns = json.loads(pandas_metadata.decode("utf8")).get(
"index_columns", []
)
column_names = [c for c in column_names if c not in index_columns]
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
return cls.build_query_compiler(path, columns, **kwargs)
|
def _read(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a Modin DataFrame.
Modin only supports pyarrow engine for now.
Parameters
----------
path: str
The filepath of the parquet file in local filesystem or hdfs.
engine: 'pyarrow'
Parquet library to use
columns: list or None
If not None, only these columns will be read from the file.
kwargs: dict
Keyword arguments.
Returns
-------
PandasQueryCompiler
A new Query Compiler.
Notes
-----
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile, ParquetDataset
from modin.pandas.io import PQ_INDEX_REGEX
if isinstance(path, str) and os.path.isdir(path):
partitioned_columns = set()
directory = True
# We do a tree walk of the path directory because partitioned
# parquet directories have a unique column at each directory level.
# Thus, we can use os.walk(), which does a dfs search, to walk
# through the different columns that the data is partitioned on
for root, dir_names, files in os.walk(path):
if dir_names:
partitioned_columns.add(dir_names[0].split("=")[0])
if files:
# Metadata files, git files, .DSStore
if files[0][0] == ".":
continue
break
partitioned_columns = list(partitioned_columns)
if len(partitioned_columns):
ErrorMessage.default_to_pandas("Mixed Partitioning Columns in Parquet")
return cls.single_worker_read(
path, engine=engine, columns=columns, **kwargs
)
else:
directory = False
if not columns:
import s3fs
if directory:
# Path of the sample file that we will read to get the remaining columns
pd = ParquetDataset(path)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, str) and path.startswith("hdfs://"):
import fsspec.core
fs, path = fsspec.core.url_to_fs(path)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, s3fs.S3File) or (
isinstance(path, str) and path.startswith("s3://")
):
from botocore.exceptions import NoCredentialsError
if isinstance(path, s3fs.S3File):
bucket_path = path.url().split(".s3.amazonaws.com")
path = "s3://" + bucket_path[0].split("://")[1] + bucket_path[1]
try:
fs = s3fs.S3FileSystem()
pd = ParquetDataset(path, filesystem=fs)
except NoCredentialsError:
fs = s3fs.S3FileSystem(anon=True)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
else:
meta = ParquetFile(path).metadata
column_names = meta.schema.names
if meta is not None:
# This is how we convert the metadata from pyarrow to a python
# dictionary, from which we then get the index columns.
# We use these to filter out from the columns in the metadata since
# the pyarrow storage has no concept of row labels/index.
# This ensures that our metadata lines up with the partitions without
# extra communication steps once we `have done all the remote
# computation.
index_columns = eval(
meta.metadata[b"pandas"].replace(b"null", b"None")
).get("index_columns", [])
column_names = [c for c in column_names if c not in index_columns]
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
return cls.build_query_compiler(path, columns, **kwargs)
|
https://github.com/modin-project/modin/issues/1476
|
Traceback (most recent call last):
File "modinTest.py", line 6, in <module>
modin_df = pd.read_parquet(path)
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/pandas/io.py", line 42, in read_parquet
path=path, columns=columns, engine=engine, **kwargs
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/data_management/factories.py", line 57, in read_parquet
return cls._determine_engine()._read_parquet(**kwargs)
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/data_management/factories.py", line 61, in _read_parquet
return cls.io_cls.read_parquet(**kwargs)
File "/home/srds/virtual_env/airflow_venv/lib/python3.6/site-packages/modin/engines/base/io/column_stores/parquet_reader.py", line 79, in read
meta.metadata[b"pandas"].replace(b"null", b"None")
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def _read(cls, filepath_or_buffer, **kwargs):
filepath_or_buffer = cls.get_path_or_buffer(filepath_or_buffer)
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
nrows = kwargs.pop("nrows", None)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
usecols = kwargs.get("usecols", None)
encoding = kwargs.get("encoding", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer,
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
).columns
elif index_col is None and not usecols:
# When names is set to some list that is smaller than the number of columns
# in the file, the first columns are built as a hierarchical index.
empty_pd_df = pandas.read_csv(filepath_or_buffer, nrows=0, encoding=encoding)
num_cols = len(empty_pd_df.columns)
if num_cols > len(names):
index_col = list(range(num_cols - len(names)))
if len(index_col) == 1:
index_col = index_col[0]
kwargs["index_col"] = index_col
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
# Max number of partitions available
num_partitions = NPartitions.get()
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
splits = cls.partitioned_file(
f,
num_partitions=num_partitions,
nrows=nrows,
skiprows=skiprows,
quotechar=quotechar,
is_quoting=is_quoting,
)
for start, end in splits:
args.update({"start": start, "end": end})
partition_id = cls.deploy(cls.parse, num_splits + 2, args)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
def _read(cls, filepath_or_buffer, **kwargs):
filepath_or_buffer = cls.get_path_or_buffer(filepath_or_buffer)
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
nrows = kwargs.pop("nrows", None)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
usecols = kwargs.get("usecols", None)
encoding = kwargs.get("encoding", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer,
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
).columns
elif index_col is None and not usecols:
# When names is set to some list that is smaller than the number of columns
# in the file, the first columns are built as a hierarchical index.
empty_pd_df = pandas.read_csv(filepath_or_buffer, nrows=0, encoding=encoding)
num_cols = len(empty_pd_df.columns)
if num_cols > len(names):
index_col = list(range(num_cols - len(names)))
if len(index_col) == 1:
index_col = index_col[0]
kwargs["index_col"] = index_col
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
# Max number of partitions available
num_partitions = NPartitions.get()
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
splits = cls.partitioned_file(
f,
num_partitions=num_partitions,
nrows=nrows,
skiprows=skiprows,
quotechar=quotechar,
is_quoting=is_quoting,
)
for start, end in splits:
args.update({"start": start, "end": end})
partition_id = cls.deploy(cls.parse, num_splits + 2, args)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
https://github.com/modin-project/modin/issues/2307
|
TypeError Traceback (most recent call last)
<ipython-input-4-6eaf150bb793> in <module>()
----> 1 df = pd.read_csv('/tmp/tmp_csv.csv',usecols=column_selector)
2 df.head()
5 frames
/usr/local/lib/python3.6/dist-packages/modin/pandas/io.py in parser_func(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, skipfooter, doublequote, delim_whitespace, low_memory, memory_map, float_precision)
110 if kwargs.get("sep", sep) is False:
111 kwargs["sep"] = "\t"
--> 112 return _read(**kwargs)
113
114 return parser_func
/usr/local/lib/python3.6/dist-packages/modin/pandas/io.py in _read(**kwargs)
125 from modin.data_management.factories.dispatcher import EngineDispatcher
126
--> 127 pd_obj = EngineDispatcher.read_csv(**kwargs)
128 # This happens when `read_csv` returns a TextFileReader object for iterating through
129 if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
/usr/local/lib/python3.6/dist-packages/modin/data_management/factories/dispatcher.py in read_csv(cls, **kwargs)
111 @classmethod
112 def read_csv(cls, **kwargs):
--> 113 return cls.__engine._read_csv(**kwargs)
114
115 @classmethod
/usr/local/lib/python3.6/dist-packages/modin/data_management/factories/factories.py in _read_csv(cls, **kwargs)
85 @classmethod
86 def _read_csv(cls, **kwargs):
---> 87 return cls.io_cls.read_csv(**kwargs)
88
89 @classmethod
/usr/local/lib/python3.6/dist-packages/modin/engines/base/io/file_reader.py in read(cls, *args, **kwargs)
27 @classmethod
28 def read(cls, *args, **kwargs):
---> 29 query_compiler = cls._read(*args, **kwargs)
30 # TODO (devin-petersohn): Make this section more general for non-pandas kernel
31 # implementations.
/usr/local/lib/python3.6/dist-packages/modin/engines/base/io/text/csv_reader.py in _read(cls, filepath_or_buffer, **kwargs)
82 **dict(kwargs, nrows=0, skipfooter=0),
83 ).columns
---> 84 usecols = all_cols.get_indexer_for(list(usecols_md[0]))
85 parse_dates = kwargs.pop("parse_dates", False)
86 partition_kwargs = dict(
TypeError: 'function' object is not iterable
|
TypeError
|
def read_csv(
cls,
filepath_or_buffer,
sep=",",
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal=b".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
storage_options=None,
):
items = locals().copy()
mykwargs = {k: items[k] for k in items if k in cls.arg_keys}
eng = str(engine).lower().strip()
try:
if eng in ["pandas", "c"]:
return cls._read(**mykwargs)
if isinstance(dtype, dict):
column_types = {c: cls._dtype_to_arrow(t) for c, t in dtype.items()}
else:
column_types = cls._dtype_to_arrow(dtype)
if (type(parse_dates) is list) and type(column_types) is dict:
for c in parse_dates:
column_types[c] = pa.timestamp("s")
if names:
if header == 0:
skiprows = skiprows + 1 if skiprows is not None else 1
elif header is None or header == "infer":
pass
else:
raise NotImplementedError(
"read_csv with 'arrow' engine and provided 'names' parameter supports only 0, None and 'infer' header values"
)
else:
if header == 0 or header == "infer":
pass
else:
raise NotImplementedError(
"read_csv with 'arrow' engine without 'names' parameter provided supports only 0 and 'infer' header values"
)
if delimiter is None:
delimiter = sep
if delim_whitespace and delimiter != ",":
raise ValueError(
"Specified a delimiter and delim_whitespace=True; you can only specify one."
)
po = ParseOptions(
delimiter="\\s+" if delim_whitespace else delimiter,
quote_char=quotechar,
double_quote=doublequote,
escape_char=escapechar,
newlines_in_values=False,
ignore_empty_lines=skip_blank_lines,
)
co = ConvertOptions(
check_utf8=None,
column_types=column_types,
null_values=None,
true_values=None,
false_values=None,
# timestamp fields should be handled as strings if parse_dates
# didn't passed explicitly as an array or a dict
timestamp_parsers=[""] if isinstance(parse_dates, bool) else None,
strings_can_be_null=None,
include_columns=None,
include_missing_columns=None,
auto_dict_encode=None,
auto_dict_max_cardinality=None,
)
ro = ReadOptions(
use_threads=True,
block_size=None,
skip_rows=skiprows,
column_names=names,
autogenerate_column_names=None,
)
at = read_csv(
filepath_or_buffer,
read_options=ro,
parse_options=po,
convert_options=co,
)
return cls.from_arrow(at)
except (pa.ArrowNotImplementedError, NotImplementedError):
if eng in ["arrow"]:
raise
ErrorMessage.default_to_pandas("`read_csv`")
return cls._read(**mykwargs)
|
def read_csv(
cls,
filepath_or_buffer,
sep=",",
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal=b".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
storage_options=None,
):
items = locals().copy()
mykwargs = {k: items[k] for k in items if k in cls.arg_keys}
eng = str(engine).lower().strip()
try:
if eng in ["pandas", "c"]:
return cls._read(**mykwargs)
if isinstance(dtype, dict):
column_types = {c: cls._dtype_to_arrow(t) for c, t in dtype.items()}
else:
column_types = cls._dtype_to_arrow(dtype)
if (type(parse_dates) is list) and type(column_types) is dict:
for c in parse_dates:
column_types[c] = pa.timestamp("s")
if names:
if header == 0:
skiprows = skiprows + 1 if skiprows is not None else 1
elif header is None or header == "infer":
pass
else:
raise NotImplementedError(
"read_csv with 'arrow' engine and provided 'names' parameter supports only 0, None and 'infer' header values"
)
else:
if header == 0 or header == "infer":
pass
else:
raise NotImplementedError(
"read_csv with 'arrow' engine without 'names' parameter provided supports only 0 and 'infer' header values"
)
if delimiter is None:
delimiter = sep
if delim_whitespace and delimiter != ",":
raise ValueError(
"Specified a delimiter and delim_whitespace=True; you can only specify one."
)
po = ParseOptions(
delimiter="\\s+" if delim_whitespace else delimiter,
quote_char=quotechar,
double_quote=doublequote,
escape_char=escapechar,
newlines_in_values=False,
ignore_empty_lines=skip_blank_lines,
)
co = ConvertOptions(
check_utf8=None,
column_types=column_types,
null_values=None,
true_values=None,
false_values=None,
strings_can_be_null=None,
include_columns=None,
include_missing_columns=None,
auto_dict_encode=None,
auto_dict_max_cardinality=None,
)
ro = ReadOptions(
use_threads=True,
block_size=None,
skip_rows=skiprows,
column_names=names,
autogenerate_column_names=None,
)
at = read_csv(
filepath_or_buffer,
read_options=ro,
parse_options=po,
convert_options=co,
)
return cls.from_arrow(at)
except (pa.ArrowNotImplementedError, NotImplementedError):
if eng in ["arrow"]:
raise
ErrorMessage.default_to_pandas("`read_csv`")
return cls._read(**mykwargs)
|
https://github.com/modin-project/modin/issues/2737
|
Traceback (most recent call last):
File "test.py", line 46, in <module>
df_equals(df_pandas, df_modin)
File "/modin/modin/pandas/test/utils.py", line 542, in df_equals
check_categorical=False,
File "/miniconda3/envs/modin_omnisci/lib/python3.7/site-packages/pandas/_testing.py", line 1704, in assert_frame_equal
atol=atol,
File "/miniconda3/envs/modin_omnisci/lib/python3.7/site-packages/pandas/_testing.py", line 1427, in assert_series_equal
raise AssertionError(msg)
AssertionError: [datetimelike_compat=True] ['2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01'
'2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01'
'2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01'
'2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01'
'2000-01-01' '2000-01-01' '2000-01-01' '2000-01-01' '2000-01-02'
'2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02'
'2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02'
'2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02'
'2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02' '2000-01-02'
'2000-01-02' '2000-01-02' '2000-01-02' '2000-01-03' '2000-01-03'
'2000-01-03' '2000-01-03' '2000-01-03' '2000-01-03' '2000-01-03'
'2000-01-03' '2000-01-03' '2000-01-03' '2000-01-03' '2000-01-03'
'2000-01-03' '2000-01-03' '2000-01-03' '2000-01-03'] is not equal to <DatetimeArray>
['2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-01 00:00:00', '2000-01-01 00:00:00', '2000-01-01 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-02 00:00:00', '2000-01-02 00:00:00', '2000-01-02 00:00:00',
'2000-01-03 00:00:00', '2000-01-03 00:00:00', '2000-01-03 00:00:00',
'2000-01-03 00:00:00', '2000-01-03 00:00:00', '2000-01-03 00:00:00',
'2000-01-03 00:00:00', '2000-01-03 00:00:00', '2000-01-03 00:00:00',
'2000-01-03 00:00:00', '2000-01-03 00:00:00', '2000-01-03 00:00:00',
'2000-01-03 00:00:00', '2000-01-03 00:00:00', '2000-01-03 00:00:00',
'2000-01-03 00:00:00']
Length: 64, dtype: datetime64[ns].
|
AssertionError
|
def reindex(
self,
index=None,
columns=None,
copy=True,
**kwargs,
):
if (
kwargs.get("level") is not None
or (index is not None and self._query_compiler.has_multiindex())
or (columns is not None and self._query_compiler.has_multiindex(axis=1))
):
if index is not None:
kwargs["index"] = index
if columns is not None:
kwargs["columns"] = columns
return self._default_to_pandas("reindex", copy=copy, **kwargs)
new_query_compiler = None
if index is not None:
if not isinstance(index, pandas.Index):
index = pandas.Index(index)
if not index.equals(self.index):
new_query_compiler = self._query_compiler.reindex(
axis=0, labels=index, **kwargs
)
if new_query_compiler is None:
new_query_compiler = self._query_compiler
final_query_compiler = None
if columns is not None:
if not isinstance(columns, pandas.Index):
columns = pandas.Index(columns)
if not columns.equals(self.columns):
final_query_compiler = new_query_compiler.reindex(
axis=1, labels=columns, **kwargs
)
if final_query_compiler is None:
final_query_compiler = new_query_compiler
return self._create_or_update_from_compiler(final_query_compiler, not copy)
|
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
axis = self._get_axis_number(axis)
if (columns is not None and self._query_compiler.has_multiindex(axis=1)) or (
index is not None and self._query_compiler.has_multiindex()
):
return self._default_to_pandas(
"reindex",
labels=labels,
index=index,
columns=columns,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if (
level is not None
or (axis == 1 and self._query_compiler.has_multiindex(axis=1))
or (axis == 0 and self._query_compiler.has_multiindex())
):
return self._default_to_pandas(
"reindex",
labels=labels,
level=level,
method=method,
copy=copy,
axis=axis,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
new_query_compiler = None
if index is not None:
if not isinstance(index, pandas.Index):
index = pandas.Index(index)
if not index.equals(self.index):
new_query_compiler = self._query_compiler.reindex(
axis=0,
labels=index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if new_query_compiler is None:
new_query_compiler = self._query_compiler
final_query_compiler = None
if columns is not None:
if not isinstance(columns, pandas.Index):
columns = pandas.Index(columns)
if not columns.equals(self.columns):
final_query_compiler = new_query_compiler.reindex(
axis=1,
labels=columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if final_query_compiler is None:
final_query_compiler = new_query_compiler
return self._create_or_update_from_compiler(final_query_compiler, not copy)
|
https://github.com/modin-project/modin/issues/2735
|
Traceback (most recent call last):
File "../rofl.py", line 5, in <module>
res = sr.reindex([1, 2, 3, 4, 5])
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/series.py", line 1040, in reindex
fill_value=fill_value,
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/base.py", line 1714, in reindex
tolerance=tolerance,
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/base.py", line 395, in _default_to_pandas
pandas_obj, *args, **kwargs
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/series.py", line 4315, in reindex
return super().reindex(index=index, **kwargs)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/generic.py", line 4786, in reindex
"reindex() got an unexpected keyword "
TypeError: reindex() got an unexpected keyword argument "labels"
|
TypeError
|
def _aggregate(self, func, *args, **kwargs):
_axis = kwargs.pop("_axis", 0)
kwargs.pop("_level", None)
if isinstance(func, str):
kwargs.pop("is_transform", None)
return self._string_function(func, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif func is None or isinstance(func, dict):
return self._default_to_pandas("agg", func, *args, **kwargs)
elif is_list_like(func) or callable(func):
kwargs.pop("is_transform", None)
return self.apply(func, axis=_axis, args=args, **kwargs)
else:
raise TypeError("type {} is not callable".format(type(func)))
|
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, str):
kwargs.pop("is_transform", None)
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas("agg", arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
kwargs.pop("is_transform", None)
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
raise TypeError("type {} is not callable".format(type(arg)))
|
https://github.com/modin-project/modin/issues/2305
|
Traceback (most recent call last):
File "agg_test2.py", line 13, in <module>
df1 = df.agg(new_col=('col2', max))
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 504, in aggregate
return self.apply(func, axis=axis, args=args, **kwargs)
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 289, in apply
query_compiler = super(DataFrame, self).apply(
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 716, in apply
raise TypeError("{} object is not callable".format(type(func)))
TypeError: <class 'NoneType'> object is not callable
|
TypeError
|
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
axis = self._get_axis_number(axis)
if (columns is not None and self._query_compiler.has_multiindex(axis=1)) or (
index is not None and self._query_compiler.has_multiindex()
):
return self._default_to_pandas(
"reindex",
labels=labels,
index=index,
columns=columns,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if (
level is not None
or (axis == 1 and self._query_compiler.has_multiindex(axis=1))
or (axis == 0 and self._query_compiler.has_multiindex())
):
return self._default_to_pandas(
"reindex",
labels=labels,
level=level,
method=method,
copy=copy,
axis=axis,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
new_query_compiler = None
if index is not None:
if not isinstance(index, pandas.Index):
index = pandas.Index(index)
if not index.equals(self.index):
new_query_compiler = self._query_compiler.reindex(
axis=0,
labels=index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if new_query_compiler is None:
new_query_compiler = self._query_compiler
final_query_compiler = None
if columns is not None:
if not isinstance(columns, pandas.Index):
columns = pandas.Index(columns)
if not columns.equals(self.columns):
final_query_compiler = new_query_compiler.reindex(
axis=1,
labels=columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if final_query_compiler is None:
final_query_compiler = new_query_compiler
return self._create_or_update_from_compiler(final_query_compiler, not copy)
|
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
axis = self._get_axis_number(axis)
if (
level is not None
or (
(columns is not None or axis == 1)
and self._query_compiler.has_multiindex(axis=1)
)
or ((index is not None or axis == 0) and self._query_compiler.has_multiindex())
):
return self._default_to_pandas(
"reindex",
labels=labels,
index=index,
columns=columns,
axis=axis,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
new_query_compiler = None
if index is not None:
if not isinstance(index, pandas.Index):
index = pandas.Index(index)
if not index.equals(self.index):
new_query_compiler = self._query_compiler.reindex(
axis=0,
labels=index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if new_query_compiler is None:
new_query_compiler = self._query_compiler
final_query_compiler = None
if columns is not None:
if not isinstance(columns, pandas.Index):
columns = pandas.Index(columns)
if not columns.equals(self.columns):
final_query_compiler = new_query_compiler.reindex(
axis=1,
labels=columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
if final_query_compiler is None:
final_query_compiler = new_query_compiler
return self._create_or_update_from_compiler(final_query_compiler, not copy)
|
https://github.com/modin-project/modin/issues/1806
|
df = pandas.DataFrame({"foo": [1,2,3,4], "bar": ["a", "b", "c", "d"], "waldo": [11, 12, 13, 14]})
UserWarning: Distributing <class 'dict'> object. This may take some time.
df = df.set_index(["foo", "bar"])
df
waldo
foo bar
1 a 11
2 b 12
3 c 13
4 d 14
new_index = pandas.MultiIndex.from_product([["a", "b", "c"], ["d", "e", "f"]])
df.reindex(new_index)
UserWarning: `DataFrame.reindex` defaulting to pandas implementation.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/orenmazor/.pyenv/versions/3.8.2/Python.framework/Versions/3.8/lib/python3.8/site-packages/modin/pandas/base.py", line 2038, in reindex
return self._default_to_pandas(
File "/Users/orenmazor/.pyenv/versions/3.8.2/Python.framework/Versions/3.8/lib/python3.8/site-packages/modin/pandas/base.py", line 251, in _default_to_pandas
result = getattr(getattr(pandas, self.__name__), op)(
File "/Users/orenmazor/.pyenv/versions/3.8.2/Python.framework/Versions/3.8/lib/python3.8/site-packages/pandas/util/_decorators.py", line 227, in wrapper
return func(*args, **kwargs)
File "/Users/orenmazor/.pyenv/versions/3.8.2/Python.framework/Versions/3.8/lib/python3.8/site-packages/pandas/core/frame.py", line 3851, in reindex
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
File "/Users/orenmazor/.pyenv/versions/3.8.2/Python.framework/Versions/3.8/lib/python3.8/site-packages/pandas/util/_validators.py", line 260, in validate_axis_style_args
raise TypeError(msg)
TypeError: Cannot specify both 'axis' and any of 'index' or 'columns'.
```
|
TypeError
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Perform aligning of partitions, index and partition blocks.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : bool, default False
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
# define helper functions
def get_axis_lengths(partitions, axis):
if axis:
return [obj.width() for obj in partitions[0]]
return [obj.length() for obj in partitions.T[0]]
self_index = self.axes[axis]
others_index = [o.axes[axis] for o in other]
joined_index, make_reindexer = self._join_index_objects(
axis, [self_index] + others_index, how, sort
)
frames = [self] + other
non_empty_frames_idx = [i for i, o in enumerate(frames) if o._partitions.size != 0]
# If all frames are empty
if len(non_empty_frames_idx) == 0:
return self._partitions, [o._partitions for o in other], joined_index
base_frame_idx = non_empty_frames_idx[0]
base_frame = frames[base_frame_idx]
other_frames = frames[base_frame_idx + 1 :]
# Picking first non-empty frame
base_frame = frames[non_empty_frames_idx[0]]
base_index = base_frame.axes[axis]
# define conditions for reindexing and repartitioning `self` frame
do_reindex_base = not base_index.equals(joined_index)
do_repartition_base = force_repartition or do_reindex_base
# perform repartitioning and reindexing for `base_frame` if needed
if do_repartition_base:
reindexed_base = base_frame._frame_mgr_cls.map_axis_partitions(
axis,
base_frame._partitions,
make_reindexer(do_reindex_base, base_frame_idx),
)
else:
reindexed_base = base_frame._partitions
# define length of base and `other` frames to aligning purpose
base_lengths = get_axis_lengths(reindexed_base, axis)
others_lengths = [o._axes_lengths[axis] for o in other_frames]
# define conditions for reindexing and repartitioning `other` frames
do_reindex_others = [not o.axes[axis].equals(joined_index) for o in other_frames]
do_repartition_others = [None] * len(other_frames)
for i in range(len(other_frames)):
do_repartition_others[i] = (
force_repartition
or do_reindex_others[i]
or others_lengths[i] != base_lengths
)
# perform repartitioning and reindexing for `other_frames` if needed
reindexed_other_list = [None] * len(other_frames)
for i in range(len(other_frames)):
if do_repartition_others[i]:
# indices of others frame start from `base_frame_idx` + 1
reindexed_other_list[i] = other_frames[
i
]._frame_mgr_cls.map_axis_partitions(
axis,
other_frames[i]._partitions,
make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),
lengths=base_lengths,
)
else:
reindexed_other_list[i] = other_frames[i]._partitions
reindexed_frames = (
[frames[i]._partitions for i in range(base_frame_idx)]
+ [reindexed_base]
+ reindexed_other_list
)
return reindexed_frames[0], reindexed_frames[1:], joined_index
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Perform aligning of partitions, index and partition blocks.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : bool, default False
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
# define helper functions
def get_axis_lengths(partitions, axis):
if axis:
return [obj.width() for obj in partitions[0]]
return [obj.length() for obj in partitions.T[0]]
self_index = self.axes[axis]
others_index = [o.axes[axis] for o in other]
joined_index, make_reindexer = self._join_index_objects(
axis, [self_index] + others_index, how, sort
)
frames = [self] + other
non_empty_frames_idx = [i for i, o in enumerate(frames) if o._partitions.size != 0]
# If all frames are empty
if len(non_empty_frames_idx) == 0:
return self._partitions, [o._partitions for o in other], joined_index
base_frame_idx = non_empty_frames_idx[0]
base_frame = frames[base_frame_idx]
other_frames = frames[base_frame_idx + 1 :]
# Picking first non-empty frame
base_frame = frames[non_empty_frames_idx[0]]
base_index = base_frame.axes[axis]
# define conditions for reindexing and repartitioning `self` frame
do_reindex_base = not base_index.equals(joined_index)
do_repartition_base = force_repartition or do_reindex_base
# perform repartitioning and reindexing for `base_frame` if needed
if do_repartition_base:
reindexed_base = base_frame._frame_mgr_cls.map_axis_partitions(
axis,
base_frame._partitions,
make_reindexer(do_reindex_base, base_frame_idx),
)
else:
reindexed_base = base_frame._partitions
# define length of base and `other` frames to aligning purpose
base_lengths = get_axis_lengths(reindexed_base, axis)
others_lengths = [o._axes_lengths[axis] for o in other_frames]
# define conditions for reindexing and repartitioning `other` frames
do_reindex_others = [not o.axes[axis].equals(joined_index) for o in other_frames]
do_repartition_others = [None] * len(other_frames)
for i in range(len(other_frames)):
do_repartition_others[i] = (
force_repartition
or do_reindex_others[i]
or others_lengths[i] != base_lengths
)
# perform repartitioning and reindexing for `other` frames if needed
reindexed_other_list = [None] * len(other_frames)
for i in range(len(other_frames)):
if do_repartition_others[i]:
# indices of others frame start from `base_frame_idx` + 1
reindexed_other_list[i] = other_frames[
i
]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),
lengths=base_lengths,
)
else:
reindexed_other_list[i] = other_frames[i]._partitions
reindexed_frames = (
[frames[i]._partitions for i in range(base_frame_idx)]
+ [reindexed_base]
+ reindexed_other_list
)
return reindexed_frames[0], reindexed_frames[1:], joined_index
|
https://github.com/modin-project/modin/issues/2709
|
Traceback (most recent call last):
File "../rofl.py", line 10, in <module>
print(df) # Internal error
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/base.py", line 2741, in __str__
return repr(self)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 184, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/base.py", line 168, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 205, in to_pandas
return self._modin_frame.to_pandas()
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 2098, in to_pandas
f"Internal and external indices on axis {axis} do not match.",
File "/localdisk/dchigare/repos/modin_bp/modin/error_message.py", line 63, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices on axis 1 do not match.
|
Exception
|
def _groupby_dict_reduce(
self, by, axis, agg_func, agg_args, agg_kwargs, groupby_kwargs, drop=False
):
map_dict = {}
reduce_dict = {}
rename_columns = any(
not isinstance(fn, str) and isinstance(fn, Iterable) for fn in agg_func.values()
)
for col, col_funcs in agg_func.items():
if not rename_columns:
map_dict[col], reduce_dict[col] = groupby_reduce_functions[col_funcs]
continue
if isinstance(col_funcs, str):
col_funcs = [col_funcs]
map_fns = []
for i, fn in enumerate(col_funcs):
if not isinstance(fn, str) and isinstance(fn, Iterable):
new_col_name, func = fn
elif isinstance(fn, str):
new_col_name, func = fn, fn
else:
raise TypeError
map_fns.append((new_col_name, groupby_reduce_functions[func][0]))
reduced_col_name = (
(*col, new_col_name) if isinstance(col, tuple) else (col, new_col_name)
)
reduce_dict[reduced_col_name] = groupby_reduce_functions[func][1]
map_dict[col] = map_fns
return GroupbyReduceFunction.register(map_dict, reduce_dict)(
query_compiler=self,
by=by,
axis=axis,
groupby_args=groupby_kwargs,
map_args=agg_kwargs,
reduce_args=agg_kwargs,
numeric_only=False,
drop=drop,
)
|
def _groupby_dict_reduce(
self, by, axis, agg_func, agg_args, agg_kwargs, groupby_kwargs, drop=False
):
map_dict = {}
reduce_dict = {}
rename_columns = any(
not isinstance(fn, str) and isinstance(fn, Iterable) for fn in agg_func.values()
)
for col, col_funcs in agg_func.items():
if not rename_columns:
map_dict[col], reduce_dict[col] = groupby_reduce_functions[col_funcs]
continue
if isinstance(col_funcs, str):
col_funcs = [col_funcs]
map_fns = []
for i, fn in enumerate(col_funcs):
if not isinstance(fn, str) and isinstance(fn, Iterable):
new_col_name, func = fn
elif isinstance(fn, str):
new_col_name, func = fn, fn
else:
raise TypeError
map_fns.append((new_col_name, groupby_reduce_functions[func][0]))
reduce_dict[(col, new_col_name)] = groupby_reduce_functions[func][1]
map_dict[col] = map_fns
return GroupbyReduceFunction.register(map_dict, reduce_dict)(
query_compiler=self,
by=by,
axis=axis,
groupby_args=groupby_kwargs,
map_args=agg_kwargs,
reduce_args=agg_kwargs,
numeric_only=False,
drop=drop,
)
|
https://github.com/modin-project/modin/issues/2543
|
Traceback (most recent call last):
File "../rofl.py", line 18, in <module>
df_equals(md_res, pd_res)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/test/utils.py", line 527, in df_equals
check_categorical=False,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1562, in assert_frame_equal
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1036, in raise_assert_detail
raise AssertionError(msg)
AssertionError: DataFrame are different
DataFrame shape mismatch
[left]: (251, 2)
[right]: (251, 4)
|
AssertionError
|
def aggregate(self, func=None, *args, **kwargs):
if self._axis != 0:
# This is not implemented in pandas,
# so we throw a different message
raise NotImplementedError("axis other than 0 is not supported")
if (
callable(func)
and isinstance(func, BuiltinFunctionType)
and func.__name__ in dir(self)
):
func = func.__name__
relabeling_required = False
if isinstance(func, dict) or func is None:
def try_get_str_func(fn):
if not isinstance(fn, str) and isinstance(fn, Iterable):
return [try_get_str_func(f) for f in fn]
return fn.__name__ if callable(fn) and fn.__name__ in dir(self) else fn
relabeling_required, func_dict, new_columns, order = reconstruct_func(
func, **kwargs
)
func_dict = {col: try_get_str_func(fn) for col, fn in func_dict.items()}
if any(i not in self._df.columns for i in func_dict.keys()):
from pandas.core.base import SpecificationError
raise SpecificationError("nested renamer is not supported")
if func is None:
kwargs = {}
func = func_dict
elif is_list_like(func):
return self._default_to_pandas(
lambda df, *args, **kwargs: df.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif callable(func):
return self._apply_agg_function(
lambda grp, *args, **kwargs: grp.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif isinstance(func, str):
# Using "getattr" here masks possible AttributeError which we throw
# in __getattr__, so we should call __getattr__ directly instead.
agg_func = self.__getattr__(func)
if callable(agg_func):
return agg_func(*args, **kwargs)
result = self._apply_agg_function(
func,
*args,
**kwargs,
)
if relabeling_required:
if not self._as_index:
nby_cols = len(result.columns) - len(new_columns)
order = np.concatenate([np.arange(nby_cols), order + nby_cols])
by_cols = result.columns[:nby_cols]
new_columns = pandas.Index(new_columns)
if by_cols.nlevels != new_columns.nlevels:
by_cols = by_cols.remove_unused_levels()
empty_levels = [
i
for i, level in enumerate(by_cols.levels)
if len(level) == 1 and level[0] == ""
]
by_cols = by_cols.droplevel(empty_levels)
new_columns = by_cols.append(new_columns)
result = result.iloc[:, order]
result.columns = new_columns
return result
|
def aggregate(self, func=None, *args, **kwargs):
if self._axis != 0:
# This is not implemented in pandas,
# so we throw a different message
raise NotImplementedError("axis other than 0 is not supported")
if (
callable(func)
and isinstance(func, BuiltinFunctionType)
and func.__name__ in dir(self)
):
func = func.__name__
relabeling_required = False
if isinstance(func, dict) or func is None:
def try_get_str_func(fn):
if not isinstance(fn, str) and isinstance(fn, Iterable):
return [try_get_str_func(f) for f in fn]
return fn.__name__ if callable(fn) and fn.__name__ in dir(self) else fn
relabeling_required, func_dict, new_columns, order = reconstruct_func(
func, **kwargs
)
func_dict = {col: try_get_str_func(fn) for col, fn in func_dict.items()}
if any(i not in self._df.columns for i in func_dict.keys()):
from pandas.core.base import SpecificationError
raise SpecificationError("nested renamer is not supported")
if func is None:
kwargs = {}
func = func_dict
elif is_list_like(func):
return self._default_to_pandas(
lambda df, *args, **kwargs: df.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif callable(func):
return self._apply_agg_function(
lambda grp, *args, **kwargs: grp.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif isinstance(func, str):
# Using "getattr" here masks possible AttributeError which we throw
# in __getattr__, so we should call __getattr__ directly instead.
agg_func = self.__getattr__(func)
if callable(agg_func):
return agg_func(*args, **kwargs)
result = self._apply_agg_function(
func,
*args,
**kwargs,
)
if relabeling_required:
result = result.iloc[:, order]
result.columns = new_columns
return result
|
https://github.com/modin-project/modin/issues/2543
|
Traceback (most recent call last):
File "../rofl.py", line 18, in <module>
df_equals(md_res, pd_res)
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/test/utils.py", line 527, in df_equals
check_categorical=False,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1562, in assert_frame_equal
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1036, in raise_assert_detail
raise AssertionError(msg)
AssertionError: DataFrame are different
DataFrame shape mismatch
[left]: (251, 2)
[right]: (251, 4)
|
AssertionError
|
def unique(self):
return self.__constructor__(query_compiler=self._query_compiler.unique()).to_numpy()
|
def unique(self):
return self._query_compiler.unique().to_numpy().squeeze()
|
https://github.com/modin-project/modin/issues/2566
|
===PANDAS===
0 green
dtype: object
<class 'pandas.core.series.Series'>
['green']
<class 'numpy.ndarray'>
1
===MODIN===
0 green
dtype: object
<class 'modin.pandas.series.Series'>
green
<class 'numpy.ndarray'>
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-1-c4f0aa247643> in <module>
19 print(su)
20 print(type(su))
---> 21 print(len(su))
TypeError: len() of unsized object
|
TypeError
|
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):
"""
Use numpy to broadcast or reshape item.
TODO: Add more details for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if anything)
Notes
-----
Numpy is memory efficient, there shouldn't be performance issue.
"""
# It is valid to pass a DataFrame or Series to __setitem__ that is larger than
# the target the user is trying to overwrite. This
if isinstance(item, (pandas.Series, pandas.DataFrame, Series, DataFrame)):
# convert indices in lookups to names, as Pandas reindex expects them to be so
index_values = self.qc.index[row_lookup]
if not all(idx in item.index for idx in index_values):
raise ValueError(
"Must have equal len keys and value when setting with an iterable"
)
if hasattr(item, "columns"):
column_values = self.qc.columns[col_lookup]
if not all(col in item.columns for col in column_values):
# TODO: think if it is needed to handle cases when columns have duplicate names
raise ValueError(
"Must have equal len keys and value when setting with an iterable"
)
item = item.reindex(index=index_values, columns=column_values)
else:
item = item.reindex(index=index_values)
try:
item = np.array(item)
if np.prod(to_shape) == np.prod(item.shape):
return item.reshape(to_shape)
else:
return np.broadcast_to(item, to_shape)
except ValueError:
from_shape = np.array(item).shape
raise ValueError(
"could not broadcast input array from shape {from_shape} into shape "
"{to_shape}".format(from_shape=from_shape, to_shape=to_shape)
)
|
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):
"""
Use numpy to broadcast or reshape item.
TODO: Add more details for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if anything)
Notes
-----
Numpy is memory efficient, there shouldn't be performance issue.
"""
# It is valid to pass a DataFrame or Series to __setitem__ that is larger than
# the target the user is trying to overwrite. This
if isinstance(item, (pandas.Series, pandas.DataFrame, Series, DataFrame)):
if not all(idx in item.index for idx in row_lookup):
raise ValueError(
"Must have equal len keys and value when setting with an iterable"
)
if hasattr(item, "columns"):
if not all(idx in item.columns for idx in col_lookup):
raise ValueError(
"Must have equal len keys and value when setting with an iterable"
)
item = item.reindex(index=row_lookup, columns=col_lookup)
else:
item = item.reindex(index=row_lookup)
try:
item = np.array(item)
if np.prod(to_shape) == np.prod(item.shape):
return item.reshape(to_shape)
else:
return np.broadcast_to(item, to_shape)
except ValueError:
from_shape = np.array(item).shape
raise ValueError(
"could not broadcast input array from shape {from_shape} into shape "
"{to_shape}".format(from_shape=from_shape, to_shape=to_shape)
)
|
https://github.com/modin-project/modin/issues/1620
|
Traceback (most recent call last):
File "/home/yz/IdeaProjects/modin_test/test.py", line 10, in <module>
data.loc[:,['D','C']] = data.loc[:,['D','C']].astype('float')
File "/home/yz/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py", line 275, in __setitem__
super(_LocIndexer, self).__setitem__(row_lookup, col_lookup, item)
File "/home/yz/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py", line 166, in __setitem__
item = self._broadcast_item(row_lookup, col_lookup, item, to_shape)
File "/home/yz/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py", line 186, in _broadcast_item
"Must have equal len keys and value when setting "
ValueError: Must have equal len keys and value when setting with an iterable
|
ValueError
|
def mean(self, axis, **kwargs):
if kwargs.get("level") is not None:
return self.default_to_pandas(pandas.DataFrame.mean, axis=axis, **kwargs)
skipna = kwargs.get("skipna", True)
# TODO-FIX: this function may work incorrectly with user-defined "numeric" values.
# Since `count(numeric_only=True)` discards all unknown "numeric" types, we can get incorrect
# divisor inside the reduce function.
def map_fn(df, **kwargs):
result = pandas.DataFrame(
{
"sum": df.sum(axis=axis, skipna=skipna),
"count": df.count(axis=axis, numeric_only=True),
}
)
return result if axis else result.T
def reduce_fn(df, **kwargs):
sum_cols = df["sum"] if axis else df.loc["sum"]
count_cols = df["count"] if axis else df.loc["count"]
if not isinstance(sum_cols, pandas.Series):
# If we got `NaN` as the result of the sum in any axis partition,
# then we must consider the whole sum as `NaN`, so setting `skipna=False`
sum_cols = sum_cols.sum(axis=axis, skipna=False)
count_cols = count_cols.sum(axis=axis, skipna=False)
return sum_cols / count_cols
return MapReduceFunction.register(
map_fn,
reduce_fn,
preserve_index=(kwargs.get("numeric_only") is not None),
)(self, axis=axis, **kwargs)
|
def mean(self, axis, **kwargs):
if kwargs.get("level") is not None:
return self.default_to_pandas(pandas.DataFrame.mean, axis=axis, **kwargs)
skipna = kwargs.get("skipna", True)
def map_apply_fn(ser, **kwargs):
try:
sum_result = ser.sum(skipna=skipna)
count_result = ser.count()
except TypeError:
return None
else:
return (sum_result, count_result)
def reduce_apply_fn(ser, **kwargs):
sum_result = ser.apply(lambda x: x[0]).sum(skipna=skipna)
count_result = ser.apply(lambda x: x[1]).sum(skipna=skipna)
return sum_result / count_result
def reduce_fn(df, **kwargs):
df.dropna(axis=1, inplace=True, how="any")
return build_applyier(reduce_apply_fn, axis=axis)(df)
def build_applyier(func, **applyier_kwargs):
def applyier(df, **kwargs):
result = df.apply(func, **applyier_kwargs)
return result.set_axis(df.axes[axis ^ 1], axis=0)
return applyier
return MapReduceFunction.register(
build_applyier(map_apply_fn, axis=axis, result_type="reduce"),
reduce_fn,
preserve_index=(kwargs.get("numeric_only") is not None),
)(self, axis=axis, **kwargs)
|
https://github.com/modin-project/modin/issues/2313
|
Traceback (most recent call last):
File "../TESTS/t2.py", line 108, in <module>
df_equals(md_df.mean(axis=1), pd_df.mean(axis=1))
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/test/utils.py", line 520, in df_equals
assert_series_equal(df1, df2, check_dtype=False, check_series_type=False)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1401, in assert_series_equal
index_values=np.asarray(left.index),
File "pandas/_libs/testing.pyx", line 67, in pandas._libs.testing.assert_almost_equal
File "pandas/_libs/testing.pyx", line 182, in pandas._libs.testing.assert_almost_equal
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1036, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Series are different
Series values are different (100.0 %)
[index]: [0, 1, 2, 3]
[left]: [nan, nan, nan, nan]
[right]: [-2.5, -2.1333333333333333, 6.033333333333334, 8.0]
|
AssertionError
|
def reduce_fn(df, **kwargs):
sum_cols = df["sum"] if axis else df.loc["sum"]
count_cols = df["count"] if axis else df.loc["count"]
if not isinstance(sum_cols, pandas.Series):
# If we got `NaN` as the result of the sum in any axis partition,
# then we must consider the whole sum as `NaN`, so setting `skipna=False`
sum_cols = sum_cols.sum(axis=axis, skipna=False)
count_cols = count_cols.sum(axis=axis, skipna=False)
return sum_cols / count_cols
|
def reduce_fn(df, **kwargs):
df.dropna(axis=1, inplace=True, how="any")
return build_applyier(reduce_apply_fn, axis=axis)(df)
|
https://github.com/modin-project/modin/issues/2313
|
Traceback (most recent call last):
File "../TESTS/t2.py", line 108, in <module>
df_equals(md_df.mean(axis=1), pd_df.mean(axis=1))
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/test/utils.py", line 520, in df_equals
assert_series_equal(df1, df2, check_dtype=False, check_series_type=False)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1401, in assert_series_equal
index_values=np.asarray(left.index),
File "pandas/_libs/testing.pyx", line 67, in pandas._libs.testing.assert_almost_equal
File "pandas/_libs/testing.pyx", line 182, in pandas._libs.testing.assert_almost_equal
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1036, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Series are different
Series values are different (100.0 %)
[index]: [0, 1, 2, 3]
[left]: [nan, nan, nan, nan]
[right]: [-2.5, -2.1333333333333333, 6.033333333333334, 8.0]
|
AssertionError
|
def applyier(df, other):
concated = pandas.concat([df, other], axis=1, copy=False)
result = concated.pivot_table(
index=index,
values=values if len(values) > 0 else None,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
# in that case Pandas transposes the result of `pivot_table`,
# transposing it back to be consistent with column axis values along
# different partitions
if len(index) == 0 and len(columns) > 0:
result = result.T
return result
|
def applyier(df, **kwargs):
result = df.apply(func, **applyier_kwargs)
return result.set_axis(df.axes[axis ^ 1], axis=0)
|
https://github.com/modin-project/modin/issues/2313
|
Traceback (most recent call last):
File "../TESTS/t2.py", line 108, in <module>
df_equals(md_df.mean(axis=1), pd_df.mean(axis=1))
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/test/utils.py", line 520, in df_equals
assert_series_equal(df1, df2, check_dtype=False, check_series_type=False)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1401, in assert_series_equal
index_values=np.asarray(left.index),
File "pandas/_libs/testing.pyx", line 67, in pandas._libs.testing.assert_almost_equal
File "pandas/_libs/testing.pyx", line 182, in pandas._libs.testing.assert_almost_equal
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/_testing.py", line 1036, in raise_assert_detail
raise AssertionError(msg)
AssertionError: Series are different
Series values are different (100.0 %)
[index]: [0, 1, 2, 3]
[left]: [nan, nan, nan, nan]
[right]: [-2.5, -2.1333333333333333, 6.033333333333334, 8.0]
|
AssertionError
|
def _make_parser_func(sep):
"""
Create a parser function from the given sep.
Parameters
----------
sep: str
The separator default to use for the parser.
Returns
-------
A function object.
"""
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
# ISSUE #2408: parse parameter shared with pandas read_csv and read_table and update with provided args
_pd_read_csv_signature = {
val.name for val in inspect.signature(pandas.read_csv).parameters.values()
}
_, _, _, f_locals = inspect.getargvalues(inspect.currentframe())
if f_locals.get("sep", sep) is False:
f_locals["sep"] = "\t"
kwargs = {k: v for k, v in f_locals.items() if k in _pd_read_csv_signature}
return _read(**kwargs)
return parser_func
|
def _make_parser_func(sep):
"""
Create a parser function from the given sep.
Parameters
----------
sep: str
The separator default to use for the parser.
Returns
-------
A function object.
"""
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
if kwargs.get("sep", sep) is False:
kwargs["sep"] = "\t"
return _read(**kwargs)
return parser_func
|
https://github.com/modin-project/modin/issues/2408
|
Traceback (most recent call last):
File "/home/my_username/Documents/projects/profiling/main.py", line 13, in <module>
load_csv()
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/memory_profiler.py", line 1142, in wrapper
val = prof(func)(*args, **kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/memory_profiler.py", line 717, in f
return func(*args, **kwds)
File "/home/my_username/Documents/projects/profiling/main.py", line 10, in load_csv
return pd.read_csv("./sample.csv")
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/pandas/io.py", line 109, in parser_func
return _read(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/pandas/io.py", line 127, in _read
pd_obj = EngineDispatcher.read_csv(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/data_management/factories/dispatcher.py", line 104, in read_csv
return cls.__engine._read_csv(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/data_management/factories/factories.py", line 87, in _read_csv
return cls.io_cls.read_csv(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/engines/base/io/file_reader.py", line 29, in read
query_compiler = cls._read(*args, **kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/engines/base/io/text/csv_reader.py", line 69, in _read
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
TypeError: read_csv() got an unexpected keyword argument '_'
|
TypeError
|
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
# ISSUE #2408: parse parameter shared with pandas read_csv and read_table and update with provided args
_pd_read_csv_signature = {
val.name for val in inspect.signature(pandas.read_csv).parameters.values()
}
_, _, _, f_locals = inspect.getargvalues(inspect.currentframe())
if f_locals.get("sep", sep) is False:
f_locals["sep"] = "\t"
kwargs = {k: v for k, v in f_locals.items() if k in _pd_read_csv_signature}
return _read(**kwargs)
|
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
if kwargs.get("sep", sep) is False:
kwargs["sep"] = "\t"
return _read(**kwargs)
|
https://github.com/modin-project/modin/issues/2408
|
Traceback (most recent call last):
File "/home/my_username/Documents/projects/profiling/main.py", line 13, in <module>
load_csv()
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/memory_profiler.py", line 1142, in wrapper
val = prof(func)(*args, **kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/memory_profiler.py", line 717, in f
return func(*args, **kwds)
File "/home/my_username/Documents/projects/profiling/main.py", line 10, in load_csv
return pd.read_csv("./sample.csv")
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/pandas/io.py", line 109, in parser_func
return _read(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/pandas/io.py", line 127, in _read
pd_obj = EngineDispatcher.read_csv(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/data_management/factories/dispatcher.py", line 104, in read_csv
return cls.__engine._read_csv(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/data_management/factories/factories.py", line 87, in _read_csv
return cls.io_cls.read_csv(**kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/engines/base/io/file_reader.py", line 29, in read
query_compiler = cls._read(*args, **kwargs)
File "/home/my_username/anaconda3/envs/profiling/lib/python3.7/site-packages/modin/engines/base/io/text/csv_reader.py", line 69, in _read
**dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
TypeError: read_csv() got an unexpected keyword argument '_'
|
TypeError
|
def _read(cls, io, **kwargs):
if kwargs.get("engine", None) is not None and kwargs.get("engine") != "openpyxl":
warnings.warn(
"Modin only implements parallel `read_excel` with `openpyxl` engine, "
'please specify `engine=None` or `engine="openpyxl"` to '
"use Modin's parallel implementation."
)
return cls.single_worker_read(io, **kwargs)
if sys.version_info < (3, 7):
warnings.warn("Python 3.7 or higher required for parallel `read_excel`.")
return cls.single_worker_read(io, **kwargs)
from zipfile import ZipFile
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.worksheet._reader import WorksheetReader
from openpyxl.reader.excel import ExcelReader
from modin.backends.pandas.parsers import PandasExcelParser
sheet_name = kwargs.get("sheet_name", 0)
if sheet_name is None or isinstance(sheet_name, list):
warnings.warn(
"`read_excel` functionality is only implemented for a single sheet at a "
"time. Multiple sheet reading coming soon!"
)
return cls.single_worker_read(io, **kwargs)
warnings.warn(
"Parallel `read_excel` is a new feature! Please email "
"bug_reports@modin.org if you run into any problems."
)
# NOTE: ExcelReader() in read-only mode does not close file handle by itself
# work around that by passing file object if we received some path
io_file = open(io, "rb") if isinstance(io, str) else io
try:
ex = ExcelReader(io_file, read_only=True)
ex.read()
wb = ex.wb
# Get shared strings
ex.read_manifest()
ex.read_strings()
ws = Worksheet(wb)
finally:
if isinstance(io, str):
# close only if it were us who opened the object
io_file.close()
pandas_kw = dict(kwargs) # preserve original kwargs
with ZipFile(io) as z:
from io import BytesIO
# Convert index to sheet name in file
if isinstance(sheet_name, int):
sheet_name = "sheet{}".format(sheet_name + 1)
else:
sheet_name = "sheet{}".format(wb.sheetnames.index(sheet_name) + 1)
if any(sheet_name.lower() in name for name in z.namelist()):
sheet_name = sheet_name.lower()
elif any(sheet_name.title() in name for name in z.namelist()):
sheet_name = sheet_name.title()
else:
raise ValueError("Sheet {} not found".format(sheet_name.lower()))
# Pass this value to the workers
kwargs["sheet_name"] = sheet_name
f = z.open("xl/worksheets/{}.xml".format(sheet_name))
f = BytesIO(f.read())
total_bytes = cls.file_size(f)
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# Read some bytes from the sheet so we can extract the XML header and first
# line. We need to make sure we get the first line of the data as well
# because that is where the column names are. The header information will
# be extracted and sent to all of the nodes.
sheet_block = f.read(EXCEL_READ_BLOCK_SIZE)
end_of_row_tag = b"</row>"
while end_of_row_tag not in sheet_block:
sheet_block += f.read(EXCEL_READ_BLOCK_SIZE)
idx_of_header_end = sheet_block.index(end_of_row_tag) + len(end_of_row_tag)
sheet_header = sheet_block[:idx_of_header_end]
# Reset the file pointer to begin at the end of the header information.
f.seek(idx_of_header_end)
kwargs["_header"] = sheet_header
footer = b"</sheetData></worksheet>"
# Use openpyxml to parse the data
reader = WorksheetReader(
ws, BytesIO(sheet_header + footer), ex.shared_strings, False
)
# Attach cells to the worksheet
reader.bind_cells()
data = PandasExcelParser.get_sheet_data(ws, kwargs.get("convert_float", True))
# Extract column names from parsed data.
column_names = pandas.Index(data[0])
index_col = kwargs.get("index_col", None)
# Remove column names that are specified as `index_col`
if index_col is not None:
column_names = column_names.drop(column_names[index_col])
if not all(column_names):
# some column names are empty, use pandas reader to take the names from it
pandas_kw["nrows"] = 1
df = pandas.read_excel(io, **pandas_kw)
column_names = df.columns
# Compute partition metadata upfront so it is uniform for all partitions
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
num_splits = min(len(column_names), num_partitions)
kwargs["fname"] = io
# Skiprows will be used to inform a partition how many rows come before it.
kwargs["skiprows"] = 0
row_count = 0
data_ids = []
index_ids = []
dtypes_ids = []
# Compute column metadata
column_chunksize = compute_chunksize(
pandas.DataFrame(columns=column_names), num_splits, axis=1
)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
kwargs["num_splits"] = num_splits
while f.tell() < total_bytes:
args = kwargs
args["skiprows"] = row_count + args["skiprows"]
args["start"] = f.tell()
chunk = f.read(chunk_size)
# This edge case can happen when we have reached the end of the data
# but not the end of the file.
if b"<row" not in chunk:
break
row_close_tag = b"</row>"
row_count = re.subn(row_close_tag, b"", chunk)[1]
# Make sure we are reading at least one row.
while row_count == 0:
chunk += f.read(chunk_size)
row_count += re.subn(row_close_tag, b"", chunk)[1]
last_index = chunk.rindex(row_close_tag)
f.seek(-(len(chunk) - last_index) + len(row_close_tag), 1)
args["end"] = f.tell()
# If there is no data, exit before triggering computation.
if b"</row>" not in chunk and b"</sheetData>" in chunk:
break
remote_results_list = cls.deploy(cls.parse, num_splits + 2, args)
data_ids.append(remote_results_list[:-2])
index_ids.append(remote_results_list[-2])
dtypes_ids.append(remote_results_list[-1])
# The end of the spreadsheet
if b"</sheetData>" in chunk:
break
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids)
data_ids = cls.build_partition(data_ids, row_lengths, column_widths)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
data_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
def _read(cls, io, **kwargs):
if kwargs.get("engine", None) is not None and kwargs.get("engine") != "openpyxl":
warnings.warn(
"Modin only implements parallel `read_excel` with `openpyxl` engine, "
'please specify `engine=None` or `engine="openpyxl"` to '
"use Modin's parallel implementation."
)
return cls.single_worker_read(io, **kwargs)
if sys.version_info < (3, 7):
warnings.warn("Python 3.7 or higher required for parallel `read_excel`.")
return cls.single_worker_read(io, **kwargs)
from zipfile import ZipFile
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.worksheet._reader import WorksheetReader
from openpyxl.reader.excel import ExcelReader
from modin.backends.pandas.parsers import PandasExcelParser
sheet_name = kwargs.get("sheet_name", 0)
if sheet_name is None or isinstance(sheet_name, list):
warnings.warn(
"`read_excel` functionality is only implemented for a single sheet at a "
"time. Multiple sheet reading coming soon!"
)
return cls.single_worker_read(io, **kwargs)
warnings.warn(
"Parallel `read_excel` is a new feature! Please email "
"bug_reports@modin.org if you run into any problems."
)
# NOTE: ExcelReader() in read-only mode does not close file handle by itself
# work around that by passing file object if we received some path
io_file = open(io, "rb") if isinstance(io, str) else io
try:
ex = ExcelReader(io_file, read_only=True)
ex.read()
wb = ex.wb
# Get shared strings
ex.read_manifest()
ex.read_strings()
ws = Worksheet(wb)
finally:
if isinstance(io, str):
# close only if it were us who opened the object
io_file.close()
with ZipFile(io) as z:
from io import BytesIO
# Convert index to sheet name in file
if isinstance(sheet_name, int):
sheet_name = "sheet{}".format(sheet_name + 1)
else:
sheet_name = "sheet{}".format(wb.sheetnames.index(sheet_name) + 1)
if any(sheet_name.lower() in name for name in z.namelist()):
sheet_name = sheet_name.lower()
elif any(sheet_name.title() in name for name in z.namelist()):
sheet_name = sheet_name.title()
else:
raise ValueError("Sheet {} not found".format(sheet_name.lower()))
# Pass this value to the workers
kwargs["sheet_name"] = sheet_name
f = z.open("xl/worksheets/{}.xml".format(sheet_name))
f = BytesIO(f.read())
total_bytes = cls.file_size(f)
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# Read some bytes from the sheet so we can extract the XML header and first
# line. We need to make sure we get the first line of the data as well
# because that is where the column names are. The header information will
# be extracted and sent to all of the nodes.
sheet_block = f.read(EXCEL_READ_BLOCK_SIZE)
end_of_row_tag = b"</row>"
while end_of_row_tag not in sheet_block:
sheet_block += f.read(EXCEL_READ_BLOCK_SIZE)
idx_of_header_end = sheet_block.index(end_of_row_tag) + len(end_of_row_tag)
sheet_header = sheet_block[:idx_of_header_end]
# Reset the file pointer to begin at the end of the header information.
f.seek(idx_of_header_end)
kwargs["_header"] = sheet_header
footer = b"</sheetData></worksheet>"
# Use openpyxml to parse the data
reader = WorksheetReader(
ws, BytesIO(sheet_header + footer), ex.shared_strings, False
)
# Attach cells to the worksheet
reader.bind_cells()
data = PandasExcelParser.get_sheet_data(ws, kwargs.get("convert_float", True))
# Extract column names from parsed data.
column_names = pandas.Index(data[0])
index_col = kwargs.get("index_col", None)
# Remove column names that are specified as `index_col`
if index_col is not None:
column_names = column_names.drop(column_names[index_col])
# Compute partition metadata upfront so it is uniform for all partitions
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
num_splits = min(len(column_names), num_partitions)
kwargs["fname"] = io
# Skiprows will be used to inform a partition how many rows come before it.
kwargs["skiprows"] = 0
row_count = 0
data_ids = []
index_ids = []
dtypes_ids = []
# Compute column metadata
column_chunksize = compute_chunksize(
pandas.DataFrame(columns=column_names), num_splits, axis=1
)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
kwargs["num_splits"] = num_splits
while f.tell() < total_bytes:
args = kwargs
args["skiprows"] = row_count + args["skiprows"]
args["start"] = f.tell()
chunk = f.read(chunk_size)
# This edge case can happen when we have reached the end of the data
# but not the end of the file.
if b"<row" not in chunk:
break
row_close_tag = b"</row>"
row_count = re.subn(row_close_tag, b"", chunk)[1]
# Make sure we are reading at least one row.
while row_count == 0:
chunk += f.read(chunk_size)
row_count += re.subn(row_close_tag, b"", chunk)[1]
last_index = chunk.rindex(row_close_tag)
f.seek(-(len(chunk) - last_index) + len(row_close_tag), 1)
args["end"] = f.tell()
# If there is no data, exit before triggering computation.
if b"</row>" not in chunk and b"</sheetData>" in chunk:
break
remote_results_list = cls.deploy(cls.parse, num_splits + 2, args)
data_ids.append(remote_results_list[:-2])
index_ids.append(remote_results_list[-2])
dtypes_ids.append(remote_results_list[-1])
# The end of the spreadsheet
if b"</sheetData>" in chunk:
break
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids)
data_ids = cls.build_partition(data_ids, row_lengths, column_widths)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
data_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
https://github.com/modin-project/modin/issues/2404
|
pd.read_excel('test_emptyline.xlsx')
UserWarning: Parallel `read_excel` is a new feature! Please email bug_reports@modin.org if you run into any problems.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\foo\modin\pandas\dataframe.py", line 183, in __repr__
result = repr(self._build_repr_df(num_rows, num_cols))
File "c:\foo\modin\pandas\base.py", line 168, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "c:\foo\modin\backends\pandas\query_compiler.py", line 233, in to_pandas
return self._modin_frame.to_pandas()
File "c:\foo\modin\engines\base\frame\data.py", line 2063, in to_pandas
f"Internal and external indices on axis {axis} do not match.",
File "c:\foo\modin\error_message.py", line 63, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices on axis 1 do not match.
|
Exception
|
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
return self._setitem(axis=axis, key=key, value=value, how=None)
|
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
def setitem_builder(df, internal_indices=[]):
df = df.copy()
if len(internal_indices) == 1:
if axis == 0:
df[df.columns[internal_indices[0]]] = value
else:
df.iloc[internal_indices[0]] = value
else:
if axis == 0:
df[df.columns[internal_indices]] = value
else:
df.iloc[internal_indices] = value
return df
if isinstance(value, type(self)):
value.columns = [key]
if axis == 0:
idx = self.columns.get_indexer_for([key])[0]
if 0 < idx < len(self.columns) - 1:
first_mask = self._modin_frame.mask(col_numeric_idx=list(range(idx)))
second_mask = self._modin_frame.mask(
col_numeric_idx=list(range(idx + 1, len(self.columns)))
)
return self.__constructor__(
first_mask._concat(
1, [value._modin_frame, second_mask], "inner", False
)
)
else:
mask = self.drop(columns=[key])._modin_frame
if idx == 0:
return self.__constructor__(
value._modin_frame._concat(1, [mask], "inner", False)
)
else:
return self.__constructor__(
mask._concat(1, [value._modin_frame], "inner", False)
)
else:
value = value.transpose()
idx = self.index.get_indexer_for([key])[0]
if 0 < idx < len(self.index) - 1:
first_mask = self._modin_frame.mask(row_numeric_idx=list(range(idx)))
second_mask = self._modin_frame.mask(
row_numeric_idx=list(range(idx + 1, len(self.index)))
)
return self.__constructor__(
first_mask._concat(
0, [value._modin_frame, second_mask], "inner", False
)
)
else:
mask = self.drop(index=[key])._modin_frame
if idx == 0:
return self.__constructor__(
value._modin_frame._concat(0, [mask], "inner", False)
)
else:
return self.__constructor__(
mask._concat(0, [value._modin_frame], "inner", False)
)
if is_list_like(value):
new_modin_frame = self._modin_frame._apply_full_axis_select_indices(
axis,
setitem_builder,
[key],
new_index=self.index,
new_columns=self.columns,
keep_remaining=True,
)
else:
new_modin_frame = self._modin_frame._apply_select_indices(
axis,
setitem_builder,
[key],
new_index=self.index,
new_columns=self.columns,
keep_remaining=True,
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def _join_index_objects(axis, indexes, how, sort):
"""
Join the pair of index objects (columns or rows) by a given strategy.
Unlike Index.join() in Pandas, if axis is 1, the sort is
False, and how is "outer", the result will _not_ be sorted.
Parameters
----------
axis : 0 or 1
The axis index object to join (0 - rows, 1 - columns).
indexes : list(Index)
The indexes to join on.
how : {'left', 'right', 'inner', 'outer', None}
The type of join to join to make. If `None` then joined index
considered to be the first index in the `indexes` list.
sort : boolean
Whether or not to sort the joined index
Returns
-------
(Index, func)
Joined index with make_reindexer func
"""
assert isinstance(indexes, list)
# define helper functions
def merge(left_index, right_index):
if axis == 1 and how == "outer" and not sort:
return left_index.union(right_index, sort=False)
else:
return left_index.join(right_index, how=how, sort=sort)
# define condition for joining indexes
all_indices_equal = all(indexes[0].equals(index) for index in [indexes[1:]])
do_join_index = how is not None and not all_indices_equal
# define condition for joining indexes with getting indexers
need_indexers = (
axis == 0
and not all_indices_equal
and any(not index.is_unique for index in indexes)
)
indexers = None
# perform joining indexes
if do_join_index:
if len(indexes) == 2 and need_indexers:
# in case of count of indexes > 2 we should perform joining all indexes
# after that get indexers
# in the fast path we can obtain joined_index and indexers in one call
indexers = [None, None]
joined_index, indexers[0], indexers[1] = indexes[0].join(
indexes[1], how=how, sort=sort, return_indexers=True
)
else:
joined_index = indexes[0]
# TODO: revisit for performance
for index in indexes[1:]:
joined_index = merge(joined_index, index)
else:
joined_index = indexes[0].copy()
if need_indexers and indexers is None:
indexers = [index.get_indexer_for(joined_index) for index in indexes]
def make_reindexer(do_reindex: bool, frame_idx: int):
# the order of the frames must match the order of the indexes
if not do_reindex:
return lambda df: df
if need_indexers:
assert indexers is not None
return lambda df: df._reindex_with_indexers(
{0: [joined_index, indexers[frame_idx]]},
copy=True,
allow_dups=True,
)
return lambda df: df.reindex(joined_index, axis=axis)
return joined_index, make_reindexer
|
def _join_index_objects(axis, indexes, how, sort):
"""
Join the pair of index objects (columns or rows) by a given strategy.
Unlike Index.join() in Pandas, if axis is 1, the sort is
False, and how is "outer", the result will _not_ be sorted.
Parameters
----------
axis : 0 or 1
The axis index object to join (0 - rows, 1 - columns).
indexes : list(Index)
The indexes to join on.
how : {'left', 'right', 'inner', 'outer'}
The type of join to join to make.
sort : boolean
Whether or not to sort the joined index
Returns
-------
(Index, func)
Joined index with make_reindexer func
"""
assert isinstance(indexes, list)
# define helper functions
def merge(left_index, right_index):
if axis == 1 and how == "outer" and not sort:
return left_index.union(right_index, sort=False)
else:
return left_index.join(right_index, how=how, sort=sort)
# define condition for joining indexes
do_join_index = False
for index in indexes[1:]:
if not indexes[0].equals(index):
do_join_index = True
break
# define condition for joining indexes with getting indexers
is_duplicates = any(not index.is_unique for index in indexes) and axis == 0
indexers = []
if is_duplicates:
indexers = [None] * len(indexes)
# perform joining indexes
if do_join_index:
if len(indexes) == 2 and is_duplicates:
# in case of count of indexes > 2 we should perform joining all indexes
# after that get indexers
# in the fast path we can obtain joined_index and indexers in one call
joined_index, indexers[0], indexers[1] = indexes[0].join(
indexes[1], how=how, sort=sort, return_indexers=True
)
else:
joined_index = indexes[0]
# TODO: revisit for performance
for index in indexes[1:]:
joined_index = merge(joined_index, index)
if is_duplicates:
for i, index in enumerate(indexes):
indexers[i] = index.get_indexer_for(joined_index)
else:
joined_index = indexes[0].copy()
def make_reindexer(do_reindex: bool, frame_idx: int):
# the order of the frames must match the order of the indexes
if not do_reindex:
return lambda df: df
if is_duplicates:
assert indexers != []
return lambda df: df._reindex_with_indexers(
{0: [joined_index, indexers[frame_idx]]},
copy=True,
allow_dups=True,
)
return lambda df: df.reindex(joined_index, axis=axis)
return joined_index, make_reindexer
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def make_reindexer(do_reindex: bool, frame_idx: int):
# the order of the frames must match the order of the indexes
if not do_reindex:
return lambda df: df
if need_indexers:
assert indexers is not None
return lambda df: df._reindex_with_indexers(
{0: [joined_index, indexers[frame_idx]]},
copy=True,
allow_dups=True,
)
return lambda df: df.reindex(joined_index, axis=axis)
|
def make_reindexer(do_reindex: bool, frame_idx: int):
# the order of the frames must match the order of the indexes
if not do_reindex:
return lambda df: df
if is_duplicates:
assert indexers != []
return lambda df: df._reindex_with_indexers(
{0: [joined_index, indexers[frame_idx]]},
copy=True,
allow_dups=True,
)
return lambda df: df.reindex(joined_index, axis=axis)
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Perform aligning of partitions, index and partition blocks.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : bool, default False
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
# define helper functions
def get_axis_lengths(partitions, axis):
if axis:
return [obj.width() for obj in partitions[0]]
return [obj.length() for obj in partitions.T[0]]
self_index = self.axes[axis]
others_index = [o.axes[axis] for o in other]
joined_index, make_reindexer = self._join_index_objects(
axis, [self_index] + others_index, how, sort
)
frames = [self] + other
non_empty_frames_idx = [i for i, o in enumerate(frames) if o._partitions.size != 0]
# If all frames are empty
if len(non_empty_frames_idx) == 0:
return self._partitions, [o._partitions for o in other], joined_index
base_frame_idx = non_empty_frames_idx[0]
base_frame = frames[base_frame_idx]
other_frames = frames[base_frame_idx + 1 :]
# Picking first non-empty frame
base_frame = frames[non_empty_frames_idx[0]]
base_index = base_frame.axes[axis]
# define conditions for reindexing and repartitioning `self` frame
do_reindex_base = not base_index.equals(joined_index)
do_repartition_base = force_repartition or do_reindex_base
# perform repartitioning and reindexing for `base_frame` if needed
if do_repartition_base:
reindexed_base = base_frame._frame_mgr_cls.map_axis_partitions(
axis,
base_frame._partitions,
make_reindexer(do_reindex_base, base_frame_idx),
)
else:
reindexed_base = base_frame._partitions
# define length of base and `other` frames to aligning purpose
base_lengths = get_axis_lengths(reindexed_base, axis)
others_lengths = [o._axes_lengths[axis] for o in other_frames]
# define conditions for reindexing and repartitioning `other` frames
do_reindex_others = [not o.axes[axis].equals(joined_index) for o in other_frames]
do_repartition_others = [None] * len(other_frames)
for i in range(len(other_frames)):
do_repartition_others[i] = (
force_repartition
or do_reindex_others[i]
or others_lengths[i] != base_lengths
)
# perform repartitioning and reindexing for `other` frames if needed
reindexed_other_list = [None] * len(other_frames)
for i in range(len(other_frames)):
if do_repartition_others[i]:
# indices of others frame start from `base_frame_idx` + 1
reindexed_other_list[i] = other_frames[
i
]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),
lengths=base_lengths,
)
else:
reindexed_other_list[i] = other_frames[i]._partitions
reindexed_frames = (
[frames[i]._partitions for i in range(base_frame_idx)]
+ [reindexed_base]
+ reindexed_other_list
)
return reindexed_frames[0], reindexed_frames[1:], joined_index
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Perform aligning of partitions, index and partition blocks.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : bool, default False
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
# define helper functions
def get_axis_lengths(partitions, axis):
if axis:
return [obj.width() for obj in partitions[0]]
return [obj.length() for obj in partitions.T[0]]
self_index = self.axes[axis]
others_index = [o.axes[axis] for o in other]
joined_index, make_reindexer = self._join_index_objects(
axis, [self_index] + others_index, how, sort
)
# define conditions for reindexing and repartitioning `self` frame
do_reindex_self = not self_index.equals(joined_index)
do_repartition_self = force_repartition or do_reindex_self
# perform repartitioning and reindexing for `self` frame if needed
if do_repartition_self:
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis,
self._partitions,
# self frame has 0 idx
make_reindexer(do_reindex_self, 0),
)
else:
reindexed_self = self._partitions
# define length of `self` and `other` frames to aligning purpose
self_lengths = get_axis_lengths(reindexed_self, axis)
others_lengths = [o._axes_lengths[axis] for o in other]
# define conditions for reindexing and repartitioning `other` frames
do_reindex_others = [not index.equals(joined_index) for index in others_index]
do_repartition_others = [None] * len(other)
for i in range(len(other)):
do_repartition_others[i] = (
force_repartition
or do_reindex_others[i]
or others_lengths[i] != self_lengths
)
# perform repartitioning and reindexing for `other` frames if needed
reindexed_other_list = [None] * len(other)
for i in range(len(other)):
if do_repartition_others[i]:
reindexed_other_list[i] = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
# indices of others frame start from 1 (0 - self frame)
make_reindexer(do_reindex_others[i], 1 + i),
lengths=self_lengths,
)
else:
reindexed_other_list[i] = other[i]._partitions
return reindexed_self, reindexed_other_list, joined_index
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def concat(cls, axis, left_parts, right_parts):
"""Concatenate the blocks with another set of blocks.
Note: Assumes that the blocks are already the same shape on the
dimension being concatenated. A ValueError will be thrown if this
condition is not met.
Args:
axis: The axis to concatenate to.
right_parts: the other blocks to be concatenated. This is a
BaseFrameManager object.
Returns
-------
A new BaseFrameManager object, the type of object that called this.
"""
if type(right_parts) is list:
# `np.array` with partitions of empty ModinFrame has a shape (0,)
# but `np.concatenate` can concatenate arrays only if its shapes at
# specified axis are equals, so filtering empty frames to avoid concat error
right_parts = [o for o in right_parts if o.size != 0]
to_concat = [left_parts] + right_parts if left_parts.size != 0 else right_parts
return np.concatenate(to_concat, axis=axis) if len(to_concat) else left_parts
else:
return np.append(left_parts, right_parts, axis=axis)
|
def concat(cls, axis, left_parts, right_parts):
"""Concatenate the blocks with another set of blocks.
Note: Assumes that the blocks are already the same shape on the
dimension being concatenated. A ValueError will be thrown if this
condition is not met.
Args:
axis: The axis to concatenate to.
right_parts: the other blocks to be concatenated. This is a
BaseFrameManager object.
Returns
-------
A new BaseFrameManager object, the type of object that called this.
"""
if type(right_parts) is list:
# `np.array` with partitions of empty ModinFrame has a shape (0,)
# but `np.concatenate` can concatenate arrays only if its shapes at
# specified axis are equals, so filtering empty frames to avoid concat error
right_parts = [o for o in right_parts if o.size != 0]
return np.concatenate([left_parts] + right_parts, axis=axis)
else:
return np.append(left_parts, right_parts, axis=axis)
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
if axis == 1 or not isinstance(value, type(self)):
return super().setitem(axis=axis, key=key, value=value)
try:
result = self._setitem(axis, key, value)
# OmniSci engine does not yet support cases when `value` is not a subframe of `self`.
except NotImplementedError:
result = super().setitem(axis=axis, key=key, value=value)
return result
|
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
if axis == 1 or not isinstance(value, type(self)):
return super().setitem(axis=axis, key=key, value=value)
return self._setitem(axis, key, value)
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def insert(self, loc, column, value):
"""Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new DFAlgQueryCompiler with new data inserted.
"""
if isinstance(value, type(self)):
value.columns = [column]
try:
result = self.insert_item(axis=1, loc=loc, value=value)
# OmniSci engine does not yet support cases when `value` is not a subframe of `self`.
except NotImplementedError:
result = super().insert(loc=loc, column=column, value=value)
return result
if is_list_like(value):
return super().insert(loc=loc, column=column, value=value)
return self.__constructor__(self._modin_frame.insert(loc, column, value))
|
def insert(self, loc, column, value):
"""Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new DFAlgQueryCompiler with new data inserted.
"""
if is_list_like(value):
return super().insert(loc=loc, column=column, value=value)
return self.__constructor__(self._modin_frame.insert(loc, column, value))
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def insert(self, loc, column, value, allow_duplicates=False):
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.squeeze(axis=1)
if not self._query_compiler.lazy_execution and len(self.index) == 0:
if not hasattr(value, "index"):
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
elif len(self.columns) == 0 and loc == 0:
new_query_compiler = DataFrame(
data=value, columns=[column], index=self.index
)._query_compiler
else:
if (
is_list_like(value)
and not isinstance(value, (pandas.Series, Series))
and len(value) != len(self.index)
):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
if isinstance(value, Series):
value = value._query_compiler
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
|
def insert(self, loc, column, value, allow_duplicates=False):
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if isinstance(value, Series):
# TODO: Remove broadcast of Series
value = value._to_pandas()
if not self._query_compiler.lazy_execution and len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
elif len(self.columns) == 0 and loc == 0:
new_query_compiler = DataFrame(
data=value, columns=[column], index=self.index
)._query_compiler
else:
if (
is_list_like(value)
and not isinstance(value, pandas.Series)
and len(value) != len(self.index)
):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def __setitem__(self, key, value):
if hashable(key) and key not in self.columns:
if isinstance(value, Series) and len(self.columns) == 0:
self._query_compiler = value._query_compiler.copy()
# Now that the data is appended, we need to update the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# last column name from the list (the appended value's name and append
# the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
return
elif isinstance(value, (pandas.DataFrame, DataFrame)) and value.shape[1] != 1:
raise ValueError(
"Wrong number of items passed %i, placement implies 1" % value.shape[1]
)
elif isinstance(value, np.ndarray) and len(value.shape) > 1:
if value.shape[1] == 1:
# Transform into columnar table and take first column
value = value.copy().T[0]
else:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
# Do new column assignment after error checks and possible value modifications
self.insert(loc=len(self.columns), column=key, value=value)
return
if not isinstance(key, str):
if isinstance(key, DataFrame) or isinstance(key, np.ndarray):
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array must be same shape as DataFrame")
key = DataFrame(key, columns=self.columns)
return self.mask(key, value, inplace=True)
def setitem_without_string_columns(df):
# Arrow makes memory-mapped objects immutable, so copy will allow them
# to be mutable again.
df = df.copy(True)
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if isinstance(value, (pandas.DataFrame, DataFrame)):
value = value[value.columns[0]].values
elif isinstance(value, np.ndarray):
assert len(value.shape) < 3, (
"Shape of new values must be compatible with manager shape"
)
value = value.T.reshape(-1)
if len(self) > 0:
value = value[: len(self)]
if not isinstance(value, Series):
value = list(value)
if not self._query_compiler.lazy_execution and len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
if isinstance(value, Series):
value = value._query_compiler
self._update_inplace(self._query_compiler.setitem(0, key, value))
|
def __setitem__(self, key, value):
if hashable(key) and key not in self.columns:
# Handle new column case first
if isinstance(value, Series):
if len(self.columns) == 0:
self._query_compiler = value._query_compiler.copy()
else:
self._create_or_update_from_compiler(
self._query_compiler.concat(
1,
value._query_compiler,
join="left",
),
inplace=True,
)
# Now that the data is appended, we need to update the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# last column name from the list (the appended value's name and append
# the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
return
elif isinstance(value, (pandas.DataFrame, DataFrame)) and value.shape[1] != 1:
raise ValueError(
"Wrong number of items passed %i, placement implies 1" % value.shape[1]
)
elif isinstance(value, np.ndarray) and len(value.shape) > 1:
if value.shape[1] == 1:
# Transform into columnar table and take first column
value = value.copy().T[0]
else:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
# Do new column assignment after error checks and possible value modifications
self.insert(loc=len(self.columns), column=key, value=value)
return
if not isinstance(key, str):
if isinstance(key, DataFrame) or isinstance(key, np.ndarray):
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array must be same shape as DataFrame")
key = DataFrame(key, columns=self.columns)
return self.mask(key, value, inplace=True)
def setitem_without_string_columns(df):
# Arrow makes memory-mapped objects immutable, so copy will allow them
# to be mutable again.
df = df.copy(True)
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if isinstance(value, (pandas.DataFrame, DataFrame)):
value = value[value.columns[0]].values
elif isinstance(value, np.ndarray):
assert len(value.shape) < 3, (
"Shape of new values must be compatible with manager shape"
)
value = value.T.reshape(-1)
if len(self) > 0:
value = value[: len(self)]
if not isinstance(value, Series):
value = list(value)
if not self._query_compiler.lazy_execution and len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
if isinstance(value, Series):
value = value._query_compiler
self._update_inplace(self._query_compiler.setitem(0, key, value))
|
https://github.com/modin-project/modin/issues/2442
|
Traceback (most recent call last):
File "test_outer.py", line 13, in <module>
md_df["b"] = pd.Series(np.zeros(len(md_df))) # TypeError
File "/localdisk/dchigare/repos/modin_bp/modin/pandas/dataframe.py", line 1978, in __setitem__
join="left",
File "/localdisk/dchigare/repos/modin_bp/modin/backends/pandas/query_compiler.py", line 303, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1850, in _concat
axis ^ 1, others, how, sort, force_repartition=True
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 1701, in _copartition
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 980, in _join_index_objects
joined_obj = merge_index(joined_obj, obj)
File "/localdisk/dchigare/repos/modin_bp/modin/engines/base/frame/data.py", line 974, in merge_index
return obj1.join(obj2, how=how, sort=sort)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/datetimelike.py", line 893, in join
sort=sort,
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3483, in join
return this.join(other, how=how, return_indexers=return_indexers)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3494, in join
other, how=how, return_indexers=return_indexers
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3815, in _join_monotonic
join_index, lidx, ridx = self._left_indexer(sv, ov)
File "/localdisk/dchigare/miniconda3/envs/modin_tests/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 257, in _left_indexer
return libjoin.left_join_indexer(left, right)
File "pandas/_libs/join.pyx", line 357, in pandas._libs.join.left_join_indexer
TypeError: '<' not supported between instances of 'Timestamp' and 'int'
|
TypeError
|
def aggregate(self, func=None, *args, **kwargs):
if self._axis != 0:
# This is not implemented in pandas,
# so we throw a different message
raise NotImplementedError("axis other than 0 is not supported")
if (
callable(func)
and isinstance(func, BuiltinFunctionType)
and func.__name__ in dir(self)
):
func = func.__name__
relabeling_required = False
if isinstance(func, dict) or func is None:
def _reconstruct_func(func, **kwargs):
relabeling_required, func, new_columns, order = reconstruct_func(
func, **kwargs
)
# We convert to the string version of the function for simplicity.
func = {
k: v if not callable(v) or v.__name__ not in dir(self) else v.__name__
for k, v in func.items()
}
return relabeling_required, func, new_columns, order
relabeling_required, func_dict, new_columns, order = _reconstruct_func(
func, **kwargs
)
if any(i not in self._df.columns for i in func_dict.keys()):
from pandas.core.base import SpecificationError
raise SpecificationError("nested renamer is not supported")
func = func_dict
elif is_list_like(func):
return self._default_to_pandas(
lambda df, *args, **kwargs: df.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif callable(func):
return self._apply_agg_function(
lambda grp, *args, **kwargs: grp.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif isinstance(func, str):
# Using "getattr" here masks possible AttributeError which we throw
# in __getattr__, so we should call __getattr__ directly instead.
agg_func = self.__getattr__(func)
if callable(agg_func):
return agg_func(*args, **kwargs)
result = self._apply_agg_function(
func,
*args,
**kwargs,
)
if relabeling_required:
result = result.iloc[:, order]
result.columns = new_columns
return result
|
def aggregate(self, func=None, *args, **kwargs):
if self._axis != 0:
# This is not implemented in pandas,
# so we throw a different message
raise NotImplementedError("axis other than 0 is not supported")
relabeling_required = False
if isinstance(func, dict) or func is None:
def _reconstruct_func(func, **kwargs):
relabeling_required, func, new_columns, order = reconstruct_func(
func, **kwargs
)
# We convert to the string version of the function for simplicity.
func = {
k: v if not callable(v) or v.__name__ not in dir(self) else v.__name__
for k, v in func.items()
}
return relabeling_required, func, new_columns, order
relabeling_required, func_dict, new_columns, order = _reconstruct_func(
func, **kwargs
)
if any(i not in self._df.columns for i in func_dict.keys()):
from pandas.core.base import SpecificationError
raise SpecificationError("nested renamer is not supported")
func = func_dict
elif is_list_like(func):
return self._default_to_pandas(
lambda df, *args, **kwargs: df.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif isinstance(func, str):
# Using "getattr" here masks possible AttributeError which we throw
# in __getattr__, so we should call __getattr__ directly instead.
agg_func = self.__getattr__(func)
if callable(agg_func):
return agg_func(*args, **kwargs)
result = self._apply_agg_function(
func,
drop=self._as_index,
*args,
**kwargs,
)
if relabeling_required:
result = result.iloc[:, order]
result.columns = new_columns
return result
|
https://github.com/modin-project/modin/issues/2463
|
Traceback (most recent call last):
File "/localdisk/gashiman/modin/ci/benchmarks/test_benchmarks.py", line 206, in test_groupby_sum
result = benchmark.pedantic(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pytest_benchmark/fixture.py", line 139, in pedantic
return self._raw_pedantic(target, args=args, kwargs=kwargs, setup=setup, rounds=rounds,
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pytest_benchmark/fixture.py", line 213, in _raw_pedantic
runner(loops_range)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pytest_benchmark/fixture.py", line 87, in runner
sys.settrace(None)
File "/localdisk/gashiman/modin/ci/benchmarks/test_benchmarks.py", line 170, in benchmark_groupby_agg_sum_function
result = gb.agg(sum)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 399, in aggregate
result = self._apply_agg_function(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 887, in _apply_agg_function
new_manager = groupby_qc.groupby_agg(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 2633, in groupby_agg
new_modin_frame = self._modin_frame._apply_full_axis(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1301, in _apply_full_axis
return self.broadcast_apply_full_axis(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1673, in broadcast_apply_full_axis
new_axes = [
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1674, in <listcomp>
self._compute_axis_labels(i, new_partitions)
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 289, in _compute_axis_labels
return self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 99, in get_indices
new_idx = ray.get(new_idx)
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/ray/worker.py", line 1452, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(TypeError): ray::deploy_ray_func() (pid=4067967, ip=10.241.129.55)
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 468, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::deploy_ray_func() (pid=4067969, ip=10.241.129.55)
File "python/ray/_raylet.pyx", line 482, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/axis_partition.py", line 105, in deploy_ray_func
result = func(*args)
File "/localdisk/gashiman/modin/modin/engines/base/frame/axis_partition.py", line 224, in deploy_axis_func
result = func(dataframe, **kwargs)
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1036, in _map_reduce_func
series_result = func(df, *args, **kwargs)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 2634, in <lambda>
axis, lambda df: groupby_agg_builder(df)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 2627, in groupby_agg_builder
return compute_groupby(df)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 2618, in compute_groupby
result = agg_func(grouped_df, **agg_kwargs)
File "/localdisk/gashiman/modin/modin/utils.py", line 123, in wrapper
result = func(*args, **kwargs)
TypeError: unsupported operand type(s) for +: 'int' and 'tuple'
|
ray.exceptions.RayTaskError
|
def call(cls, func, *call_args, **call_kwds):
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
join_type = call_kwds.get("join_type", "outer")
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
join_type=join_type,
preserve_labels=call_kwds.get("preserve_labels", False),
)
)
else:
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
def call(cls, func, *call_args, **call_kwds):
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
preserve_labels=call_kwds.get("preserve_labels", False),
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
return caller
|
https://github.com/modin-project/modin/issues/2133
|
df = pd.DataFrame({"a":[1, 2 ,3]})
df.shift(-1).a / df.a
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-7cc3bcb4fa16> in <module>
1 df = pd.DataFrame({"a":[1, 2 ,3]})
----> 2 df.shift(-1).a / df.a
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in __truediv__(self, right)
348
349 def __truediv__(self, right):
--> 350 return self.truediv(right)
351
352 def __rtruediv__(self, left):
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in truediv(self, other, level, fill_value, axis)
1478 new_self, new_other = self._prepare_inter_op(other)
1479 return super(Series, new_self).truediv(
-> 1480 new_other, level=level, fill_value=None, axis=axis
1481 )
1482
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in truediv(self, other, axis, level, fill_value)
3299 """
3300 return self._binary_op(
-> 3301 "truediv", other, axis=axis, level=level, fill_value=fill_value
3302 )
3303
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in _binary_op(self, op, other, **kwargs)
231 )
232 other = self._validate_other(other, axis, numeric_or_object_only=True)
--> 233 new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
234 return self._create_or_update_from_compiler(new_query_compiler)
235
~/Envs/strategy/lib/python3.7/site-packages/modin/data_management/functions/binary_function.py in caller(query_compiler, other, *args, **kwargs)
49 lambda x, y: func(x, y, *args, **kwargs),
50 other._modin_frame,
---> 51 join_type=join_type,
52 )
53 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/data.py in _binary_op(self, op, right_frame, join_type)
1604 right_parts = right_parts[0]
1605 new_frame = self._frame_mgr_cls.binary_operation(
-> 1606 1, left_parts, lambda l, r: op(l, r), right_parts
1607 )
1608 new_columns = self.columns.join(right_frame.columns, how=join_type)
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in binary_operation(cls, axis, left, func, right)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
IndexError: list index out of range
|
IndexError
|
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
join_type = call_kwds.get("join_type", "outer")
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
join_type=join_type,
preserve_labels=call_kwds.get("preserve_labels", False),
)
)
else:
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
|
def caller(query_compiler, other, *args, **kwargs):
axis = kwargs.get("axis", 0)
broadcast = kwargs.pop("broadcast", False)
if isinstance(other, type(query_compiler)):
if broadcast:
assert len(other.columns) == 1, (
"Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format(
len(other.columns)
)
)
# Transpose on `axis=1` because we always represent an individual
# column or row as a single-column Modin DataFrame
if axis == 1:
other = other.transpose()
return query_compiler.__constructor__(
query_compiler._modin_frame.broadcast_apply(
axis,
lambda l, r: func(l, r.squeeze(), *args, **kwargs),
other._modin_frame,
preserve_labels=call_kwds.get("preserve_labels", False),
)
)
else:
join_type = call_kwds.get("join_type", "outer")
return query_compiler.__constructor__(
query_compiler._modin_frame._binary_op(
lambda x, y: func(x, y, *args, **kwargs),
other._modin_frame,
join_type=join_type,
)
)
else:
if isinstance(other, (list, np.ndarray, pandas.Series)):
new_columns = query_compiler.columns
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: func(df, other, *args, **kwargs),
new_index=query_compiler.index,
new_columns=new_columns,
)
else:
new_modin_frame = query_compiler._modin_frame._map(
lambda df: func(df, other, *args, **kwargs)
)
return query_compiler.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/2133
|
df = pd.DataFrame({"a":[1, 2 ,3]})
df.shift(-1).a / df.a
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-7cc3bcb4fa16> in <module>
1 df = pd.DataFrame({"a":[1, 2 ,3]})
----> 2 df.shift(-1).a / df.a
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in __truediv__(self, right)
348
349 def __truediv__(self, right):
--> 350 return self.truediv(right)
351
352 def __rtruediv__(self, left):
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in truediv(self, other, level, fill_value, axis)
1478 new_self, new_other = self._prepare_inter_op(other)
1479 return super(Series, new_self).truediv(
-> 1480 new_other, level=level, fill_value=None, axis=axis
1481 )
1482
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in truediv(self, other, axis, level, fill_value)
3299 """
3300 return self._binary_op(
-> 3301 "truediv", other, axis=axis, level=level, fill_value=fill_value
3302 )
3303
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in _binary_op(self, op, other, **kwargs)
231 )
232 other = self._validate_other(other, axis, numeric_or_object_only=True)
--> 233 new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
234 return self._create_or_update_from_compiler(new_query_compiler)
235
~/Envs/strategy/lib/python3.7/site-packages/modin/data_management/functions/binary_function.py in caller(query_compiler, other, *args, **kwargs)
49 lambda x, y: func(x, y, *args, **kwargs),
50 other._modin_frame,
---> 51 join_type=join_type,
52 )
53 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/data.py in _binary_op(self, op, right_frame, join_type)
1604 right_parts = right_parts[0]
1605 new_frame = self._frame_mgr_cls.binary_operation(
-> 1606 1, left_parts, lambda l, r: op(l, r), right_parts
1607 )
1608 new_columns = self.columns.join(right_frame.columns, how=join_type)
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in binary_operation(cls, axis, left, func, right)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
IndexError: list index out of range
|
IndexError
|
def broadcast_apply(
self, axis, func, other, join_type="left", preserve_labels=True, dtypes=None
):
"""
Broadcast partitions of other dataframe partitions and apply a function.
Parameters
----------
axis: int,
The axis to broadcast over.
func: callable,
The function to apply.
other: BasePandasFrame
The Modin DataFrame to broadcast.
join_type: str (optional)
The type of join to apply.
preserve_labels: boolean (optional)
Whether or not to keep labels from this Modin DataFrame.
dtypes: "copy" or None (optional)
Whether to keep old dtypes or infer new dtypes from data.
Returns
-------
BasePandasFrame
"""
# Only sort the indices if they do not match
left_parts, right_parts, joined_index = self._copartition(
axis, other, join_type, sort=not self.axes[axis].equals(other.axes[axis])
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.broadcast_apply(axis, func, left_parts, right_parts)
if dtypes == "copy":
dtypes = self._dtypes
new_index = self.index
new_columns = self.columns
if not preserve_labels:
if axis == 1:
new_columns = joined_index
else:
new_index = joined_index
return self.__constructor__(
new_frame, new_index, new_columns, None, None, dtypes=dtypes
)
|
def broadcast_apply(self, axis, func, other, preserve_labels=True, dtypes=None):
"""Broadcast partitions of other dataframe partitions and apply a function.
Args:
axis: The axis to broadcast over.
func: The function to apply.
other: The Modin DataFrame to broadcast.
preserve_labels: Whether or not to keep labels from this Modin DataFrame.
dtypes: "copy" or None. Whether to keep old dtypes or infer new dtypes from
data.
Returns:
A new Modin DataFrame
"""
# Only sort the indices if they do not match
left_parts, right_parts, joined_index = self._copartition(
axis, other, "left", sort=not self.axes[axis].equals(other.axes[axis])
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.broadcast_apply(axis, func, left_parts, right_parts)
if dtypes == "copy":
dtypes = self._dtypes
new_index = self.index
new_columns = self.columns
if not preserve_labels:
if axis == 1:
new_columns = joined_index
else:
new_index = joined_index
return self.__constructor__(
new_frame, new_index, new_columns, None, None, dtypes=dtypes
)
|
https://github.com/modin-project/modin/issues/2133
|
df = pd.DataFrame({"a":[1, 2 ,3]})
df.shift(-1).a / df.a
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-7cc3bcb4fa16> in <module>
1 df = pd.DataFrame({"a":[1, 2 ,3]})
----> 2 df.shift(-1).a / df.a
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in __truediv__(self, right)
348
349 def __truediv__(self, right):
--> 350 return self.truediv(right)
351
352 def __rtruediv__(self, left):
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in truediv(self, other, level, fill_value, axis)
1478 new_self, new_other = self._prepare_inter_op(other)
1479 return super(Series, new_self).truediv(
-> 1480 new_other, level=level, fill_value=None, axis=axis
1481 )
1482
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in truediv(self, other, axis, level, fill_value)
3299 """
3300 return self._binary_op(
-> 3301 "truediv", other, axis=axis, level=level, fill_value=fill_value
3302 )
3303
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in _binary_op(self, op, other, **kwargs)
231 )
232 other = self._validate_other(other, axis, numeric_or_object_only=True)
--> 233 new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
234 return self._create_or_update_from_compiler(new_query_compiler)
235
~/Envs/strategy/lib/python3.7/site-packages/modin/data_management/functions/binary_function.py in caller(query_compiler, other, *args, **kwargs)
49 lambda x, y: func(x, y, *args, **kwargs),
50 other._modin_frame,
---> 51 join_type=join_type,
52 )
53 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/data.py in _binary_op(self, op, right_frame, join_type)
1604 right_parts = right_parts[0]
1605 new_frame = self._frame_mgr_cls.binary_operation(
-> 1606 1, left_parts, lambda l, r: op(l, r), right_parts
1607 )
1608 new_columns = self.columns.join(right_frame.columns, how=join_type)
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in binary_operation(cls, axis, left, func, right)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
IndexError: list index out of range
|
IndexError
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : boolean
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
is_aligning_applied = False
for i in range(len(other)):
if (
len(self._partitions) != len(other[i]._partitions)
and len(self.axes[0]) == len(other[i].axes[0])
and axis == 0
):
is_aligning_applied = True
self._partitions = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df
)
other[i]._partitions = other[i]._frame_mgr_cls.map_axis_partitions(
axis, other[i]._partitions, lambda df: df
)
if (
all(o.axes[axis].equals(self.axes[axis]) for o in other)
and not is_aligning_applied
):
return (
self._partitions,
[self._simple_shuffle(axis, o) for o in other],
self.axes[axis].copy(),
)
index_other_obj = [o.axes[axis] for o in other]
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.axes[axis]
right_old_idxes = index_other_obj
is_avoid_reindex = len(joined_index) != len(joined_index.unique()) and axis == 0
# Start with this and we'll repartition the first time, and then not again.
if (
not is_aligning_applied
and not is_avoid_reindex
and (force_repartition or not left_old_idx.equals(joined_index))
):
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
)
else:
reindexed_self = self._partitions
reindexed_other_list = []
for i in range(len(other)):
if (
is_aligning_applied
or is_avoid_reindex
or (not force_repartition and right_old_idxes[i].equals(joined_index))
):
reindexed_other = other[i]._partitions
else:
reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
lambda df: df.reindex(joined_index, axis=axis),
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
"""
Copartition two dataframes.
Parameters
----------
axis : 0 or 1
The axis to copartition along (0 - rows, 1 - columns).
other : BasePandasFrame
The other dataframes(s) to copartition against.
how : str
How to manage joining the index object ("left", "right", etc.)
sort : boolean
Whether or not to sort the joined index.
force_repartition : boolean
Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns
-------
Tuple
A tuple (left data, right data list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
if all(o.axes[axis].equals(self.axes[axis]) for o in other):
return (
self._partitions,
[self._simple_shuffle(axis, o) for o in other],
self.axes[axis].copy(),
)
index_other_obj = [o.axes[axis] for o in other]
joined_index = self._join_index_objects(axis, index_other_obj, how, sort)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.axes[axis]
right_old_idxes = index_other_obj
# Start with this and we'll repartition the first time, and then not again.
if not left_old_idx.equals(joined_index) or force_repartition:
reindexed_self = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, lambda df: df.reindex(joined_index, axis=axis)
)
else:
reindexed_self = self._partitions
reindexed_other_list = []
for i in range(len(other)):
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindexed_other = other[i]._partitions
else:
reindexed_other = other[i]._frame_mgr_cls.map_axis_partitions(
axis,
other[i]._partitions,
lambda df: df.reindex(joined_index, axis=axis),
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
|
https://github.com/modin-project/modin/issues/2133
|
df = pd.DataFrame({"a":[1, 2 ,3]})
df.shift(-1).a / df.a
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-7cc3bcb4fa16> in <module>
1 df = pd.DataFrame({"a":[1, 2 ,3]})
----> 2 df.shift(-1).a / df.a
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in __truediv__(self, right)
348
349 def __truediv__(self, right):
--> 350 return self.truediv(right)
351
352 def __rtruediv__(self, left):
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in truediv(self, other, level, fill_value, axis)
1478 new_self, new_other = self._prepare_inter_op(other)
1479 return super(Series, new_self).truediv(
-> 1480 new_other, level=level, fill_value=None, axis=axis
1481 )
1482
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in truediv(self, other, axis, level, fill_value)
3299 """
3300 return self._binary_op(
-> 3301 "truediv", other, axis=axis, level=level, fill_value=fill_value
3302 )
3303
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in _binary_op(self, op, other, **kwargs)
231 )
232 other = self._validate_other(other, axis, numeric_or_object_only=True)
--> 233 new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
234 return self._create_or_update_from_compiler(new_query_compiler)
235
~/Envs/strategy/lib/python3.7/site-packages/modin/data_management/functions/binary_function.py in caller(query_compiler, other, *args, **kwargs)
49 lambda x, y: func(x, y, *args, **kwargs),
50 other._modin_frame,
---> 51 join_type=join_type,
52 )
53 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/data.py in _binary_op(self, op, right_frame, join_type)
1604 right_parts = right_parts[0]
1605 new_frame = self._frame_mgr_cls.binary_operation(
-> 1606 1, left_parts, lambda l, r: op(l, r), right_parts
1607 )
1608 new_columns = self.columns.join(right_frame.columns, how=join_type)
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in binary_operation(cls, axis, left, func, right)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
IndexError: list index out of range
|
IndexError
|
def _binary_op(self, op, right_frame, join_type="outer"):
"""
Perform an operation that requires joining with another dataframe.
Parameters
----------
op : callable
The function to apply after the join.
right_frame : BasePandasFrame
The dataframe to join with.
join_type : str (optional)
The type of join to apply.
Returns
-------
BasePandasFrame
A new dataframe.
"""
left_parts, right_parts, joined_index = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(new_frame, joined_index, new_columns, None, None)
|
def _binary_op(self, op, right_frame, join_type="outer"):
"""
Perform an operation that requires joining with another dataframe.
Parameters
----------
op : callable
The function to apply after the join.
right_frame : BasePandasFrame
The dataframe to join with.
join_type : str (optional)
The type of join to apply.
Returns
-------
BasePandasFrame
A new dataframe.
"""
left_parts, right_parts, joined_index = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._frame_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(new_frame, self.index, new_columns, None, None)
|
https://github.com/modin-project/modin/issues/2133
|
df = pd.DataFrame({"a":[1, 2 ,3]})
df.shift(-1).a / df.a
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-7cc3bcb4fa16> in <module>
1 df = pd.DataFrame({"a":[1, 2 ,3]})
----> 2 df.shift(-1).a / df.a
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in __truediv__(self, right)
348
349 def __truediv__(self, right):
--> 350 return self.truediv(right)
351
352 def __rtruediv__(self, left):
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in truediv(self, other, level, fill_value, axis)
1478 new_self, new_other = self._prepare_inter_op(other)
1479 return super(Series, new_self).truediv(
-> 1480 new_other, level=level, fill_value=None, axis=axis
1481 )
1482
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in truediv(self, other, axis, level, fill_value)
3299 """
3300 return self._binary_op(
-> 3301 "truediv", other, axis=axis, level=level, fill_value=fill_value
3302 )
3303
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in _binary_op(self, op, other, **kwargs)
231 )
232 other = self._validate_other(other, axis, numeric_or_object_only=True)
--> 233 new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
234 return self._create_or_update_from_compiler(new_query_compiler)
235
~/Envs/strategy/lib/python3.7/site-packages/modin/data_management/functions/binary_function.py in caller(query_compiler, other, *args, **kwargs)
49 lambda x, y: func(x, y, *args, **kwargs),
50 other._modin_frame,
---> 51 join_type=join_type,
52 )
53 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/data.py in _binary_op(self, op, right_frame, join_type)
1604 right_parts = right_parts[0]
1605 new_frame = self._frame_mgr_cls.binary_operation(
-> 1606 1, left_parts, lambda l, r: op(l, r), right_parts
1607 )
1608 new_columns = self.columns.join(right_frame.columns, how=join_type)
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in binary_operation(cls, axis, left, func, right)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
IndexError: list index out of range
|
IndexError
|
def _prepare_inter_op(self, other):
"""
Implement [METHOD_NAME].
TODO: Add more details for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if anything)
"""
if isinstance(other, Series):
new_self = self.copy()
new_other = other.copy()
if self.name == other.name:
new_self.name = new_other.name = self.name
else:
new_self.name = new_other.name = "__reduced__"
else:
new_self = self
new_other = other
return new_self, new_other
|
def _prepare_inter_op(self, other):
"""
Implement [METHOD_NAME].
TODO: Add more details for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if anything)
"""
if isinstance(other, Series):
new_self = self.copy()
new_self.name = "__reduced__"
new_other = other.copy()
new_other.name = "__reduced__"
else:
new_self = self
new_other = other
return new_self, new_other
|
https://github.com/modin-project/modin/issues/2133
|
df = pd.DataFrame({"a":[1, 2 ,3]})
df.shift(-1).a / df.a
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-7cc3bcb4fa16> in <module>
1 df = pd.DataFrame({"a":[1, 2 ,3]})
----> 2 df.shift(-1).a / df.a
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in __truediv__(self, right)
348
349 def __truediv__(self, right):
--> 350 return self.truediv(right)
351
352 def __rtruediv__(self, left):
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/series.py in truediv(self, other, level, fill_value, axis)
1478 new_self, new_other = self._prepare_inter_op(other)
1479 return super(Series, new_self).truediv(
-> 1480 new_other, level=level, fill_value=None, axis=axis
1481 )
1482
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in truediv(self, other, axis, level, fill_value)
3299 """
3300 return self._binary_op(
-> 3301 "truediv", other, axis=axis, level=level, fill_value=fill_value
3302 )
3303
~/Envs/strategy/lib/python3.7/site-packages/modin/pandas/base.py in _binary_op(self, op, other, **kwargs)
231 )
232 other = self._validate_other(other, axis, numeric_or_object_only=True)
--> 233 new_query_compiler = getattr(self._query_compiler, op)(other, **kwargs)
234 return self._create_or_update_from_compiler(new_query_compiler)
235
~/Envs/strategy/lib/python3.7/site-packages/modin/data_management/functions/binary_function.py in caller(query_compiler, other, *args, **kwargs)
49 lambda x, y: func(x, y, *args, **kwargs),
50 other._modin_frame,
---> 51 join_type=join_type,
52 )
53 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/data.py in _binary_op(self, op, right_frame, join_type)
1604 right_parts = right_parts[0]
1605 new_frame = self._frame_mgr_cls.binary_operation(
-> 1606 1, left_parts, lambda l, r: op(l, r), right_parts
1607 )
1608 new_columns = self.columns.join(right_frame.columns, how=join_type)
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in binary_operation(cls, axis, left, func, right)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
~/Envs/strategy/lib/python3.7/site-packages/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
753 other_axis_partition=right_partitions[i],
754 )
--> 755 for i in range(len(left_partitions))
756 ]
757 )
IndexError: list index out of range
|
IndexError
|
def parse(fname, **kwargs):
num_splits = kwargs.pop("num_splits", None)
columns = kwargs.get("columns", None)
if fname.startswith("s3://"):
from botocore.exceptions import NoCredentialsError
import s3fs
try:
fs = s3fs.S3FileSystem()
fname = fs.open(fname)
except NoCredentialsError:
fs = s3fs.S3FileSystem(anon=True)
fname = fs.open(fname)
if num_splits is None:
return pandas.read_parquet(fname, **kwargs)
kwargs["use_pandas_metadata"] = True
df = pandas.read_parquet(fname, **kwargs)
if isinstance(df.index, pandas.RangeIndex):
idx = len(df.index)
else:
idx = df.index
columns = [c for c in columns if c not in df.index.names and c in df.columns]
if columns is not None:
df = df[columns]
# Append the length of the index here to build it externally
return _split_result_for_readers(0, num_splits, df) + [idx, df.dtypes]
|
def parse(fname, **kwargs):
num_splits = kwargs.pop("num_splits", None)
columns = kwargs.get("columns", None)
if num_splits is None:
return pandas.read_parquet(fname, **kwargs)
kwargs["use_pandas_metadata"] = True
df = pandas.read_parquet(fname, **kwargs)
if isinstance(df.index, pandas.RangeIndex):
idx = len(df.index)
else:
idx = df.index
columns = [c for c in columns if c not in df.index.names and c in df.columns]
if columns is not None:
df = df[columns]
# Append the length of the index here to build it externally
return _split_result_for_readers(0, num_splits, df) + [idx, df.dtypes]
|
https://github.com/modin-project/modin/issues/1765
|
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-14-814dc08ef229> in <module>
----> 1 df2 = mpd.read_parquet(path)
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/pandas/io.py in read_parquet(path, engine, columns, **kwargs)
40 return DataFrame(
41 query_compiler=EngineDispatcher.read_parquet(
---> 42 path=path, columns=columns, engine=engine, **kwargs
43 )
44 )
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/data_management/dispatcher.py in read_parquet(cls, **kwargs)
105 @classmethod
106 def read_parquet(cls, **kwargs):
--> 107 return cls.__engine._read_parquet(**kwargs)
108
109 @classmethod
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/data_management/factories.py in _read_parquet(cls, **kwargs)
46 @classmethod
47 def _read_parquet(cls, **kwargs):
---> 48 return cls.io_cls.read_parquet(**kwargs)
49
50 @classmethod
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/engines/base/io/file_reader.py in read(cls, *args, **kwargs)
27 @classmethod
28 def read(cls, *args, **kwargs):
---> 29 query_compiler = cls._read(*args, **kwargs)
30 # TODO (devin-petersohn): Make this section more general for non-pandas kernel
31 # implementations.
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/engines/base/io/column_stores/parquet_reader.py in _read(cls, path, engine, columns, **kwargs)
68 column_names = pd.schema.names
69 else:
---> 70 meta = ParquetFile(path).metadata
71 column_names = meta.schema.names
72 if meta is not None:
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/parquet.py in __init__(self, source, metadata, common_metadata, read_dictionary, memory_map, buffer_size)
135 self.reader.open(source, use_memory_map=memory_map,
136 buffer_size=buffer_size,
--> 137 read_dictionary=read_dictionary, metadata=metadata)
138 self.common_metadata = common_metadata
139 self._nested_paths_by_prefix = self._build_nested_paths()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/_parquet.pyx in pyarrow._parquet.ParquetReader.open()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_reader()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib._get_native_file()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile.__cinit__()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile._open_readable()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status()
FileNotFoundError: [Errno 2] Failed to open local file s3://bucket_name/data/dataframe.snappy.parquet'. Detail: [errno 2] No such file or directory
|
FileNotFoundError
|
def _filter_empties(self):
"""Removes empty partitions to avoid triggering excess computation."""
if len(self.axes[0]) == 0 or len(self.axes[1]) == 0:
# This is the case for an empty frame. We don't want to completely remove
# all metadata and partitions so for the moment, we won't prune if the frame
# is empty.
# TODO: Handle empty dataframes better
return
self._partitions = np.array(
[
[
self._partitions[i][j]
for j in range(len(self._partitions[i]))
if j < len(self._column_widths) and self._column_widths[j] != 0
]
for i in range(len(self._partitions))
if i < len(self._row_lengths) and self._row_lengths[i] != 0
]
)
self._column_widths_cache = [w for w in self._column_widths if w != 0]
self._row_lengths_cache = [r for r in self._row_lengths if r != 0]
|
def _filter_empties(self):
"""Removes empty partitions to avoid triggering excess computation."""
if len(self.axes[0]) == 0 or len(self.axes[1]) == 0:
# This is the case for an empty frame. We don't want to completely remove
# all metadata and partitions so for the moment, we won't prune if the frame
# is empty.
# TODO: Handle empty dataframes better
return
self._partitions = np.array(
[
[
self._partitions[i][j]
for j in range(len(self._partitions[i]))
if j < len(self._column_widths) and self._column_widths[j] > 0
]
for i in range(len(self._partitions))
if i < len(self._row_lengths) and self._row_lengths[i] > 0
]
)
self._column_widths_cache = [w for w in self._column_widths if w > 0]
self._row_lengths_cache = [r for r in self._row_lengths if r > 0]
|
https://github.com/modin-project/modin/issues/1765
|
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-14-814dc08ef229> in <module>
----> 1 df2 = mpd.read_parquet(path)
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/pandas/io.py in read_parquet(path, engine, columns, **kwargs)
40 return DataFrame(
41 query_compiler=EngineDispatcher.read_parquet(
---> 42 path=path, columns=columns, engine=engine, **kwargs
43 )
44 )
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/data_management/dispatcher.py in read_parquet(cls, **kwargs)
105 @classmethod
106 def read_parquet(cls, **kwargs):
--> 107 return cls.__engine._read_parquet(**kwargs)
108
109 @classmethod
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/data_management/factories.py in _read_parquet(cls, **kwargs)
46 @classmethod
47 def _read_parquet(cls, **kwargs):
---> 48 return cls.io_cls.read_parquet(**kwargs)
49
50 @classmethod
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/engines/base/io/file_reader.py in read(cls, *args, **kwargs)
27 @classmethod
28 def read(cls, *args, **kwargs):
---> 29 query_compiler = cls._read(*args, **kwargs)
30 # TODO (devin-petersohn): Make this section more general for non-pandas kernel
31 # implementations.
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/engines/base/io/column_stores/parquet_reader.py in _read(cls, path, engine, columns, **kwargs)
68 column_names = pd.schema.names
69 else:
---> 70 meta = ParquetFile(path).metadata
71 column_names = meta.schema.names
72 if meta is not None:
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/parquet.py in __init__(self, source, metadata, common_metadata, read_dictionary, memory_map, buffer_size)
135 self.reader.open(source, use_memory_map=memory_map,
136 buffer_size=buffer_size,
--> 137 read_dictionary=read_dictionary, metadata=metadata)
138 self.common_metadata = common_metadata
139 self._nested_paths_by_prefix = self._build_nested_paths()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/_parquet.pyx in pyarrow._parquet.ParquetReader.open()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_reader()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib._get_native_file()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile.__cinit__()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile._open_readable()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status()
FileNotFoundError: [Errno 2] Failed to open local file s3://bucket_name/data/dataframe.snappy.parquet'. Detail: [errno 2] No such file or directory
|
FileNotFoundError
|
def _read(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a Modin DataFrame.
Modin only supports pyarrow engine for now.
Parameters
----------
path: str
The filepath of the parquet file in local filesystem or hdfs.
engine: 'pyarrow'
Parquet library to use
columns: list or None
If not None, only these columns will be read from the file.
kwargs: dict
Keyword arguments.
Returns
-------
PandasQueryCompiler
A new Query Compiler.
Notes
-----
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile, ParquetDataset
from modin.pandas.io import PQ_INDEX_REGEX
if isinstance(path, str) and os.path.isdir(path):
partitioned_columns = set()
directory = True
# We do a tree walk of the path directory because partitioned
# parquet directories have a unique column at each directory level.
# Thus, we can use os.walk(), which does a dfs search, to walk
# through the different columns that the data is partitioned on
for root, dir_names, files in os.walk(path):
if dir_names:
partitioned_columns.add(dir_names[0].split("=")[0])
if files:
# Metadata files, git files, .DSStore
if files[0][0] == ".":
continue
break
partitioned_columns = list(partitioned_columns)
if len(partitioned_columns):
ErrorMessage.default_to_pandas("Mixed Partitioning Columns in Parquet")
return cls.single_worker_read(
path, engine=engine, columns=columns, **kwargs
)
else:
directory = False
if not columns:
if directory:
# Path of the sample file that we will read to get the remaining columns
pd = ParquetDataset(path)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, str) and path.startswith("hdfs://"):
import fsspec.core
fs, path = fsspec.core.url_to_fs(path)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, s3fs.S3File) or (
isinstance(path, str) and path.startswith("s3://")
):
from botocore.exceptions import NoCredentialsError
if isinstance(path, s3fs.S3File):
bucket_path = path.url().split(".s3.amazonaws.com")
path = "s3://" + bucket_path[0].split("://")[1] + bucket_path[1]
try:
fs = s3fs.S3FileSystem()
pd = ParquetDataset(path, filesystem=fs)
except NoCredentialsError:
fs = s3fs.S3FileSystem(anon=True)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
else:
meta = ParquetFile(path).metadata
column_names = meta.schema.names
if meta is not None:
# This is how we convert the metadata from pyarrow to a python
# dictionary, from which we then get the index columns.
# We use these to filter out from the columns in the metadata since
# the pyarrow storage has no concept of row labels/index.
# This ensures that our metadata lines up with the partitions without
# extra communication steps once we `have done all the remote
# computation.
index_columns = eval(
meta.metadata[b"pandas"].replace(b"null", b"None")
).get("index_columns", [])
column_names = [c for c in column_names if c not in index_columns]
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
return cls.build_query_compiler(path, columns, **kwargs)
|
def _read(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a Modin DataFrame.
Modin only supports pyarrow engine for now.
Parameters
----------
path: str
The filepath of the parquet file in local filesystem or hdfs.
engine: 'pyarrow'
Parquet library to use
columns: list or None
If not None, only these columns will be read from the file.
kwargs: dict
Keyword arguments.
Returns
-------
PandasQueryCompiler
A new Query Compiler.
Notes
-----
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile, ParquetDataset
from modin.pandas.io import PQ_INDEX_REGEX
if os.path.isdir(path):
partitioned_columns = set()
directory = True
# We do a tree walk of the path directory because partitioned
# parquet directories have a unique column at each directory level.
# Thus, we can use os.walk(), which does a dfs search, to walk
# through the different columns that the data is partitioned on
for root, dir_names, files in os.walk(path):
if dir_names:
partitioned_columns.add(dir_names[0].split("=")[0])
if files:
# Metadata files, git files, .DSStore
if files[0][0] == ".":
continue
break
partitioned_columns = list(partitioned_columns)
if len(partitioned_columns):
ErrorMessage.default_to_pandas("Mixed Partitioning Columns in Parquet")
return cls.single_worker_read(
path, engine=engine, columns=columns, **kwargs
)
else:
directory = False
if not columns:
if directory:
# Path of the sample file that we will read to get the remaining columns
pd = ParquetDataset(path)
meta = pd.metadata
column_names = pd.schema.names
elif isinstance(path, str) and path.startswith("hdfs://"):
import fsspec.core
fs, path = fsspec.core.url_to_fs(path)
pd = ParquetDataset(path, filesystem=fs)
meta = pd.metadata
column_names = pd.schema.names
else:
meta = ParquetFile(path).metadata
column_names = meta.schema.names
if meta is not None:
# This is how we convert the metadata from pyarrow to a python
# dictionary, from which we then get the index columns.
# We use these to filter out from the columns in the metadata since
# the pyarrow storage has no concept of row labels/index.
# This ensures that our metadata lines up with the partitions without
# extra communication steps once we `have done all the remote
# computation.
index_columns = eval(
meta.metadata[b"pandas"].replace(b"null", b"None")
).get("index_columns", [])
column_names = [c for c in column_names if c not in index_columns]
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
return cls.build_query_compiler(path, columns, **kwargs)
|
https://github.com/modin-project/modin/issues/1765
|
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-14-814dc08ef229> in <module>
----> 1 df2 = mpd.read_parquet(path)
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/pandas/io.py in read_parquet(path, engine, columns, **kwargs)
40 return DataFrame(
41 query_compiler=EngineDispatcher.read_parquet(
---> 42 path=path, columns=columns, engine=engine, **kwargs
43 )
44 )
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/data_management/dispatcher.py in read_parquet(cls, **kwargs)
105 @classmethod
106 def read_parquet(cls, **kwargs):
--> 107 return cls.__engine._read_parquet(**kwargs)
108
109 @classmethod
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/data_management/factories.py in _read_parquet(cls, **kwargs)
46 @classmethod
47 def _read_parquet(cls, **kwargs):
---> 48 return cls.io_cls.read_parquet(**kwargs)
49
50 @classmethod
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/engines/base/io/file_reader.py in read(cls, *args, **kwargs)
27 @classmethod
28 def read(cls, *args, **kwargs):
---> 29 query_compiler = cls._read(*args, **kwargs)
30 # TODO (devin-petersohn): Make this section more general for non-pandas kernel
31 # implementations.
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/modin/engines/base/io/column_stores/parquet_reader.py in _read(cls, path, engine, columns, **kwargs)
68 column_names = pd.schema.names
69 else:
---> 70 meta = ParquetFile(path).metadata
71 column_names = meta.schema.names
72 if meta is not None:
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/parquet.py in __init__(self, source, metadata, common_metadata, read_dictionary, memory_map, buffer_size)
135 self.reader.open(source, use_memory_map=memory_map,
136 buffer_size=buffer_size,
--> 137 read_dictionary=read_dictionary, metadata=metadata)
138 self.common_metadata = common_metadata
139 self._nested_paths_by_prefix = self._build_nested_paths()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/_parquet.pyx in pyarrow._parquet.ParquetReader.open()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_reader()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib._get_native_file()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile.__cinit__()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile._open_readable()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status()
/opt/anaconda3/envs/myenv/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status()
FileNotFoundError: [Errno 2] Failed to open local file s3://bucket_name/data/dataframe.snappy.parquet'. Detail: [errno 2] No such file or directory
|
FileNotFoundError
|
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs).to_frame()
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series([df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan])
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(result.index[j : i + 1], reverse=not ascending):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index, columns=["__reduced__"])
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(
map_func, reduce_func, axis=0, preserve_index=False
)(self, **kwargs)
|
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series([df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan])
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(result.index[j : i + 1], reverse=not ascending):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs).to_frame()
|
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series([df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan])
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(result.index[j : i + 1], reverse=not ascending):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index, columns=["__reduced__"])
return sort_index_for_equal_values(result, ascending)
|
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series([df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan])
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(result.index[j : i + 1], reverse=not ascending):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
|
https://github.com/modin-project/modin/issues/1976
|
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(f"modin:\n{modin_df.sum(min_count=1)}")
File "modin\pandas\base.py", line 3512, in __str__
return repr(self)
File "modin\pandas\series.py", line 307, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "modin\pandas\base.py", line 108, in _build_repr_df
return self.iloc[indexer]._query_compiler.to_pandas()
File "modin\backends\pandas\query_compiler.py", line 191, in to_pandas
return self._modin_frame.to_pandas()
File "modin\engines\base\frame\data.py", line 1801, in to_pandas
"Internal and external indices do not match.",
File "modin\error_message.py", line 54, in catch_bugs_and_request_email
" caused this error.\n{}".format(extra_log)
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Internal and external indices do not match.
|
Exception
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.