after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def search(self, *args):
"""Searches package recipes and binaries in the local cache or in a remote.
If you provide a pattern, then it will search for existing package recipes matching it.
If a full reference is provided (pkg/0.1@user/channel) then the existing binary packages for
that reference will be displayed.
If no remote is specified, the serach will be done in the local cache.
Search is case sensitive, exact case has to be used. For case insensitive file systems, like
Windows, case sensitive search can be forced with '--case-sensitive'.
"""
parser = argparse.ArgumentParser(
description=self.search.__doc__, prog="conan search"
)
parser.add_argument(
"pattern_or_reference", nargs="?", help=_PATTERN_OR_REFERENCE_HELP
)
parser.add_argument(
"-o",
"--outdated",
default=False,
action="store_true",
help="Show only outdated from recipe packages",
)
parser.add_argument(
"-q", "--query", default=None, action=OnceArgument, help=_QUERY_HELP
)
parser.add_argument(
"-r",
"--remote",
action=OnceArgument,
help="Remote to search in. '-r all' searches all remotes",
)
parser.add_argument(
"--case-sensitive",
default=False,
action="store_true",
help="Make a case-sensitive search. Use it to guarantee case-sensitive "
"search in Windows or other case-insensitive file systems",
)
parser.add_argument(
"--raw",
default=False,
action="store_true",
help="Print just the list of recipes",
)
parser.add_argument(
"--table",
action=OnceArgument,
help="Outputs html file with a table of binaries. Only valid for a "
"reference search",
)
parser.add_argument(
"-j",
"--json",
default=None,
action=OnceArgument,
help="json file path where the search information will be written to",
)
args = parser.parse_args(*args)
if args.table and args.json:
raise ConanException("'--table' argument cannot be used together with '--json'")
try:
reference = ConanFileReference.loads(args.pattern_or_reference)
if "*" in reference:
# Fixes a version with only a wildcard (valid reference) but not real reference
# e.j: conan search lib/*@lasote/stable
reference = None
except (TypeError, ConanException):
reference = None
cwd = os.getcwd()
info = None
try:
if reference:
info = self._conan.search_packages(
reference, query=args.query, remote=args.remote, outdated=args.outdated
)
# search is done for one reference
self._outputer.print_search_packages(
info["results"], reference, args.query, args.table
)
else:
if args.table:
raise ConanException(
"'--table' argument can only be used with a reference"
)
self._check_query_parameter_and_get_reference(
args.pattern_or_reference, args.query
)
info = self._conan.search_recipes(
args.pattern_or_reference,
remote=args.remote,
case_sensitive=args.case_sensitive,
)
# Deprecate 2.0: Dirty check if search is done for all remotes or for remote "all"
try:
remote_all = self._conan.get_remote_by_name("all")
except NoRemoteAvailable:
remote_all = None
all_remotes_search = remote_all is None and args.remote == "all"
self._outputer.print_search_references(
info["results"], args.pattern_or_reference, args.raw, all_remotes_search
)
except ConanException as exc:
info = exc.info
raise
finally:
if args.json and info:
self._outputer.json_output(info, args.json, cwd)
|
def search(self, *args):
"""Searches package recipes and binaries in the local cache or in a remote.
If you provide a pattern, then it will search for existing package recipes matching it.
If a full reference is provided (pkg/0.1@user/channel) then the existing binary packages for
that reference will be displayed.
If no remote is specified, the serach will be done in the local cache.
Search is case sensitive, exact case has to be used. For case insensitive file systems, like
Windows, case sensitive search can be forced with '--case-sensitive'.
"""
parser = argparse.ArgumentParser(
description=self.search.__doc__, prog="conan search"
)
parser.add_argument(
"pattern_or_reference", nargs="?", help=_PATTERN_OR_REFERENCE_HELP
)
parser.add_argument(
"-o",
"--outdated",
default=False,
action="store_true",
help="Show only outdated from recipe packages",
)
parser.add_argument(
"-q", "--query", default=None, action=OnceArgument, help=_QUERY_HELP
)
parser.add_argument(
"-r",
"--remote",
action=OnceArgument,
help="Remote to search in. '-r all' searches all remotes",
)
parser.add_argument(
"--case-sensitive",
default=False,
action="store_true",
help="Make a case-sensitive search. Use it to guarantee case-sensitive "
"search in Windows or other case-insensitive file systems",
)
parser.add_argument(
"--raw",
default=False,
action="store_true",
help="Print just the list of recipes",
)
parser.add_argument(
"--table",
action=OnceArgument,
help="Outputs html file with a table of binaries. Only valid for a "
"reference search",
)
parser.add_argument(
"-j",
"--json",
default=None,
action=OnceArgument,
help="json file path where the search information will be written to",
)
args = parser.parse_args(*args)
if args.table and args.json:
raise ConanException("'--table' argument cannot be used together with '--json'")
try:
reference = ConanFileReference.loads(args.pattern_or_reference)
if "*" in reference:
# Fixes a version with only a wildcard (valid reference) but not real reference
# e.j: conan search lib/*@lasote/stable
reference = None
except (TypeError, ConanException):
reference = None
cwd = os.getcwd()
info = None
try:
if reference:
info = self._conan.search_packages(
reference, query=args.query, remote=args.remote, outdated=args.outdated
)
# search is done for one reference
self._outputer.print_search_packages(
info["results"], reference, args.query, args.table
)
else:
if args.table:
raise ConanException(
"'--table' argument can only be used with a reference"
)
self._check_query_parameter_and_get_reference(
args.pattern_or_reference, args.query
)
info = self._conan.search_recipes(
args.pattern_or_reference,
remote=args.remote,
case_sensitive=args.case_sensitive,
)
# Deprecate 2.0: Dirty check if search is done for all remotes or for remote "all"
remote_registry = RemoteRegistry(self._client_cache.registry, None)
all_remotes_search = (
"all" not in (r.name for r in remote_registry.remotes)
and args.remote == "all"
)
self._outputer.print_search_references(
info["results"], args.pattern_or_reference, args.raw, all_remotes_search
)
except ConanException as exc:
info = exc.info
raise
finally:
if args.json and info:
self._outputer.json_output(info, args.json, cwd)
|
https://github.com/conan-io/conan/issues/3041
|
$ conan --version
Conan version 1.4.4
$ conan search sdl2
Traceback (most recent call last):
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/client/remote_registry.py", line 66, in _load
contents = load(self._filename)
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/util/files.py", line 168, in load
with open(path, 'rb') as handle:
FileNotFoundError: [Errno 2] No such file or directory: '/Users/XXXX/.conan/registry.txt'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/client/command.py", line 1182, in run
method(args[0][1:])
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/client/command.py", line 861, in search
all_remotes_search = ("all" not in (r.name for r in remote_registry.remotes) and
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/client/remote_registry.py", line 86, in remotes
return list(self._remote_dict.values())
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/client/remote_registry.py", line 99, in _remote_dict
remotes, _ = self._load()
File "/usr/local/Cellar/conan/1.4.4/libexec/lib/python3.6/site-packages/conans/client/remote_registry.py", line 68, in _load
self._output.warn("Remotes registry file missing, creating default one in %s"
AttributeError: 'NoneType' object has no attribute 'warn'
ERROR: 'NoneType' object has no attribute 'warn'
|
FileNotFoundError
|
def get_scm(conanfile, src_folder):
data = getattr(conanfile, "scm", None)
if data is not None and isinstance(data, dict):
return SCM(data, src_folder)
else:
# not an instance of dict or None, skip SCM feature.
pass
|
def get_scm(conanfile, src_folder):
data = getattr(conanfile, "scm", None)
if data is not None:
return SCM(data, src_folder)
|
https://github.com/conan-io/conan/issues/3004
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/command.py", line 1182, in run
method(args[0][1:])
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/command.py", line 246, in create
test_build_folder=args.test_build_folder)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/conan_api.py", line 310, in create
self._user_io.out, self._client_cache)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/cmd/export.py", line 61, in cmd_export
_export_conanfile(conanfile_path, output, client_cache, conanfile, conan_ref, keep_source)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/cmd/export.py", line 136, in _export_conanfile
output, paths, conan_ref)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/cmd/export.py", line 107, in _capture_export_scm_data
scm = get_scm(conanfile, src_path)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/client/source.py", line 18, in get_scm
return SCM(data, src_folder)
File "/usr/local/lib/python2.7/dist-packages/conan-1.4.2-py2.7.egg/conans/model/scm.py", line 13, in __init__
self.type = data.get("type")
AttributeError: 'property' object has no attribute 'get'
|
AttributeError
|
def _link_folders(src, dst, linked_folders):
for linked_folder in linked_folders:
link = os.readlink(os.path.join(src, linked_folder))
dst_link = os.path.join(dst, linked_folder)
try:
# Remove the previous symlink
os.remove(dst_link)
except OSError:
pass
# link is a string relative to linked_folder
# e.j: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
mkdir(os.path.dirname(dst_link))
os.symlink(link, dst_link)
# Remove empty links
for linked_folder in linked_folders:
dst_link = os.path.join(dst, linked_folder)
abs_path = os.path.realpath(dst_link)
if not os.path.exists(abs_path):
os.remove(dst_link)
|
def _link_folders(src, dst, linked_folders):
for linked_folder in linked_folders:
link = os.readlink(os.path.join(src, linked_folder))
dst_link = os.path.join(dst, linked_folder)
try:
# Remove the previous symlink
os.remove(dst_link)
except OSError:
pass
# link is a string relative to linked_folder
# e.j: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
os.symlink(link, dst_link)
# Remove empty links
for linked_folder in linked_folders:
dst_link = os.path.join(dst, linked_folder)
abs_path = os.path.realpath(dst_link)
if not os.path.exists(abs_path):
os.remove(dst_link)
|
https://github.com/conan-io/conan/issues/2959
|
...
PROJECT: Generator txt created conanbuildinfo.txt
PROJECT: Generated conaninfo.txt
Traceback (most recent call last):
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/command.py", line 1182, in run
method(args[0][1:])
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/command.py", line 325, in install
install_folder=args.install_folder)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/conan_api.py", line 465, in install
no_imports=no_imports)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/manager.py", line 344, in install
run_imports(conanfile, install_folder, output)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/importer.py", line 82, in run_imports
conanfile.imports()
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/loader_parse.py", line 184, in imports
conan_file.copy(*import_params)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/importer.py", line 160, in __call__
excludes=excludes, keep_path=keep_path)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/file_copier.py", line 83, in __call__
self._link_folders(src, dst, link_folders)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/file_copier.py", line 149, in _link_folders
os.symlink(link, dst_link)
OSError: [Errno 2] No such file or directory
ERROR: [Errno 2] No such file or directory
|
OSError
|
def loads(cls, text):
result = []
for line in text.splitlines():
if not line.strip():
continue
name, value = line.split("=", 1)
result.append((name.strip(), value.strip()))
return cls.from_list(result)
|
def loads(cls, text):
result = []
for line in text.splitlines():
if not line.strip():
continue
name, value = line.split("=")
result.append((name.strip(), value.strip()))
return cls.from_list(result)
|
https://github.com/conan-io/conan/issues/2816
|
DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): artifactory.avira.org
DEBUG:urllib3.connectionpool:<artifactory-url> "GET /artifactory/api/conan/conan-local/v1/conans/<dependency-of-the-recipe>/download_urls HTTP/1.1" 200 None
DEBUG:urllib3.connectionpool:<artifactory-url> "GET /artifactory/api/conan/conan-local/v1/files/<dependency-of-the-recipe>/conaninfo.txt HTTP/1.1" 200 1197
Traceback (most recent call last):
File "/Users/user/venv/lib/python3.6/site-packages/conans/client/remote_manager.py", line 252, in _call_remote
return getattr(self._auth_manager, method)(*argc, **argv)
File "/Users/user/venv/lib/python3.6/site-packages/conans/client/rest/auth_manager.py", line 32, in wrapper
ret = func(self, *args, **kwargs)
File "/Users/user/venv/lib/python3.6/site-packages/conans/client/rest/auth_manager.py", line 157, in get_package_info
return self._rest_client.get_package_info(package_reference)
File "/Users/user/venv/lib/python3.6/site-packages/conans/client/rest/rest_client.py", line 132, in get_package_info
return ConanInfo.loads(contents[CONANINFO])
File "/Users/user/venv/lib/python3.6/site-packages/conans/model/info.py", line 264, in loads
result.settings = Values.loads(parser.settings)
File "/Users/user/venv/lib/python3.6/site-packages/conans/model/values.py", line 66, in loads
name, value = line.split("=")
ValueError: too many values to unpack (expected 2)
|
ValueError
|
def path_shortener(path, short_paths):
"""short_paths is 4-state:
False: Never shorten the path
True: Always shorten the path, create link if not existing
None: Use shorten path only if already exists, not create
"""
if short_paths is False or os.getenv("CONAN_USER_HOME_SHORT") == "None":
return path
link = os.path.join(path, CONAN_LINK)
if os.path.exists(link):
return load(link)
elif short_paths is None:
return path
short_home = os.getenv("CONAN_USER_HOME_SHORT")
if not short_home:
drive = os.path.splitdrive(path)[0]
short_home = drive + "/.conan"
mkdir(short_home)
# Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid
# access problems in cygwin/msys2 windows subsystems when using short_home folder
try:
username = os.getenv("USERDOMAIN")
domainname = (
"%s\%s" % (username, os.environ["USERNAME"])
if username
else os.environ["USERNAME"]
)
cmd = r'cacls %s /E /G "%s":F' % (short_home, domainname)
subprocess.check_output(
cmd, stderr=subprocess.STDOUT
) # Ignoring any returned output, make command quiet
except subprocess.CalledProcessError:
# cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
pass
redirect = tempfile.mkdtemp(dir=short_home, prefix="")
# This "1" is the way to have a non-existing directory, so commands like
# shutil.copytree() to it, works. It can be removed without compromising the
# temp folder generator and conan-links consistency
redirect = os.path.join(redirect, "1")
save(link, redirect)
return redirect
|
def path_shortener(path, short_paths):
"""short_paths is 4-state:
False: Never shorten the path
True: Always shorten the path, create link if not existing
None: Use shorten path only if already exists, not create
"""
if short_paths is False or os.getenv("CONAN_USER_HOME_SHORT") == "None":
return path
link = os.path.join(path, CONAN_LINK)
if os.path.exists(link):
return load(link)
elif short_paths is None:
return path
short_home = os.getenv("CONAN_USER_HOME_SHORT")
if not short_home:
drive = os.path.splitdrive(path)[0]
short_home = drive + "/.conan"
mkdir(short_home)
# Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid
# access problems in cygwin/msys2 windows subsystems when using short_home folder
try:
cmd = r'cacls %s /E /G "%s\%s":F' % (
short_home,
os.environ["USERDOMAIN"],
os.environ["USERNAME"],
)
subprocess.check_output(
cmd, stderr=subprocess.STDOUT
) # Ignoring any returned output, make command quiet
except subprocess.CalledProcessError as e:
# cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
pass
redirect = tempfile.mkdtemp(dir=short_home, prefix="")
# This "1" is the way to have a non-existing directory, so commands like
# shutil.copytree() to it, works. It can be removed without compromising the
# temp folder generator and conan-links consistency
redirect = os.path.join(redirect, "1")
save(link, redirect)
return redirect
|
https://github.com/conan-io/conan/issues/2761
|
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\conans\client\command.py", line 1187, in run
method(args[0][1:])
File "C:\Python27\lib\site-packages\conans\client\command.py", line 304, in install
install_folder=args.install_folder)
File "C:\Python27\lib\site-packages\conans\client\conan_api.py", line 61, in wrapper
return f(*args, **kwargs)
File "C:\Python27\lib\site-packages\conans\client\conan_api.py", line 444, in install
no_imports=no_imports)
File "C:\Python27\lib\site-packages\conans\client\manager.py", line 395, in install
installer.install(deps_graph, profile.build_requires, keep_build)
File "C:\Python27\lib\site-packages\conans\client\installer.py", line 262, in install
nodes_to_process = self._get_nodes(nodes_by_level, skip_private_nodes)
File "C:\Python27\lib\site-packages\conans\client\installer.py", line 501, in _get_nodes
check_outdated)
File "C:\Python27\lib\site-packages\conans\client\proxy.py", line 47, in package_available
package_folder = self._client_cache.package(package_ref, short_paths=short_paths)
File "C:\Python27\lib\site-packages\conans\paths.py", line 162, in package
return path_shortener(p, short_paths)
File "C:\Python27\lib\site-packages\conans\util\windows.py", line 57, in path_shortener
cmd = r'cacls %s /E /G "%s\%s":F' % (short_home, os.environ['USERDOMAIN'], os.environ['USERNAME'])
File "C:\Python27\lib\os.py", line 425, in __getitem__
return self.data[key.upper()]
KeyError: 'USERDOMAIN'
|
KeyError
|
def run_imports(conanfile, dest_folder, output):
if not hasattr(conanfile, "imports"):
return []
file_importer = _FileImporter(conanfile, dest_folder)
conanfile.copy = file_importer
conanfile.imports_folder = dest_folder
with get_env_context_manager(conanfile):
with tools.chdir(dest_folder):
conanfile.imports()
copied_files = file_importer.copied_files
_make_files_writable(copied_files)
import_output = ScopedOutput("%s imports()" % output.scope, output)
_report_save_manifest(copied_files, import_output, dest_folder, IMPORTS_MANIFESTS)
return copied_files
|
def run_imports(conanfile, dest_folder, output):
if not hasattr(conanfile, "imports"):
return []
file_importer = _FileImporter(conanfile, dest_folder)
conanfile.copy = file_importer
conanfile.imports_folder = dest_folder
with get_env_context_manager(conanfile):
with tools.chdir(dest_folder):
conanfile.imports()
copied_files = file_importer.copied_files
import_output = ScopedOutput("%s imports()" % output.scope, output)
_report_save_manifest(copied_files, import_output, dest_folder, IMPORTS_MANIFESTS)
return copied_files
|
https://github.com/conan-io/conan/issues/2441
|
conan install -u --build=outdated --set build_type=Release --set compiler=Visual Studio --set compiler.runtime=MT D:\dev\ruggedsw\base\dds\test
PROJECT: Installing D:\dev\ruggedsw\base\dds\test\conanfile.txt
Requirements
Boost/1.66.0-0@rugged/stable from shuttle
google.test/1.8.0-0@rugged/stable from shuttle
rugged.base/develop@demo/testing from local
rugged.cmake/0.2.0@rugged/stable from shuttle
rugged.dds/develop@demo/testing from local
rugged.idl.coredx.cpp/develop@demo/testing from local
twinoaks.coredx/4.0.16-0@rugged/stable from shuttle
twinoaks.coredx.license/latest@rugged/stable from shuttle
Packages
Boost/1.66.0-0@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
google.test/1.8.0-0@rugged/stable:7ce94352b9d6c95dd4f54be06f40814de83033cc
rugged.base/develop@demo/testing:0360a5f1de3610acad6fc54319fae7f93d69080f
rugged.cmake/0.2.0@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
rugged.dds/develop@demo/testing:be478f16b38bce7c5b9b377d28de2be18dd0f742
rugged.idl.coredx.cpp/develop@demo/testing:cf3c4eea01e24ccdb48737197cec263338b785dc
twinoaks.coredx/4.0.16-0@rugged/stable:7bd6f2c3d5c4e48a75805376b58cde753392f711
twinoaks.coredx.license/latest@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
Boost/1.66.0-0@rugged/stable: Package is up to date
google.test/1.8.0-0@rugged/stable: Package is up to date
rugged.cmake/0.2.0@rugged/stable: Package is up to date
twinoaks.coredx/4.0.16-0@rugged/stable: Package is up to date
twinoaks.coredx.license/latest@rugged/stable: Package is up to date
rugged.base/develop@demo/testing: Package is up to date
rugged.idl.coredx.cpp/develop@demo/testing: Package is up to date
rugged.dds/develop@demo/testing: Package is up to date
Boost/1.66.0-0@rugged/stable: Already installed!
google.test/1.8.0-0@rugged/stable: Already installed!
rugged.cmake/0.2.0@rugged/stable: Already installed!
twinoaks.coredx/4.0.16-0@rugged/stable: Already installed!
twinoaks.coredx.license/latest@rugged/stable: Already installed!
rugged.base/develop@demo/testing: Already installed!
rugged.idl.coredx.cpp/develop@demo/testing: Already installed!
rugged.dds/develop@demo/testing: Already installed!
PROJECT: Generator cmake created conanbuildinfo.cmake
PROJECT: Generator txt created conanbuildinfo.txt
PROJECT: Generated conaninfo.txt
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\model\ref.py", line 70, in loads
name, version, user, channel = tokens
ValueError: not enough values to unpack (expected 4, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 243, in install
reference = ConanFileReference.loads(args.path)
File "c:\program files\python35\lib\site-packages\conans\model\ref.py", line 73, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference D:\dev\ruggedsw\base\dds\test
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 1099, in run
method(args[0][1:])
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 254, in install
install_folder=args.install_folder)
File "c:\program files\python35\lib\site-packages\conans\client\conan_api.py", line 63, in wrapper
return f(*args, **kwargs)
File "c:\program files\python35\lib\site-packages\conans\client\conan_api.py", line 402, in install
no_imports=no_imports)
File "c:\program files\python35\lib\site-packages\conans\client\manager.py", line 385, in install
run_imports(conanfile, install_folder, output)
File "c:\program files\python35\lib\site-packages\conans\client\importer.py", line 72, in run_imports
conanfile.imports()
File "c:\program files\python35\lib\site-packages\conans\client\loader_parse.py", line 175, in imports
conan_file.copy(*import_params)
File "c:\program files\python35\lib\site-packages\conans\client\importer.py", line 139, in __call__
excludes=excludes)
File "c:\program files\python35\lib\site-packages\conans\client\file_copier.py", line 77, in __call__
copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
File "c:\program files\python35\lib\site-packages\conans\client\file_copier.py", line 171, in _copy_files
shutil.copy2(abs_src_name, abs_dst_name)
File "c:\program files\python35\lib\shutil.py", line 251, in copy2
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "c:\program files\python35\lib\shutil.py", line 115, in copyfile
with open(dst, 'wb') as fdst:
PermissionError: [Errno 13] Permission denied: 'D:\\dev\\ruggedsw\\base\\dds\\test\\build.vs.Release.Static\\CoreDXeval.lic'
ERROR: [Errno 13] Permission denied: 'D:\\dev\\ruggedsw\\base\\dds\\test\\build.vs.Release.Static\\CoreDXeval.lic'
|
ValueError
|
def run_deploy(conanfile, install_folder, output):
deploy_output = ScopedOutput("%s deploy()" % output.scope, output)
file_importer = _FileImporter(conanfile, install_folder)
package_copied = set()
# This is necessary to capture FileCopier full destination paths
# Maybe could be improved in FileCopier
def file_copier(*args, **kwargs):
file_copy = FileCopier(conanfile.package_folder, install_folder)
copied = file_copy(*args, **kwargs)
_make_files_writable(copied)
package_copied.update(copied)
conanfile.copy_deps = file_importer
conanfile.copy = file_copier
conanfile.install_folder = install_folder
with get_env_context_manager(conanfile):
with tools.chdir(install_folder):
conanfile.deploy()
copied_files = file_importer.copied_files
copied_files.update(package_copied)
_report_save_manifest(
copied_files, deploy_output, install_folder, "deploy_manifest.txt"
)
|
def run_deploy(conanfile, install_folder, output):
deploy_output = ScopedOutput("%s deploy()" % output.scope, output)
file_importer = _FileImporter(conanfile, install_folder)
package_copied = set()
# This is necessary to capture FileCopier full destination paths
# Maybe could be improved in FileCopier
def file_copier(*args, **kwargs):
file_copy = FileCopier(conanfile.package_folder, install_folder)
copied = file_copy(*args, **kwargs)
package_copied.update(copied)
conanfile.copy_deps = file_importer
conanfile.copy = file_copier
conanfile.install_folder = install_folder
with get_env_context_manager(conanfile):
with tools.chdir(install_folder):
conanfile.deploy()
copied_files = file_importer.copied_files
copied_files.update(package_copied)
_report_save_manifest(
copied_files, deploy_output, install_folder, "deploy_manifest.txt"
)
|
https://github.com/conan-io/conan/issues/2441
|
conan install -u --build=outdated --set build_type=Release --set compiler=Visual Studio --set compiler.runtime=MT D:\dev\ruggedsw\base\dds\test
PROJECT: Installing D:\dev\ruggedsw\base\dds\test\conanfile.txt
Requirements
Boost/1.66.0-0@rugged/stable from shuttle
google.test/1.8.0-0@rugged/stable from shuttle
rugged.base/develop@demo/testing from local
rugged.cmake/0.2.0@rugged/stable from shuttle
rugged.dds/develop@demo/testing from local
rugged.idl.coredx.cpp/develop@demo/testing from local
twinoaks.coredx/4.0.16-0@rugged/stable from shuttle
twinoaks.coredx.license/latest@rugged/stable from shuttle
Packages
Boost/1.66.0-0@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
google.test/1.8.0-0@rugged/stable:7ce94352b9d6c95dd4f54be06f40814de83033cc
rugged.base/develop@demo/testing:0360a5f1de3610acad6fc54319fae7f93d69080f
rugged.cmake/0.2.0@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
rugged.dds/develop@demo/testing:be478f16b38bce7c5b9b377d28de2be18dd0f742
rugged.idl.coredx.cpp/develop@demo/testing:cf3c4eea01e24ccdb48737197cec263338b785dc
twinoaks.coredx/4.0.16-0@rugged/stable:7bd6f2c3d5c4e48a75805376b58cde753392f711
twinoaks.coredx.license/latest@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
Boost/1.66.0-0@rugged/stable: Package is up to date
google.test/1.8.0-0@rugged/stable: Package is up to date
rugged.cmake/0.2.0@rugged/stable: Package is up to date
twinoaks.coredx/4.0.16-0@rugged/stable: Package is up to date
twinoaks.coredx.license/latest@rugged/stable: Package is up to date
rugged.base/develop@demo/testing: Package is up to date
rugged.idl.coredx.cpp/develop@demo/testing: Package is up to date
rugged.dds/develop@demo/testing: Package is up to date
Boost/1.66.0-0@rugged/stable: Already installed!
google.test/1.8.0-0@rugged/stable: Already installed!
rugged.cmake/0.2.0@rugged/stable: Already installed!
twinoaks.coredx/4.0.16-0@rugged/stable: Already installed!
twinoaks.coredx.license/latest@rugged/stable: Already installed!
rugged.base/develop@demo/testing: Already installed!
rugged.idl.coredx.cpp/develop@demo/testing: Already installed!
rugged.dds/develop@demo/testing: Already installed!
PROJECT: Generator cmake created conanbuildinfo.cmake
PROJECT: Generator txt created conanbuildinfo.txt
PROJECT: Generated conaninfo.txt
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\model\ref.py", line 70, in loads
name, version, user, channel = tokens
ValueError: not enough values to unpack (expected 4, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 243, in install
reference = ConanFileReference.loads(args.path)
File "c:\program files\python35\lib\site-packages\conans\model\ref.py", line 73, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference D:\dev\ruggedsw\base\dds\test
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 1099, in run
method(args[0][1:])
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 254, in install
install_folder=args.install_folder)
File "c:\program files\python35\lib\site-packages\conans\client\conan_api.py", line 63, in wrapper
return f(*args, **kwargs)
File "c:\program files\python35\lib\site-packages\conans\client\conan_api.py", line 402, in install
no_imports=no_imports)
File "c:\program files\python35\lib\site-packages\conans\client\manager.py", line 385, in install
run_imports(conanfile, install_folder, output)
File "c:\program files\python35\lib\site-packages\conans\client\importer.py", line 72, in run_imports
conanfile.imports()
File "c:\program files\python35\lib\site-packages\conans\client\loader_parse.py", line 175, in imports
conan_file.copy(*import_params)
File "c:\program files\python35\lib\site-packages\conans\client\importer.py", line 139, in __call__
excludes=excludes)
File "c:\program files\python35\lib\site-packages\conans\client\file_copier.py", line 77, in __call__
copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
File "c:\program files\python35\lib\site-packages\conans\client\file_copier.py", line 171, in _copy_files
shutil.copy2(abs_src_name, abs_dst_name)
File "c:\program files\python35\lib\shutil.py", line 251, in copy2
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "c:\program files\python35\lib\shutil.py", line 115, in copyfile
with open(dst, 'wb') as fdst:
PermissionError: [Errno 13] Permission denied: 'D:\\dev\\ruggedsw\\base\\dds\\test\\build.vs.Release.Static\\CoreDXeval.lic'
ERROR: [Errno 13] Permission denied: 'D:\\dev\\ruggedsw\\base\\dds\\test\\build.vs.Release.Static\\CoreDXeval.lic'
|
ValueError
|
def file_copier(*args, **kwargs):
file_copy = FileCopier(conanfile.package_folder, install_folder)
copied = file_copy(*args, **kwargs)
_make_files_writable(copied)
package_copied.update(copied)
|
def file_copier(*args, **kwargs):
file_copy = FileCopier(conanfile.package_folder, install_folder)
copied = file_copy(*args, **kwargs)
package_copied.update(copied)
|
https://github.com/conan-io/conan/issues/2441
|
conan install -u --build=outdated --set build_type=Release --set compiler=Visual Studio --set compiler.runtime=MT D:\dev\ruggedsw\base\dds\test
PROJECT: Installing D:\dev\ruggedsw\base\dds\test\conanfile.txt
Requirements
Boost/1.66.0-0@rugged/stable from shuttle
google.test/1.8.0-0@rugged/stable from shuttle
rugged.base/develop@demo/testing from local
rugged.cmake/0.2.0@rugged/stable from shuttle
rugged.dds/develop@demo/testing from local
rugged.idl.coredx.cpp/develop@demo/testing from local
twinoaks.coredx/4.0.16-0@rugged/stable from shuttle
twinoaks.coredx.license/latest@rugged/stable from shuttle
Packages
Boost/1.66.0-0@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
google.test/1.8.0-0@rugged/stable:7ce94352b9d6c95dd4f54be06f40814de83033cc
rugged.base/develop@demo/testing:0360a5f1de3610acad6fc54319fae7f93d69080f
rugged.cmake/0.2.0@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
rugged.dds/develop@demo/testing:be478f16b38bce7c5b9b377d28de2be18dd0f742
rugged.idl.coredx.cpp/develop@demo/testing:cf3c4eea01e24ccdb48737197cec263338b785dc
twinoaks.coredx/4.0.16-0@rugged/stable:7bd6f2c3d5c4e48a75805376b58cde753392f711
twinoaks.coredx.license/latest@rugged/stable:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
Boost/1.66.0-0@rugged/stable: Package is up to date
google.test/1.8.0-0@rugged/stable: Package is up to date
rugged.cmake/0.2.0@rugged/stable: Package is up to date
twinoaks.coredx/4.0.16-0@rugged/stable: Package is up to date
twinoaks.coredx.license/latest@rugged/stable: Package is up to date
rugged.base/develop@demo/testing: Package is up to date
rugged.idl.coredx.cpp/develop@demo/testing: Package is up to date
rugged.dds/develop@demo/testing: Package is up to date
Boost/1.66.0-0@rugged/stable: Already installed!
google.test/1.8.0-0@rugged/stable: Already installed!
rugged.cmake/0.2.0@rugged/stable: Already installed!
twinoaks.coredx/4.0.16-0@rugged/stable: Already installed!
twinoaks.coredx.license/latest@rugged/stable: Already installed!
rugged.base/develop@demo/testing: Already installed!
rugged.idl.coredx.cpp/develop@demo/testing: Already installed!
rugged.dds/develop@demo/testing: Already installed!
PROJECT: Generator cmake created conanbuildinfo.cmake
PROJECT: Generator txt created conanbuildinfo.txt
PROJECT: Generated conaninfo.txt
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\model\ref.py", line 70, in loads
name, version, user, channel = tokens
ValueError: not enough values to unpack (expected 4, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 243, in install
reference = ConanFileReference.loads(args.path)
File "c:\program files\python35\lib\site-packages\conans\model\ref.py", line 73, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference D:\dev\ruggedsw\base\dds\test
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 1099, in run
method(args[0][1:])
File "c:\program files\python35\lib\site-packages\conans\client\command.py", line 254, in install
install_folder=args.install_folder)
File "c:\program files\python35\lib\site-packages\conans\client\conan_api.py", line 63, in wrapper
return f(*args, **kwargs)
File "c:\program files\python35\lib\site-packages\conans\client\conan_api.py", line 402, in install
no_imports=no_imports)
File "c:\program files\python35\lib\site-packages\conans\client\manager.py", line 385, in install
run_imports(conanfile, install_folder, output)
File "c:\program files\python35\lib\site-packages\conans\client\importer.py", line 72, in run_imports
conanfile.imports()
File "c:\program files\python35\lib\site-packages\conans\client\loader_parse.py", line 175, in imports
conan_file.copy(*import_params)
File "c:\program files\python35\lib\site-packages\conans\client\importer.py", line 139, in __call__
excludes=excludes)
File "c:\program files\python35\lib\site-packages\conans\client\file_copier.py", line 77, in __call__
copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
File "c:\program files\python35\lib\site-packages\conans\client\file_copier.py", line 171, in _copy_files
shutil.copy2(abs_src_name, abs_dst_name)
File "c:\program files\python35\lib\shutil.py", line 251, in copy2
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "c:\program files\python35\lib\shutil.py", line 115, in copyfile
with open(dst, 'wb') as fdst:
PermissionError: [Errno 13] Permission denied: 'D:\\dev\\ruggedsw\\base\\dds\\test\\build.vs.Release.Static\\CoreDXeval.lic'
ERROR: [Errno 13] Permission denied: 'D:\\dev\\ruggedsw\\base\\dds\\test\\build.vs.Release.Static\\CoreDXeval.lic'
|
ValueError
|
def search_packages(self, reference=None, remote=None, query=None, outdated=False):
"""Return the single information saved in conan.vars about all the packages
or the packages which match with a pattern
Attributes:
pattern = string to match packages
remote = search on another origin to get packages info
packages_pattern = String query with binary
packages properties: "arch=x86 AND os=Windows"
"""
if remote:
remote = RemoteRegistry(self._client_cache.registry, self._user_io.out).remote(
remote
)
packages_props = self._remote_manager.search_packages(remote, reference, query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
manifest = self._remote_manager.get_conan_digest(reference, remote)
recipe_hash = manifest.summary_hash
else:
searcher = DiskSearchManager(self._client_cache)
packages_props = searcher.search_packages(reference, query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
try:
recipe_hash = self._client_cache.load_manifest(reference).summary_hash
except IOError: # It could not exist in local
recipe_hash = None
if outdated and recipe_hash:
ordered_packages = filter_outdated(ordered_packages, recipe_hash)
return ordered_packages, reference, recipe_hash, query
|
def search_packages(self, reference=None, remote=None, query=None, outdated=False):
"""Return the single information saved in conan.vars about all the packages
or the packages which match with a pattern
Attributes:
pattern = string to match packages
remote = search on another origin to get packages info
packages_pattern = String query with binary
packages properties: "arch=x86 AND os=Windows"
"""
if remote:
remote = RemoteRegistry(self._client_cache.registry, self._user_io).remote(
remote
)
packages_props = self._remote_manager.search_packages(remote, reference, query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
manifest = self._remote_manager.get_conan_digest(reference, remote)
recipe_hash = manifest.summary_hash
else:
searcher = DiskSearchManager(self._client_cache)
packages_props = searcher.search_packages(reference, query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
try:
recipe_hash = self._client_cache.load_manifest(reference).summary_hash
except IOError: # It could not exist in local
recipe_hash = None
if outdated and recipe_hash:
ordered_packages = filter_outdated(ordered_packages, recipe_hash)
return ordered_packages, reference, recipe_hash, query
|
https://github.com/conan-io/conan/issues/2589
|
(conan) ~ $ conan search zlib/1.2.11@conan/stable -r=conan-center
Traceback (most recent call last):
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/command.py", line 1131, in run
method(args[0][1:])
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/command.py", line 814, in search
outdated=args.outdated)
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/conan_api.py", line 64, in wrapper
return f(*args, **kwargs)
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/conan_api.py", line 595, in search_packages
outdated=outdated)
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/cmd/search.py", line 44, in search_packages
remote = RemoteRegistry(self._client_cache.registry, self._user_io).remote(remote)
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/remote_registry.py", line 95, in remote
remotes, _ = self._load()
File "/home/mgodbolt/apps/miniconda/envs/conan/lib/python2.7/site-packages/conans/client/remote_registry.py", line 65, in _load
self._output.warn("Remotes registry file missing, creating default one in %s"
AttributeError: 'UserIO' object has no attribute 'warn'
ERROR: 'UserIO' object has no attribute 'warn'
|
AttributeError
|
def pyinstall(source_folder):
pyinstaller_path = os.path.join(os.getcwd(), "pyinstaller")
_install_pyintaller(pyinstaller_path)
for folder in ("conan", "conan_server", "conan_build_info"):
try:
shutil.rmtree(os.path.join(pyinstaller_path, folder))
except Exception as e:
print("Unable to remove old folder", e)
conan_path = os.path.join(source_folder, "conans", "conan.py")
conan_server_path = os.path.join(source_folder, "conans", "conan_server.py")
conan_build_info_path = os.path.join(source_folder, "conans/build_info/command.py")
hidden = "--hidden-import=glob --hidden-import=pylint.reporters.text"
if platform.system() != "Windows":
hidden += " --hidden-import=setuptools.msvc"
subprocess.call(
"python pyinstaller.py -y -p %s --console %s %s"
% (source_folder, conan_path, hidden),
cwd=pyinstaller_path,
shell=True,
)
_run_bin(pyinstaller_path)
subprocess.call(
"python pyinstaller.py -y -p %s --console %s"
% (source_folder, conan_server_path),
cwd=pyinstaller_path,
shell=True,
)
subprocess.call(
"python pyinstaller.py -y -p %s --console %s -n conan_build_info"
% (source_folder, conan_build_info_path),
cwd=pyinstaller_path,
shell=True,
)
conan_bin = os.path.join(pyinstaller_path, "conan", "dist", "conan")
conan_server_folder = os.path.join(
pyinstaller_path, "conan_server", "dist", "conan_server"
)
conan_build_info_folder = os.path.join(
pyinstaller_path, "conan_build_info", "dist", "conan_build_info"
)
dir_util.copy_tree(conan_server_folder, conan_bin)
dir_util.copy_tree(conan_build_info_folder, conan_bin)
_run_bin(pyinstaller_path)
return os.path.abspath(os.path.join(pyinstaller_path, "conan", "dist", "conan"))
|
def pyinstall(source_folder):
pyinstaller_path = os.path.join(os.getcwd(), "pyinstaller")
_install_pyintaller(pyinstaller_path)
for folder in ("conan", "conan_server", "conan_build_info"):
try:
shutil.rmtree(os.path.join(pyinstaller_path, folder))
except Exception as e:
print("Unable to remove old folder", e)
conan_path = os.path.join(source_folder, "conans", "conan.py")
conan_server_path = os.path.join(source_folder, "conans", "conan_server.py")
conan_build_info_path = os.path.join(source_folder, "conans/build_info/command.py")
hidden = "--hidden-import=glob"
if platform.system() != "Windows":
hidden += " --hidden-import=setuptools.msvc"
subprocess.call(
"python pyinstaller.py -y -p %s --console %s %s"
% (source_folder, conan_path, hidden),
cwd=pyinstaller_path,
shell=True,
)
_run_bin(pyinstaller_path)
subprocess.call(
"python pyinstaller.py -y -p %s --console %s"
% (source_folder, conan_server_path),
cwd=pyinstaller_path,
shell=True,
)
subprocess.call(
"python pyinstaller.py -y -p %s --console %s -n conan_build_info"
% (source_folder, conan_build_info_path),
cwd=pyinstaller_path,
shell=True,
)
conan_bin = os.path.join(pyinstaller_path, "conan", "dist", "conan")
conan_server_folder = os.path.join(
pyinstaller_path, "conan_server", "dist", "conan_server"
)
conan_build_info_folder = os.path.join(
pyinstaller_path, "conan_build_info", "dist", "conan_build_info"
)
dir_util.copy_tree(conan_server_folder, conan_bin)
dir_util.copy_tree(conan_build_info_folder, conan_bin)
_run_bin(pyinstaller_path)
return os.path.abspath(os.path.join(pyinstaller_path, "conan", "dist", "conan"))
|
https://github.com/conan-io/conan/issues/1868
|
~/test $ conan new Hello/0.1 -t
File saved: conanfile.py
File saved: test_package/CMakeLists.txt
File saved: test_package/conanfile.py
File saved: test_package/example.cpp
~/test $ conan create demo/testing
Hello/0.1@demo/testing: Exporting package recipe
Traceback (most recent call last):
File "conan/conans/client/command.py", line 895, in run
File "conan/conans/client/command.py", line 184, in create
File "conan/conans/client/conan_api.py", line 63, in wrapper
File "conan/conans/client/conan_api.py", line 292, in create
File "conan/conans/client/manager.py", line 166, in export
File "conan/conans/client/linter.py", line 21, in conan_linter
File "conan/conans/client/linter.py", line 68, in _lint_py3
File "conan/conans/client/linter.py", line 51, in _runner
File "pylint/lint.py", line 1220, in __init__
File "pylint/lint.py", line 458, in load_default_plugins
File "pylint/lint.py", line 478, in _load_reporter
File "astroid/modutils.py", line 437, in get_module_part
File "astroid/modutils.py", line 338, in file_from_modpath
File "astroid/modutils.py", line 383, in file_info_from_modpath
File "astroid/modutils.py", line 603, in _spec_from_modpath
File "astroid/interpreter/_import/spec.py", line 279, in find_spec
File "astroid/interpreter/_import/spec.py", line 246, in _find_spec_with_path
ImportError: No module named text
ERROR: No module named text
|
ImportError
|
def create(
self,
profile_name=None,
settings=None,
options=None,
env=None,
scope=None,
test_folder=None,
not_export=False,
build=None,
keep_source=False,
verify=None,
manifests=None,
manifests_interactive=None,
remote=None,
update=False,
cwd=None,
user=None,
channel=None,
name=None,
version=None,
):
settings = settings or []
options = options or []
env = env or []
cwd = prepare_cwd(cwd)
if not name or not version:
conanfile_path = os.path.join(cwd, "conanfile.py")
conanfile = load_conanfile_class(conanfile_path)
name, version = conanfile.name, conanfile.version
if not name or not version:
raise ConanException("conanfile.py doesn't declare package name or version")
reference = ConanFileReference(name, version, user, channel)
scoped_output = ScopedOutput(str(reference), self._user_io.out)
# Forcing an export!
if not not_export:
scoped_output.highlight("Exporting package recipe")
self._manager.export(
user, channel, cwd, keep_source=keep_source, name=name, version=version
)
if build is None: # Not specified, force build the tested library
build = [name]
manifests = _parse_manifests_arguments(
verify, manifests, manifests_interactive, cwd
)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(
profile_name,
settings,
options,
env,
scope,
cwd,
self._client_cache.profiles_path,
)
self._manager.install(
reference=reference,
current_path=cwd,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
build_modes=build,
update=update,
)
test_folders = [test_folder] if test_folder else ["test_package", "test"]
for test_folder_name in test_folders:
test_folder = os.path.join(cwd, test_folder_name)
test_conanfile_path = os.path.join(test_folder, "conanfile.py")
if os.path.exists(test_conanfile_path):
break
else:
self._user_io.out.warn(
"test package folder not available, or it doesn't have "
"a conanfile.py\nIt is recommended to set a 'test_package' "
"while creating packages"
)
return
scoped_output.highlight("Testing with 'test_package'")
sha = hashlib.sha1("".join(options + settings).encode()).hexdigest()
build_folder = os.path.join(test_folder, "build", sha)
rmdir(build_folder)
test_conanfile = os.path.join(test_folder, CONANFILE)
self._manager.install(
inject_require=reference,
reference=test_folder,
current_path=build_folder,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
update=update,
generators=["txt"],
)
self._manager.build(
test_conanfile,
test_folder,
build_folder,
package_folder=None,
test=str(reference),
)
|
def create(
self,
profile_name=None,
settings=None,
options=None,
env=None,
scope=None,
test_folder=None,
not_export=False,
build=None,
keep_source=False,
verify=default_manifest_folder,
manifests=default_manifest_folder,
manifests_interactive=default_manifest_folder,
remote=None,
update=False,
cwd=None,
user=None,
channel=None,
name=None,
version=None,
):
settings = settings or []
options = options or []
env = env or []
cwd = prepare_cwd(cwd)
if not name or not version:
conanfile_path = os.path.join(cwd, "conanfile.py")
conanfile = load_conanfile_class(conanfile_path)
name, version = conanfile.name, conanfile.version
if not name or not version:
raise ConanException("conanfile.py doesn't declare package name or version")
reference = ConanFileReference(name, version, user, channel)
scoped_output = ScopedOutput(str(reference), self._user_io.out)
# Forcing an export!
if not not_export:
scoped_output.highlight("Exporting package recipe")
self._manager.export(
user, channel, cwd, keep_source=keep_source, name=name, version=version
)
if build is None: # Not specified, force build the tested library
build = [name]
manifests = _parse_manifests_arguments(
verify, manifests, manifests_interactive, cwd
)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(
profile_name,
settings,
options,
env,
scope,
cwd,
self._client_cache.profiles_path,
)
self._manager.install(
reference=reference,
current_path=cwd,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
build_modes=build,
update=update,
)
test_folders = [test_folder] if test_folder else ["test_package", "test"]
for test_folder_name in test_folders:
test_folder = os.path.join(cwd, test_folder_name)
test_conanfile_path = os.path.join(test_folder, "conanfile.py")
if os.path.exists(test_conanfile_path):
break
else:
self._user_io.out.warn(
"test package folder not available, or it doesn't have "
"a conanfile.py\nIt is recommended to set a 'test_package' "
"while creating packages"
)
return
scoped_output.highlight("Testing with 'test_package'")
sha = hashlib.sha1("".join(options + settings).encode()).hexdigest()
build_folder = os.path.join(test_folder, "build", sha)
rmdir(build_folder)
test_conanfile = os.path.join(test_folder, CONANFILE)
self._manager.install(
inject_require=reference,
reference=test_folder,
current_path=build_folder,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
remote=remote,
profile=profile,
update=update,
generators=["txt"],
)
self._manager.build(
test_conanfile,
test_folder,
build_folder,
package_folder=None,
test=str(reference),
)
|
https://github.com/conan-io/conan/issues/1689
|
Traceback (most recent call last):
File "./uploadRecipe.py", line 93, in <module>
conan.create(profile_name=args.profile, user=args.user, channel=args.channel, build='missing', cwd=args.recipe)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 56, in wrapper
return f(*args, **kwargs)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 288, in create
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 656, in _parse_manifests_arguments
raise ConanException("Do not specify both manifests and "
conans.errors.ConanException: Do not specify both manifests and manifests-interactive arguments
|
conans.errors.ConanException
|
def install(
self,
reference="",
package=None,
settings=None,
options=None,
env=None,
scope=None,
all=False,
remote=None,
werror=False,
verify=None,
manifests=None,
manifests_interactive=None,
build=None,
profile_name=None,
update=False,
generator=None,
no_imports=False,
filename=None,
cwd=None,
):
self._user_io.out.werror_active = werror
cwd = prepare_cwd(cwd)
try:
ref = ConanFileReference.loads(reference)
except:
ref = os.path.normpath(os.path.join(cwd, reference))
if all or package: # Install packages without settings (fixed ids or all)
if all:
package = []
if not reference or not isinstance(ref, ConanFileReference):
raise ConanException(
"Invalid package recipe reference. e.g., MyPackage/1.2@user/channel"
)
self._manager.download(ref, package, remote=remote)
else: # Classic install, package chosen with settings and options
manifests = _parse_manifests_arguments(
verify, manifests, manifests_interactive, cwd
)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(
profile_name,
settings,
options,
env,
scope,
cwd,
self._client_cache.profiles_path,
)
self._manager.install(
reference=ref,
current_path=cwd,
remote=remote,
profile=profile,
build_modes=build,
filename=filename,
update=update,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generator,
no_imports=no_imports,
)
|
def install(
self,
reference="",
package=None,
settings=None,
options=None,
env=None,
scope=None,
all=False,
remote=None,
werror=False,
verify=default_manifest_folder,
manifests=default_manifest_folder,
manifests_interactive=default_manifest_folder,
build=None,
profile_name=None,
update=False,
generator=None,
no_imports=False,
filename=None,
cwd=None,
):
self._user_io.out.werror_active = werror
cwd = prepare_cwd(cwd)
try:
ref = ConanFileReference.loads(reference)
except:
ref = os.path.normpath(os.path.join(cwd, reference))
if all or package: # Install packages without settings (fixed ids or all)
if all:
package = []
if not reference or not isinstance(ref, ConanFileReference):
raise ConanException(
"Invalid package recipe reference. e.g., MyPackage/1.2@user/channel"
)
self._manager.download(ref, package, remote=remote)
else: # Classic install, package chosen with settings and options
manifests = _parse_manifests_arguments(
verify, manifests, manifests_interactive, cwd
)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(
profile_name,
settings,
options,
env,
scope,
cwd,
self._client_cache.profiles_path,
)
self._manager.install(
reference=ref,
current_path=cwd,
remote=remote,
profile=profile,
build_modes=build,
filename=filename,
update=update,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generator,
no_imports=no_imports,
)
|
https://github.com/conan-io/conan/issues/1689
|
Traceback (most recent call last):
File "./uploadRecipe.py", line 93, in <module>
conan.create(profile_name=args.profile, user=args.user, channel=args.channel, build='missing', cwd=args.recipe)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 56, in wrapper
return f(*args, **kwargs)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 288, in create
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 656, in _parse_manifests_arguments
raise ConanException("Do not specify both manifests and "
conans.errors.ConanException: Do not specify both manifests and manifests-interactive arguments
|
conans.errors.ConanException
|
def _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd):
if manifests and manifests_interactive:
raise ConanException(
"Do not specify both manifests and manifests-interactive arguments"
)
if verify and (manifests or manifests_interactive):
raise ConanException(
"Do not specify both 'verify' and "
"'manifests' or 'manifests-interactive' arguments"
)
manifest_folder = verify or manifests or manifests_interactive
if manifest_folder:
if not os.path.isabs(manifest_folder):
if not cwd:
raise ConanException(
"'cwd' should be defined if the manifest folder is relative."
)
manifest_folder = os.path.join(cwd, manifest_folder)
manifest_verify = verify is not None
manifest_interactive = manifests_interactive is not None
else:
manifest_verify = manifest_interactive = False
return manifest_folder, manifest_interactive, manifest_verify
|
def _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd):
if manifests and manifests_interactive:
raise ConanException(
"Do not specify both manifests and manifests-interactive arguments"
)
if verify and (manifests or manifests_interactive):
raise ConanException(
"Do not specify both 'verify' and "
"'manifests' or 'manifests-interactive' arguments"
)
manifest_folder = verify or manifests or manifests_interactive
if manifest_folder:
if not os.path.isabs(manifest_folder):
manifest_folder = os.path.join(cwd, manifest_folder)
manifest_verify = verify is not None
manifest_interactive = manifests_interactive is not None
else:
manifest_verify = manifest_interactive = False
return manifest_folder, manifest_interactive, manifest_verify
|
https://github.com/conan-io/conan/issues/1689
|
Traceback (most recent call last):
File "./uploadRecipe.py", line 93, in <module>
conan.create(profile_name=args.profile, user=args.user, channel=args.channel, build='missing', cwd=args.recipe)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 56, in wrapper
return f(*args, **kwargs)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 288, in create
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
File "/opt/venv-conan3/lib/python3.5/site-packages/conans/client/conan_api.py", line 656, in _parse_manifests_arguments
raise ConanException("Do not specify both manifests and "
conans.errors.ConanException: Do not specify both manifests and manifests-interactive arguments
|
conans.errors.ConanException
|
def get_recipe_sources(
self, conan_reference, export_folder, export_sources_folder, remote
):
t1 = time.time()
def filter_function(urls):
file_url = urls.get(EXPORT_SOURCES_TGZ_NAME)
if file_url:
urls = {EXPORT_SOURCES_TGZ_NAME: file_url}
else:
return None
return urls
zipped_files = self._call_remote(
remote, "get_recipe", conan_reference, export_folder, filter_function
)
duration = time.time() - t1
log_recipe_sources_download(conan_reference, duration, remote, zipped_files)
if not zipped_files:
mkdir(export_sources_folder) # create the folder even if no source files
return
unzip_and_get_files(zipped_files, export_sources_folder, EXPORT_SOURCES_TGZ_NAME)
c_src_path = os.path.join(export_sources_folder, ".c_src")
if os.path.exists(c_src_path):
merge_directories(c_src_path, export_sources_folder)
rmdir(c_src_path)
for dirname, _, filenames in os.walk(export_sources_folder):
for fname in filenames:
touch(os.path.join(dirname, fname))
|
def get_recipe_sources(
self, conan_reference, export_folder, export_sources_folder, remote
):
t1 = time.time()
def filter_function(urls):
file_url = urls.get(EXPORT_SOURCES_TGZ_NAME)
if file_url:
urls = {EXPORT_SOURCES_TGZ_NAME: file_url}
else:
return None
return urls
zipped_files = self._call_remote(
remote, "get_recipe", conan_reference, export_folder, filter_function
)
duration = time.time() - t1
log_recipe_sources_download(conan_reference, duration, remote, zipped_files)
if not zipped_files:
mkdir(export_sources_folder) # create the folder even if no source files
return
unzip_and_get_files(zipped_files, export_sources_folder, EXPORT_SOURCES_TGZ_NAME)
c_src_path = os.path.join(export_sources_folder, ".c_src")
if os.path.exists(c_src_path):
merge_directories(c_src_path, export_sources_folder)
shutil.rmtree(c_src_path)
for dirname, _, filenames in os.walk(export_sources_folder):
for fname in filenames:
touch(os.path.join(dirname, fname))
|
https://github.com/conan-io/conan/issues/1693
|
PROJECT: Installed build requirements of: myOrg.SomeComponent/2.14.0-alpha-build.5@user/unstable
Downloading conan_sources.tgz
Traceback (most recent call last):
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\command.py", line 884, in run
method(args[0][1:])
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\command.py", line 268, in install
filename=args.file, cwd=args.cwd)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\conan_api.py", line 57, in wrapper
return f(*args, **kwargs)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\conan_api.py", line 399, in install
no_imports=no_imports)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\manager.py", line 347, in install
installer.install(deps_graph, current_path)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\installer.py", line 135, in install
self._build(nodes_by_level, skip_private_nodes, deps_graph)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\installer.py", line 222, in _build
self._remote_proxy.get_recipe_sources(conan_ref, conan_file.short_paths)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\proxy.py", line 123, in get_recipe_sources
current_remote)
File "d:\buildFolder\virtualenv\lib\site-packages\conans\client\remote_manager.py", line 199, in get_recipe_sources
shutil.rmtree(c_src_path)
File "d:\buildFolder\virtualenv\lib\shutil.py", line 478, in rmtree
return _rmtree_unsafe(path, onerror)
File "d:\buildFolder\virtualenv\lib\shutil.py", line 368, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "d:\buildFolder\virtualenv\lib\shutil.py", line 368, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "d:\buildFolder\virtualenv\lib\shutil.py", line 368, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "d:\buildFolder\virtualenv\lib\shutil.py", line 368, in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
File "d:\buildFolder\virtualenv\lib\shutil.py", line 373, in _rmtree_unsafe
onerror(os.unlink, fullname, sys.exc_info())
File "d:\buildFolder\virtualenv\lib\shutil.py", line 371, in _rmtree_unsafe
os.unlink(fullname)
PermissionError: [WinError 5] Access is denied: 'd:\\buildFolder\\data/.cn\\vy4r6yxd\\1\\.c_src\\SomeComponent\\doc\\QA\\Old\\Testdrehbuch-SomeProject.xls'
ERROR: [WinError 5] Access is denied: 'd:\\buildFolder\\data/.cn\\vy4r6yxd\\1\\.c_src\\SomeComponent\\doc\\QA\\Old\\Testdrehbuch-SomeProject.xls'
|
PermissionError
|
def loads(text):
"""parses a multiline text in the form
Package:option=value
other_option=3
OtherPack:opt3=12.1
"""
result = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
name, value = line.split("=", 1)
result.append((name.strip(), value.strip()))
return OptionsValues(result)
|
def loads(text):
"""parses a multiline text in the form
Package:option=value
other_option=3
OtherPack:opt3=12.1
"""
result = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
name, value = line.split("=")
result.append((name.strip(), value.strip()))
return OptionsValues(result)
|
https://github.com/conan-io/conan/issues/1296
|
Traceback (most recent call last):
File "/usr/local/bin/conan", line 9, in <module>
load_entry_point('conan==0.22.3', 'console_scripts', 'conan')()
File "/usr/local/lib/python3.5/dist-packages/conans/conan.py", line 6, in run
main(sys.argv[1:])
File "/usr/local/lib/python3.5/dist-packages/conans/client/command.py", line 1047, in main
error = command.run(args)
File "/usr/local/lib/python3.5/dist-packages/conans/client/command.py", line 862, in run
raise exc
File "/usr/local/lib/python3.5/dist-packages/conans/client/command.py", line 840, in run
method(args[0][1:])
File "/usr/local/lib/python3.5/dist-packages/conans/client/command.py", line 310, in install
no_imports=args.no_imports)
File "/usr/local/lib/python3.5/dist-packages/conans/client/manager.py", line 258, in install
conanfile = self._get_conanfile_object(loader, reference, filename, current_path)
File "/usr/local/lib/python3.5/dist-packages/conans/client/manager.py", line 147, in _get_conanfile_object
conanfile = loader.load_conan_txt(conan_path, output)
File "/usr/local/lib/python3.5/dist-packages/conans/client/loader.py", line 109, in load_conan_txt
conanfile = self._parse_conan_txt(contents, path, output)
File "/usr/local/lib/python3.5/dist-packages/conans/client/loader.py", line 129, in _parse_conan_txt
options = OptionsValues.loads(parser.options)
File "/usr/local/lib/python3.5/dist-packages/conans/model/options.py", line 242, in loads
name, value = line.split("=")
ValueError: too many values to unpack (expected 2)
|
ValueError
|
def export_conanfile(output, paths, conanfile, origin_folder, conan_ref, keep_source):
destination_folder = paths.export(conan_ref)
previous_digest = _init_export_folder(destination_folder)
execute_export(conanfile, origin_folder, destination_folder, output)
digest = FileTreeManifest.create(destination_folder)
save(os.path.join(destination_folder, CONAN_MANIFEST), str(digest))
if previous_digest and previous_digest == digest:
digest = previous_digest
output.info("The stored package has not changed")
modified_recipe = False
else:
output.success("A new %s version was exported" % CONANFILE)
output.info("Folder: %s" % destination_folder)
modified_recipe = True
source = paths.source(conan_ref, conanfile.short_paths)
dirty = os.path.join(source, DIRTY_FILE)
remove = False
if os.path.exists(dirty):
output.info("Source folder is dirty, forcing removal")
remove = True
elif modified_recipe and not keep_source and os.path.exists(source):
output.info("Package recipe modified in export, forcing source folder removal")
output.info("Use the --keep-source, -k option to skip it")
remove = True
if remove:
output.info("Removing 'source' folder, this can take a while for big packages")
try:
# remove only the internal
rmdir(source)
except BaseException as e:
output.error(
"Unable to delete source folder. Will be marked as dirty for deletion"
)
output.warn(str(e))
save(os.path.join(source, DIRTY_FILE), "")
|
def export_conanfile(output, paths, conanfile, origin_folder, conan_ref, keep_source):
destination_folder = paths.export(conan_ref)
previous_digest = _init_export_folder(destination_folder)
execute_export(conanfile, origin_folder, destination_folder, output)
digest = FileTreeManifest.create(destination_folder)
save(os.path.join(destination_folder, CONAN_MANIFEST), str(digest))
if previous_digest and previous_digest.file_sums == digest.file_sums:
digest = previous_digest
output.info("The stored package has not changed")
modified_recipe = False
else:
output.success("A new %s version was exported" % CONANFILE)
output.info("Folder: %s" % destination_folder)
modified_recipe = True
source = paths.source(conan_ref, conanfile.short_paths)
dirty = os.path.join(source, DIRTY_FILE)
remove = False
if os.path.exists(dirty):
output.info("Source folder is dirty, forcing removal")
remove = True
elif modified_recipe and not keep_source and os.path.exists(source):
output.info("Package recipe modified in export, forcing source folder removal")
output.info("Use the --keep-source, -k option to skip it")
remove = True
if remove:
output.info("Removing 'source' folder, this can take a while for big packages")
try:
# remove only the internal
rmdir(source)
except BaseException as e:
output.error(
"Unable to delete source folder. Will be marked as dirty for deletion"
)
output.warn(str(e))
save(os.path.join(source, DIRTY_FILE), "")
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def undo_imports(current_path, output):
manifest_path = os.path.join(current_path, IMPORTS_MANIFESTS)
try:
manifest_content = load(manifest_path)
except:
raise ConanException("Cannot load file %s" % manifest_path)
try:
manifest = FileTreeManifest.loads(manifest_content)
except:
raise ConanException("Wrong manifest file format %s" % manifest_path)
not_removed = 0
files = manifest.files()
for filepath in files:
if not os.path.exists(filepath):
output.warn("File doesn't exist: %s" % filepath)
continue
try:
os.remove(filepath)
except:
output.error("Cannot remove file (open or busy): %s" % filepath)
not_removed += 1
if not_removed:
raise ConanException("Cannot remove %s or more imported files" % not_removed)
output.success("Removed %s imported files" % (len(files)))
try:
os.remove(manifest_path)
output.success("Removed imports manifest file: %s" % manifest_path)
except:
raise ConanException(
"Cannot remove manifest file (open or busy): %s" % manifest_path
)
|
def undo_imports(current_path, output):
manifest_path = os.path.join(current_path, IMPORTS_MANIFESTS)
try:
manifest_content = load(manifest_path)
except:
raise ConanException("Cannot load file %s" % manifest_path)
try:
manifest = FileTreeManifest.loads(manifest_content)
except:
raise ConanException("Wrong manifest file format %s" % manifest_path)
not_removed = 0
for filepath, _ in manifest.file_sums.items():
if not os.path.exists(filepath):
output.warn("File doesn't exist: %s" % filepath)
continue
try:
os.remove(filepath)
except:
output.error("Cannot remove file (open or busy): %s" % filepath)
not_removed += 1
if not_removed:
raise ConanException("Cannot remove %s or more imported files" % not_removed)
output.success("Removed %s imported files" % (len(manifest.file_sums)))
try:
os.remove(manifest_path)
output.success("Removed imports manifest file: %s" % manifest_path)
except:
raise ConanException(
"Cannot remove manifest file (open or busy): %s" % manifest_path
)
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def _check(self, reference, manifest, remote, path):
if os.path.exists(path):
existing_manifest = FileTreeManifest.loads(load(path))
if existing_manifest == manifest:
self._log.append("Manifest for '%s': OK" % str(reference))
return
if self._verify:
raise ConanException(
"Modified or new manifest '%s' detected.\n"
"Remote: %s\nProject manifest doesn't match installed one"
% (str(reference), remote)
)
self._handle_add(reference, remote, manifest, path)
|
def _check(self, reference, manifest, remote, path):
if os.path.exists(path):
existing_manifest = FileTreeManifest.loads(load(path))
if existing_manifest.file_sums == manifest.file_sums:
self._log.append("Manifest for '%s': OK" % str(reference))
return
if self._verify:
raise ConanException(
"Modified or new manifest '%s' detected.\n"
"Remote: %s\nProject manifest doesn't match installed one"
% (str(reference), remote)
)
self._handle_add(reference, remote, manifest, path)
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def _match_manifests(self, read_manifest, expected_manifest, reference):
if read_manifest is None or read_manifest != expected_manifest:
raise ConanException(
"%s local cache package is corrupted: "
"some file hash doesn't match manifest" % (str(reference))
)
|
def _match_manifests(self, read_manifest, expected_manifest, reference):
if read_manifest is None or read_manifest.file_sums != expected_manifest.file_sums:
raise ConanException(
"%s local cache package is corrupted: "
"some file hash doesn't match manifest" % (str(reference))
)
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def get_package(self, package_ref, short_paths):
"""obtain a package, either from disk or retrieve from remotes if necessary
and not necessary to build
"""
output = ScopedOutput(str(package_ref.conan), self._out)
package_folder = self._client_cache.package(package_ref, short_paths=short_paths)
# Check current package status
if os.path.exists(package_folder):
if self._check_updates:
read_manifest = self._client_cache.load_package_manifest(package_ref)
try: # get_conan_digest can fail, not in server
upstream_manifest = self.get_package_digest(package_ref)
if upstream_manifest != read_manifest:
if upstream_manifest.time > read_manifest.time:
output.warn("Current package is older than remote upstream one")
if self._update:
output.warn(
"Removing it to retrieve or build an updated one"
)
rmdir(package_folder)
else:
output.warn("Current package is newer than remote upstream one")
except ConanException:
pass
installed = False
local_package = os.path.exists(package_folder)
if local_package:
output.info("Already installed!")
installed = True
log_package_got_from_local_cache(package_ref)
else:
installed = self._retrieve_remote_package(package_ref, package_folder, output)
self.handle_package_manifest(package_ref, installed)
return installed
|
def get_package(self, package_ref, short_paths):
"""obtain a package, either from disk or retrieve from remotes if necessary
and not necessary to build
"""
output = ScopedOutput(str(package_ref.conan), self._out)
package_folder = self._client_cache.package(package_ref, short_paths=short_paths)
# Check current package status
if os.path.exists(package_folder):
if self._check_updates:
read_manifest = self._client_cache.load_package_manifest(package_ref)
try: # get_conan_digest can fail, not in server
upstream_manifest = self.get_package_digest(package_ref)
if upstream_manifest.file_sums != read_manifest.file_sums:
if upstream_manifest.time > read_manifest.time:
output.warn("Current package is older than remote upstream one")
if self._update:
output.warn(
"Removing it to retrieve or build an updated one"
)
rmdir(package_folder)
else:
output.warn("Current package is newer than remote upstream one")
except ConanException:
pass
installed = False
local_package = os.path.exists(package_folder)
if local_package:
output.info("Already installed!")
installed = True
log_package_got_from_local_cache(package_ref)
else:
installed = self._retrieve_remote_package(package_ref, package_folder, output)
self.handle_package_manifest(package_ref, installed)
return installed
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def update_available(self, conan_reference):
"""Returns 0 if the conanfiles are equal, 1 if there is an update and -1 if
the local is newer than the remote"""
if not conan_reference:
return 0
read_manifest, _ = self._client_cache.conan_manifests(conan_reference)
if read_manifest:
try: # get_conan_digest can fail, not in server
upstream_manifest = self.get_conan_digest(conan_reference)
if upstream_manifest != read_manifest:
return 1 if upstream_manifest.time > read_manifest.time else -1
except ConanException:
pass
return 0
|
def update_available(self, conan_reference):
"""Returns 0 if the conanfiles are equal, 1 if there is an update and -1 if
the local is newer than the remote"""
if not conan_reference:
return 0
read_manifest, _ = self._client_cache.conan_manifests(conan_reference)
if read_manifest:
try: # get_conan_digest can fail, not in server
upstream_manifest = self.get_conan_digest(conan_reference)
if upstream_manifest.file_sums != read_manifest.file_sums:
return 1 if upstream_manifest.time > read_manifest.time else -1
except ConanException:
pass
return 0
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def upload_package(
self, package_reference, remote, retry, retry_wait, skip_upload=False
):
"""Will upload the package to the first remote"""
t1 = time.time()
# existing package, will use short paths if defined
package_folder = self._client_cache.package(package_reference, short_paths=None)
# Get all the files in that directory
files = gather_files(package_folder)
self._output.rewrite_line("Checking package integrity...")
if CONANINFO not in files or CONAN_MANIFEST not in files:
logger.error("Missing info or manifest in uploading files: %s" % (str(files)))
raise ConanException(
"Cannot upload corrupted package '%s'" % str(package_reference)
)
logger.debug("====> Time remote_manager build_files_set : %f" % (time.time() - t1))
# If package has been modified remove tgz to regenerate it
read_manifest, expected_manifest = self._client_cache.package_manifests(
package_reference
)
if read_manifest != expected_manifest:
self._output.writeln("")
diff = read_manifest.difference(expected_manifest)
for fname, (h1, h2) in diff.items():
self._output.warn(
"Mismatched checksum '%s' (manifest: %s, file: %s)" % (fname, h1, h2)
)
if PACKAGE_TGZ_NAME in files:
try:
tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME)
os.unlink(tgz_path)
except Exception:
pass
error_msg = os.linesep.join(
"Mismatched checksum '%s' (manifest: %s, file: %s)" % (fname, h1, h2)
for fname, (h1, h2) in diff.items()
)
logger.error("Manifests doesn't match!\n%s" % error_msg)
raise ConanException(
"Cannot upload corrupted package '%s'" % str(package_reference)
)
else:
self._output.rewrite_line("Package integrity OK!")
self._output.writeln("")
logger.debug(
"====> Time remote_manager check package integrity : %f" % (time.time() - t1)
)
the_files = compress_package_files(files, package_folder, self._output)
if not skip_upload:
tmp = self._call_remote(
remote, "upload_package", package_reference, the_files, retry, retry_wait
)
duration = time.time() - t1
log_package_upload(package_reference, duration, the_files, remote)
logger.debug("====> Time remote_manager upload_package: %f" % duration)
return tmp
else:
return None
|
def upload_package(
self, package_reference, remote, retry, retry_wait, skip_upload=False
):
"""Will upload the package to the first remote"""
t1 = time.time()
# existing package, will use short paths if defined
package_folder = self._client_cache.package(package_reference, short_paths=None)
# Get all the files in that directory
files = gather_files(package_folder)
self._output.rewrite_line("Checking package integrity...")
if CONANINFO not in files or CONAN_MANIFEST not in files:
logger.error("Missing info or manifest in uploading files: %s" % (str(files)))
raise ConanException(
"Cannot upload corrupted package '%s'" % str(package_reference)
)
logger.debug("====> Time remote_manager build_files_set : %f" % (time.time() - t1))
# If package has been modified remove tgz to regenerate it
read_manifest, expected_manifest = self._client_cache.package_manifests(
package_reference
)
if read_manifest is None or read_manifest.file_sums != expected_manifest.file_sums:
self._output.writeln("")
for fname in read_manifest.file_sums.keys():
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
self._output.warn(
"Mismatched checksum for file %s (checksum: %s, expected: %s)"
% (
fname,
read_manifest.file_sums[fname],
expected_manifest.file_sums[fname],
)
)
if PACKAGE_TGZ_NAME in files:
try:
tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME)
os.unlink(tgz_path)
except Exception:
pass
logger.error(
"Manifests doesn't match!: %s != %s"
% (str(read_manifest.file_sums), str(expected_manifest.file_sums))
)
raise ConanException(
"Cannot upload corrupted package '%s'" % str(package_reference)
)
else:
self._output.rewrite_line("Package integrity OK!")
self._output.writeln("")
logger.debug(
"====> Time remote_manager check package integrity : %f" % (time.time() - t1)
)
the_files = compress_package_files(files, package_folder, self._output)
if not skip_upload:
tmp = self._call_remote(
remote, "upload_package", package_reference, the_files, retry, retry_wait
)
duration = time.time() - t1
log_package_upload(package_reference, duration, the_files, remote)
logger.debug("====> Time remote_manager upload_package: %f" % duration)
return tmp
else:
return None
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def _check_recipe_date(self, conan_ref):
try:
remote_recipe_manifest = self._remote_proxy.get_conan_digest(conan_ref)
except NotFoundException:
return # First upload
local_manifest = self._paths.load_manifest(conan_ref)
if (
remote_recipe_manifest != local_manifest
and remote_recipe_manifest.time > local_manifest.time
):
raise ConanException(
"Remote recipe is newer than local recipe: "
"\n Remote date: %s\n Local date: %s"
% (remote_recipe_manifest.time, local_manifest.time)
)
|
def _check_recipe_date(self, conan_ref):
try:
remote_recipe_manifest = self._remote_proxy.get_conan_digest(conan_ref)
except NotFoundException:
return # First upload
local_manifest = self._paths.load_manifest(conan_ref)
if (
remote_recipe_manifest.file_sums != local_manifest.file_sums
and remote_recipe_manifest.time > local_manifest.time
):
raise ConanException(
"Remote recipe is newer than local recipe: "
"\n Remote date: %s\n Local date: %s"
% (remote_recipe_manifest.time, local_manifest.time)
)
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def __eq__(self, other):
"""Two manifests are equal if file_sums"""
return self.file_sums == other.file_sums
|
def __eq__(self, other):
return self.time == other.time and self.file_sums == other.file_sums
|
https://github.com/conan-io/conan/issues/1040
|
D:\slave\ws\cab\extern\boost\1.63.0@0\ws>conan.exe upload "Boost/1.63.0@cab/extern" --all --confirm -r bop --retry 3
Uploading Boost/1.63.0@cab/extern
Compressing recipe...
Uploading conanmanifest.txt
Uploading conan_export.tgz
Uploaded conan recipe 'Boost/1.63.0@cab/extern' to 'bop': http://conan.bop
Uploading package 1/5: 4f0fd3886115d238859ac2a3d41664e4236efc61
Checking package integrity...
Package integrity OK!
Compressing package...
Requesting upload permissions...
Requesting upload permissions...Done!
Uploading conan_package.tgz
Uploading package 2/5: 5d34366328ff5fe8b9f93b912e5855ffb5fda596
Checking package integrity...
Traceback (most recent call last):
File "C:\Python34\Scripts\conan-script.py", line 9, in <module>
load_entry_point('conan==0.19.2', 'console_scripts', 'conan')()
File "C:\Python34\lib\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 1065, in main
error = command.run(args)
File "C:\Python34\lib\site-packages\conans\client\command.py", line 979, in run
raise exc
File "C:\Python34\lib\site-packages\conans\client\command.py", line 959, in run
method(args[0][1:])
File "C:\Python34\lib\site-packages\conans\client\command.py", line 826, in upload
retry_wait=args.retry_wait)
File "C:\Python34\lib\site-packages\conans\client\manager.py", line 540, in upload
retry=retry, retry_wait=retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 41, in upload_conan
self._upload_conan(conan_ref, force, all_packages, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 56, in _upload_conan
retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\uploader.py", line 76, in upload_package
self._remote_proxy.upload_package(package_ref, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\proxy.py", line 295, in upload_package
result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait)
File "C:\Python34\lib\site-packages\conans\client\remote_manager.py", line 77, in upload_package
if read_manifest.file_sums[fname] != expected_manifest.file_sums[fname]:
KeyError: 'include/boost/asio/ip/unicast.hpp'
|
KeyError
|
def migrate_and_get_client_cache(base_folder, out, storage_folder=None):
# Init paths
client_cache = ClientCache(base_folder, storage_folder, out)
# Migration system
migrator = ClientMigrator(client_cache, Version(CLIENT_VERSION), out)
migrator.migrate()
return client_cache
|
def migrate_and_get_client_cache(base_folder, out, manager, storage_folder=None):
# Init paths
client_cache = ClientCache(base_folder, storage_folder, out)
# Migration system
migrator = ClientMigrator(client_cache, Version(CLIENT_VERSION), out, manager)
migrator.migrate()
# Init again paths, migration could change config
client_cache = ClientCache(base_folder, storage_folder, out)
return client_cache
|
https://github.com/conan-io/conan/issues/803
|
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 20, in init
statement.execute("PRAGMA auto_vacuum = INCREMENTAL;")
sqlite3.OperationalError: database is locked
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\Scripts\conan-script.py", line 11, in <module>
load_entry_point('conan==0.17.2', 'console_scripts', 'conan')()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 963, in main
command = get_command()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 950, in get_command
remote_manager = instance_remote_manager(client_cache)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 914, in instance_remote_manager
localdb = LocalDB(client_cache.localdb)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 14, in __init__
self.init()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 17, in init
SQLiteDB.init(self)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 22, in init
raise ConanException("Could not initialize local cache", e)
conans.errors.ConanException: ('Could not initialize local cache', OperationalError('database is locked',))
|
sqlite3.OperationalError
|
def get_command():
def instance_remote_manager(client_cache):
requester = requests.Session()
requester.proxies = client_cache.conan_config.proxies
# Verify client version against remotes
version_checker_requester = VersionCheckerRequester(
requester,
Version(CLIENT_VERSION),
Version(MIN_SERVER_COMPATIBLE_VERSION),
out,
)
# To handle remote connections
rest_api_client = RestApiClient(out, requester=version_checker_requester)
# To store user and token
localdb = LocalDB(client_cache.localdb)
# Wraps RestApiClient to add authentication support (same interface)
auth_manager = ConanApiAuthManager(rest_api_client, user_io, localdb)
# Handle remote connections
remote_manager = RemoteManager(client_cache, auth_manager, out)
return remote_manager
use_color = get_env("CONAN_COLOR_DISPLAY", 1)
if use_color and hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
import colorama
colorama.init()
color = True
else:
color = False
out = ConanOutput(sys.stdout, color)
user_io = UserIO(out=out)
user_folder = os.getenv("CONAN_USER_HOME", conan_expand_user("~"))
try:
client_cache = migrate_and_get_client_cache(user_folder, out)
except Exception as e:
out.error(str(e))
sys.exit(True)
# Get the new command instance after migrations have been done
remote_manager = instance_remote_manager(client_cache)
# Get a search manager
search_adapter = DiskSearchAdapter()
search_manager = DiskSearchManager(client_cache, search_adapter)
command = Command(
client_cache, user_io, ConanRunner(), remote_manager, search_manager
)
return command
|
def get_command():
def instance_remote_manager(client_cache):
requester = requests.Session()
requester.proxies = client_cache.conan_config.proxies
# Verify client version against remotes
version_checker_requester = VersionCheckerRequester(
requester,
Version(CLIENT_VERSION),
Version(MIN_SERVER_COMPATIBLE_VERSION),
out,
)
# To handle remote connections
rest_api_client = RestApiClient(out, requester=version_checker_requester)
# To store user and token
localdb = LocalDB(client_cache.localdb)
# Wraps RestApiClient to add authentication support (same interface)
auth_manager = ConanApiAuthManager(rest_api_client, user_io, localdb)
# Handle remote connections
remote_manager = RemoteManager(client_cache, auth_manager, out)
return remote_manager
use_color = get_env("CONAN_COLOR_DISPLAY", 1)
if use_color and hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
import colorama
colorama.init()
color = True
else:
color = False
out = ConanOutput(sys.stdout, color)
user_io = UserIO(out=out)
user_folder = os.getenv("CONAN_USER_HOME", conan_expand_user("~"))
try:
# To capture exceptions in conan.conf parsing
client_cache = ClientCache(user_folder, None, out)
# obtain a temp ConanManager instance to execute the migrations
remote_manager = instance_remote_manager(client_cache)
# Get a DiskSearchManager
search_adapter = DiskSearchAdapter()
search_manager = DiskSearchManager(client_cache, search_adapter)
manager = ConanManager(
client_cache, user_io, ConanRunner(), remote_manager, search_manager
)
client_cache = migrate_and_get_client_cache(user_folder, out, manager)
except Exception as e:
out.error(str(e))
sys.exit(True)
# Get the new command instance after migrations have been done
remote_manager = instance_remote_manager(client_cache)
# Get a search manager
search_adapter = DiskSearchAdapter()
search_manager = DiskSearchManager(client_cache, search_adapter)
command = Command(
client_cache, user_io, ConanRunner(), remote_manager, search_manager
)
return command
|
https://github.com/conan-io/conan/issues/803
|
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 20, in init
statement.execute("PRAGMA auto_vacuum = INCREMENTAL;")
sqlite3.OperationalError: database is locked
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\Scripts\conan-script.py", line 11, in <module>
load_entry_point('conan==0.17.2', 'console_scripts', 'conan')()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 963, in main
command = get_command()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 950, in get_command
remote_manager = instance_remote_manager(client_cache)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 914, in instance_remote_manager
localdb = LocalDB(client_cache.localdb)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 14, in __init__
self.init()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 17, in init
SQLiteDB.init(self)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 22, in init
raise ConanException("Could not initialize local cache", e)
conans.errors.ConanException: ('Could not initialize local cache', OperationalError('database is locked',))
|
sqlite3.OperationalError
|
def __init__(self, client_cache, current_version, out):
self.client_cache = client_cache
super(ClientMigrator, self).__init__(
client_cache.conan_folder, client_cache.store, current_version, out
)
|
def __init__(self, client_cache, current_version, out, manager):
self.client_cache = client_cache
self.manager = manager
super(ClientMigrator, self).__init__(
client_cache.conan_folder, client_cache.store, current_version, out
)
|
https://github.com/conan-io/conan/issues/803
|
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 20, in init
statement.execute("PRAGMA auto_vacuum = INCREMENTAL;")
sqlite3.OperationalError: database is locked
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\Scripts\conan-script.py", line 11, in <module>
load_entry_point('conan==0.17.2', 'console_scripts', 'conan')()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 963, in main
command = get_command()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 950, in get_command
remote_manager = instance_remote_manager(client_cache)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 914, in instance_remote_manager
localdb = LocalDB(client_cache.localdb)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 14, in __init__
self.init()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 17, in init
SQLiteDB.init(self)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 22, in init
raise ConanException("Could not initialize local cache", e)
conans.errors.ConanException: ('Could not initialize local cache', OperationalError('database is locked',))
|
sqlite3.OperationalError
|
def init(self, clean=False):
cursor = None
try:
cursor = self.connection.cursor()
if clean:
cursor.execute("drop table if exists %s" % REMOTES_USER_TABLE)
cursor.execute(
"create table if not exists %s "
"(remote_url TEXT UNIQUE, user TEXT, token TEXT)" % REMOTES_USER_TABLE
)
except Exception as e:
message = "Could not initialize local sqlite database"
raise ConanException(message, e)
finally:
if cursor:
cursor.close()
|
def init(self, clean=False):
SQLiteDB.init(self)
cursor = None
try:
cursor = self.connection.cursor()
# conan retrocompatibility
cursor.execute("drop table if exists %s" % USER_TABLE)
if clean:
cursor.execute("drop table if exists %s" % REMOTES_USER_TABLE)
cursor.execute(
"create table if not exists %s "
"(remote_url TEXT UNIQUE, user TEXT, token TEXT)" % REMOTES_USER_TABLE
)
except Exception as e:
message = "Could not initialize local sqlite database"
raise ConanException(message, e)
finally:
if cursor:
cursor.close()
|
https://github.com/conan-io/conan/issues/803
|
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 20, in init
statement.execute("PRAGMA auto_vacuum = INCREMENTAL;")
sqlite3.OperationalError: database is locked
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\Scripts\conan-script.py", line 11, in <module>
load_entry_point('conan==0.17.2', 'console_scripts', 'conan')()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\conan.py", line 6, in run
main(sys.argv[1:])
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 963, in main
command = get_command()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 950, in get_command
remote_manager = instance_remote_manager(client_cache)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\command.py", line 914, in instance_remote_manager
localdb = LocalDB(client_cache.localdb)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 14, in __init__
self.init()
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\localdb.py", line 17, in init
SQLiteDB.init(self)
File "C:\Users\sztomi\AppData\Roaming\Python\Python36\site-packages\conans\client\store\sqlite.py", line 22, in init
raise ConanException("Could not initialize local cache", e)
conans.errors.ConanException: ('Could not initialize local cache', OperationalError('database is locked',))
|
sqlite3.OperationalError
|
def _config_node(self, conanfile, conanref, down_reqs, down_ref, down_options):
"""update settings and option in the current ConanFile, computing actual
requirement values, cause they can be overriden by downstream requires
param settings: dict of settings values => {"os": "windows"}
"""
try:
if hasattr(conanfile, "config"):
if not conanref:
self._output.warn(
"config() has been deprecated. Use config_options and configure"
)
conanfile.config()
conanfile.config_options()
conanfile.options.propagate_upstream(
down_options, down_ref, conanref, self._output
)
if hasattr(conanfile, "config"):
conanfile.config()
conanfile.configure()
conanfile.settings.validate() # All has to be ok!
conanfile.options.validate()
# Update requirements (overwrites), computing new upstream
conanfile.requirements()
new_options = conanfile.options.values
new_down_reqs = conanfile.requires.update(
down_reqs, self._output, conanref, down_ref
)
except ConanException as e:
raise ConanException("%s: %s" % (conanref or "Conanfile", str(e)))
except Exception as e:
msg = format_conanfile_exception(
str(conanref or "Conanfile"), "config, config_options or configure", e
)
raise ConanException(msg)
return new_down_reqs, new_options
|
def _config_node(self, conanfile, conanref, down_reqs, down_ref, down_options):
"""update settings and option in the current ConanFile, computing actual
requirement values, cause they can be overriden by downstream requires
param settings: dict of settings values => {"os": "windows"}
"""
try:
conanfile.requires.output = self._output
if hasattr(conanfile, "config"):
if not conanref:
self._output.warn(
"config() has been deprecated. Use config_options and configure"
)
conanfile.config()
conanfile.config_options()
conanfile.options.propagate_upstream(
down_options, down_ref, conanref, self._output
)
if hasattr(conanfile, "config"):
conanfile.config()
conanfile.configure()
conanfile.settings.validate() # All has to be ok!
conanfile.options.validate()
# Update requirements (overwrites), computing new upstream
conanfile.requirements()
new_options = conanfile.options.values
new_down_reqs = conanfile.requires.update(
down_reqs, self._output, conanref, down_ref
)
except ConanException as e:
raise ConanException("%s: %s" % (conanref or "Conanfile", str(e)))
except Exception as e:
msg = format_conanfile_exception(
str(conanref or "Conanfile"), "config, config_options or configure", e
)
raise ConanException(msg)
return new_down_reqs, new_options
|
https://github.com/conan-io/conan/issues/757
|
Traceback (most recent call last):
File "<string>", line 10, in <module>
File "<string>", line 6, in run
File "conan\conans\client\command.py", line 931, in main
File "conan\conans\client\command.py", line 833, in run
File "conan\conans\client\command.py", line 441, in install
File "conan\conans\client\manager.py", line 276, in install
File "conan\conans\client\manager.py", line 176, in _get_graph
File "conan\conans\client\loader.py", line 181, in load_conan_txt
File "conan\conans\client\loader.py", line 193, in parse_conan_txt
File "conan\conans\model\requires.py", line 121, in add
AttributeError: 'NoneType' object has no attribute 'werror'
conan returned -1
|
AttributeError
|
def add(self, reference, private=False, override=False, dev=False):
"""to define requirements by the user in text, prior to any propagation"""
assert isinstance(reference, six.string_types)
if dev and not self.allow_dev:
return
conan_reference = ConanFileReference.loads(reference)
name = conan_reference.name
new_requirement = Requirement(conan_reference, private, override, dev)
old_requirement = self.get(name)
if old_requirement and old_requirement != new_requirement:
raise ConanException(
"Duplicated requirement %s != %s" % (old_requirement, new_requirement)
)
else:
self[name] = new_requirement
|
def add(self, reference, private=False, override=False, dev=False):
"""to define requirements by the user in text, prior to any propagation"""
assert isinstance(reference, six.string_types)
if dev and not self.allow_dev:
return
conan_reference = ConanFileReference.loads(reference)
name = conan_reference.name
new_requirement = Requirement(conan_reference, private, override, dev)
old_requirement = self.get(name)
if old_requirement and old_requirement != new_requirement:
self.output.werror(
"Duplicated requirement %s != %s" % (old_requirement, new_requirement)
)
else:
self[name] = new_requirement
|
https://github.com/conan-io/conan/issues/757
|
Traceback (most recent call last):
File "<string>", line 10, in <module>
File "<string>", line 6, in run
File "conan\conans\client\command.py", line 931, in main
File "conan\conans\client\command.py", line 833, in run
File "conan\conans\client\command.py", line 441, in install
File "conan\conans\client\manager.py", line 276, in install
File "conan\conans\client\manager.py", line 176, in _get_graph
File "conan\conans\client\loader.py", line 181, in load_conan_txt
File "conan\conans\client\loader.py", line 193, in parse_conan_txt
File "conan\conans\model\requires.py", line 121, in add
AttributeError: 'NoneType' object has no attribute 'werror'
conan returned -1
|
AttributeError
|
def _create_new_node(self, current_node, dep_graph, requirement, public_deps, name_req):
"""creates and adds a new node to the dependency graph"""
conanfile_path = self._retriever.get_recipe(requirement.conan_reference)
output = ScopedOutput(str(requirement.conan_reference), self._output)
dep_conanfile = self._loader.load_conan(conanfile_path, output)
if dep_conanfile:
new_node = Node(requirement.conan_reference, dep_conanfile)
dep_graph.add_node(new_node)
dep_graph.add_edge(current_node, new_node)
if not requirement.private:
public_deps[name_req] = new_node
# RECURSION!
return new_node
else:
self._output.error("Could not retrieve %s" % requirement.conan_reference)
|
def _create_new_node(self, current_node, dep_graph, requirement, public_deps, name_req):
"""creates and adds a new node to the dependency graph"""
conanfile_path = self._retriever.get_conanfile(requirement.conan_reference)
output = ScopedOutput(str(requirement.conan_reference), self._output)
dep_conanfile = self._loader.load_conan(conanfile_path, output)
if dep_conanfile:
new_node = Node(requirement.conan_reference, dep_conanfile)
dep_graph.add_node(new_node)
dep_graph.add_edge(current_node, new_node)
if not requirement.private:
public_deps[name_req] = new_node
# RECURSION!
return new_node
else:
self._output.error("Could not retrieve %s" % requirement.conan_reference)
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def download_packages(self, reference, package_ids):
assert isinstance(package_ids, list)
remote, _ = self._get_remote(reference)
export_path = self._client_cache.export(reference)
self._remote_manager.get_recipe(reference, export_path, remote)
self._registry.set_ref(reference, remote)
output = ScopedOutput(str(reference), self._out)
for package_id in package_ids:
package_reference = PackageReference(reference, package_id)
self._retrieve_remote_package(package_reference, output, remote)
|
def download_packages(self, reference, package_ids):
assert isinstance(package_ids, list)
remote, _ = self._get_remote(reference)
self._remote_manager.get_conanfile(reference, remote)
self._registry.set_ref(reference, remote)
output = ScopedOutput(str(reference), self._out)
for package_id in package_ids:
package_reference = PackageReference(reference, package_id)
self._retrieve_remote_package(package_reference, output, remote)
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def _retrieve_remote_package(self, package_reference, output, remote=None):
if remote is None:
remote = self._registry.get_ref(package_reference.conan)
if not remote:
output.warn(
"Package doesn't have a remote defined. "
"Probably created locally and not uploaded"
)
return False
package_id = str(package_reference.package_id)
try:
output.info(
"Looking for package %s in remote '%s' " % (package_id, remote.name)
)
# Will raise if not found NotFoundException
package_path = self._client_cache.package(package_reference)
self._remote_manager.get_package(package_reference, package_path, remote)
output.success("Package installed %s" % package_id)
return True
except ConanConnectionError:
raise # This shouldn't be skipped
except ConanException as e:
output.warn("Binary for %s not in remote: %s" % (package_id, str(e)))
return False
|
def _retrieve_remote_package(self, package_reference, output, remote=None):
if remote is None:
remote = self._registry.get_ref(package_reference.conan)
if not remote:
output.warn(
"Package doesn't have a remote defined. "
"Probably created locally and not uploaded"
)
return False
package_id = str(package_reference.package_id)
try:
output.info(
"Looking for package %s in remote '%s' " % (package_id, remote.name)
)
# Will raise if not found NotFoundException
self._remote_manager.get_package(package_reference, remote)
output.success("Package installed %s" % package_id)
return True
except ConanConnectionError:
raise # This shouldn't be skipped
except ConanException as e:
output.warn("Binary for %s not in remote: %s" % (package_id, str(e)))
return False
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def _refresh():
export_path = self._client_cache.export(conan_reference)
rmdir(export_path)
# It might need to remove shortpath
rmdir(self._client_cache.source(conan_reference), True)
current_remote, _ = self._get_remote(conan_reference)
output.info("Retrieving from remote '%s'..." % current_remote.name)
self._remote_manager.get_recipe(conan_reference, export_path, current_remote)
if self._update:
output.info("Updated!")
else:
output.info("Installed!")
|
def _refresh():
conan_dir_path = self._client_cache.export(conan_reference)
rmdir(conan_dir_path)
# It might need to remove shortpath
rmdir(self._client_cache.source(conan_reference), True)
current_remote, _ = self._get_remote(conan_reference)
output.info("Retrieving from remote '%s'..." % current_remote.name)
self._remote_manager.get_conanfile(conan_reference, current_remote)
if self._update:
output.info("Updated!")
else:
output.info("Installed!")
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def _retrieve_from_remote(remote):
output.info("Trying with '%s'..." % remote.name)
export_path = self._client_cache.export(conan_reference)
result = self._remote_manager.get_recipe(conan_reference, export_path, remote)
self._registry.set_ref(conan_reference, remote)
return result
|
def _retrieve_from_remote(remote):
output.info("Trying with '%s'..." % remote.name)
result = self._remote_manager.get_conanfile(conan_reference, remote)
self._registry.set_ref(conan_reference, remote)
return result
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def get_package(self, package_reference, dest_folder, remote):
"""
Read the conans package from remotes
Will iterate the remotes to find the conans unless remote was specified
returns (dict relative_filepath:abs_path , remote_name)"""
zipped_files = self._call_remote(
remote, "get_package", package_reference, dest_folder
)
files = unzip_and_get_files(zipped_files, dest_folder, PACKAGE_TGZ_NAME)
# Issue #214 https://github.com/conan-io/conan/issues/214
for dirname, _, files in os.walk(dest_folder):
for fname in files:
touch(os.path.join(dirname, fname))
return files
|
def get_package(self, package_reference, remote):
"""
Read the conans package from remotes
Will iterate the remotes to find the conans unless remote was specified
returns (dict relative_filepath:content , remote_name)"""
package_files = self._call_remote(remote, "get_package", package_reference)
destination_dir = self._client_cache.package(package_reference)
uncompress_files(package_files, destination_dir, PACKAGE_TGZ_NAME)
# Issue #214 https://github.com/conan-io/conan/issues/214
for dirname, _, files in os.walk(destination_dir):
for fname in files:
touch(os.path.join(dirname, fname))
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def get_package(self, package_reference, dest_folder):
return self._rest_client.get_package(package_reference, dest_folder)
|
def get_package(self, package_reference):
return self._rest_client.get_package(package_reference)
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def get_package(self, package_reference, dest_folder):
"""Gets a dict of filename:contents from package"""
url = "%s/conans/%s/packages/%s/download_urls" % (
self._remote_api_url,
"/".join(package_reference.conan),
package_reference.package_id,
)
urls = self._get_json(url)
if not urls:
raise NotFoundException("Package not found!")
# TODO: Get fist an snapshot and compare files and download only required?
# Download the resources
file_paths = self.download_files_to_folder(urls, dest_folder, self._output)
return file_paths
|
def get_package(self, package_reference):
"""Gets a dict of filename:contents from package"""
url = "%s/conans/%s/packages/%s/download_urls" % (
self._remote_api_url,
"/".join(package_reference.conan),
package_reference.package_id,
)
urls = self._get_json(url)
if not urls:
raise NotFoundException("Package not found!")
# TODO: Get fist an snapshot and compare files and download only required?
# Download the resources
contents = self.download_files(urls, self._output)
return contents
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def download(self, url, file_path=None, auth=None):
ret = bytearray()
response = self.requester.get(url, stream=True, verify=self.verify, auth=auth)
if not response.ok:
raise ConanException(
"Error %d downloading file %s" % (response.status_code, url)
)
try:
total_length = response.headers.get("content-length")
if total_length is None: # no content length header
if not file_path:
ret += response.content
else:
save(file_path, response.content, append=True)
else:
dl = 0
total_length = int(total_length)
last_progress = None
chunk_size = 1024 if not file_path else 1024 * 100
for data in response.iter_content(chunk_size=chunk_size):
dl += len(data)
if not file_path:
ret.extend(data)
else:
save(file_path, data, append=True)
units = progress_units(dl, total_length)
if last_progress != units: # Avoid screen refresh if nothing has change
if self.output:
print_progress(self.output, units)
last_progress = units
if not file_path:
return bytes(ret)
else:
return
except Exception as e:
logger.debug(e.__class__)
logger.debug(traceback.format_exc())
# If this part failed, it means problems with the connection to server
raise ConanConnectionError(
"Download failed, check server, possibly try again\n%s" % str(e)
)
|
def download(self, url, file_path=None, auth=None):
ret = bytearray()
response = self.requester.get(url, stream=True, verify=self.verify, auth=auth)
if not response.ok:
raise ConanException(
"Error %d downloading file %s" % (response.status_code, url)
)
try:
total_length = response.headers.get("content-length")
if total_length is None: # no content length header
if not file_path:
ret += response.content
else:
save(file_path, response.content, append=True)
else:
dl = 0
total_length = int(total_length)
last_progress = None
chunk_size = 1024 if not file_path else 1024 * 100
for data in response.iter_content(chunk_size=chunk_size):
dl += len(data)
if not file_path:
ret.extend(data)
else:
save(file_path, data, append=True)
units = progress_units(dl, total_length)
if last_progress != units: # Avoid screen refresh if nothing has change
if self.output:
print_progress(self.output, units)
last_progress = units
return bytes(ret)
except Exception as e:
logger.debug(e.__class__)
logger.debug(traceback.format_exc())
# If this part failed, it means problems with the connection to server
raise ConanConnectionError(
"Download failed, check server, possibly try again\n%s" % str(e)
)
|
https://github.com/conan-io/conan/issues/501
|
DEBUG :uploader_downloader.py[74]: <type 'exceptions.MemoryError'> [2016-09-23 15:15:02,983]
DEBUG :uploader_downloader.py[75]: Traceback (most recent call last):
File "c:\python27\lib\site-packages\conans\client\rest\uploader_downloader.py", line 62, in download
ret.extend(data)
MemoryError
|
exceptions.MemoryError
|
def run(self, *args):
"""HIDDEN: entry point for executing commands, dispatcher to class
methods
"""
errors = False
try:
try:
command = args[0][0]
commands = self._commands()
method = commands[command]
except KeyError as exc:
if command in ["-v", "--version"]:
self._user_io.out.success("Conan version %s" % CLIENT_VERSION)
return False
self._show_help()
if command in ["-h", "--help"]:
return False
raise ConanException("Unknown command %s" % str(exc))
except IndexError as exc: # No parameters
self._show_help()
return False
method(args[0][1:])
except (KeyboardInterrupt, SystemExit) as exc:
logger.error(exc)
errors = True
except ConanException as exc:
try:
msg = unicode(exc)
except:
msg = str(exc)
# import traceback
# logger.debug(traceback.format_exc())
errors = True
self._user_io.out.error(msg)
return errors
|
def run(self, *args):
"""HIDDEN: entry point for executing commands, dispatcher to class
methods
"""
errors = False
try:
try:
command = args[0][0]
commands = self._commands()
method = commands[command]
except KeyError as exc:
if command in ["-v", "--version"]:
self._user_io.out.success("Conan version %s" % CLIENT_VERSION)
return False
self._show_help()
if command in ["-h", "--help"]:
return False
raise ConanException("Unknown command %s" % str(exc))
except IndexError as exc: # No parameters
self._show_help()
return False
method(args[0][1:])
except (KeyboardInterrupt, SystemExit) as exc:
logger.error(exc)
errors = True
except ConanException as exc:
logger.error(exc)
# import traceback
# logger.debug(traceback.format_exc())
errors = True
self._user_io.out.error(str(exc))
return errors
|
https://github.com/conan-io/conan/issues/416
|
conan install ..\test_package
Boost/1.60.0@lasote/stable: Not found, looking in remotes...
Boost/1.60.0@lasote/stable: Trying with 'conan.io'...
Traceback (most recent call last):
File "<string>", line 10, in <module>
File "<string>", line 6, in run
File "conan\conans\client\command.py", line 767, in main
File "conan\conans\client\command.py", line 688, in run
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 727: ordinal not in range(128)
conan returned -1
|
UnicodeEncodeError
|
def _get_json(self, url, data=None):
if data: # POST request
headers = {"Content-type": "application/json", "Accept": "text/plain"}
headers.update(self.custom_headers)
response = self.requester.post(
url,
auth=self.auth,
headers=headers,
verify=self.VERIFY_SSL,
stream=True,
data=json.dumps(data),
)
else:
response = self.requester.get(
url,
auth=self.auth,
headers=self.custom_headers,
verify=self.VERIFY_SSL,
stream=True,
)
if response.status_code != 200: # Error message is text
response.charset = (
"utf-8" # To be able to access ret.text (ret.content are bytes)
)
raise get_exception_from_error(response.status_code)(response.text)
result = json.loads(decode_text(response.content))
if not isinstance(result, dict):
raise ConanException("Unexpected server response %s" % result)
return result
|
def _get_json(self, url, data=None):
if data: # POST request
headers = {"Content-type": "application/json", "Accept": "text/plain"}
headers.update(self.custom_headers)
response = self.requester.post(
url,
auth=self.auth,
headers=headers,
verify=self.VERIFY_SSL,
stream=True,
data=json.dumps(data),
)
else:
response = self.requester.get(
url,
auth=self.auth,
headers=self.custom_headers,
verify=self.VERIFY_SSL,
stream=True,
)
if response.status_code != 200: # Error message is text
response.charset = (
"utf-8" # To be able to access ret.text (ret.content are bytes)
)
raise get_exception_from_error(response.status_code)(response.text)
return json.loads(decode_text(response.content))
|
https://github.com/conan-io/conan/issues/416
|
conan install ..\test_package
Boost/1.60.0@lasote/stable: Not found, looking in remotes...
Boost/1.60.0@lasote/stable: Trying with 'conan.io'...
Traceback (most recent call last):
File "<string>", line 10, in <module>
File "<string>", line 6, in run
File "conan\conans\client\command.py", line 767, in main
File "conan\conans\client\command.py", line 688, in run
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 727: ordinal not in range(128)
conan returned -1
|
UnicodeEncodeError
|
def _compute_private_nodes(self, deps_graph, build_mode):
"""computes a list of nodes that are not required to be built, as they are
private requirements of already available shared libraries as binaries
"""
private_closure = deps_graph.private_nodes()
skippable_nodes = []
for private_node, private_requirers in private_closure:
for private_requirer in private_requirers:
conan_ref, conan_file = private_requirer
if conan_ref is None:
continue
package_id = conan_file.info.package_id()
package_reference = PackageReference(conan_ref, package_id)
package_folder = self._paths.package(package_reference)
if not path_exists(package_folder, self._paths.store):
if not self._force_build(conan_ref, build_mode): # Not download package
self._user_io.out.info(
"Package for %s does not exist" % str(conan_ref)
)
if not self._retrieve_remote_package(package_reference):
break
else:
skippable_nodes.append(private_node)
return skippable_nodes
|
def _compute_private_nodes(self, deps_graph, build_mode):
"""computes a list of nodes that are not required to be built, as they are
private requirements of already available shared libraries as binaries
"""
private_closure = deps_graph.private_nodes()
skippable_nodes = []
for private_node, private_requirers in private_closure:
for private_requirer in private_requirers:
conan_ref, conan_file = private_requirer
package_id = conan_file.info.package_id()
package_reference = PackageReference(conan_ref, package_id)
package_folder = self._paths.package(package_reference)
if not path_exists(package_folder, self._paths.store):
if not self._force_build(conan_ref, build_mode): # Not download package
self._user_io.out.info(
"Package for %s does not exist" % str(conan_ref)
)
if not self._retrieve_remote_package(package_reference):
break
else:
skippable_nodes.append(private_node)
return skippable_nodes
|
https://github.com/conan-io/conan/issues/79
|
HEADER ONLY
Requirements
Boost/1.60.0@lasote/stable
catch/1.3.0@bjoern/testing
catch/1.3.0@bjoern/testing
filesystem/1.0.0@bjoern/testing
io/1.0.0@bjoern/testing
zlib/1.2.8@lasote/stable
Traceback (most recent call last):
File "/Users/bjoern/.pipsi/bin/conan", line 11, in <module>
sys.exit(run())
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/conan.py", line 6, in run
main(sys.argv[1:])
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/client/command.py", line 432, in main
error = command.run(args)
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/client/command.py", line 367, in run
method(args[0][1:])
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/client/command.py", line 186, in install
build_mode=args.build)
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/client/manager.py", line 122, in install
installer.install(deps_graph, build_mode)
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/client/installer.py", line 70, in install
skip_private_nodes = self._compute_private_nodes(deps_graph, build_mode)
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/client/installer.py", line 110, in _compute_private_nodes
package_folder = self._paths.package(package_reference)
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/paths.py", line 92, in package
return normpath(join(self.conan(package_reference.conan), PACKAGES_FOLDER,
File "/Users/bjoern/.pipsi/virtualenvs/conan/lib/python2.7/site-packages/conans/paths.py", line 45, in conan
assert isinstance(conan_reference, ConanFileReference)
AssertionError
|
AssertionError
|
def __init__(self, model, parent, factory, resource_defs, service_model):
self._model = model
operation_name = self._model.request.operation
self._parent = parent
search_path = model.resource.path
self._handler = ResourceHandler(
search_path,
factory,
resource_defs,
service_model,
model.resource,
operation_name,
)
|
def __init__(self, model, parent, factory, resource_defs, service_model):
self._model = model
operation_name = self._model.request.operation
self._parent = parent
search_path = model.path
self._handler = ResourceHandler(
search_path,
factory,
resource_defs,
service_model,
model.resource,
operation_name,
)
|
https://github.com/boto/boto3/issues/57
|
Traceback (most recent call last):
File "./bug_report", line 10, in <module>
print o.size
File "/home/glacier/lib/lib/python2.7/site-packages/boto3/resources/factory.py", line 257, in property_loader
'{0} has no load method'.format(self.__class__.__name__))
boto3.exceptions.ResourceLoadException: s3.ObjectSummary has no load method
|
boto3.exceptions.ResourceLoadException
|
def on_shutdown(
manager: TaskManager, unsaved_jobs_lock: Lock
) -> Callable[[signal.Signals, Any], None]:
def actual_callback(s: signal.Signals, __: Any) -> None:
global shutting_down
manager.logger.error("Got interupted by %r, shutting down", s)
with unsaved_jobs_lock:
shutting_down = True
manager.close(relaxed=False)
sys.exit(1)
return actual_callback
|
def on_shutdown(
manager: TaskManager.TaskManager, unsaved_jobs_lock: Lock
) -> Callable[[signal.Signals, Any], None]:
def actual_callback(s: signal.Signals, __: Any) -> None:
global shutting_down
manager.logger.error("Got interupted by %r, shutting down", s)
with unsaved_jobs_lock:
shutting_down = True
manager.close(relaxed=False)
sys.exit(1)
return actual_callback
|
https://github.com/mozilla/OpenWPM/issues/810
|
browser_manager - INFO - BROWSER 4: Launching browser...
task_manager - INFO -
OpenWPM Version: b'v0.13.0-3-g051a384'
Firefox Version: b'83.0'
========== Manager Configuration ==========
{
"aggregator_address": [
"127.0.0.1",
51454
],
"data_directory": "/Users/ankushdua/Desktop/",
"database_name": "/Users/ankushdua/Desktop/crawl-data.sqlite",
"failure_limit": null,
"log_directory": "/Users/ankushdua/Desktop/",
"log_file": "/Users/ankushdua/Desktop/openwpm.log",
"logger_address": [
"127.0.0.1",
51453
],
"memory_watchdog": true,
"num_browsers": 1,
"output_format": "local",
"process_watchdog": true,
"s3_bucket": null,
"s3_directory": null,
"screenshot_path": "/Users/ankushdua/Desktop/screenshots",
"source_dump_path": "/Users/ankushdua/Desktop/sources",
"testing": false
}
========== Browser Configuration ==========
Keys:
{
"browser_id": 0,
"bot_mitigation": 1,
"browser": 2,
"callstack_instrument": 3,
"cookie_instrument": 4,
"display_mode": 5,
"dns_instrument": 6,
"donottrack": 7,
"extension_enabled": 8,
"http_instrument": 9,
"js_instrument": 10,
"navigation_instrument": 11,
"prefs": 12,
"recovery_tar": 13,
"save_content": 14,
"tp_cookies": 15,
"tracking-protection": 16
}
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
--- ----- ------- ---- ---- -------- ---- ----- ---- ---- ---- ---- ---- ---- ----- ------ -----
4 False firefox True True headless True False True True True True {} False always False
========== JS Instrument Settings ==========
{
"4": "[{\"object\": window['ScriptProcessorNode'].prototype, \"instrumentedName\": \"ScriptProcessorNode\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['GainNode'].prototype, \"instrumentedName\": \"GainNode\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['AnalyserNode'].prototype, \"instrumentedName\": \"AnalyserNode\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['OscillatorNode'].prototype, \"instrumentedName\": \"OscillatorNode\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['OfflineAudioContext'].prototype, \"instrumentedName\": \"OfflineAudioContext\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['AudioContext'].prototype, \"instrumentedName\": \"AudioContext\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['RTCPeerConnection'].prototype, \"instrumentedName\": \"RTCPeerConnection\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['HTMLCanvasElement'].prototype, \"instrumentedName\": \"HTMLCanvasElement\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['Storage'].prototype, \"instrumentedName\": \"Storage\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window.navigator, \"instrumentedName\": \"window.navigator\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window['CanvasRenderingContext2D'].prototype, \"instrumentedName\": \"CanvasRenderingContext2D\", \"logSettings\": {\"propertiesToInstrument\": [], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [\"beginPath\", \"closePath\", \"quadraticCurveTo\", \"drawImage\", \"translate\", \"lineTo\", \"transform\", \"setTransform\", \"clearRect\", \"globalAlpha\", \"canvas\", \"moveTo\"], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window, \"instrumentedName\": \"window\", \"logSettings\": {\"propertiesToInstrument\": [\"sessionStorage\", \"name\", \"localStorage\"], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window.document, \"instrumentedName\": \"window.document\", \"logSettings\": {\"propertiesToInstrument\": [\"referrer\", \"cookie\"], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": true, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}, {\"object\": window.screen, \"instrumentedName\": \"window.screen\", \"logSettings\": {\"propertiesToInstrument\": [\"colorDepth\", \"pixelDepth\"], \"nonExistingPropertiesToInstrument\": [], \"excludedProperties\": [], \"logCallStack\": false, \"logFunctionsAsStrings\": false, \"logFunctionGets\": false, \"preventSets\": false, \"recursive\": false, \"depth\": 5}}]"
}
========== Input profile tar files ==========
No profile tar files specified
========== Output (archive) profile dirs ==========
No profile archive directories specified
task_manager - INFO - Starting to work on CommandSequence with visit_id 6 on browser with id 4
Traceback (most recent call last):
File "demo.py", line 48, in <module>
command_sequence = command_sequence.CommandSequence(
AttributeError: 'CommandSequence' object has no attribute 'CommandSequence'
browser_manager - INFO - BROWSER 4: EXECUTING COMMAND: IntitializeCommand()
browser_manager - INFO - BROWSER 4: EXECUTING COMMAND: GetCommand(http://www.example.com,3)
browser_manager - INFO - BROWSER 4: EXECUTING COMMAND: FinalizeCommand(5)
task_manager - INFO - Finished working on CommandSequence with visit_id 6 on browser with id 4
Exception in thread OpenWPM-watchdog:
Traceback (most recent call last):
File "/Users/ankushdua/anaconda3/envs/openwpm1/lib/python3.8/site-packages/psutil/_psosx.py", line 342, in wrapper
return fun(self, *args, **kwargs)
File "/Users/ankushdua/anaconda3/envs/openwpm1/lib/python3.8/site-packages/psutil/_psosx.py", line 484, in memory_full_info
uss = cext.proc_memory_uss(self.pid)
PermissionError: [Errno 13] Access denied (originated from task_for_pid)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ankushdua/anaconda3/envs/openwpm1/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/Users/ankushdua/anaconda3/envs/openwpm1/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "/Users/ankushdua/OpenWPM/openwpm/task_manager.py", line 231, in _manager_watchdog
mem_bytes += child.memory_full_info().uss
File "/Users/ankushdua/anaconda3/envs/openwpm1/lib/python3.8/site-packages/psutil/__init__.py", line 1072, in memory_full_info
return self._proc.memory_full_info()
File "/Users/ankushdua/anaconda3/envs/openwpm1/lib/python3.8/site-packages/psutil/_psosx.py", line 349, in wrapper
raise AccessDenied(self.pid, self._name)
psutil.AccessDenied: psutil.AccessDenied (pid=89532)
CommandSequence http://www.example.com done
browser_manager - INFO - BROWSER 4: BrowserManager restart initiated. Clear profile? True
browser_manager - INFO - BROWSER 4: Launching browser...
|
AttributeError
|
def _accept(self):
"""Listen for connections and pass handling to a new thread"""
while True:
try:
(client, address) = self.sock.accept()
thread = threading.Thread(target=self._handle_conn, args=(client, address))
thread.daemon = True
thread.start()
except ConnectionAbortedError:
# Workaround for #278
print("A connection establish request was performed on a closed socket")
return
|
def _accept(self):
"""Listen for connections and pass handling to a new thread"""
while True:
(client, address) = self.sock.accept()
thread = threading.Thread(target=self._handle_conn, args=(client, address))
thread.daemon = True
thread.start()
|
https://github.com/mozilla/OpenWPM/issues/278
|
BaseAggregator - INFO - Received shutdown signal!
Exception in thread Thread-1-LocalListener:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/SocketInterface.py", line 43, in _accept
(client, address) = self.sock.accept()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 212, in accept
fd, addr = self._accept()
ConnectionAbortedError: [Errno 53] Software caused connection abort
Exception in thread Thread-1-loggingserver:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/SocketInterface.py", line 43, in _accept
(client, address) = self.sock.accept()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 212, in accept
fd, addr = self._accept()
ConnectionAbortedError: [Errno 53] Software caused connection abort
|
ConnectionAbortedError
|
def get_firefox_binary_path():
"""
If ../../firefox-bin/firefox-bin or os.environ["FIREFOX_BINARY"] exists,
return it. Else, throw a RuntimeError.
"""
if "FIREFOX_BINARY" in os.environ:
firefox_binary_path = os.environ["FIREFOX_BINARY"]
if not os.path.isfile(firefox_binary_path):
raise RuntimeError(
"No file found at the path specified in "
"environment variable `FIREFOX_BINARY`."
"Current `FIREFOX_BINARY`: %s" % firefox_binary_path
)
return firefox_binary_path
root_dir = os.path.dirname(__file__) + "/../.."
if platform == "darwin":
firefox_binary_path = os.path.abspath(
root_dir + "/Nightly.app/Contents/MacOS/firefox-bin"
)
else:
firefox_binary_path = os.path.abspath(root_dir + "/firefox-bin/firefox-bin")
if not os.path.isfile(firefox_binary_path):
raise RuntimeError(
"The `firefox-bin/firefox-bin` binary is not found in the root "
"of the OpenWPM directory (did you run the install script "
"(`install.sh`)?). Alternatively, you can specify a binary "
"location using the OS environment variable FIREFOX_BINARY."
)
return firefox_binary_path
|
def get_firefox_binary_path():
"""
If ../../firefox-bin/firefox-bin or os.environ["FIREFOX_BINARY"] exists,
return it. Else, throw a RuntimeError.
"""
if "FIREFOX_BINARY" in os.environ:
firefox_binary_path = os.environ["FIREFOX_BINARY"]
if not os.path.isfile(firefox_binary_path):
raise RuntimeError(
"No file found at the path specified in "
"environment variable `FIREFOX_BINARY`."
"Current `FIREFOX_BINARY`: %s" % firefox_binary_path
)
return firefox_binary_path
root_dir = os.path.dirname(__file__) # directory of this file
firefox_binary_path = os.path.abspath(root_dir + "/../../firefox-bin/firefox-bin")
if not os.path.isfile(firefox_binary_path):
raise RuntimeError(
"The `firefox-bin/firefox-bin` binary is not found in the root "
"of the OpenWPM directory (did you run the install script "
"(`install.sh`)?). Alternatively, you can specify a binary "
"location using the OS environment variable FIREFOX_BINARY."
)
return firefox_binary_path
|
https://github.com/mozilla/OpenWPM/issues/278
|
BaseAggregator - INFO - Received shutdown signal!
Exception in thread Thread-1-LocalListener:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/SocketInterface.py", line 43, in _accept
(client, address) = self.sock.accept()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 212, in accept
fd, addr = self._accept()
ConnectionAbortedError: [Errno 53] Software caused connection abort
Exception in thread Thread-1-loggingserver:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/SocketInterface.py", line 43, in _accept
(client, address) = self.sock.accept()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 212, in accept
fd, addr = self._accept()
ConnectionAbortedError: [Errno 53] Software caused connection abort
|
ConnectionAbortedError
|
def _accept(self):
"""Listen for connections and pass handling to a new thread"""
while True:
try:
(client, address) = self.sock.accept()
thread = threading.Thread(target=self._handle_conn, args=(client, address))
thread.daemon = True
thread.start()
except ConnectionAbortedError:
# Workaround for #278
print("A connection establish request was performed on a closed socket")
return
|
def _accept(self):
"""Listen for connections and pass handling to a new thread"""
while True:
try:
(client, address) = self.sock.accept()
thread = threading.Thread(target=self._handle_conn, args=(client, address))
thread.daemon = True
thread.start()
except ConnectionAbortedError:
print("A connection establish request was performed on a closed socket")
return
|
https://github.com/mozilla/OpenWPM/issues/278
|
BaseAggregator - INFO - Received shutdown signal!
Exception in thread Thread-1-LocalListener:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/SocketInterface.py", line 43, in _accept
(client, address) = self.sock.accept()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 212, in accept
fd, addr = self._accept()
ConnectionAbortedError: [Errno 53] Software caused connection abort
Exception in thread Thread-1-loggingserver:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/SocketInterface.py", line 43, in _accept
(client, address) = self.sock.accept()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 212, in accept
fd, addr = self._accept()
ConnectionAbortedError: [Errno 53] Software caused connection abort
|
ConnectionAbortedError
|
def _check_failure_status(self) -> None:
"""Check the status of command failures. Raise exceptions as necessary
The failure status property is used by the various asynchronous
command execution threads which interface with the
remote browser manager processes. If a failure status is found, the
appropriate steps are taken to gracefully close the infrastructure
"""
self.logger.debug("Checking command failure status indicator...")
if not self.failure_status:
return
self.logger.debug("TaskManager failure status set, halting command execution.")
self._shutdown_manager()
if self.failure_status["ErrorType"] == "ExceedCommandFailureLimit":
raise CommandExecutionError(
"TaskManager exceeded maximum consecutive command execution failures.",
self.failure_status["CommandSequence"],
)
elif self.failure_status["ErrorType"] == ("ExceedLaunchFailureLimit"):
raise CommandExecutionError(
"TaskManager failed to launch browser within allowable failure limit.",
self.failure_status["CommandSequence"],
)
if self.failure_status["ErrorType"] == "CriticalChildException":
exc_type, exc, tb = pickle.loads(self.failure_status["Exception"])
raise exc.with_traceback(tb)
|
def _check_failure_status(self) -> None:
"""Check the status of command failures. Raise exceptions as necessary
The failure status property is used by the various asynchronous
command execution threads which interface with the
remote browser manager processes. If a failure status is found, the
appropriate steps are taken to gracefully close the infrastructure
"""
self.logger.debug("Checking command failure status indicator...")
if not self.failure_status:
return
self.logger.debug("TaskManager failure status set, halting command execution.")
self._shutdown_manager()
if self.failure_status["ErrorType"] == "ExceedCommandFailureLimit":
raise CommandExecutionError(
"TaskManager exceeded maximum consecutive command execution failures.",
self.failure_status["CommandSequence"],
)
elif self.failure_status["ErrorType"] == ("ExceedLaunchFailureLimit"):
raise CommandExecutionError(
"TaskManager failed to launch browser within allowable failure limit.",
self.failure_status["CommandSequence"],
)
if self.failure_status["ErrorType"] == "CriticalChildException":
exc = pickle.loads(self.failure_status["Exception"])
assert type(exc) == BaseException, (
"Unexpected object passed in place of exception while handling"
" a critical exception in a child process. Please report this "
"error to https://github.com/mozilla/OpenWPM/issues/547. "
f"Object was of type {type(exc)} and looked like {exc!r}."
)
raise exc
|
https://github.com/mozilla/OpenWPM/issues/547
|
Traceback (most recent call last):
File "collect_links.py", line 55, in <module>
manager.execute_command_sequence(command_sequence)
File "<path>OpenWPM/automation/TaskManager.py", line 565, in execute_command_sequence
self._distribute_command(command_sequence, index)
File "<path>OpenWPM/automation/TaskManager.py", line 334, in _distribute_command
thread = self._start_thread(browser, command_seq)
File "<path>OpenWPM/automation/TaskManager.py", line 395, in _start_thread
self._check_failure_status()
File "<path>OpenWPM/automation/TaskManager.py", line 301, in _check_failure_status
raise pickle.loads(self.failure_status['Exception'])
TypeError: exceptions must derive from BaseException
|
TypeError
|
def _issue_command(self, browser, command_sequence, condition=None):
"""
sends command tuple to the BrowserManager
"""
browser.is_fresh = False
# if this is a synced call, block on condition
if condition is not None:
with condition:
condition.wait()
reset = command_sequence.reset
if not reset:
self.logger.warn(
"BROWSER %i: Browser will not reset after CommandSequence "
"executes. OpenWPM does not currently support stateful crawls "
"(see: https://github.com/mozilla/OpenWPM/projects/2). "
"The next command issued to this browser may or may not "
"use the same profile (depending on the failure status of "
"this command). To prevent this warning, initialize the "
"CommandSequence with `reset` set to `True` to use a fresh "
"profile for each command." % browser.crawl_id
)
start_time = None
for command_and_timeout in command_sequence.commands_with_timeout:
command, timeout = command_and_timeout
if command[0] in [
"GET",
"BROWSE",
"SAVE_SCREENSHOT",
"SCREENSHOT_FULL_PAGE",
"DUMP_PAGE_SOURCE",
"RECURSIVE_DUMP_PAGE_SOURCE",
]:
start_time = time.time()
command += (browser.curr_visit_id,)
elif command[0] in ["DUMP_FLASH_COOKIES", "DUMP_PROFILE_COOKIES"]:
command += (
start_time,
browser.curr_visit_id,
)
browser.current_timeout = timeout
# passes off command and waits for a success (or failure signal)
browser.command_queue.put(command)
command_arguments = command[1] if len(command) > 1 else None
# received reply from BrowserManager, either success or failure
critical_failure = False
error_text = None
tb = None
try:
status = browser.status_queue.get(True, browser.current_timeout)
if status == "OK":
command_status = "ok"
elif status[0] == "CRITICAL":
command_status = "critical"
self.logger.critical(
"BROWSER %i: Received critical error from browser "
"process while executing command %s. Setting failure "
"status." % (browser.crawl_id, str(command))
)
self.failure_status = {
"ErrorType": "CriticalChildException",
"CommandSequence": command_sequence,
"Exception": status[1],
}
error_text, tb = self._unpack_picked_error(status[1])
critical_failure = True
elif status[0] == "FAILED":
command_status = "error"
error_text, tb = self._unpack_picked_error(status[1])
self.logger.info(
"BROWSER %i: Received failure status while executing "
"command: %s" % (browser.crawl_id, command[0])
)
elif status[0] == "NETERROR":
command_status = "neterror"
error_text, tb = self._unpack_picked_error(status[1])
error_text = parse_neterror(error_text)
self.logger.info(
"BROWSER %i: Received neterror %s while executing "
"command: %s" % (browser.crawl_id, error_text, command[0])
)
else:
raise ValueError("Unknown browser status message %s" % status)
except EmptyQueue:
command_status = "timeout"
self.logger.info(
"BROWSER %i: Timeout while executing command, %s, killing "
"browser manager" % (browser.crawl_id, command[0])
)
self.sock.send(
(
"crawl_history",
{
"crawl_id": browser.crawl_id,
"visit_id": browser.curr_visit_id,
"command": command[0],
"arguments": str(command_arguments),
"retry_number": command_sequence.retry_number,
"command_status": command_status,
"error": error_text,
"traceback": tb,
},
)
)
if critical_failure:
return
if command_status != "ok":
with self.threadlock:
self.failurecount += 1
if self.failurecount > self.failure_limit:
self.logger.critical(
"BROWSER %i: Command execution failure pushes failure "
"count above the allowable limit. Setting "
"failure_status." % browser.crawl_id
)
self.failure_status = {
"ErrorType": "ExceedCommandFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = True
self.logger.debug(
"BROWSER %i: Browser restart required" % (browser.crawl_id)
)
else:
with self.threadlock:
self.failurecount = 0
if browser.restart_required:
break
# Sleep after executing CommandSequence to provide extra time for
# internal buffers to drain. Stopgap in support of #135
time.sleep(2)
if self.closing:
return
if browser.restart_required or reset:
success = browser.restart_browser_manager(clear_profile=reset)
if not success:
self.logger.critical(
"BROWSER %i: Exceeded the maximum allowable consecutive "
"browser launch failures. Setting failure_status." % (browser.crawl_id)
)
self.failure_status = {
"ErrorType": "ExceedLaunchFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = False
|
def _issue_command(self, browser, command_sequence, condition=None):
"""
sends command tuple to the BrowserManager
"""
browser.is_fresh = False
# if this is a synced call, block on condition
if condition is not None:
with condition:
condition.wait()
reset = command_sequence.reset
if not reset:
self.logger.warn(
"BROWSER %i: Browser will not reset after CommandSequence "
"executes. OpenWPM does not currently support stateful crawls "
"(see: https://github.com/mozilla/OpenWPM/projects/2). "
"The next command issued to this browser may or may not "
"use the same profile (depending on the failure status of "
"this command). To prevent this warning, initialize the "
"CommandSequence with `reset` set to `True` to use a fresh "
"profile for each command." % browser.crawl_id
)
start_time = None
for command_and_timeout in command_sequence.commands_with_timeout:
command, timeout = command_and_timeout
if command[0] in [
"GET",
"BROWSE",
"SAVE_SCREENSHOT",
"SCREENSHOT_FULL_PAGE",
"DUMP_PAGE_SOURCE",
"RECURSIVE_DUMP_PAGE_SOURCE",
]:
start_time = time.time()
command += (browser.curr_visit_id,)
elif command[0] in ["DUMP_FLASH_COOKIES", "DUMP_PROFILE_COOKIES"]:
command += (
start_time,
browser.curr_visit_id,
)
browser.current_timeout = timeout
# passes off command and waits for a success (or failure signal)
browser.command_queue.put(command)
command_arguments = command[1] if len(command) > 1 else None
# received reply from BrowserManager, either success or failure
critical_failure = False
error_text = None
tb = None
try:
status = browser.status_queue.get(True, browser.current_timeout)
if status == "OK":
command_status = "ok"
elif status[0] == "CRITICAL":
self.logger.critical(
"BROWSER %i: Received critical error from browser "
"process while executing command %s. Setting failure "
"status." % (browser.crawl_id, str(command))
)
self.failure_status = {
"ErrorType": "CriticalChildException",
"CommandSequence": command_sequence,
"Exception": status[1],
}
error_text, tb = self._unpack_picked_error(status[1])
critical_failure = True
elif status[0] == "FAILED":
command_status = "error"
error_text, tb = self._unpack_picked_error(status[1])
self.logger.info(
"BROWSER %i: Received failure status while executing "
"command: %s" % (browser.crawl_id, command[0])
)
elif status[0] == "NETERROR":
command_status = "neterror"
error_text, tb = self._unpack_picked_error(status[1])
error_text = parse_neterror(error_text)
self.logger.info(
"BROWSER %i: Received neterror %s while executing "
"command: %s" % (browser.crawl_id, error_text, command[0])
)
else:
raise ValueError("Unknown browser status message %s" % status)
except EmptyQueue:
command_status = "timeout"
self.logger.info(
"BROWSER %i: Timeout while executing command, %s, killing "
"browser manager" % (browser.crawl_id, command[0])
)
self.sock.send(
(
"crawl_history",
{
"crawl_id": browser.crawl_id,
"visit_id": browser.curr_visit_id,
"command": command[0],
"arguments": str(command_arguments),
"retry_number": command_sequence.retry_number,
"command_status": command_status,
"error": error_text,
"traceback": tb,
},
)
)
if critical_failure:
return
if command_status != "ok":
with self.threadlock:
self.failurecount += 1
if self.failurecount > self.failure_limit:
self.logger.critical(
"BROWSER %i: Command execution failure pushes failure "
"count above the allowable limit. Setting "
"failure_status." % browser.crawl_id
)
self.failure_status = {
"ErrorType": "ExceedCommandFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = True
self.logger.debug(
"BROWSER %i: Browser restart required" % (browser.crawl_id)
)
else:
with self.threadlock:
self.failurecount = 0
if browser.restart_required:
break
# Sleep after executing CommandSequence to provide extra time for
# internal buffers to drain. Stopgap in support of #135
time.sleep(2)
if self.closing:
return
if browser.restart_required or reset:
success = browser.restart_browser_manager(clear_profile=reset)
if not success:
self.logger.critical(
"BROWSER %i: Exceeded the maximum allowable consecutive "
"browser launch failures. Setting failure_status." % (browser.crawl_id)
)
self.failure_status = {
"ErrorType": "ExceedLaunchFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = False
|
https://github.com/mozilla/OpenWPM/issues/546
|
Exception in thread Thread-1209:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "<path>/OpenWPM/automation/TaskManager.py", line 512, in _issue_command
"command_status": command_status,
UnboundLocalError: local variable 'command_status' referenced before assignment
|
UnboundLocalError
|
def _send_to_s3(self, force=False):
"""Copy in-memory batches to s3"""
for table_name, batches in self._batches.items():
if not force and len(batches) <= CACHE_SIZE:
continue
if table_name == SITE_VISITS_INDEX:
out_str = "\n".join([json.dumps(x) for x in batches])
if not isinstance(out_str, six.binary_type):
out_str = out_str.encode("utf-8")
fname = "%s/site_index/instance-%s-%s.json.gz" % (
self.dir,
self._instance_id,
hashlib.md5(out_str).hexdigest(),
)
self._write_str_to_s3(out_str, fname)
else:
if len(batches) == 0:
continue
try:
table = pa.Table.from_batches(batches)
pq.write_to_dataset(
table,
self._s3_bucket_uri % table_name,
filesystem=self._fs,
partition_cols=["instance_id"],
compression="snappy",
flavor="spark",
)
except (pa.lib.ArrowInvalid, EndpointConnectionError):
self.logger.error(
"Error while sending records for: %s" % table_name, exc_info=True
)
pass
self._batches[table_name] = list()
|
def _send_to_s3(self, force=False):
"""Copy in-memory batches to s3"""
for table_name, batches in self._batches.items():
if not force and len(batches) <= CACHE_SIZE:
continue
if table_name == SITE_VISITS_INDEX:
out_str = "\n".join([json.dumps(x) for x in batches])
if not isinstance(out_str, six.binary_type):
out_str = out_str.encode("utf-8")
fname = "%s/site_index/instance-%s-%s.json.gz" % (
self.dir,
self._instance_id,
hashlib.md5(out_str).hexdigest(),
)
self._write_str_to_s3(out_str, fname)
else:
if len(batches) == 0:
continue
try:
table = pa.Table.from_batches(batches)
pq.write_to_dataset(
table,
self._s3_bucket_uri % table_name,
filesystem=self._fs,
preserve_index=False,
partition_cols=["instance_id"],
compression="snappy",
flavor="spark",
)
except (pa.lib.ArrowInvalid, EndpointConnectionError):
self.logger.error(
"Error while sending records for: %s" % table_name, exc_info=True
)
pass
self._batches[table_name] = list()
|
https://github.com/mozilla/OpenWPM/issues/498
|
multiprocess_utils - ERROR - Exception in child process.
Traceback (most recent call last):
File "/home/travis/build/mozilla/OpenWPM/automation/utilities/multiprocess_utils.py", line 42, in run
mp.Process.run(self)
File "/home/travis/virtualenv/python2.7.15/lib/python2.7/site-packages/multiprocess/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/home/travis/build/mozilla/OpenWPM/automation/DataAggregator/S3Aggregator.py", line 56, in listener_process_runner
listener.drain_queue()
File "/home/travis/build/mozilla/OpenWPM/automation/DataAggregator/S3Aggregator.py", line 320, in drain_queue
self._send_to_s3(force=True)
File "/home/travis/build/mozilla/OpenWPM/automation/DataAggregator/S3Aggregator.py", line 227, in _send_to_s3
flavor='spark'
File "/home/travis/virtualenv/python2.7.15/lib/python2.7/site-packages/pyarrow/parquet.py", line 1450, in write_to_dataset
write_table(subtable, f, **kwargs)
File "/home/travis/virtualenv/python2.7.15/lib/python2.7/site-packages/pyarrow/parquet.py", line 1343, in write_table
**kwargs) as writer:
File "/home/travis/virtualenv/python2.7.15/lib/python2.7/site-packages/pyarrow/parquet.py", line 448, in __init__
**options)
File "pyarrow/_parquet.pyx", line 1220, in pyarrow._parquet.ParquetWriter.__cinit__
def __cinit__(self, where, Schema schema, use_dictionary=None,
TypeError: __cinit__() got an unexpected keyword argument 'preserve_index'
Process Process-17:
Traceback (most recent call last):
File "/home/travis/virtualenv/python2.7.15/lib/python2.7/site-packages/multiprocess/process.py", line 267, in _bootstrap
self.run()
File "/home/travis/build/mozilla/OpenWPM/automation/utilities/multiprocess_utils.py", line 50, in run
raise e
TypeError: __cinit__() got an unexpected keyword argument 'preserve_index'
|
TypeError
|
def _issue_command(self, browser, command_sequence, condition=None):
"""
sends command tuple to the BrowserManager
"""
browser.is_fresh = False
# if this is a synced call, block on condition
if condition is not None:
with condition:
condition.wait()
reset = command_sequence.reset
start_time = None
for command_and_timeout in command_sequence.commands_with_timeout:
command, timeout = command_and_timeout
if command[0] in [
"GET",
"BROWSE",
"SAVE_SCREENSHOT",
"SCREENSHOT_FULL_PAGE",
"DUMP_PAGE_SOURCE",
"RECURSIVE_DUMP_PAGE_SOURCE",
]:
start_time = time.time()
command += (browser.curr_visit_id,)
elif command[0] in ["DUMP_FLASH_COOKIES", "DUMP_PROFILE_COOKIES"]:
command += (
start_time,
browser.curr_visit_id,
)
browser.current_timeout = timeout
# passes off command and waits for a success (or failure signal)
browser.command_queue.put(command)
command_succeeded = 0 # 1 success, 0 error, -1 timeout
command_arguments = command[1] if len(command) > 1 else None
# received reply from BrowserManager, either success or failure
try:
status = browser.status_queue.get(True, browser.current_timeout)
if status == "OK":
command_succeeded = 1
elif status[0] == "CRITICAL":
self.logger.critical(
"BROWSER %i: Received critical error from browser "
"process while executing command %s. Setting failure "
"status." % (browser.crawl_id, str(command))
)
self.failure_status = {
"ErrorType": "CriticalChildException",
"CommandSequence": command_sequence,
"Exception": status[1],
}
return
else:
command_succeeded = 0
self.logger.info(
"BROWSER %i: Received failure status while executing "
"command: %s" % (browser.crawl_id, command[0])
)
except EmptyQueue:
command_succeeded = -1
self.logger.info(
"BROWSER %i: Timeout while executing command, %s, killing "
"browser manager" % (browser.crawl_id, command[0])
)
self.sock.send(
(
"crawl_history",
{
"crawl_id": browser.crawl_id,
"visit_id": browser.curr_visit_id,
"command": command[0],
"arguments": str(command_arguments),
"bool_success": command_succeeded,
},
)
)
if command_succeeded != 1:
with self.threadlock:
self.failurecount += 1
if self.failurecount > self.failure_limit:
self.logger.critical(
"BROWSER %i: Command execution failure pushes failure "
"count above the allowable limit. Setting "
"failure_status." % browser.crawl_id
)
self.failure_status = {
"ErrorType": "ExceedCommandFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = True
self.logger.debug(
"BROWSER %i: Browser restart required" % (browser.crawl_id)
)
else:
with self.threadlock:
self.failurecount = 0
if browser.restart_required:
break
# Sleep after executing CommandSequence to provide extra time for
# internal buffers to drain. Stopgap in support of #135
time.sleep(2)
if self.closing:
return
if browser.restart_required or reset:
success = browser.restart_browser_manager(clear_profile=reset)
if not success:
self.logger.critical(
"BROWSER %i: Exceeded the maximum allowable consecutive "
"browser launch failures. Setting failure_status." % (browser.crawl_id)
)
self.failure_status = {
"ErrorType": "ExceedLaunchFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = False
|
def _issue_command(self, browser, command_sequence, condition=None):
"""
sends command tuple to the BrowserManager
"""
browser.is_fresh = False
# if this is a synced call, block on condition
if condition is not None:
with condition:
condition.wait()
reset = command_sequence.reset
start_time = None
for command_and_timeout in command_sequence.commands_with_timeout:
command, timeout = command_and_timeout
if command[0] in [
"GET",
"BROWSE",
"SAVE_SCREENSHOT",
"SCREENSHOT_FULL_PAGE",
"DUMP_PAGE_SOURCE",
"RECURSIVE_DUMP_PAGE_SOURCE",
]:
start_time = time.time()
command += (browser.curr_visit_id,)
elif command[0] in ["DUMP_FLASH_COOKIES", "DUMP_PROFILE_COOKIES"]:
command += (
start_time,
browser.curr_visit_id,
)
browser.current_timeout = timeout
# passes off command and waits for a success (or failure signal)
browser.command_queue.put(command)
command_succeeded = 0 # 1 success, 0 error, -1 timeout
command_arguments = command[1] if len(command) > 1 else None
# received reply from BrowserManager, either success or failure
try:
status = browser.status_queue.get(True, browser.current_timeout)
if status == "OK":
command_succeeded = 1
elif status[0] == "CRITICAL":
self.logger.critical(
"BROWSER %i: Received critical error from browser "
"process while executing command %s. Setting failure "
"status." % (browser.crawl_id, str(command))
)
self.failure_status = {
"ErrorType": "CriticalChildException",
"CommandSequence": command_sequence,
"Exception": status[1],
}
return
else:
command_succeeded = 0
self.logger.info(
"BROWSER %i: Received failure status while executing "
"command: %s" % (browser.crawl_id, command[0])
)
except EmptyQueue:
command_succeeded = -1
self.logger.info(
"BROWSER %i: Timeout while executing command, %s, killing "
"browser manager" % (browser.crawl_id, command[0])
)
self.sock.send(
(
"crawl_history",
{
"crawl_id": browser.crawl_id,
"visit_id": browser.curr_visit_id,
"command": command[0],
"arguments": command_arguments,
"bool_success": command_succeeded,
},
)
)
if command_succeeded != 1:
with self.threadlock:
self.failurecount += 1
if self.failurecount > self.failure_limit:
self.logger.critical(
"BROWSER %i: Command execution failure pushes failure "
"count above the allowable limit. Setting "
"failure_status." % browser.crawl_id
)
self.failure_status = {
"ErrorType": "ExceedCommandFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = True
self.logger.debug(
"BROWSER %i: Browser restart required" % (browser.crawl_id)
)
else:
with self.threadlock:
self.failurecount = 0
if browser.restart_required:
break
# Sleep after executing CommandSequence to provide extra time for
# internal buffers to drain. Stopgap in support of #135
time.sleep(2)
if self.closing:
return
if browser.restart_required or reset:
success = browser.restart_browser_manager(clear_profile=reset)
if not success:
self.logger.critical(
"BROWSER %i: Exceeded the maximum allowable consecutive "
"browser launch failures. Setting failure_status." % (browser.crawl_id)
)
self.failure_status = {
"ErrorType": "ExceedLaunchFailureLimit",
"CommandSequence": command_sequence,
}
return
browser.restart_required = False
|
https://github.com/mozilla/OpenWPM/issues/286
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/multiprocess/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/lib/python3.7/site-packages/multiprocess/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/awagner/mozilla/OpenWPM/automation/DataAggregator/S3Aggregator.py", line 40, in listener_process_runner
listener.process_record(record)
File "/Users/awagner/mozilla/OpenWPM/automation/DataAggregator/S3Aggregator.py", line 225, in process_record
self._create_batch(self.browser_map[crawl_id])
File "/Users/awagner/mozilla/OpenWPM/automation/DataAggregator/S3Aggregator.py", line 97, in _create_batch
df, schema=PQ_SCHEMAS[table_name], preserve_index=False
File "pyarrow/table.pxi", line 858, in pyarrow.lib.RecordBatch.from_pandas
File "/usr/local/lib/python3.7/site-packages/pyarrow/pandas_compat.py", line 468, in dataframe_to_arrays
convert_types)]
File "/usr/local/lib/python3.7/site-packages/pyarrow/pandas_compat.py", line 467, in <listcomp>
for c, t in zip(columns_to_convert,
File "/usr/local/lib/python3.7/site-packages/pyarrow/pandas_compat.py", line 463, in convert_column
raise e
File "/usr/local/lib/python3.7/site-packages/pyarrow/pandas_compat.py", line 457, in convert_column
return pa.array(col, type=ty, from_pandas=True, safe=safe)
File "pyarrow/array.pxi", line 169, in pyarrow.lib.array
File "pyarrow/array.pxi", line 78, in pyarrow.lib._ndarray_to_array
File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowTypeError: ('an integer is required (got type str)', 'Conversion failed for column visit_id with type object')
|
pyarrow.lib.ArrowTypeError
|
def browse_website(
url,
num_links,
sleep,
visit_id,
webdriver,
browser_params,
manager_params,
extension_socket,
):
"""Calls get_website before visiting <num_links> present on the page.
Note: the site_url in the site_visits table for the links visited will
be the site_url of the original page and NOT the url of the links visited.
"""
# First get the site
get_website(url, sleep, visit_id, webdriver, browser_params, extension_socket)
# Connect to logger
logger = loggingclient(*manager_params["logger_address"])
# Then visit a few subpages
for _ in range(num_links):
links = [x for x in get_intra_links(webdriver, url) if is_displayed(x) is True]
if not links:
break
r = int(random.random() * len(links))
logger.info(
"BROWSER %i: visiting internal link %s"
% (browser_params["crawl_id"], links[r].get_attribute("href"))
)
try:
links[r].click()
wait_until_loaded(webdriver, 300)
time.sleep(max(1, sleep))
if browser_params["bot_mitigation"]:
bot_mitigation(webdriver)
webdriver.back()
wait_until_loaded(webdriver, 300)
except Exception:
pass
|
def browse_website(
url,
num_links,
sleep,
visit_id,
webdriver,
browser_params,
manager_params,
extension_socket,
):
"""Calls get_website before visiting <num_links> present on the page.
Note: the site_url in the site_visits table for the links visited will
be the site_url of the original page and NOT the url of the links visited.
"""
# First get the site
get_website(url, sleep, visit_id, webdriver, browser_params, extension_socket)
# Connect to logger
logger = loggingclient(*manager_params["logger_address"])
# Then visit a few subpages
for i in range(num_links):
links = [x for x in get_intra_links(webdriver, url) if x.is_displayed() is True]
if not links:
break
r = int(random.random() * len(links))
logger.info(
"BROWSER %i: visiting internal link %s"
% (browser_params["crawl_id"], links[r].get_attribute("href"))
)
try:
links[r].click()
wait_until_loaded(webdriver, 300)
time.sleep(max(1, sleep))
if browser_params["bot_mitigation"]:
bot_mitigation(webdriver)
webdriver.back()
wait_until_loaded(webdriver, 300)
except Exception:
pass
|
https://github.com/mozilla/OpenWPM/issues/167
|
TaskManager - INFO -
OpenWPM Version: v0.8.0-137-gb0a8e00
Firefox Version: 52.4.1
========== Browser Configuration ==========
Keys:
{
"crawl_id": 0,
"adblock-plus": 1,
"bot_mitigation": 2,
"browser": 3,
"cookie_instrument": 4,
"cp_instrument": 5,
"disable_flash": 6,
"disconnect": 7,
"donottrack": 8,
"extension_enabled": 9,
"ghostery": 10,
"headless": 11,
"http_instrument": 12,
"https-everywhere": 13,
"js_instrument": 14,
"prefs": 15,
"random_attributes": 16,
"save_all_content": 17,
"save_javascript": 18,
"tp_cookies": 19,
"tracking-protection": 20,
"ublock-origin": 21
}
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
--- ----- ----- ------- ----- ----- ----- ----- ----- ---- ----- ---- ---- ----- ----- ---- ----- ----- ----- ------ ----- -----
19 False False firefox False False False False False True False True True False False {} False False False always False False
========== Input profile tar files ==========
No profile tar files specified
========== Output (archive) profile dirs ==========
No profile archive directories specified
BrowserManager - INFO - BROWSER 19: EXECUTING COMMAND: ('BROWSE', 'http://www.en.aau.dk/', 50, 0, 16)
Using psl from cache: /tmp/public_suffix_list.dat
browser_commands - INFO - BROWSER 19: visiting internal link http://www.en.aau.dk/news/
browser_commands - INFO - BROWSER 19: visiting internal link http://www.en.aau.dk/cooperation/
browser_commands - INFO - BROWSER 19: visiting internal link http://www.en.aau.dk/events/
browser_commands - INFO - BROWSER 19: visiting internal link http://www.vacancies.aau.dk/
BrowserManager - INFO - BROWSER 19: Crash in driver, restarting browser manager
Traceback (most recent call last):
File "/user/es.aau.dk/sok/OpenWPM/automation/BrowserManager.py", line 404, in BrowserManager
browser_params, manager_params, extension_socket)
File "/user/es.aau.dk/sok/OpenWPM/automation/Commands/command_executor.py", line 22, in execute_command
extension_socket=extension_socket)
File "/user/es.aau.dk/sok/OpenWPM/automation/Commands/browser_commands.py", line 189, in browse_website
if x.is_displayed() is True]
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webelement.py", line 363, in is_displayed
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webelement.py", line 501, in _execute
return self._parent.execute(command, params)
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.py", line 308, in execute
self.error_handler.check_response(response)
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/errorhandler.py", line 194, in check_response
raise exception_class(message, screen, stacktrace)
StaleElementReferenceException: Message: The element reference of <a class="mobileMenu topLink"> stale: either the element is no longer attached to the DOM or the page has been refreshed
TaskManager - INFO - BROWSER 19: Received failure status while executing command: BROWSE
|
StaleElementReferenceException
|
def is_active(input_element):
"""Check if we can interact with the given element."""
try:
return is_displayed(input_element) and input_element.is_enabled()
except WebDriverException:
return False
|
def is_active(input_element):
"""Check if we can interact with the given element."""
try:
return input_element.is_displayed() and input_element.is_enabled()
except WebDriverException:
return False
|
https://github.com/mozilla/OpenWPM/issues/167
|
TaskManager - INFO -
OpenWPM Version: v0.8.0-137-gb0a8e00
Firefox Version: 52.4.1
========== Browser Configuration ==========
Keys:
{
"crawl_id": 0,
"adblock-plus": 1,
"bot_mitigation": 2,
"browser": 3,
"cookie_instrument": 4,
"cp_instrument": 5,
"disable_flash": 6,
"disconnect": 7,
"donottrack": 8,
"extension_enabled": 9,
"ghostery": 10,
"headless": 11,
"http_instrument": 12,
"https-everywhere": 13,
"js_instrument": 14,
"prefs": 15,
"random_attributes": 16,
"save_all_content": 17,
"save_javascript": 18,
"tp_cookies": 19,
"tracking-protection": 20,
"ublock-origin": 21
}
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
--- ----- ----- ------- ----- ----- ----- ----- ----- ---- ----- ---- ---- ----- ----- ---- ----- ----- ----- ------ ----- -----
19 False False firefox False False False False False True False True True False False {} False False False always False False
========== Input profile tar files ==========
No profile tar files specified
========== Output (archive) profile dirs ==========
No profile archive directories specified
BrowserManager - INFO - BROWSER 19: EXECUTING COMMAND: ('BROWSE', 'http://www.en.aau.dk/', 50, 0, 16)
Using psl from cache: /tmp/public_suffix_list.dat
browser_commands - INFO - BROWSER 19: visiting internal link http://www.en.aau.dk/news/
browser_commands - INFO - BROWSER 19: visiting internal link http://www.en.aau.dk/cooperation/
browser_commands - INFO - BROWSER 19: visiting internal link http://www.en.aau.dk/events/
browser_commands - INFO - BROWSER 19: visiting internal link http://www.vacancies.aau.dk/
BrowserManager - INFO - BROWSER 19: Crash in driver, restarting browser manager
Traceback (most recent call last):
File "/user/es.aau.dk/sok/OpenWPM/automation/BrowserManager.py", line 404, in BrowserManager
browser_params, manager_params, extension_socket)
File "/user/es.aau.dk/sok/OpenWPM/automation/Commands/command_executor.py", line 22, in execute_command
extension_socket=extension_socket)
File "/user/es.aau.dk/sok/OpenWPM/automation/Commands/browser_commands.py", line 189, in browse_website
if x.is_displayed() is True]
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webelement.py", line 363, in is_displayed
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webelement.py", line 501, in _execute
return self._parent.execute(command, params)
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.py", line 308, in execute
self.error_handler.check_response(response)
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/errorhandler.py", line 194, in check_response
raise exception_class(message, screen, stacktrace)
StaleElementReferenceException: Message: The element reference of <a class="mobileMenu topLink"> stale: either the element is no longer attached to the DOM or the page has been refreshed
TaskManager - INFO - BROWSER 19: Received failure status while executing command: BROWSE
|
StaleElementReferenceException
|
def fixSubTableOverFlows(ttf, overflowRecord):
"""
An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts.
"""
ok = 0
table = ttf[overflowRecord.tableType].table
lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
subIndex = overflowRecord.SubTableIndex
subtable = lookup.SubTable[subIndex]
# First, try not sharing anything for this subtable...
if not hasattr(subtable, "DontShare"):
subtable.DontShare = True
return True
if hasattr(subtable, "ExtSubTable"):
# We split the subtable of the Extension table, and add a new Extension table
# to contain the new subtable.
subTableType = subtable.ExtSubTable.__class__.LookupType
extSubTable = subtable
subtable = extSubTable.ExtSubTable
newExtSubTableClass = lookupTypes[overflowRecord.tableType][
extSubTable.__class__.LookupType
]
newExtSubTable = newExtSubTableClass()
newExtSubTable.Format = extSubTable.Format
lookup.SubTable.insert(subIndex + 1, newExtSubTable)
newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
newSubTable = newSubTableClass()
newExtSubTable.ExtSubTable = newSubTable
else:
subTableType = subtable.__class__.LookupType
newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
newSubTable = newSubTableClass()
lookup.SubTable.insert(subIndex + 1, newSubTable)
if hasattr(lookup, "SubTableCount"): # may not be defined yet.
lookup.SubTableCount = lookup.SubTableCount + 1
try:
splitFunc = splitTable[overflowRecord.tableType][subTableType]
except KeyError:
return ok
ok = splitFunc(subtable, newSubTable, overflowRecord)
return ok
|
def fixSubTableOverFlows(ttf, overflowRecord):
"""
An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts.
"""
ok = 0
table = ttf[overflowRecord.tableType].table
lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
subIndex = overflowRecord.SubTableIndex
subtable = lookup.SubTable[subIndex]
# First, try not sharing anything for this subtable...
if not hasattr(subtable, "DontShare"):
subtable.DontShare = True
return True
if hasattr(subtable, "ExtSubTable"):
# We split the subtable of the Extension table, and add a new Extension table
# to contain the new subtable.
subTableType = subtable.ExtSubTable.__class__.LookupType
extSubTable = subtable
subtable = extSubTable.ExtSubTable
newExtSubTableClass = lookupTypes[overflowRecord.tableType][
subtable.__class__.LookupType
]
newExtSubTable = newExtSubTableClass()
newExtSubTable.Format = extSubTable.Format
lookup.SubTable.insert(subIndex + 1, newExtSubTable)
newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
newSubTable = newSubTableClass()
newExtSubTable.ExtSubTable = newSubTable
else:
subTableType = subtable.__class__.LookupType
newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
newSubTable = newSubTableClass()
lookup.SubTable.insert(subIndex + 1, newSubTable)
if hasattr(lookup, "SubTableCount"): # may not be defined yet.
lookup.SubTableCount = lookup.SubTableCount + 1
try:
splitFunc = splitTable[overflowRecord.tableType][subTableType]
except KeyError:
return ok
ok = splitFunc(subtable, newSubTable, overflowRecord)
return ok
|
https://github.com/fonttools/fonttools/issues/574
|
Parsing 'GlyphOrder' table...
Parsing 'head' table...
Parsing 'hhea' table...
Parsing 'maxp' table...
Parsing 'OS/2' table...
Parsing 'name' table...
Parsing 'cmap' table...
Parsing 'post' table...
Parsing 'CFF ' table...
Parsing 'BASE' table...
Parsing 'GDEF' table...
Parsing 'GPOS' table...
Parsing 'GSUB' table...
Parsing 'hmtx' table...
Attempting to fix OTLOffsetOverflowError ('GPOS', 'LookupIndex:', 11, 'SubTableIndex:', None, 'ItemName:', None, 'ItemIndex:', None)
Attempting to fix OTLOffsetOverflowError ('GPOS', 'LookupIndex:', 10, 'SubTableIndex:', 1, 'ItemName:', 'Coverage', 'ItemIndex:', None)
Attempting to fix OTLOffsetOverflowError ('GPOS', 'LookupIndex:', 10, 'SubTableIndex:', 1, 'ItemName:', 'Coverage', 'ItemIndex:', None)
Traceback (most recent call last):
File "/fonttools/Lib/fontTools/ttx.py", line 383, in main
process(jobs, options)
File "/fonttools/Lib/fontTools/ttx.py", line 356, in process
action(input, output, options)
File "/fonttools/Lib/fontTools/misc/loggingTools.py", line 369, in wrapper
return func(*args, **kwds)
File "/fonttools/Lib/fontTools/ttx.py", line 277, in ttCompile
ttf.save(output)
File "/fonttools/Lib/fontTools/ttLib/__init__.py", line 216, in save
self._writeTable(tag, writer, done)
File "/fonttools/Lib/fontTools/ttLib/__init__.py", line 648, in _writeTable
tabledata = self.getTableData(tag)
File "/fonttools/Lib/fontTools/ttLib/__init__.py", line 659, in getTableData
return self.tables[tag].compile(self)
File "/fonttools/Lib/fontTools/ttLib/tables/otBase.py", line 90, in compile
self.table.compile(writer, font)
File "/fonttools/Lib/fontTools/ttLib/tables/otBase.py", line 660, in compile
conv.write(writer, font, table, value)
File "/fonttools/Lib/fontTools/ttLib/tables/otConverters.py", line 358, in write
value.compile(subWriter, font)
File "/fonttools/Lib/fontTools/ttLib/tables/otBase.py", line 635, in compile
conv.write(writer, font, table, value, i)
File "/fonttools/Lib/fontTools/ttLib/tables/otConverters.py", line 358, in write
value.compile(subWriter, font)
File "/fonttools/Lib/fontTools/ttLib/tables/otBase.py", line 635, in compile
conv.write(writer, font, table, value, i)
File "/fonttools/Lib/fontTools/ttLib/tables/otConverters.py", line 358, in write
value.compile(subWriter, font)
File "/fonttools/Lib/fontTools/ttLib/tables/otBase.py", line 618, in compile
writer['LookupType'].setValue(self.__class__.LookupType)
File "/fonttools/Lib/fontTools/ttLib/tables/otBase.py", line 514, in setValue
assert table[name] == value, (name, table[name], value)
AssertionError: (('LookupType', 9, 2), 'PairPos[2]', 'Lookup[10]', 'LookupList')
|
AssertionError
|
def train(args):
# parameters from arguments
class_dim = args.class_dim
model_name = args.model
checkpoint = args.checkpoint
pretrained_model = args.pretrained_model
with_memory_optimization = args.with_mem_opt
model_save_dir = args.model_save_dir
image_shape = [int(m) for m in args.image_shape.split(",")]
assert model_name in model_list, "{} is not in lists: {}".format(
args.model, model_list
)
image = fluid.layers.data(name="image", shape=image_shape, dtype="float32")
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
# model definition
model = models.__dict__[model_name]()
if model_name is "GoogleNet":
out0, out1, out2 = model.net(input=image, class_dim=class_dim)
cost0 = fluid.layers.cross_entropy(input=out0, label=label)
cost1 = fluid.layers.cross_entropy(input=out1, label=label)
cost2 = fluid.layers.cross_entropy(input=out2, label=label)
avg_cost0 = fluid.layers.mean(x=cost0)
avg_cost1 = fluid.layers.mean(x=cost1)
avg_cost2 = fluid.layers.mean(x=cost2)
avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
else:
out = model.net(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
test_program = fluid.default_main_program().clone(for_test=True)
# parameters from model and arguments
params = model.params
params["total_images"] = args.total_images
params["lr"] = args.lr
params["num_epochs"] = args.num_epochs
params["learning_strategy"]["batch_size"] = args.batch_size
params["learning_strategy"]["name"] = args.lr_strategy
# initialize optimizer
optimizer = optimizer_setting(params)
opts = optimizer.minimize(avg_cost)
if with_memory_optimization:
fluid.memory_optimize(fluid.default_main_program())
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if checkpoint is not None:
fluid.io.load_persistables(exe, checkpoint)
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
train_batch_size = args.batch_size
test_batch_size = 16
train_reader = paddle.batch(reader.train(), batch_size=train_batch_size)
test_reader = paddle.batch(reader.val(), batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
train_exe = fluid.ParallelExecutor(
use_cuda=True if args.use_gpu else False, loss_name=avg_cost.name
)
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
for pass_id in range(params["num_epochs"]):
train_info = [[], [], []]
test_info = [[], [], []]
for batch_id, data in enumerate(train_reader()):
t1 = time.time()
loss, acc1, acc5 = train_exe.run(fetch_list, feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
acc1 = np.mean(np.array(acc1))
acc5 = np.mean(np.array(acc5))
train_info[0].append(loss)
train_info[1].append(acc1)
train_info[2].append(acc5)
if batch_id % 10 == 0:
print(
"Pass {0}, trainbatch {1}, loss {2}, \
acc1 {3}, acc5 {4} time {5}".format(
pass_id, batch_id, loss, acc1, acc5, "%2.2f sec" % period
)
)
sys.stdout.flush()
train_loss = np.array(train_info[0]).mean()
train_acc1 = np.array(train_info[1]).mean()
train_acc5 = np.array(train_info[2]).mean()
cnt = 0
for test_batch_id, data in enumerate(test_reader()):
t1 = time.time()
loss, acc1, acc5 = exe.run(
test_program, fetch_list=fetch_list, feed=feeder.feed(data)
)
t2 = time.time()
period = t2 - t1
loss = np.mean(loss)
acc1 = np.mean(acc1)
acc5 = np.mean(acc5)
test_info[0].append(loss * len(data))
test_info[1].append(acc1 * len(data))
test_info[2].append(acc5 * len(data))
cnt += len(data)
if test_batch_id % 10 == 0:
print(
"Pass {0},testbatch {1},loss {2}, \
acc1 {3},acc5 {4},time {5}".format(
pass_id, test_batch_id, loss, acc1, acc5, "%2.2f sec" % period
)
)
sys.stdout.flush()
test_loss = np.sum(test_info[0]) / cnt
test_acc1 = np.sum(test_info[1]) / cnt
test_acc5 = np.sum(test_info[2]) / cnt
print(
"End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, "
"test_loss {4}, test_acc1 {5}, test_acc5 {6}".format(
pass_id,
train_loss,
train_acc1,
train_acc5,
test_loss,
test_acc1,
test_acc5,
)
)
sys.stdout.flush()
model_path = os.path.join(model_save_dir + "/" + model_name, str(pass_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(exe, model_path)
|
def train(args):
# parameters from arguments
class_dim = args.class_dim
model_name = args.model
checkpoint = args.checkpoint
pretrained_model = args.pretrained_model
with_memory_optimization = args.with_mem_opt
model_save_dir = args.model_save_dir
image_shape = [int(m) for m in args.image_shape.split(",")]
assert model_name in model_list, "{} is not in lists: {}".format(
args.model, model_list
)
image = fluid.layers.data(name="image", shape=image_shape, dtype="float32")
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
# model definition
model = models.__dict__[model_name]()
if model_name is "GoogleNet":
out0, out1, out2 = model.net(input=image, class_dim=class_dim)
cost0 = fluid.layers.cross_entropy(input=out0, label=label)
cost1 = fluid.layers.cross_entropy(input=out1, label=label)
cost2 = fluid.layers.cross_entropy(input=out2, label=label)
avg_cost0 = fluid.layers.mean(x=cost0)
avg_cost1 = fluid.layers.mean(x=cost1)
avg_cost2 = fluid.layers.mean(x=cost2)
avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
else:
out = model.net(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
test_program = fluid.default_main_program().clone(for_test=True)
# parameters from model and arguments
params = model.params
params["total_images"] = args.total_images
params["lr"] = args.lr
params["num_epochs"] = args.num_epochs
params["learning_strategy"]["batch_size"] = args.batch_size
params["learning_strategy"]["name"] = args.lr_strategy
# initialize optimizer
optimizer = optimizer_setting(params)
opts = optimizer.minimize(avg_cost)
if with_memory_optimization:
fluid.memory_optimize(fluid.default_main_program())
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if checkpoint is not None:
fluid.io.load_persistables(exe, checkpoint)
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
train_batch_size = args.batch_size
test_batch_size = 16
train_reader = paddle.batch(reader.train(), batch_size=train_batch_size)
test_reader = paddle.batch(reader.val(), batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
for pass_id in range(params["num_epochs"]):
train_info = [[], [], []]
test_info = [[], [], []]
for batch_id, data in enumerate(train_reader()):
t1 = time.time()
loss, acc1, acc5 = train_exe.run(fetch_list, feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
acc1 = np.mean(np.array(acc1))
acc5 = np.mean(np.array(acc5))
train_info[0].append(loss)
train_info[1].append(acc1)
train_info[2].append(acc5)
if batch_id % 10 == 0:
print(
"Pass {0}, trainbatch {1}, loss {2}, \
acc1 {3}, acc5 {4} time {5}".format(
pass_id, batch_id, loss, acc1, acc5, "%2.2f sec" % period
)
)
sys.stdout.flush()
train_loss = np.array(train_info[0]).mean()
train_acc1 = np.array(train_info[1]).mean()
train_acc5 = np.array(train_info[2]).mean()
cnt = 0
for test_batch_id, data in enumerate(test_reader()):
t1 = time.time()
loss, acc1, acc5 = exe.run(
test_program, fetch_list=fetch_list, feed=feeder.feed(data)
)
t2 = time.time()
period = t2 - t1
loss = np.mean(loss)
acc1 = np.mean(acc1)
acc5 = np.mean(acc5)
test_info[0].append(loss * len(data))
test_info[1].append(acc1 * len(data))
test_info[2].append(acc5 * len(data))
cnt += len(data)
if test_batch_id % 10 == 0:
print(
"Pass {0},testbatch {1},loss {2}, \
acc1 {3},acc5 {4},time {5}".format(
pass_id, test_batch_id, loss, acc1, acc5, "%2.2f sec" % period
)
)
sys.stdout.flush()
test_loss = np.sum(test_info[0]) / cnt
test_acc1 = np.sum(test_info[1]) / cnt
test_acc5 = np.sum(test_info[2]) / cnt
print(
"End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, "
"test_loss {4}, test_acc1 {5}, test_acc5 {6}".format(
pass_id,
train_loss,
train_acc1,
train_acc5,
test_loss,
test_acc1,
test_acc5,
)
)
sys.stdout.flush()
model_path = os.path.join(model_save_dir + "/" + model_name, str(pass_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(exe, model_path)
|
https://github.com/PaddlePaddle/models/issues/1089
|
Traceback (most recent call last):
File "infer.py", line 94, in <module>
main()
File "infer.py", line 90, in main
infer(args)
File "infer.py", line 68, in infer
test_reader = paddle.batch(reader.test(), batch_size=test_batch_size)
TypeError: test() takes exactly 1 argument (0 given)
|
TypeError
|
def _find_url(self, known_keys: list, links: dict) -> str:
links_keys = links.keys()
common_keys = [item for item in links_keys if item in known_keys]
key = next(iter(common_keys), None)
return links.get(key, {}).get("href", None)
|
def _find_url(self, known_keys: set, links: dict) -> str:
intersection = known_keys.intersection(links)
iterator = iter(intersection)
key = next(iterator, None)
return links.get(key, {}).get("href", None)
|
https://github.com/andreroggeri/pynubank/issues/195
|
Traceback (most recent call last):
File ".\extrato-ofx2.py", line 53, in <module>
nubank_transactions = nu.get_card_statements()
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\nubank.py", line 123, in get_card_statements
feed = self.get_card_feed()
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\nubank.py", line 120, in get_card_feed
return self.client.get(self.feed_url)
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\utils\http.py", line 37, in get
return self._handle_response(get(url, headers=self._headers, **self._cert_args))
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\utils\http.py", line 32, in _handle_response
raise NuRequestException(response)
pynubank.exception.NuRequestException: The request made failed with HTTP status code 403
|
pynubank.exception.NuRequestException
|
def _save_auth_data(self, auth_data: dict) -> None:
self.client.set_header("Authorization", f"Bearer {auth_data['access_token']}")
links = auth_data["_links"]
self.query_url = links["ghostflame"]["href"]
feed_url_keys = ["events", "magnitude"]
bills_url_keys = ["bills_summary"]
customer_url_keys = ["customer"]
self.feed_url = self._find_url(feed_url_keys, links)
self.bills_url = self._find_url(bills_url_keys, links)
self.customer_url = self._find_url(customer_url_keys, links)
|
def _save_auth_data(self, auth_data: dict) -> None:
self.client.set_header("Authorization", f"Bearer {auth_data['access_token']}")
links = auth_data["_links"]
self.query_url = links["ghostflame"]["href"]
feed_url_keys = {"events", "magnitude"}
bills_url_keys = {"bills_summary"}
customer_url_keys = {"customer"}
self.feed_url = self._find_url(feed_url_keys, links)
self.bills_url = self._find_url(bills_url_keys, links)
self.customer_url = self._find_url(customer_url_keys, links)
|
https://github.com/andreroggeri/pynubank/issues/195
|
Traceback (most recent call last):
File ".\extrato-ofx2.py", line 53, in <module>
nubank_transactions = nu.get_card_statements()
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\nubank.py", line 123, in get_card_statements
feed = self.get_card_feed()
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\nubank.py", line 120, in get_card_feed
return self.client.get(self.feed_url)
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\utils\http.py", line 37, in get
return self._handle_response(get(url, headers=self._headers, **self._cert_args))
File "C:\Users\danfc\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pynubank\utils\http.py", line 32, in _handle_response
raise NuRequestException(response)
pynubank.exception.NuRequestException: The request made failed with HTTP status code 403
|
pynubank.exception.NuRequestException
|
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO",
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
return data
|
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO",
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
self.refresh_token = data["refresh_token"]
return data
|
https://github.com/andreroggeri/pynubank/issues/69
|
from nubank import *
nu = Nubank()
uuid, qr_code = nu.get_qr_code()
qr_code.print_ascii(invert=True)
█████████████████████████████████████
█████████████████████████████████████
████ ▄▄▄▄▄ ██▄▀ █ ▀ ▀▀▄▄▄█ ▄▄▄▄▄ ████
████ █ █ █ ▀▀ ▄▄▀ ▀▀ ▄ █ █ █ ████
████ █▄▄▄█ █ ▀▄ ███▀█▄▄▄▀█ █▄▄▄█ ████
████▄▄▄▄▄▄▄█ █ █ █ █ █▄█ █▄▄▄▄▄▄▄████
████ █ ▄▄██████▀▀▄▄▄▀▀▄█ ▄ ▄█▀████
████▄▄ █▄▀▄▄█▄ ██ ▄▄█▄ ██▀█ █▄ █████
████▀▀▄▀ ▀▄▀▄█▀█▀█▄▄▀▄▄ ▄ ▀▄▀▄▄▀▀████
████▄█▀ ▀▄█▄█▀█ █▀ ██ █ █▄▄▀▀▄▀▀████
████▀▀▄▄ ▄█ █▄█▄▀▀█▄▄▀ █▄▀▀▀█▄▀▀████
████ ██ ▀█▄▄▀ ▀▀▄ ▄▄▄█▀▀▀▀█ █▄██████
████▄██▄▄█▄▄ ▄█ █▄▄▄▄▀█ ▄▄▄ ▄▄▀████
████ ▄▄▄▄▄ █▀█▄█▄█▀ █▄ █▄█ ▄█ █████
████ █ █ █ █▀▄█▀█▄ ▄█ ▄▄ ▄▄▄▀▀████
████ █▄▄▄█ █▄▄▀ ▀ █▄ ▀ ▄▀ ▄▄ █████
████▄▄▄▄▄▄▄█▄█▄███▄▄▄▄▄███▄▄██▄██████
█████████████████████████████████████
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
nu.authenticate_with_qr_code('CPF', 'SENHA', uuid)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/henriquecarvalho/github-repos/pynubank/pynubank/nubank.py", line 113, in authenticate_with_qr_code
self.refresh_token = auth_data['refresh_token']
KeyError: 'refresh_token'
|
KeyError
|
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
self.headers["Authorization"] = f"Bearer {auth_data['access_token']}"
payload = {"qr_code_id": uuid, "type": "login-webapp"}
response = requests.post(
self.proxy_list_app_url["lift"], json=payload, headers=self.headers
)
auth_data = self._handle_response(response)
self.headers["Authorization"] = f"Bearer {auth_data['access_token']}"
self.feed_url = auth_data["_links"]["events"]["href"]
self.query_url = auth_data["_links"]["ghostflame"]["href"]
self.bills_url = auth_data["_links"]["bills_summary"]["href"]
|
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
self.headers["Authorization"] = f"Bearer {auth_data['access_token']}"
payload = {"qr_code_id": uuid, "type": "login-webapp"}
response = requests.post(
self.proxy_list_app_url["lift"], json=payload, headers=self.headers
)
auth_data = self._handle_response(response)
self.refresh_token = auth_data["refresh_token"]
self.headers["Authorization"] = f"Bearer {auth_data['access_token']}"
self.feed_url = auth_data["_links"]["events"]["href"]
self.query_url = auth_data["_links"]["ghostflame"]["href"]
self.bills_url = auth_data["_links"]["bills_summary"]["href"]
|
https://github.com/andreroggeri/pynubank/issues/69
|
from nubank import *
nu = Nubank()
uuid, qr_code = nu.get_qr_code()
qr_code.print_ascii(invert=True)
█████████████████████████████████████
█████████████████████████████████████
████ ▄▄▄▄▄ ██▄▀ █ ▀ ▀▀▄▄▄█ ▄▄▄▄▄ ████
████ █ █ █ ▀▀ ▄▄▀ ▀▀ ▄ █ █ █ ████
████ █▄▄▄█ █ ▀▄ ███▀█▄▄▄▀█ █▄▄▄█ ████
████▄▄▄▄▄▄▄█ █ █ █ █ █▄█ █▄▄▄▄▄▄▄████
████ █ ▄▄██████▀▀▄▄▄▀▀▄█ ▄ ▄█▀████
████▄▄ █▄▀▄▄█▄ ██ ▄▄█▄ ██▀█ █▄ █████
████▀▀▄▀ ▀▄▀▄█▀█▀█▄▄▀▄▄ ▄ ▀▄▀▄▄▀▀████
████▄█▀ ▀▄█▄█▀█ █▀ ██ █ █▄▄▀▀▄▀▀████
████▀▀▄▄ ▄█ █▄█▄▀▀█▄▄▀ █▄▀▀▀█▄▀▀████
████ ██ ▀█▄▄▀ ▀▀▄ ▄▄▄█▀▀▀▀█ █▄██████
████▄██▄▄█▄▄ ▄█ █▄▄▄▄▀█ ▄▄▄ ▄▄▀████
████ ▄▄▄▄▄ █▀█▄█▄█▀ █▄ █▄█ ▄█ █████
████ █ █ █ █▀▄█▀█▄ ▄█ ▄▄ ▄▄▄▀▀████
████ █▄▄▄█ █▄▄▀ ▀ █▄ ▀ ▄▀ ▄▄ █████
████▄▄▄▄▄▄▄█▄█▄███▄▄▄▄▄███▄▄██▄██████
█████████████████████████████████████
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
nu.authenticate_with_qr_code('CPF', 'SENHA', uuid)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/henriquecarvalho/github-repos/pynubank/pynubank/nubank.py", line 113, in authenticate_with_qr_code
self.refresh_token = auth_data['refresh_token']
KeyError: 'refresh_token'
|
KeyError
|
def get_job_result(self, job_id: str) -> Result:
"""Returns the result of a job.
Args:
job_id (str): the job ID
Returns:
strawberryfields.api.Result: the job result
"""
path = "/jobs/{}/result".format(job_id)
response = requests.get(
self._url(path), headers={"Accept": "application/x-numpy", **self._headers}
)
if response.status_code == 200:
# Read the numpy binary data in the payload into memory
with io.BytesIO() as buf:
buf.write(response.content)
buf.seek(0)
samples = np.load(buf, allow_pickle=False)
if np.issubdtype(samples.dtype, np.integer):
# Samples represent photon numbers.
# Convert to int64, to avoid unexpected behaviour
# when users postprocess these samples.
samples = samples.astype(np.int64)
return Result(samples, is_stateful=False)
raise RequestFailedError(
"Failed to get job result: {}".format(self._format_error_message(response))
)
|
def get_job_result(self, job_id: str) -> Result:
"""Returns the result of a job.
Args:
job_id (str): the job ID
Returns:
strawberryfields.api.Result: the job result
"""
path = "/jobs/{}/result".format(job_id)
response = requests.get(
self._url(path), headers={"Accept": "application/x-numpy", **self._headers}
)
if response.status_code == 200:
# Read the numpy binary data in the payload into memory
with io.BytesIO() as buf:
buf.write(response.content)
buf.seek(0)
samples = np.load(buf, allow_pickle=False)
return Result(samples, is_stateful=False)
raise RequestFailedError(
"Failed to get job result: {}".format(self._format_error_message(response))
)
|
https://github.com/XanaduAI/strawberryfields/issues/356
|
Traceback (most recent call last):
File "remote_engine_example.py", line 27, in <module>
print(result)
File "/strawberryfields/strawberryfields/api/result.py", line 113, in __str__
len(self.samples), self.state, self.samples
File "/strawberryfields/strawberryfields/api/result.py", line 107, in state
raise AttributeError("The state is undefined for a stateless computation.")
AttributeError: The state is undefined for a stateless computation.
|
AttributeError
|
def measure_homodyne(self, phi, mode, select=None, **kwargs):
"""
Performs a homodyne measurement on a mode.
"""
m_omega_over_hbar = 1 / self._hbar
# Make sure the state is mixed for reduced density matrix
if self._pure:
state = ops.mix(self._state, self._num_modes)
else:
state = self._state
if select is not None:
meas_result = select
if isinstance(meas_result, numbers.Number):
homodyne_sample = float(meas_result)
else:
raise TypeError("Selected measurement result must be of numeric type.")
else:
# Compute reduced density matrix
unmeasured = [i for i in range(self._num_modes) if not i == mode]
reduced = ops.partial_trace(state, self._num_modes, unmeasured)
# Rotate to measurement basis
reduced = self.apply_gate_BLAS(
ops.phase(-phi, self._trunc), [0], state=reduced, pure=False, n=1
)
# Create pdf. Same as tf implementation, but using
# the recursive relation H_0(x) = 1, H_1(x) = 2x, H_{n+1}(x) = 2xH_n(x) - 2nH_{n-1}(x)
q_mag = kwargs.get("max", 10)
num_bins = kwargs.get("num_bins", 100000)
q_tensor, Hvals = ops.hermiteVals(
q_mag, num_bins, m_omega_over_hbar, self._trunc
)
H_matrix = np.zeros((self._trunc, self._trunc, num_bins))
for n, m in product(range(self._trunc), repeat=2):
H_matrix[n][m] = (
1 / sqrt(2**n * bang(n) * 2**m * bang(m)) * Hvals[n] * Hvals[m]
)
H_terms = np.expand_dims(reduced, -1) * np.expand_dims(H_matrix, 0)
rho_dist = (
np.sum(H_terms, axis=(1, 2))
* (m_omega_over_hbar / pi) ** 0.5
* np.exp(-m_omega_over_hbar * q_tensor**2)
* (q_tensor[1] - q_tensor[0])
) # Delta_q for normalization (only works if the bins are equally spaced)
# Sample from rho_dist. This is a bit different from tensorflow due to how
# numpy treats multinomial sampling. In particular, numpy returns a
# histogram of the samples whereas tensorflow gives the list of samples.
# Numpy also does not use the log probabilities
probs = rho_dist.flatten().real
probs /= np.sum(probs)
# Due to floating point precision error, values in the calculated probability distribution
# may have a very small negative value of -epsilon. The following sets
# these small negative values to 0.
probs[np.abs(probs) < 1e-10] = 0
sample_hist = np.random.multinomial(1, probs)
sample_idx = list(sample_hist).index(1)
homodyne_sample = q_tensor[sample_idx]
# Project remaining modes into the conditional state
inf_squeezed_vac = np.array(
[
(-0.5) ** (n // 2) * sqrt(bang(n)) / bang(n // 2)
if n % 2 == 0
else 0.0 + 0.0j
for n in range(self._trunc)
],
dtype=ops.def_type,
)
alpha = homodyne_sample * sqrt(m_omega_over_hbar / 2)
composed = np.dot(ops.phase(phi, self._trunc), ops.displacement(alpha, self._trunc))
eigenstate = self.apply_gate_BLAS(
composed, [0], state=inf_squeezed_vac, pure=True, n=1
)
vac_state = np.array(
[1.0 + 0.0j if i == 0 else 0.0 + 0.0j for i in range(self._trunc)],
dtype=ops.def_type,
)
projector = np.outer(vac_state, eigenstate.conj())
self._state = self.apply_gate_BLAS(projector, [mode])
# Normalize
self._state = self._state / self.norm()
return homodyne_sample
|
def measure_homodyne(self, phi, mode, select=None, **kwargs):
"""
Performs a homodyne measurement on a mode.
"""
m_omega_over_hbar = 1 / self._hbar
# Make sure the state is mixed for reduced density matrix
if self._pure:
state = ops.mix(self._state, self._num_modes)
else:
state = self._state
if select is not None:
meas_result = select
if isinstance(meas_result, numbers.Number):
homodyne_sample = float(meas_result)
else:
raise TypeError("Selected measurement result must be of numeric type.")
else:
# Compute reduced density matrix
unmeasured = [i for i in range(self._num_modes) if not i == mode]
reduced = ops.partial_trace(state, self._num_modes, unmeasured)
# Rotate to measurement basis
reduced = self.apply_gate_BLAS(
ops.phase(-phi, self._trunc), [0], state=reduced, pure=False, n=1
)
# Create pdf. Same as tf implementation, but using
# the recursive relation H_0(x) = 1, H_1(x) = 2x, H_{n+1}(x) = 2xH_n(x) - 2nH_{n-1}(x)
q_mag = kwargs.get("max", 10)
num_bins = kwargs.get("num_bins", 100000)
q_tensor, Hvals = ops.hermiteVals(
q_mag, num_bins, m_omega_over_hbar, self._trunc
)
H_matrix = np.zeros((self._trunc, self._trunc, num_bins))
for n, m in product(range(self._trunc), repeat=2):
H_matrix[n][m] = (
1 / sqrt(2**n * bang(n) * 2**m * bang(m)) * Hvals[n] * Hvals[m]
)
H_terms = np.expand_dims(reduced, -1) * np.expand_dims(H_matrix, 0)
rho_dist = (
np.sum(H_terms, axis=(1, 2))
* (m_omega_over_hbar / pi) ** 0.5
* np.exp(-m_omega_over_hbar * q_tensor**2)
* (q_tensor[1] - q_tensor[0])
) # Delta_q for normalization (only works if the bins are equally spaced)
# Sample from rho_dist. This is a bit different from tensorflow due to how
# numpy treats multinomial sampling. In particular, numpy returns a
# histogram of the samples whereas tensorflow gives the list of samples.
# Numpy also does not use the log probabilities
probs = rho_dist.flatten().real
probs /= np.sum(probs)
sample_hist = np.random.multinomial(1, probs)
sample_idx = list(sample_hist).index(1)
homodyne_sample = q_tensor[sample_idx]
# Project remaining modes into the conditional state
inf_squeezed_vac = np.array(
[
(-0.5) ** (n // 2) * sqrt(bang(n)) / bang(n // 2)
if n % 2 == 0
else 0.0 + 0.0j
for n in range(self._trunc)
],
dtype=ops.def_type,
)
alpha = homodyne_sample * sqrt(m_omega_over_hbar / 2)
composed = np.dot(ops.phase(phi, self._trunc), ops.displacement(alpha, self._trunc))
eigenstate = self.apply_gate_BLAS(
composed, [0], state=inf_squeezed_vac, pure=True, n=1
)
vac_state = np.array(
[1.0 + 0.0j if i == 0 else 0.0 + 0.0j for i in range(self._trunc)],
dtype=ops.def_type,
)
projector = np.outer(vac_state, eigenstate.conj())
self._state = self.apply_gate_BLAS(projector, [mode])
# Normalize
self._state = self._state / self.norm()
return homodyne_sample
|
https://github.com/XanaduAI/strawberryfields/issues/354
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-27-d8f9402683f5> in <module>
2
3 for x in pbar(flag):
----> 4 dummy = xaxb()
5
6 x_vals[i]=dummy[0]
<ipython-input-22-64fa305c29f6> in xaxb()
12 MeasureX | q[1]
13
---> 14 result = eng.run(prog)
15
16 x, y = result.samples
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/engine.py in run(self, program, args, compile_options, run_options)
479 eng_run_options = {key: temp_run_options[key] for key in temp_run_options.keys() & eng_run_keys}
480
--> 481 result = super()._run(program, args=args, compile_options=compile_options, **eng_run_options)
482
483 modes = temp_run_options["modes"]
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/engine.py in _run(self, program, args, compile_options, **kwargs)
347 p.lock()
348
--> 349 self._run_program(p, **kwargs)
350 self.run_progs.append(p)
351 # store the latest measurement results
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/engine.py in _run_program(self, prog, **kwargs)
421 try:
422 # try to apply it to the backend
--> 423 cmd.op.apply(cmd.reg, self.backend, **kwargs) # NOTE we could also handle storing measured vals here
424 applied.append(cmd)
425 except NotApplicableError:
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/ops.py in apply(self, reg, backend, **kwargs)
285 Only applies to Measurements.
286 """
--> 287 values = super().apply(reg, backend, **kwargs)
288 # convert the returned values into an iterable with the measured modes indexed along
289 # the first axis and shots along second axis (if larger than 1), so that we can assign
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/ops.py in apply(self, reg, backend, **kwargs)
217 temp = [rr.ind for rr in reg]
218 # call the child class specialized _apply method
--> 219 return self._apply(temp, backend, **kwargs)
220
221
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/ops.py in _apply(self, reg, backend, shots, **kwargs)
806 select = select / s
807
--> 808 return s * backend.measure_homodyne(p[0], *reg, shots=shots, select=select, **kwargs)
809
810 def __str__(self):
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/backends/fockbackend/backend.py in measure_homodyne(self, phi, mode, shots, select, **kwargs)
187 raise NotImplementedError("fock backend currently does not support "
188 "shots != 1 for homodyne measurement")
--> 189 return self.circuit.measure_homodyne(phi, self._remap_modes(mode), select=select, **kwargs)
190
191 def loss(self, T, mode):
~/opt/anaconda3/lib/python3.7/site-packages/strawberryfields/backends/fockbackend/circuit.py in measure_homodyne(self, phi, mode, select, **kwargs)
498 probs = rho_dist.flatten().real
499 probs /= np.sum(probs)
--> 500 sample_hist = np.random.multinomial(1, probs)
501 sample_idx = list(sample_hist).index(1)
502 homodyne_sample = q_tensor[sample_idx]
mtrand.pyx in numpy.random.mtrand.RandomState.multinomial()
_common.pyx in numpy.random._common.check_array_constraint()
ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
|
ValueError
|
def _compile_with_cache_cuda(
source,
options,
arch,
cache_dir,
extra_source=None,
backend="nvrtc",
enable_cooperative_groups=False,
name_expressions=None,
log_stream=None,
cache_in_memory=False,
jitify=False,
):
# NVRTC does not use extra_source. extra_source is used for cache key.
global _empty_file_preprocess_cache
if cache_dir is None:
cache_dir = get_cache_dir()
if arch is None:
arch = _get_arch()
options += ("-ftz=true",)
if enable_cooperative_groups:
# `cooperative_groups` requires relocatable device code.
options += ("--device-c",)
if _get_bool_env_variable("CUPY_CUDA_COMPILE_WITH_DEBUG", False):
options += ("--device-debug", "--generate-line-info")
is_jitify_requested = "-DCUPY_USE_JITIFY" in options
if jitify and not is_jitify_requested:
# jitify is set in RawKernel/RawModule, translate it to an option
# that is useless to the compiler, but can be used as part of the
# hash key
options += ("-DCUPY_USE_JITIFY",)
elif is_jitify_requested and not jitify:
# jitify is requested internally, just set the flag
jitify = True
if jitify and backend != "nvrtc":
raise ValueError("jitify only works with NVRTC")
env = (arch, options, _get_nvrtc_version(), backend)
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
# This is checking of NVRTC compiler internal version
base = _preprocess("", options, arch, backend)
_empty_file_preprocess_cache[env] = base
key_src = "%s %s %s %s" % (env, base, source, extra_source)
key_src = key_src.encode("utf-8")
name = "%s_2.cubin" % hashlib.md5(key_src).hexdigest()
mod = function.Module()
if not cache_in_memory:
# Read from disk cache
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
# To handle conflicts in concurrent situation, we adopt lock-free
# method to avoid performance degradation.
# We force recompiling to retrieve C++ mangled names if so desired.
path = os.path.join(cache_dir, name)
if os.path.exists(path) and not name_expressions:
with open(path, "rb") as file:
data = file.read()
if len(data) >= 32:
hash = data[:32]
cubin = data[32:]
cubin_hash = hashlib.md5(cubin).hexdigest().encode("ascii")
if hash == cubin_hash:
mod.load(cubin)
return mod
else:
# Enforce compiling -- the resulting kernel will be cached elsewhere,
# so we do nothing
pass
if backend == "nvrtc":
cu_name = "" if cache_in_memory else name + ".cu"
ptx, mapping = compile_using_nvrtc(
source,
options,
arch,
cu_name,
name_expressions,
log_stream,
cache_in_memory,
jitify,
)
if _is_cudadevrt_needed(options):
# for separate compilation
ls = function.LinkState()
ls.add_ptr_data(ptx, "cupy.ptx")
_cudadevrt = _get_cudadevrt_path()
ls.add_ptr_file(_cudadevrt)
cubin = ls.complete()
else:
cubin = ptx
mod._set_mapping(mapping)
elif backend == "nvcc":
rdc = _is_cudadevrt_needed(options)
cubin = compile_using_nvcc(
source,
options,
arch,
name + ".cu",
code_type="cubin",
separate_compilation=rdc,
log_stream=log_stream,
)
else:
raise ValueError("Invalid backend %s" % backend)
if not cache_in_memory:
# Write to disk cache
cubin_hash = hashlib.md5(cubin).hexdigest().encode("ascii")
# shutil.move is not atomic operation, so it could result in a
# corrupted file. We detect it by appending md5 hash at the beginning
# of each cache file. If the file is corrupted, it will be ignored
# next time it is read.
with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
tf.write(cubin_hash)
tf.write(cubin)
temp_path = tf.name
shutil.move(temp_path, path)
# Save .cu source file along with .cubin
if _get_bool_env_variable("CUPY_CACHE_SAVE_CUDA_SOURCE", False):
with open(path + ".cu", "w") as f:
f.write(source)
else:
# we don't do any disk I/O
pass
mod.load(cubin)
return mod
|
def _compile_with_cache_cuda(
source,
options,
arch,
cache_dir,
extra_source=None,
backend="nvrtc",
enable_cooperative_groups=False,
name_expressions=None,
log_stream=None,
cache_in_memory=False,
jitify=False,
):
# NVRTC does not use extra_source. extra_source is used for cache key.
global _empty_file_preprocess_cache
if cache_dir is None:
cache_dir = get_cache_dir()
if arch is None:
arch = _get_arch()
options += ("-ftz=true",)
if enable_cooperative_groups:
# `cooperative_groups` requires `-rdc=true`.
# The three latter flags are to resolve linker error.
# (https://devtalk.nvidia.com/default/topic/1023604/linker-error/)
options += ("-rdc=true", "-Xcompiler", "-fPIC", "-shared")
if _get_bool_env_variable("CUPY_CUDA_COMPILE_WITH_DEBUG", False):
options += ("--device-debug", "--generate-line-info")
is_jitify_requested = "-DCUPY_USE_JITIFY" in options
if jitify and not is_jitify_requested:
# jitify is set in RawKernel/RawModule, translate it to an option
# that is useless to the compiler, but can be used as part of the
# hash key
options += ("-DCUPY_USE_JITIFY",)
elif is_jitify_requested and not jitify:
# jitify is requested internally, just set the flag
jitify = True
if jitify and backend != "nvrtc":
raise ValueError("jitify only works with NVRTC")
env = (arch, options, _get_nvrtc_version(), backend)
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
# This is checking of NVRTC compiler internal version
base = _preprocess("", options, arch, backend)
_empty_file_preprocess_cache[env] = base
key_src = "%s %s %s %s" % (env, base, source, extra_source)
key_src = key_src.encode("utf-8")
name = "%s_2.cubin" % hashlib.md5(key_src).hexdigest()
mod = function.Module()
if not cache_in_memory:
# Read from disk cache
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
# To handle conflicts in concurrent situation, we adopt lock-free
# method to avoid performance degradation.
# We force recompiling to retrieve C++ mangled names if so desired.
path = os.path.join(cache_dir, name)
if os.path.exists(path) and not name_expressions:
with open(path, "rb") as file:
data = file.read()
if len(data) >= 32:
hash = data[:32]
cubin = data[32:]
cubin_hash = hashlib.md5(cubin).hexdigest().encode("ascii")
if hash == cubin_hash:
mod.load(cubin)
return mod
else:
# Enforce compiling -- the resulting kernel will be cached elsewhere,
# so we do nothing
pass
if backend == "nvrtc":
cu_name = "" if cache_in_memory else name + ".cu"
ptx, mapping = compile_using_nvrtc(
source,
options,
arch,
cu_name,
name_expressions,
log_stream,
cache_in_memory,
jitify,
)
if _is_cudadevrt_needed(options):
# for separate compilation
ls = function.LinkState()
ls.add_ptr_data(ptx, "cupy.ptx")
_cudadevrt = _get_cudadevrt_path()
ls.add_ptr_file(_cudadevrt)
cubin = ls.complete()
else:
cubin = ptx
mod._set_mapping(mapping)
elif backend == "nvcc":
rdc = _is_cudadevrt_needed(options)
cubin = compile_using_nvcc(
source,
options,
arch,
name + ".cu",
code_type="cubin",
separate_compilation=rdc,
log_stream=log_stream,
)
else:
raise ValueError("Invalid backend %s" % backend)
if not cache_in_memory:
# Write to disk cache
cubin_hash = hashlib.md5(cubin).hexdigest().encode("ascii")
# shutil.move is not atomic operation, so it could result in a
# corrupted file. We detect it by appending md5 hash at the beginning
# of each cache file. If the file is corrupted, it will be ignored
# next time it is read.
with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
tf.write(cubin_hash)
tf.write(cubin)
temp_path = tf.name
shutil.move(temp_path, path)
# Save .cu source file along with .cubin
if _get_bool_env_variable("CUPY_CACHE_SAVE_CUDA_SOURCE", False):
with open(path + ".cu", "w") as f:
f.write(source)
else:
# we don't do any disk I/O
pass
mod.load(cubin)
return mod
|
https://github.com/cupy/cupy/issues/4421
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "cupy\core\raw.pyx", line 282, in cupy.core.raw.RawKernel.compile
File "cupy\core\raw.pyx", line 110, in cupy.core.raw.RawKernel._kernel
File "cupy\cuda\function.pyx", line 234, in cupy.cuda.function.Module.get_function
File "cupy\cuda\function.pyx", line 175, in cupy.cuda.function.Function.__init__
File "cupy_backends\cuda\api\driver.pyx", line 262, in cupy_backends.cuda.api.driver.moduleGetFunction
File "cupy_backends\cuda\api\driver.pyx", line 124, in cupy_backends.cuda.api.driver.check_status
cupy_backends.cuda.api.driver.CUDADriverError: CUDA_ERROR_NOT_FOUND: named symbol not found
|
cupy_backends.cuda.api.driver.CUDADriverError
|
def bytes(length):
"""Returns random bytes.
.. note:: This function is just a wrapper for :obj:`numpy.random.bytes`.
The resulting bytes are generated on the host (NumPy), not GPU.
.. seealso:: :meth:`numpy.random.bytes
<numpy.random.mtrand.RandomState.bytes>`
"""
# TODO(kmaehashi): should it be provided in CuPy?
return _numpy.random.bytes(length)
|
def bytes(length):
"""Returns random bytes.
.. seealso:: :meth:`numpy.random.bytes
<numpy.random.mtrand.RandomState.bytes>`
"""
return _numpy.bytes(length)
|
https://github.com/cupy/cupy/issues/4312
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-121-104e7c75af44> in <module>
1 import cupy as cp
----> 2 b = cp.random.bytes(10)
~/.conda/envs/rapids-0.16/lib/python3.7/site-packages/cupy/random/__init__.py in bytes(length)
8 <numpy.random.mtrand.RandomState.bytes>`
9 """
---> 10 return _numpy.bytes(length)
11
12
~/.conda/envs/rapids-0.16/lib/python3.7/site-packages/numpy/__init__.py in __getattr__(attr)
213 else:
214 raise AttributeError("module {!r} has no attribute "
--> 215 "{!r}".format(__name__, attr))
216
217 def __dir__():
AttributeError: module 'numpy' has no attribute 'bytes'
|
AttributeError
|
def affine_transform(
input,
matrix,
offset=0.0,
output_shape=None,
output=None,
order=None,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value is
determined from the input image at position
``cupy.dot(matrix, o) + offset``.
Args:
input (cupy.ndarray): The input array.
matrix (cupy.ndarray): The inverse coordinate transformation matrix,
mapping output coordinates to input coordinates. If ``ndim`` is the
number of dimensions of ``input``, the given matrix must have one
of the following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always
``[0, 0, ..., 1]``, and may be omitted.
offset (float or sequence): The offset into the array where the
transform is applied. If a float, ``offset`` is the same for each
axis. If a sequence, ``offset`` should contain one value for each
axis.
output_shape (tuple of ints): Shape tuple.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The transformed input. If ``output`` is given as a parameter,
``None`` is returned.
.. seealso:: :func:`scipy.ndimage.affine_transform`
"""
_check_parameter("affine_transform", order, mode)
offset = _util._fix_sequence_arg(offset, input.ndim, "offset", float)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError("no proper affine matrix provided")
if matrix.ndim == 2:
if matrix.shape[0] == matrix.shape[1] - 1:
offset = matrix[:, -1]
matrix = matrix[:, :-1]
elif matrix.shape[0] == input.ndim + 1:
offset = matrix[:-1, -1]
matrix = matrix[:-1, :-1]
if matrix.shape != (input.ndim, input.ndim):
raise RuntimeError("improper affine shape")
if mode == "opencv":
m = cupy.zeros((input.ndim + 1, input.ndim + 1))
m[:-1, :-1] = matrix
m[:-1, -1] = offset
m[-1, -1] = 1
m = cupy.linalg.inv(m)
m[:2] = cupy.roll(m[:2], 1, axis=0)
m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1)
matrix = m[:-1, :-1]
offset = m[:-1, -1]
if output_shape is None:
output_shape = input.shape
if mode == "opencv" or mode == "_opencv_edge":
if matrix.ndim == 1:
matrix = cupy.diag(matrix)
coordinates = cupy.indices(output_shape, dtype=cupy.float64)
coordinates = cupy.dot(matrix, coordinates.reshape((input.ndim, -1)))
coordinates += cupy.expand_dims(cupy.asarray(offset), -1)
ret = _util._get_output(output, input, shape=output_shape)
ret[:] = map_coordinates(
input, coordinates, ret.dtype, order, mode, cval, prefilter
).reshape(output_shape)
return ret
matrix = matrix.astype(cupy.float64, copy=False)
if order is None:
order = 1
ndim = input.ndim
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
if matrix.ndim == 1:
offset = cupy.asarray(offset, dtype=cupy.float64)
offset = -offset / matrix
kern = _interp_kernels._get_zoom_shift_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
kern(input, offset, matrix, output)
else:
kern = _interp_kernels._get_affine_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64)
m[:, :-1] = matrix
m[:, -1] = cupy.asarray(offset, dtype=cupy.float64)
kern(input, m, output)
return output
|
def affine_transform(
input,
matrix,
offset=0.0,
output_shape=None,
output=None,
order=None,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value is
determined from the input image at position
``cupy.dot(matrix, o) + offset``.
Args:
input (cupy.ndarray): The input array.
matrix (cupy.ndarray): The inverse coordinate transformation matrix,
mapping output coordinates to input coordinates. If ``ndim`` is the
number of dimensions of ``input``, the given matrix must have one
of the following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always
``[0, 0, ..., 1]``, and may be omitted.
offset (float or sequence): The offset into the array where the
transform is applied. If a float, ``offset`` is the same for each
axis. If a sequence, ``offset`` should contain one value for each
axis.
output_shape (tuple of ints): Shape tuple.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The transformed input. If ``output`` is given as a parameter,
``None`` is returned.
.. seealso:: :func:`scipy.ndimage.affine_transform`
"""
_check_parameter("affine_transform", order, mode)
offset = _util._fix_sequence_arg(offset, input.ndim, "offset", float)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError("no proper affine matrix provided")
if matrix.ndim == 2:
if matrix.shape[0] == matrix.shape[1] - 1:
offset = matrix[:, -1]
matrix = matrix[:, :-1]
elif matrix.shape[0] == input.ndim + 1:
offset = matrix[:-1, -1]
matrix = matrix[:-1, :-1]
if matrix.shape != (input.ndim, input.ndim):
raise RuntimeError("improper affine shape")
if mode == "opencv":
m = cupy.zeros((input.ndim + 1, input.ndim + 1))
m[:-1, :-1] = matrix
m[:-1, -1] = offset
m[-1, -1] = 1
m = cupy.linalg.inv(m)
m[:2] = cupy.roll(m[:2], 1, axis=0)
m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1)
matrix = m[:-1, :-1]
offset = m[:-1, -1]
if output_shape is None:
output_shape = input.shape
matrix = matrix.astype(cupy.float64, copy=False)
if order is None:
order = 1
ndim = input.ndim
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
if matrix.ndim == 1:
offset = cupy.asarray(offset, dtype=cupy.float64)
offset = -offset / matrix
kern = _interp_kernels._get_zoom_shift_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
kern(input, offset, matrix, output)
else:
kern = _interp_kernels._get_affine_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64)
m[:, :-1] = matrix
m[:, -1] = cupy.asarray(offset, dtype=cupy.float64)
kern(input, m, output)
return output
|
https://github.com/cupy/cupy/issues/3601
|
cupyx.scipy.ndimage.affine_transform(im, M, output_shape=smaller_shape, output=smaller, mode='opencv')
/home/ext-mtakagi/cupy/cupyx/scipy/ndimage/interpolation.py:30: UserWarning: In the current feature the default order of affine_transform is 1. It is different from scipy.ndimage and can change in the future.
'the future.'.format(func_name))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ext-mtakagi/cupy/cupyx/scipy/ndimage/interpolation.py", line 212, in affine_transform
integer_output=integer_output)
File "cupy/util.pyx", line 103, in cupy.util.memoize.decorator.ret
File "/home/ext-mtakagi/cupy/cupyx/scipy/ndimage/_interp_kernels.py", line 405, in _get_affine_kernel
integer_output=integer_output,
File "/home/ext-mtakagi/cupy/cupyx/scipy/ndimage/_interp_kernels.py", line 269, in _generate_interp_custom
mode, ixvar, 'xsize_{}'.format(j)))
File "/home/ext-mtakagi/cupy/cupyx/scipy/ndimage/filters.py", line 720, in _generate_boundary_condition_ops
return ops
UnboundLocalError: local variable 'ops' referenced before assignment
|
UnboundLocalError
|
def _generate_nd_kernel(
name,
pre,
found,
post,
mode,
w_shape,
int_type,
offsets,
cval,
ctype="X",
preamble="",
options=(),
has_weights=True,
has_structure=False,
has_mask=False,
binary_morphology=False,
all_weights_nonzero=False,
):
# Currently this code uses CArray for weights but avoids using CArray for
# the input data and instead does the indexing itself since it is faster.
# If CArray becomes faster than follow the comments that start with
# CArray: to switch over to using CArray for the input data as well.
ndim = len(w_shape)
in_params = "raw X x"
if has_weights:
in_params += ", raw W w"
if has_structure:
in_params += ", raw S s"
if has_mask:
in_params += ", raw M mask"
out_params = "Y y"
# CArray: remove xstride_{j}=... from string
size = (
"%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]"
", xstride_{j}=x.strides()[{j}];" % int_type
)
sizes = [size.format(j=j) for j in range(ndim)]
inds = _util._generate_indices_ops(ndim, int_type, offsets)
# CArray: remove expr entirely
expr = " + ".join(["ix_{}".format(j) for j in range(ndim)])
ws_init = ws_pre = ws_post = ""
if has_weights or has_structure:
ws_init = "int iws = 0;"
if has_structure:
ws_pre = "S sval = s[iws];\n"
if has_weights:
ws_pre += "W wval = w[iws];\n"
if not all_weights_nonzero:
ws_pre += "if (nonzero(wval))"
ws_post = "iws++;"
loops = []
for j in range(ndim):
if w_shape[j] == 1:
# CArray: string becomes 'inds[{j}] = ind_{j};', remove (int_)type
loops.append(
"{{ {type} ix_{j} = ind_{j} * xstride_{j};".format(j=j, type=int_type)
)
else:
boundary = _util._generate_boundary_condition_ops(
mode, "ix_{}".format(j), "xsize_{}".format(j)
)
# CArray: last line of string becomes inds[{j}] = ix_{j};
loops.append(
"""
for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)
{{
{type} ix_{j} = ind_{j} + iw_{j};
{boundary}
ix_{j} *= xstride_{j};
""".format(j=j, wsize=w_shape[j], boundary=boundary, type=int_type)
)
# CArray: string becomes 'x[inds]', no format call needed
value = "(*(X*)&data[{expr}])".format(expr=expr)
if mode == "constant":
cond = " || ".join(["(ix_{} < 0)".format(j) for j in range(ndim)])
if cval is numpy.nan:
cval = "CUDART_NAN"
elif cval == numpy.inf:
cval = "CUDART_INF"
elif cval == -numpy.inf:
cval = "-CUDART_INF"
if binary_morphology:
found = found.format(cond=cond, value=value)
else:
if mode == "constant":
value = "(({cond}) ? cast<{ctype}>({cval}) : {value})".format(
cond=cond, ctype=ctype, cval=cval, value=value
)
found = found.format(value=value)
# CArray: replace comment and next line in string with
# {type} inds[{ndim}] = {{0}};
# and add ndim=ndim, type=int_type to format call
operation = """
{sizes}
{inds}
// don't use a CArray for indexing (faster to deal with indexing ourselves)
const unsigned char* data = (const unsigned char*)&x[0];
{ws_init}
{pre}
{loops}
// inner-most loop
{ws_pre} {{
{found}
}}
{ws_post}
{end_loops}
{post}
""".format(
sizes="\n".join(sizes),
inds=inds,
pre=pre,
post=post,
ws_init=ws_init,
ws_pre=ws_pre,
ws_post=ws_post,
loops="\n".join(loops),
found=found,
end_loops="}" * ndim,
)
name = "cupy_ndimage_{}_{}d_{}_w{}".format(
name, ndim, mode, "_".join(["{}".format(x) for x in w_shape])
)
if all_weights_nonzero:
name += "_all_nonzero"
if int_type == "ptrdiff_t":
name += "_i64"
if has_structure:
name += "_with_structure"
if has_mask:
name += "_with_mask"
preamble = math_constants_preamble + _CAST_FUNCTION + preamble
return cupy.ElementwiseKernel(
in_params,
out_params,
operation,
name,
reduce_dims=False,
preamble=preamble,
options=("--std=c++11",) + options,
)
|
def _generate_nd_kernel(
name,
pre,
found,
post,
mode,
w_shape,
int_type,
offsets,
cval,
ctype="X",
preamble="",
options=(),
has_weights=True,
has_structure=False,
has_mask=False,
binary_morphology=False,
all_weights_nonzero=False,
):
# Currently this code uses CArray for weights but avoids using CArray for
# the input data and instead does the indexing itself since it is faster.
# If CArray becomes faster than follow the comments that start with
# CArray: to switch over to using CArray for the input data as well.
ndim = len(w_shape)
in_params = "raw X x"
if has_weights:
in_params += ", raw W w"
if has_structure:
in_params += ", raw S s"
if has_mask:
in_params += ", raw M mask"
out_params = "Y y"
# CArray: remove xstride_{j}=... from string
size = (
"%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]"
", xstride_{j}=x.strides()[{j}];" % int_type
)
sizes = [size.format(j=j) for j in range(ndim)]
inds = _util._generate_indices_ops(ndim, int_type, offsets)
# CArray: remove expr entirely
expr = " + ".join(["ix_{}".format(j) for j in range(ndim)])
ws_init = ws_pre = ws_post = ""
if has_weights or has_structure:
ws_init = "int iws = 0;"
if has_structure:
ws_pre = "S sval = s[iws];\n"
if has_weights:
ws_pre += "W wval = w[iws];\n"
if not all_weights_nonzero:
ws_pre += "if (nonzero(wval))"
ws_post = "iws++;"
loops = []
for j in range(ndim):
if w_shape[j] == 1:
# CArray: string becomes 'inds[{j}] = ind_{j};', remove (int_)type
loops.append(
"{{ {type} ix_{j} = ind_{j} * xstride_{j};".format(j=j, type=int_type)
)
else:
boundary = _util._generate_boundary_condition_ops(
mode, "ix_{}".format(j), "xsize_{}".format(j)
)
# CArray: last line of string becomes inds[{j}] = ix_{j};
loops.append(
"""
for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)
{{
{type} ix_{j} = ind_{j} + iw_{j};
{boundary}
ix_{j} *= xstride_{j};
""".format(j=j, wsize=w_shape[j], boundary=boundary, type=int_type)
)
# CArray: string becomes 'x[inds]', no format call needed
value = "(*(X*)&data[{expr}])".format(expr=expr)
if mode == "constant":
cond = " || ".join(["(ix_{} < 0)".format(j) for j in range(ndim)])
if binary_morphology:
found = found.format(cond=cond, value=value)
else:
if mode == "constant":
value = "(({cond}) ? cast<{ctype}>({cval}) : {value})".format(
cond=cond, ctype=ctype, cval=cval, value=value
)
found = found.format(value=value)
# CArray: replace comment and next line in string with
# {type} inds[{ndim}] = {{0}};
# and add ndim=ndim, type=int_type to format call
operation = """
{sizes}
{inds}
// don't use a CArray for indexing (faster to deal with indexing ourselves)
const unsigned char* data = (const unsigned char*)&x[0];
{ws_init}
{pre}
{loops}
// inner-most loop
{ws_pre} {{
{found}
}}
{ws_post}
{end_loops}
{post}
""".format(
sizes="\n".join(sizes),
inds=inds,
pre=pre,
post=post,
ws_init=ws_init,
ws_pre=ws_pre,
ws_post=ws_post,
loops="\n".join(loops),
found=found,
end_loops="}" * ndim,
)
name = "cupy_ndimage_{}_{}d_{}_w{}".format(
name, ndim, mode, "_".join(["{}".format(x) for x in w_shape])
)
if all_weights_nonzero:
name += "_all_nonzero"
if int_type == "ptrdiff_t":
name += "_i64"
if has_structure:
name += "_with_structure"
if has_mask:
name += "_with_mask"
preamble = _CAST_FUNCTION + preamble
return cupy.ElementwiseKernel(
in_params,
out_params,
operation,
name,
reduce_dims=False,
preamble=preamble,
options=("--std=c++11",) + options,
)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _generate_interp_custom(
coord_func,
ndim,
large_int,
yshape,
mode,
cval,
order,
name="",
integer_output=False,
):
"""
Args:
coord_func (function): generates code to do the coordinate
transformation. See for example, `_get_coord_shift`.
ndim (int): The number of dimensions.
large_int (bool): If true use Py_ssize_t instead of int for indexing.
yshape (tuple): Shape of the output array.
mode (str): Signal extension mode to use at the array boundaries
cval (float): constant value used when `mode == 'constant'`.
name (str): base name for the interpolation kernel
integer_output (bool): boolean indicating whether the output has an
integer type.
Returns:
operation (str): code body for the ElementwiseKernel
name (str): name for the ElementwiseKernel
"""
ops = []
ops.append("double out = 0.0;")
if large_int:
uint_t = "size_t"
int_t = "ptrdiff_t"
else:
uint_t = "unsigned int"
int_t = "int"
# determine strides for x along each axis
for j in range(ndim):
ops.append("const {int_t} xsize_{j} = x.shape()[{j}];".format(int_t=int_t, j=j))
ops.append("const {uint_t} sx_{j} = 1;".format(uint_t=uint_t, j=ndim - 1))
for j in range(ndim - 1, 0, -1):
ops.append(
"const {uint_t} sx_{jm} = sx_{j} * xsize_{j};".format(
uint_t=uint_t,
jm=j - 1,
j=j,
)
)
# create in_coords array to store the unraveled indices
ops.append(_unravel_loop_index(yshape, uint_t))
# compute the transformed (target) coordinates, c_j
ops = ops + coord_func(ndim)
if cval is numpy.nan:
cval = "CUDART_NAN"
elif cval == numpy.inf:
cval = "CUDART_INF"
elif cval == -numpy.inf:
cval = "-CUDART_INF"
else:
cval = "(double){cval}".format(cval=cval)
if mode == "constant":
# use cval if coordinate is outside the bounds of x
_cond = " || ".join(
["(c_{j} < 0) || (c_{j} > xsize_{j} - 1)".format(j=j) for j in range(ndim)]
)
ops.append(
"""
if ({cond})
{{
out = {cval};
}}
else
{{""".format(cond=_cond, cval=cval)
)
if order == 0:
for j in range(ndim):
# determine nearest neighbor
ops.append(
"""
{int_t} cf_{j} = ({int_t})lrint((double)c_{j});
""".format(int_t=int_t, j=j)
)
# handle boundary
if mode != "constant":
ixvar = "cf_{j}".format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, "xsize_{}".format(j)
)
)
# sum over ic_j will give the raveled coordinate in the input
ops.append(
"""
{int_t} ic_{j} = cf_{j} * sx_{j};
""".format(int_t=int_t, j=j)
)
_coord_idx = " + ".join(["ic_{}".format(j) for j in range(ndim)])
ops.append(
"""
out = x[{coord_idx}];""".format(coord_idx=_coord_idx)
)
elif order == 1:
for j in range(ndim):
# get coordinates for linear interpolation along axis j
ops.append(
"""
{int_t} cf_{j} = ({int_t})floor((double)c_{j});
{int_t} cc_{j} = cf_{j} + 1;
{int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed
""".format(int_t=int_t, j=j)
)
# handle boundaries for extension modes.
ops.append(
"""
{int_t} cf_bounded_{j} = cf_{j};
{int_t} cc_bounded_{j} = cc_{j};
""".format(int_t=int_t, j=j)
)
if mode != "constant":
ixvar = "cf_bounded_{j}".format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, "xsize_{}".format(j)
)
)
ixvar = "cc_bounded_{j}".format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, "xsize_{}".format(j)
)
)
ops.append(
"""
for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++)
{{
W w_{j};
{int_t} ic_{j};
if (s_{j} == 0)
{{
w_{j} = (W)cc_{j} - c_{j};
ic_{j} = cf_bounded_{j} * sx_{j};
}} else
{{
w_{j} = c_{j} - (W)cf_{j};
ic_{j} = cc_bounded_{j} * sx_{j};
}}""".format(int_t=int_t, j=j)
)
_weight = " * ".join(["w_{j}".format(j=j) for j in range(ndim)])
_coord_idx = " + ".join(["ic_{j}".format(j=j) for j in range(ndim)])
ops.append(
"""
X val = x[{coord_idx}];
out += val * ({weight});""".format(coord_idx=_coord_idx, weight=_weight)
)
ops.append("}" * ndim)
if mode == "constant":
ops.append("}")
if integer_output:
ops.append("y = (Y)rint((double)out);")
else:
ops.append("y = (Y)out;")
operation = "\n".join(ops)
name = "interpolate_{}_order{}_{}_{}d_y{}".format(
name,
order,
mode,
ndim,
"_".join(["{}".format(j) for j in yshape]),
)
if uint_t == "size_t":
name += "_i64"
return operation, name
|
def _generate_interp_custom(
coord_func,
ndim,
large_int,
yshape,
mode,
cval,
order,
name="",
integer_output=False,
):
"""
Args:
coord_func (function): generates code to do the coordinate
transformation. See for example, `_get_coord_shift`.
ndim (int): The number of dimensions.
large_int (bool): If true use Py_ssize_t instead of int for indexing.
yshape (tuple): Shape of the output array.
mode (str): Signal extension mode to use at the array boundaries
cval (float): constant value used when `mode == 'constant'`.
name (str): base name for the interpolation kernel
integer_output (bool): boolean indicating whether the output has an
integer type.
Returns:
operation (str): code body for the ElementwiseKernel
name (str): name for the ElementwiseKernel
"""
ops = []
ops.append("double out = 0.0;")
if large_int:
uint_t = "size_t"
int_t = "ptrdiff_t"
else:
uint_t = "unsigned int"
int_t = "int"
# determine strides for x along each axis
for j in range(ndim):
ops.append("const {int_t} xsize_{j} = x.shape()[{j}];".format(int_t=int_t, j=j))
ops.append("const {uint_t} sx_{j} = 1;".format(uint_t=uint_t, j=ndim - 1))
for j in range(ndim - 1, 0, -1):
ops.append(
"const {uint_t} sx_{jm} = sx_{j} * xsize_{j};".format(
uint_t=uint_t,
jm=j - 1,
j=j,
)
)
# create in_coords array to store the unraveled indices
ops.append(_unravel_loop_index(yshape, uint_t))
# compute the transformed (target) coordinates, c_j
ops = ops + coord_func(ndim)
if mode == "constant":
# use cval if coordinate is outside the bounds of x
_cond = " || ".join(
["(c_{j} < 0) || (c_{j} > xsize_{j} - 1)".format(j=j) for j in range(ndim)]
)
ops.append(
"""
if ({cond})
{{
out = (double){cval};
}}
else
{{""".format(cond=_cond, cval=cval)
)
if order == 0:
for j in range(ndim):
# determine nearest neighbor
ops.append(
"""
{int_t} cf_{j} = ({int_t})lrint((double)c_{j});
""".format(int_t=int_t, j=j)
)
# handle boundary
if mode != "constant":
ixvar = "cf_{j}".format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, "xsize_{}".format(j)
)
)
# sum over ic_j will give the raveled coordinate in the input
ops.append(
"""
{int_t} ic_{j} = cf_{j} * sx_{j};
""".format(int_t=int_t, j=j)
)
_coord_idx = " + ".join(["ic_{}".format(j) for j in range(ndim)])
ops.append(
"""
out = x[{coord_idx}];""".format(coord_idx=_coord_idx)
)
elif order == 1:
for j in range(ndim):
# get coordinates for linear interpolation along axis j
ops.append(
"""
{int_t} cf_{j} = ({int_t})floor((double)c_{j});
{int_t} cc_{j} = cf_{j} + 1;
{int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed
""".format(int_t=int_t, j=j)
)
# handle boundaries for extension modes.
ops.append(
"""
{int_t} cf_bounded_{j} = cf_{j};
{int_t} cc_bounded_{j} = cc_{j};
""".format(int_t=int_t, j=j)
)
if mode != "constant":
ixvar = "cf_bounded_{j}".format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, "xsize_{}".format(j)
)
)
ixvar = "cc_bounded_{j}".format(j=j)
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, "xsize_{}".format(j)
)
)
ops.append(
"""
for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++)
{{
W w_{j};
{int_t} ic_{j};
if (s_{j} == 0)
{{
w_{j} = (W)cc_{j} - c_{j};
ic_{j} = cf_bounded_{j} * sx_{j};
}} else
{{
w_{j} = c_{j} - (W)cf_{j};
ic_{j} = cc_bounded_{j} * sx_{j};
}}""".format(int_t=int_t, j=j)
)
_weight = " * ".join(["w_{j}".format(j=j) for j in range(ndim)])
_coord_idx = " + ".join(["ic_{j}".format(j=j) for j in range(ndim)])
ops.append(
"""
X val = x[{coord_idx}];
out += val * ({weight});""".format(coord_idx=_coord_idx, weight=_weight)
)
ops.append("}" * ndim)
if mode == "constant":
ops.append("}")
if integer_output:
ops.append("y = (Y)rint((double)out);")
else:
ops.append("y = (Y)out;")
operation = "\n".join(ops)
name = "interpolate_{}_order{}_{}_{}d_y{}".format(
name,
order,
mode,
ndim,
"_".join(["{}".format(j) for j in yshape]),
)
if uint_t == "size_t":
name += "_i64"
return operation, name
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _get_map_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W coords"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_map,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="shift",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
|
def _get_map_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W coords"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_map,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="shift",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(in_params, out_params, operation, name)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _get_shift_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W shift"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_shift,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="shift",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
|
def _get_shift_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W shift"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_shift,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="shift",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(in_params, out_params, operation, name)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _get_zoom_shift_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W shift, raw W zoom"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_zoom_and_shift,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="zoom_shift",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
|
def _get_zoom_shift_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W shift, raw W zoom"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_zoom_and_shift,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="zoom_shift",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(in_params, out_params, operation, name)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _get_zoom_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W zoom"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_zoom,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="zoom",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
|
def _get_zoom_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W zoom"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_zoom,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="zoom",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(in_params, out_params, operation, name)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _get_affine_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W mat"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_affine,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="affine",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
|
def _get_affine_kernel(
ndim, large_int, yshape, mode, cval=0.0, order=1, integer_output=False
):
in_params = "raw X x, raw W mat"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_affine,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="affine",
integer_output=integer_output,
)
return cupy.ElementwiseKernel(in_params, out_params, operation, name)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _correlate_or_convolve(
input, weights, output, mode, cval, origin, convolution=False
):
origins, int_type = _filters_core._check_nd_args(input, weights, mode, origin)
if weights.size == 0:
return cupy.zeros_like(input)
_util._check_cval(mode, cval, _util._is_integer_output(output, input))
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
origins = list(origins)
for i, wsize in enumerate(weights.shape):
origins[i] = -origins[i]
if wsize % 2 == 0:
origins[i] -= 1
origins = tuple(origins)
offsets = _filters_core._origins_to_offsets(origins, weights.shape)
kernel = _get_correlate_kernel(mode, weights.shape, int_type, offsets, cval)
output = _filters_core._call_kernel(kernel, input, weights, output)
return output
|
def _correlate_or_convolve(
input, weights, output, mode, cval, origin, convolution=False
):
origins, int_type = _filters_core._check_nd_args(input, weights, mode, origin)
if weights.size == 0:
return cupy.zeros_like(input)
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
origins = list(origins)
for i, wsize in enumerate(weights.shape):
origins[i] = -origins[i]
if wsize % 2 == 0:
origins[i] -= 1
origins = tuple(origins)
offsets = _filters_core._origins_to_offsets(origins, weights.shape)
kernel = _get_correlate_kernel(mode, weights.shape, int_type, offsets, cval)
return _filters_core._call_kernel(kernel, input, weights, output)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _min_or_max_filter(
input, size, ftprnt, structure, output, mode, cval, origin, func
):
# structure is used by morphology.grey_erosion() and grey_dilation()
# and not by the regular min/max filters
sizes, ftprnt, structure = _filters_core._check_size_footprint_structure(
input.ndim, size, ftprnt, structure
)
if cval is cupy.nan:
raise NotImplementedError("NaN cval is unsupported")
if sizes is not None:
# Seperable filter, run as a series of 1D filters
fltr = minimum_filter1d if func == "min" else maximum_filter1d
return _filters_core._run_1d_filters(
[fltr if size > 1 else None for size in sizes],
input,
sizes,
output,
mode,
cval,
origin,
)
origins, int_type = _filters_core._check_nd_args(
input, ftprnt, mode, origin, "footprint"
)
if structure is not None and structure.ndim != input.ndim:
raise RuntimeError("structure array has incorrect shape")
if ftprnt.size == 0:
return cupy.zeros_like(input)
offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
kernel = _get_min_or_max_kernel(
mode,
ftprnt.shape,
func,
offsets,
float(cval),
int_type,
has_structure=structure is not None,
has_central_value=bool(ftprnt[offsets]),
)
return _filters_core._call_kernel(
kernel, input, ftprnt, output, structure, weights_dtype=bool
)
|
def _min_or_max_filter(
input, size, ftprnt, structure, output, mode, cval, origin, func
):
# structure is used by morphology.grey_erosion() and grey_dilation()
# and not by the regular min/max filters
sizes, ftprnt, structure = _filters_core._check_size_footprint_structure(
input.ndim, size, ftprnt, structure
)
if sizes is not None:
# Seperable filter, run as a series of 1D filters
fltr = minimum_filter1d if func == "min" else maximum_filter1d
return _filters_core._run_1d_filters(
[fltr if size > 1 else None for size in sizes],
input,
sizes,
output,
mode,
cval,
origin,
)
origins, int_type = _filters_core._check_nd_args(
input, ftprnt, mode, origin, "footprint"
)
if structure is not None and structure.ndim != input.ndim:
raise RuntimeError("structure array has incorrect shape")
if ftprnt.size == 0:
return cupy.zeros_like(input)
offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
kernel = _get_min_or_max_kernel(
mode,
ftprnt.shape,
func,
offsets,
float(cval),
int_type,
has_structure=structure is not None,
has_central_value=bool(ftprnt[offsets]),
)
return _filters_core._call_kernel(
kernel, input, ftprnt, output, structure, weights_dtype=bool
)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _rank_filter(
input,
get_rank,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
_, footprint, _ = _filters_core._check_size_footprint_structure(
input.ndim, size, footprint, None, force_footprint=True
)
if cval is cupy.nan:
raise NotImplementedError("NaN cval is unsupported")
origins, int_type = _filters_core._check_nd_args(
input, footprint, mode, origin, "footprint"
)
if footprint.size == 0:
return cupy.zeros_like(input)
filter_size = int(footprint.sum())
rank = get_rank(filter_size)
if rank < 0 or rank >= filter_size:
raise RuntimeError("rank not within filter footprint size")
if rank == 0:
return _min_or_max_filter(
input, None, footprint, None, output, mode, cval, origins, "min"
)
if rank == filter_size - 1:
return _min_or_max_filter(
input, None, footprint, None, output, mode, cval, origins, "max"
)
offsets = _filters_core._origins_to_offsets(origins, footprint.shape)
kernel = _get_rank_kernel(
filter_size, rank, mode, footprint.shape, offsets, float(cval), int_type
)
return _filters_core._call_kernel(
kernel, input, footprint, output, weights_dtype=bool
)
|
def _rank_filter(
input,
get_rank,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
_, footprint, _ = _filters_core._check_size_footprint_structure(
input.ndim, size, footprint, None, force_footprint=True
)
origins, int_type = _filters_core._check_nd_args(
input, footprint, mode, origin, "footprint"
)
if footprint.size == 0:
return cupy.zeros_like(input)
filter_size = int(footprint.sum())
rank = get_rank(filter_size)
if rank < 0 or rank >= filter_size:
raise RuntimeError("rank not within filter footprint size")
if rank == 0:
return _min_or_max_filter(
input, None, footprint, None, output, mode, cval, origins, "min"
)
if rank == filter_size - 1:
return _min_or_max_filter(
input, None, footprint, None, output, mode, cval, origins, "max"
)
offsets = _filters_core._origins_to_offsets(origins, footprint.shape)
kernel = _get_rank_kernel(
filter_size, rank, mode, footprint.shape, offsets, float(cval), int_type
)
return _filters_core._call_kernel(
kernel, input, footprint, output, weights_dtype=bool
)
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def map_coordinates(
input,
coordinates,
output=None,
order=None,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output, the
corresponding coordinates in the input. The value of the input at those
coordinates is determined by spline interpolation of the requested order.
The shape of the output is derived from that of the coordinate array by
dropping the first axis. The values of the array along the first axis are
the coordinates in the input array at which the output value is found.
Args:
input (cupy.ndarray): The input array.
coordinates (array_like): The coordinates at which ``input`` is
evaluated.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray:
The result of transforming the input. The shape of the output is
derived from that of ``coordinates`` by dropping the first axis.
.. seealso:: :func:`scipy.ndimage.map_coordinates`
"""
_check_parameter("map_coordinates", order, mode)
if mode == "opencv" or mode == "_opencv_edge":
input = cupy.pad(input, [(1, 1)] * input.ndim, "constant", constant_values=cval)
coordinates = cupy.add(coordinates, 1)
mode = "constant"
ret = _util._get_output(output, input, coordinates.shape[1:])
integer_output = ret.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
large_int = max(_prod(input.shape), coordinates.shape[0]) > 1 << 31
kern = _interp_kernels._get_map_kernel(
input.ndim,
large_int,
yshape=coordinates.shape,
mode=mode,
cval=cval,
order=order,
integer_output=integer_output,
)
kern(input, coordinates, ret)
return ret
|
def map_coordinates(
input,
coordinates,
output=None,
order=None,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output, the
corresponding coordinates in the input. The value of the input at those
coordinates is determined by spline interpolation of the requested order.
The shape of the output is derived from that of the coordinate array by
dropping the first axis. The values of the array along the first axis are
the coordinates in the input array at which the output value is found.
Args:
input (cupy.ndarray): The input array.
coordinates (array_like): The coordinates at which ``input`` is
evaluated.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray:
The result of transforming the input. The shape of the output is
derived from that of ``coordinates`` by dropping the first axis.
.. seealso:: :func:`scipy.ndimage.map_coordinates`
"""
_check_parameter("map_coordinates", order, mode)
if mode == "opencv" or mode == "_opencv_edge":
input = cupy.pad(input, [(1, 1)] * input.ndim, "constant", constant_values=cval)
coordinates = cupy.add(coordinates, 1)
mode = "constant"
ret = _util._get_output(output, input, coordinates.shape[1:])
integer_output = ret.dtype.kind in "iu"
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
large_int = max(_prod(input.shape), coordinates.shape[0]) > 1 << 31
kern = _interp_kernels._get_map_kernel(
input.ndim,
large_int,
yshape=coordinates.shape,
mode=mode,
cval=cval,
order=order,
integer_output=integer_output,
)
kern(input, coordinates, ret)
return ret
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def affine_transform(
input,
matrix,
offset=0.0,
output_shape=None,
output=None,
order=None,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value is
determined from the input image at position
``cupy.dot(matrix, o) + offset``.
Args:
input (cupy.ndarray): The input array.
matrix (cupy.ndarray): The inverse coordinate transformation matrix,
mapping output coordinates to input coordinates. If ``ndim`` is the
number of dimensions of ``input``, the given matrix must have one
of the following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always
``[0, 0, ..., 1]``, and may be omitted.
offset (float or sequence): The offset into the array where the
transform is applied. If a float, ``offset`` is the same for each
axis. If a sequence, ``offset`` should contain one value for each
axis.
output_shape (tuple of ints): Shape tuple.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The transformed input. If ``output`` is given as a parameter,
``None`` is returned.
.. seealso:: :func:`scipy.ndimage.affine_transform`
"""
_check_parameter("affine_transform", order, mode)
offset = _util._fix_sequence_arg(offset, input.ndim, "offset", float)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError("no proper affine matrix provided")
if matrix.ndim == 2:
if matrix.shape[0] == matrix.shape[1] - 1:
offset = matrix[:, -1]
matrix = matrix[:, :-1]
elif matrix.shape[0] == input.ndim + 1:
offset = matrix[:-1, -1]
matrix = matrix[:-1, :-1]
if matrix.shape != (input.ndim, input.ndim):
raise RuntimeError("improper affine shape")
if mode == "opencv":
m = cupy.zeros((input.ndim + 1, input.ndim + 1))
m[:-1, :-1] = matrix
m[:-1, -1] = offset
m[-1, -1] = 1
m = cupy.linalg.inv(m)
m[:2] = cupy.roll(m[:2], 1, axis=0)
m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1)
matrix = m[:-1, :-1]
offset = m[:-1, -1]
if output_shape is None:
output_shape = input.shape
matrix = matrix.astype(cupy.float64, copy=False)
if order is None:
order = 1
ndim = input.ndim
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
if matrix.ndim == 1:
offset = cupy.asarray(offset, dtype=cupy.float64)
offset = -offset / matrix
kern = _interp_kernels._get_zoom_shift_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
kern(input, offset, matrix, output)
else:
kern = _interp_kernels._get_affine_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64)
m[:, :-1] = matrix
m[:, -1] = cupy.asarray(offset, dtype=cupy.float64)
kern(input, m, output)
return output
|
def affine_transform(
input,
matrix,
offset=0.0,
output_shape=None,
output=None,
order=None,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value is
determined from the input image at position
``cupy.dot(matrix, o) + offset``.
Args:
input (cupy.ndarray): The input array.
matrix (cupy.ndarray): The inverse coordinate transformation matrix,
mapping output coordinates to input coordinates. If ``ndim`` is the
number of dimensions of ``input``, the given matrix must have one
of the following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always
``[0, 0, ..., 1]``, and may be omitted.
offset (float or sequence): The offset into the array where the
transform is applied. If a float, ``offset`` is the same for each
axis. If a sequence, ``offset`` should contain one value for each
axis.
output_shape (tuple of ints): Shape tuple.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The transformed input. If ``output`` is given as a parameter,
``None`` is returned.
.. seealso:: :func:`scipy.ndimage.affine_transform`
"""
_check_parameter("affine_transform", order, mode)
offset = _util._fix_sequence_arg(offset, input.ndim, "offset", float)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError("no proper affine matrix provided")
if matrix.ndim == 2:
if matrix.shape[0] == matrix.shape[1] - 1:
offset = matrix[:, -1]
matrix = matrix[:, :-1]
elif matrix.shape[0] == input.ndim + 1:
offset = matrix[:-1, -1]
matrix = matrix[:-1, :-1]
if matrix.shape != (input.ndim, input.ndim):
raise RuntimeError("improper affine shape")
if mode == "opencv":
m = cupy.zeros((input.ndim + 1, input.ndim + 1))
m[:-1, :-1] = matrix
m[:-1, -1] = offset
m[-1, -1] = 1
m = cupy.linalg.inv(m)
m[:2] = cupy.roll(m[:2], 1, axis=0)
m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1)
matrix = m[:-1, :-1]
offset = m[:-1, -1]
if output_shape is None:
output_shape = input.shape
matrix = matrix.astype(cupy.float64, copy=False)
if order is None:
order = 1
ndim = input.ndim
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
if matrix.ndim == 1:
offset = cupy.asarray(offset, dtype=cupy.float64)
offset = -offset / matrix
kern = _interp_kernels._get_zoom_shift_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
kern(input, offset, matrix, output)
else:
kern = _interp_kernels._get_affine_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64)
m[:, :-1] = matrix
m[:, -1] = cupy.asarray(offset, dtype=cupy.float64)
kern(input, m, output)
return output
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def shift(
input, shift, output=None, order=None, mode="constant", cval=0.0, prefilter=True
):
"""Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Args:
input (cupy.ndarray): The input array.
shift (float or sequence): The shift along the axes. If a float,
``shift`` is the same for each axis. If a sequence, ``shift``
should contain one value for each axis.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The shifted input.
.. seealso:: :func:`scipy.ndimage.shift`
"""
_check_parameter("shift", order, mode)
shift = _util._fix_sequence_arg(shift, input.ndim, "shift", float)
if mode == "opencv":
mode = "_opencv_edge"
output = affine_transform(
input,
cupy.ones(input.ndim, input.dtype),
cupy.negative(cupy.asarray(shift)),
None,
output,
order,
mode,
cval,
prefilter,
)
else:
if order is None:
order = 1
output = _util._get_output(output, input)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = _prod(input.shape) > 1 << 31
kern = _interp_kernels._get_shift_kernel(
input.ndim,
large_int,
input.shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
shift = cupy.asarray(shift, dtype=cupy.float64)
kern(input, shift, output)
return output
|
def shift(
input, shift, output=None, order=None, mode="constant", cval=0.0, prefilter=True
):
"""Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Args:
input (cupy.ndarray): The input array.
shift (float or sequence): The shift along the axes. If a float,
``shift`` is the same for each axis. If a sequence, ``shift``
should contain one value for each axis.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The shifted input.
.. seealso:: :func:`scipy.ndimage.shift`
"""
_check_parameter("shift", order, mode)
shift = _util._fix_sequence_arg(shift, input.ndim, "shift", float)
if mode == "opencv":
mode = "_opencv_edge"
output = affine_transform(
input,
cupy.ones(input.ndim, input.dtype),
cupy.negative(cupy.asarray(shift)),
None,
output,
order,
mode,
cval,
prefilter,
)
else:
if order is None:
order = 1
output = _util._get_output(output, input)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
large_int = _prod(input.shape) > 1 << 31
kern = _interp_kernels._get_shift_kernel(
input.ndim,
large_int,
input.shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
)
shift = cupy.asarray(shift, dtype=cupy.float64)
kern(input, shift, output)
return output
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def zoom(
input, zoom, output=None, order=None, mode="constant", cval=0.0, prefilter=True
):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Args:
input (cupy.ndarray): The input array.
zoom (float or sequence): The zoom factor along the axes. If a float,
``zoom`` is the same for each axis. If a sequence, ``zoom`` should
contain one value for each axis.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The zoomed input.
.. seealso:: :func:`scipy.ndimage.zoom`
"""
_check_parameter("zoom", order, mode)
zoom = _util._fix_sequence_arg(zoom, input.ndim, "zoom", float)
output_shape = []
for s, z in zip(input.shape, zoom):
output_shape.append(int(round(s * z)))
output_shape = tuple(output_shape)
if mode == "opencv":
zoom = []
offset = []
for in_size, out_size in zip(input.shape, output_shape):
if out_size > 1:
zoom.append(float(in_size) / out_size)
offset.append((zoom[-1] - 1) / 2.0)
else:
zoom.append(0)
offset.append(0)
mode = "nearest"
output = affine_transform(
input,
cupy.asarray(zoom),
offset,
output_shape,
output,
order,
mode,
cval,
prefilter,
)
else:
if order is None:
order = 1
zoom = []
for in_size, out_size in zip(input.shape, output_shape):
if out_size > 1:
zoom.append(float(in_size - 1) / (out_size - 1))
else:
zoom.append(0)
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
kern = _interp_kernels._get_zoom_kernel(
input.ndim,
large_int,
output_shape,
mode,
order=order,
integer_output=integer_output,
)
zoom = cupy.asarray(zoom, dtype=cupy.float64)
kern(input, zoom, output)
return output
|
def zoom(
input, zoom, output=None, order=None, mode="constant", cval=0.0, prefilter=True
):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Args:
input (cupy.ndarray): The input array.
zoom (float or sequence): The zoom factor along the axes. If a float,
``zoom`` is the same for each axis. If a sequence, ``zoom`` should
contain one value for each axis.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation. If it is not given,
order 1 is used. It is different from :mod:`scipy.ndimage` and can
change in the future. Currently it supports only order 0 and 1.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'`` or ``'opencv'``). Default is ``'constant'``.
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): It is not used yet. It just exists for compatibility
with :mod:`scipy.ndimage`.
Returns:
cupy.ndarray or None:
The zoomed input.
.. seealso:: :func:`scipy.ndimage.zoom`
"""
_check_parameter("zoom", order, mode)
zoom = _util._fix_sequence_arg(zoom, input.ndim, "zoom", float)
output_shape = []
for s, z in zip(input.shape, zoom):
output_shape.append(int(round(s * z)))
output_shape = tuple(output_shape)
if mode == "opencv":
zoom = []
offset = []
for in_size, out_size in zip(input.shape, output_shape):
if out_size > 1:
zoom.append(float(in_size) / out_size)
offset.append((zoom[-1] - 1) / 2.0)
else:
zoom.append(0)
offset.append(0)
mode = "nearest"
output = affine_transform(
input,
cupy.asarray(zoom),
offset,
output_shape,
output,
order,
mode,
cval,
prefilter,
)
else:
if order is None:
order = 1
zoom = []
for in_size, out_size in zip(input.shape, output_shape):
if out_size > 1:
zoom.append(float(in_size - 1) / (out_size - 1))
else:
zoom.append(0)
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
integer_output = output.dtype.kind in "iu"
large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
kern = _interp_kernels._get_zoom_kernel(
input.ndim,
large_int,
output_shape,
mode,
order=order,
integer_output=integer_output,
)
zoom = cupy.asarray(zoom, dtype=cupy.float64)
kern(input, zoom, output)
return output
|
https://github.com/cupy/cupy/issues/4082
|
/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py:15: UserWarning: In the current feature the default order of shift is 1. It is different from scipy.ndimage and can change in the future.
warnings.warn('In the current feature the default order of {} is 1. '
Traceback (most recent call last):
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 516, in compile
nvrtc.compileProgram(self.ptr, options)
File "cupy_backends/cuda/libs/nvrtc.pyx", line 108, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 120, in cupy_backends.cuda.libs.nvrtc.compileProgram
File "cupy_backends/cuda/libs/nvrtc.pyx", line 58, in cupy_backends.cuda.libs.nvrtc.check_status
cupy_backends.cuda.libs.nvrtc.NVRTCError: NVRTC_ERROR_COMPILATION (6)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupyx/scipy/ndimage/interpolation.py", line 379, in shift
kern(input, shift, output)
File "cupy/core/_kernel.pyx", line 821, in cupy.core._kernel.ElementwiseKernel.__call__
File "cupy/core/_kernel.pyx", line 846, in cupy.core._kernel.ElementwiseKernel._get_elementwise_kernel
File "cupy/_util.pyx", line 103, in cupy._util.memoize.decorator.ret
File "cupy/core/_kernel.pyx", line 639, in cupy.core._kernel._get_elementwise_kernel
File "cupy/core/_kernel.pyx", line 37, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/_kernel.pyx", line 60, in cupy.core._kernel._get_simple_elementwise_kernel
File "cupy/core/core.pyx", line 1937, in cupy.core.core.compile_with_cache
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 335, in compile_with_cache
return _compile_with_cache_cuda(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 402, in _compile_with_cache_cuda
ptx, mapping = compile_using_nvrtc(
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 173, in compile_using_nvrtc
return _compile(source, options, cu_path,
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 157, in _compile
ptx, mapping = prog.compile(options, log_stream)
File "/home/croat/.local/share/virtualenvs/starmap-T47byR32/lib/python3.8/site-packages/cupy/cuda/compiler.py", line 527, in compile
raise CompileException(log, self.src, self.name, options,
cupy.cuda.compiler.CompileException: /tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(28): error: invalid type conversion
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "s" was declared but never referenced
/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu(20): warning: variable "t" was declared but never referenced
1 error detected in the compilation of "/tmp/tmpbp6idobr/064ead033e63783c4dfe62433771dbed_2.cubin.cu".
|
cupy_backends.cuda.libs.nvrtc.NVRTCError
|
def _exec_fft(
a,
direction,
value_type,
norm,
axis,
overwrite_x,
out_size=None,
out=None,
plan=None,
):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
n = a.shape[-1]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
if out_size is None:
out_size = n
batch = a.size // n
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError(
"Use the cuFFT plan either as a context manager or as an argument."
)
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError("expected plan to have type cufft.Plan1d")
if fft_type != plan.fft_type:
raise ValueError("cuFFT plan dtype mismatch.")
if out_size != plan.nx:
raise ValueError(
"Target array size does not match the plan.", out_size, plan.nx
)
if batch != plan.batch:
raise ValueError("Batch size does not match the plan.")
if config.use_multi_gpus != plan._use_multi_gpus:
raise ValueError("Unclear if multiple GPUs are to be used or not.")
if overwrite_x and value_type == "C2C":
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
if batch != 0:
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = n
if norm is None:
if direction == cufft.CUFFT_INVERSE:
out /= sz
else:
out /= math.sqrt(sz)
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
|
def _exec_fft(
a,
direction,
value_type,
norm,
axis,
overwrite_x,
out_size=None,
out=None,
plan=None,
):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
if out_size is None:
out_size = a.shape[-1]
batch = a.size // a.shape[-1]
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError(
"Use the cuFFT plan either as a context manager or as an argument."
)
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError("expected plan to have type cufft.Plan1d")
if fft_type != plan.fft_type:
raise ValueError("cuFFT plan dtype mismatch.")
if out_size != plan.nx:
raise ValueError(
"Target array size does not match the plan.", out_size, plan.nx
)
if batch != plan.batch:
raise ValueError("Batch size does not match the plan.")
if config.use_multi_gpus != plan._use_multi_gpus:
raise ValueError("Unclear if multiple GPUs are to be used or not.")
if overwrite_x and value_type == "C2C":
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = a.shape[-1]
if norm is None:
if direction == cufft.CUFFT_INVERSE:
out /= sz
else:
out /= math.sqrt(sz)
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
|
https://github.com/cupy/cupy/issues/3241
|
np.fft.fft(np.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in fft
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 188, in fft
output = _raw_fft(a, n, axis, False, True, inv_norm)
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 57, in _raw_fft
raise ValueError("Invalid number of FFT data points (%d) specified."
ValueError: Invalid number of FFT data points (0) specified.
cupy.fft.fft(cupy.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 175, in _fft
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 142, in _fft_c2c
a = _exec_fft(a, direction, 'C2C', norm, axis, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 91, in _exec_fft
batch = a.size // a.shape[-1]
ZeroDivisionError: integer division or modulo by zero
cupy.fft.fft(cupy.array([]), n=0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 155, in _fft
raise ValueError(
ValueError: Invalid number of FFT data points (0) specified.
|
ValueError
|
def _fft(a, s, axes, norm, direction, value_type="C2C", overwrite_x=False, plan=None):
if norm not in (None, "ortho"):
raise ValueError('Invalid norm value %s, should be None or "ortho".' % norm)
if (s is not None) and (axes is not None) and len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if axes is None:
if s is None:
dim = a.ndim
else:
dim = len(s)
axes = [i for i in range(-dim, 0)]
else:
axes = tuple(axes)
if not axes:
if value_type == "C2C":
return a
else:
raise IndexError("list index out of range")
a = _convert_dtype(a, value_type)
a = _cook_shape(a, s, axes, value_type)
if value_type == "C2C":
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
elif value_type == "R2C":
a = _exec_fft(a, direction, value_type, norm, axes[-1], overwrite_x)
a = _fft_c2c(a, direction, norm, axes[:-1], overwrite_x)
else: # C2R
a = _fft_c2c(a, direction, norm, axes[:-1], overwrite_x)
# _cook_shape tells us input shape only, and no output shape
out_size = _get_fftn_out_size(a.shape, s, axes[-1], value_type)
a = _exec_fft(a, direction, value_type, norm, axes[-1], overwrite_x, out_size)
return a
|
def _fft(a, s, axes, norm, direction, value_type="C2C", overwrite_x=False, plan=None):
if norm not in (None, "ortho"):
raise ValueError('Invalid norm value %s, should be None or "ortho".' % norm)
if s is not None:
for n in s:
if (n is not None) and (n < 1):
raise ValueError(
"Invalid number of FFT data points (%d) specified." % n
)
if (s is not None) and (axes is not None) and len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if axes is None:
if s is None:
dim = a.ndim
else:
dim = len(s)
axes = [i for i in range(-dim, 0)]
else:
axes = tuple(axes)
if not axes:
if value_type == "C2C":
return a
else:
raise IndexError("list index out of range")
a = _convert_dtype(a, value_type)
a = _cook_shape(a, s, axes, value_type)
if value_type == "C2C":
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
elif value_type == "R2C":
a = _exec_fft(a, direction, value_type, norm, axes[-1], overwrite_x)
a = _fft_c2c(a, direction, norm, axes[:-1], overwrite_x)
else: # C2R
a = _fft_c2c(a, direction, norm, axes[:-1], overwrite_x)
# _cook_shape tells us input shape only, and no output shape
out_size = _get_fftn_out_size(a.shape, s, axes[-1], value_type)
a = _exec_fft(a, direction, value_type, norm, axes[-1], overwrite_x, out_size)
return a
|
https://github.com/cupy/cupy/issues/3241
|
np.fft.fft(np.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in fft
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 188, in fft
output = _raw_fft(a, n, axis, False, True, inv_norm)
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 57, in _raw_fft
raise ValueError("Invalid number of FFT data points (%d) specified."
ValueError: Invalid number of FFT data points (0) specified.
cupy.fft.fft(cupy.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 175, in _fft
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 142, in _fft_c2c
a = _exec_fft(a, direction, 'C2C', norm, axis, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 91, in _exec_fft
batch = a.size // a.shape[-1]
ZeroDivisionError: integer division or modulo by zero
cupy.fft.fft(cupy.array([]), n=0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 155, in _fft
raise ValueError(
ValueError: Invalid number of FFT data points (0) specified.
|
ValueError
|
def _get_cufft_plan_nd(shape, fft_type, axes=None, order="C", out_size=None):
"""Generate a CUDA FFT plan for transforming up to three axes.
Args:
shape (tuple of int): The shape of the array to transform
fft_type (int): The FFT type to perform. Supported values are:
`cufft.CUFFT_C2C`, `cufft.CUFFT_C2R`, `cufft.CUFFT_R2C`,
`cufft.CUFFT_Z2Z`, `cufft.CUFFT_Z2D`, and `cufft.CUFFT_D2Z`.
axes (None or int or tuple of int): The axes of the array to
transform. Currently, these must be a set of up to three adjacent
axes and must include either the first or the last axis of the
array. If `None`, it is assumed that all axes are transformed.
order ({'C', 'F'}): Specify whether the data to be transformed has C or
Fortran ordered data layout.
out_size (int): The output length along the last axis for R2C/C2R FFTs.
For C2C FFT, this is ignored (and set to `None`).
Returns:
plan (cufft.PlanNd): A cuFFT Plan for the chosen `fft_type`.
"""
ndim = len(shape)
if fft_type in (cufft.CUFFT_C2C, cufft.CUFFT_Z2Z):
value_type = "C2C"
elif fft_type in (cufft.CUFFT_C2R, cufft.CUFFT_Z2D):
value_type = "C2R"
else: # CUFFT_R2C or CUFFT_D2Z
value_type = "R2C"
if axes is None:
# transform over all axes
fft_axes = tuple(range(ndim))
else:
_, fft_axes = _prep_fftn_axes(ndim, s=None, axes=axes, value_type=value_type)
if not _nd_plan_is_possible(fft_axes, ndim):
raise ValueError(
"An n-dimensional cuFFT plan could not be created. The axes must "
"be contiguous and non-repeating. Between one and three axes can "
"be transformed and either the first or last axis must be "
"included in axes."
)
if order not in ["C", "F"]:
raise ValueError("order must be 'C' or 'F'")
"""
For full details on idist, istride, iembed, etc. see:
http://docs.nvidia.com/cuda/cufft/index.html#advanced-data-layout
in 1D:
input[b * idist + x * istride]
output[b * odist + x * ostride]
in 2D:
input[b * idist + (x * inembed[1] + y) * istride]
output[b * odist + (x * onembed[1] + y) * ostride]
in 3D:
input[b * idist + ((x * inembed[1] + y) * inembed[2] + z) * istride]
output[b * odist + ((x * onembed[1] + y) * onembed[2] + z) * ostride]
"""
# At this point, _default_fft_func() guarantees that for F-order arrays
# we only need to consider C2C, and not C2R or R2C.
# TODO(leofang): figure out if we really have to skip F-order?
in_dimensions = [shape[d] for d in fft_axes]
if order == "F":
in_dimensions = in_dimensions[::-1]
in_dimensions = tuple(in_dimensions)
if fft_type in (cufft.CUFFT_C2C, cufft.CUFFT_Z2Z):
out_dimensions = in_dimensions
plan_dimensions = in_dimensions
else:
out_dimensions = list(in_dimensions)
if out_size is not None: # for C2R & R2C
out_dimensions[-1] = out_size # only valid for C order!
out_dimensions = tuple(out_dimensions)
if fft_type in (cufft.CUFFT_R2C, cufft.CUFFT_D2Z):
plan_dimensions = in_dimensions
else: # CUFFT_C2R or CUFFT_Z2D
plan_dimensions = out_dimensions
inembed = in_dimensions
onembed = out_dimensions
if fft_axes == tuple(range(ndim)):
# tranfsorm over all axes
nbatch = 1
idist = odist = 1 # doesn't matter since nbatch = 1
istride = ostride = 1
else:
# batch along the first or the last axis
if 0 not in fft_axes:
# don't FFT along the first min_axis_fft axes
min_axis_fft = _reduce(min, fft_axes)
nbatch = _prod(shape[:min_axis_fft])
if order == "C":
# C-ordered GPU array with batch along first dim
idist = _prod(in_dimensions)
odist = _prod(out_dimensions)
istride = 1
ostride = 1
elif order == "F":
# F-ordered GPU array with batch along first dim
idist = 1
odist = 1
istride = nbatch
ostride = nbatch
elif (ndim - 1) not in fft_axes:
# don't FFT along the last axis
num_axes_batch = ndim - len(fft_axes)
nbatch = _prod(shape[-num_axes_batch:])
if order == "C":
# C-ordered GPU array with batch along last dim
idist = 1
odist = 1
istride = nbatch
ostride = nbatch
elif order == "F":
# F-ordered GPU array with batch along last dim
idist = _prod(in_dimensions)
odist = _prod(out_dimensions)
istride = 1
ostride = 1
else:
raise ValueError(
"General subsets of FFT axes not currently supported for "
"GPU case (Can only batch FFT over the first or last "
"spatial axes)."
)
for n in plan_dimensions:
if n < 1:
raise ValueError("Invalid number of FFT data points specified.")
plan = cufft.PlanNd(
shape=plan_dimensions,
inembed=inembed,
istride=istride,
idist=idist,
onembed=onembed,
ostride=ostride,
odist=odist,
fft_type=fft_type,
batch=nbatch,
order=order,
last_axis=fft_axes[-1],
last_size=out_size,
)
return plan
|
def _get_cufft_plan_nd(shape, fft_type, axes=None, order="C", out_size=None):
"""Generate a CUDA FFT plan for transforming up to three axes.
Args:
shape (tuple of int): The shape of the array to transform
fft_type (int): The FFT type to perform. Supported values are:
`cufft.CUFFT_C2C`, `cufft.CUFFT_C2R`, `cufft.CUFFT_R2C`,
`cufft.CUFFT_Z2Z`, `cufft.CUFFT_Z2D`, and `cufft.CUFFT_D2Z`.
axes (None or int or tuple of int): The axes of the array to
transform. Currently, these must be a set of up to three adjacent
axes and must include either the first or the last axis of the
array. If `None`, it is assumed that all axes are transformed.
order ({'C', 'F'}): Specify whether the data to be transformed has C or
Fortran ordered data layout.
out_size (int): The output length along the last axis for R2C/C2R FFTs.
For C2C FFT, this is ignored (and set to `None`).
Returns:
plan (cufft.PlanNd): A cuFFT Plan for the chosen `fft_type`.
"""
ndim = len(shape)
if fft_type in (cufft.CUFFT_C2C, cufft.CUFFT_Z2Z):
value_type = "C2C"
elif fft_type in (cufft.CUFFT_C2R, cufft.CUFFT_Z2D):
value_type = "C2R"
else: # CUFFT_R2C or CUFFT_D2Z
value_type = "R2C"
if axes is None:
# transform over all axes
fft_axes = tuple(range(ndim))
else:
_, fft_axes = _prep_fftn_axes(ndim, s=None, axes=axes, value_type=value_type)
if not _nd_plan_is_possible(fft_axes, ndim):
raise ValueError(
"An n-dimensional cuFFT plan could not be created. The axes must "
"be contiguous and non-repeating. Between one and three axes can "
"be transformed and either the first or last axis must be "
"included in axes."
)
if order not in ["C", "F"]:
raise ValueError("order must be 'C' or 'F'")
"""
For full details on idist, istride, iembed, etc. see:
http://docs.nvidia.com/cuda/cufft/index.html#advanced-data-layout
in 1D:
input[b * idist + x * istride]
output[b * odist + x * ostride]
in 2D:
input[b * idist + (x * inembed[1] + y) * istride]
output[b * odist + (x * onembed[1] + y) * ostride]
in 3D:
input[b * idist + ((x * inembed[1] + y) * inembed[2] + z) * istride]
output[b * odist + ((x * onembed[1] + y) * onembed[2] + z) * ostride]
"""
# At this point, _default_fft_func() guarantees that for F-order arrays
# we only need to consider C2C, and not C2R or R2C.
# TODO(leofang): figure out if we really have to skip F-order?
in_dimensions = [shape[d] for d in fft_axes]
if order == "F":
in_dimensions = in_dimensions[::-1]
in_dimensions = tuple(in_dimensions)
if fft_type in (cufft.CUFFT_C2C, cufft.CUFFT_Z2Z):
out_dimensions = in_dimensions
plan_dimensions = in_dimensions
else:
out_dimensions = list(in_dimensions)
if out_size is not None: # for C2R & R2C
out_dimensions[-1] = out_size # only valid for C order!
out_dimensions = tuple(out_dimensions)
if fft_type in (cufft.CUFFT_R2C, cufft.CUFFT_D2Z):
plan_dimensions = in_dimensions
else: # CUFFT_C2R or CUFFT_Z2D
plan_dimensions = out_dimensions
inembed = in_dimensions
onembed = out_dimensions
if fft_axes == tuple(range(ndim)):
# tranfsorm over all axes
nbatch = 1
idist = odist = 1 # doesn't matter since nbatch = 1
istride = ostride = 1
else:
# batch along the first or the last axis
if 0 not in fft_axes:
# don't FFT along the first min_axis_fft axes
min_axis_fft = _reduce(min, fft_axes)
nbatch = _prod(shape[:min_axis_fft])
if order == "C":
# C-ordered GPU array with batch along first dim
idist = _prod(in_dimensions)
odist = _prod(out_dimensions)
istride = 1
ostride = 1
elif order == "F":
# F-ordered GPU array with batch along first dim
idist = 1
odist = 1
istride = nbatch
ostride = nbatch
elif (ndim - 1) not in fft_axes:
# don't FFT along the last axis
num_axes_batch = ndim - len(fft_axes)
nbatch = _prod(shape[-num_axes_batch:])
if order == "C":
# C-ordered GPU array with batch along last dim
idist = 1
odist = 1
istride = nbatch
ostride = nbatch
elif order == "F":
# F-ordered GPU array with batch along last dim
idist = _prod(in_dimensions)
odist = _prod(out_dimensions)
istride = 1
ostride = 1
else:
raise ValueError(
"General subsets of FFT axes not currently supported for "
"GPU case (Can only batch FFT over the first or last "
"spatial axes)."
)
plan = cufft.PlanNd(
shape=plan_dimensions,
inembed=inembed,
istride=istride,
idist=idist,
onembed=onembed,
ostride=ostride,
odist=odist,
fft_type=fft_type,
batch=nbatch,
order=order,
last_axis=fft_axes[-1],
last_size=out_size,
)
return plan
|
https://github.com/cupy/cupy/issues/3241
|
np.fft.fft(np.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in fft
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 188, in fft
output = _raw_fft(a, n, axis, False, True, inv_norm)
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 57, in _raw_fft
raise ValueError("Invalid number of FFT data points (%d) specified."
ValueError: Invalid number of FFT data points (0) specified.
cupy.fft.fft(cupy.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 175, in _fft
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 142, in _fft_c2c
a = _exec_fft(a, direction, 'C2C', norm, axis, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 91, in _exec_fft
batch = a.size // a.shape[-1]
ZeroDivisionError: integer division or modulo by zero
cupy.fft.fft(cupy.array([]), n=0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 155, in _fft
raise ValueError(
ValueError: Invalid number of FFT data points (0) specified.
|
ValueError
|
def _exec_fftn(
a,
direction,
value_type,
norm,
axes,
overwrite_x,
plan=None,
out=None,
out_size=None,
):
fft_type = _convert_fft_type(a.dtype, value_type)
if a.flags.c_contiguous:
order = "C"
elif a.flags.f_contiguous:
order = "F"
else:
raise ValueError("a must be contiguous")
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
plan = curr_plan
# don't check repeated usage; it's done in _default_fft_func()
if plan is None:
# generate a plan
plan = _get_cufft_plan_nd(
a.shape, fft_type, axes=axes, order=order, out_size=out_size
)
else:
if not isinstance(plan, cufft.PlanNd):
raise ValueError("expected plan to have type cufft.PlanNd")
if order != plan.order:
raise ValueError(
"array orders mismatch (plan: {}, input: {})".format(plan.order, order)
)
if a.flags.c_contiguous:
expected_shape = [a.shape[ax] for ax in axes]
if value_type == "C2R":
expected_shape[-1] = out_size
else:
# plan.shape will be reversed for Fortran-ordered inputs
expected_shape = [a.shape[ax] for ax in axes[::-1]]
# TODO(leofang): modify the shape for C2R
expected_shape = tuple(expected_shape)
if expected_shape != plan.shape:
raise ValueError(
"The cuFFT plan and a.shape do not match: "
"plan.shape = {}, expected_shape={}, a.shape = {}".format(
plan.shape, expected_shape, a.shape
)
)
if fft_type != plan.fft_type:
raise ValueError("cuFFT plan dtype mismatch.")
if value_type != "C2C":
if axes[-1] != plan.last_axis:
raise ValueError("The last axis for R2C/C2R mismatch")
if out_size != plan.last_size:
raise ValueError("The size along the last R2C/C2R axis mismatch")
# TODO(leofang): support in-place transform for R2C/C2R
if overwrite_x and value_type == "C2C":
out = a
elif out is None:
out = plan.get_output_array(a, order=order)
else:
plan.check_output_array(a, out)
if out.size != 0:
plan.fft(a, out, direction)
# normalize by the product of the shape along the transformed axes
arr = a if fft_type in (cufft.CUFFT_R2C, cufft.CUFFT_D2Z) else out
sz = _prod([arr.shape[ax] for ax in axes])
if norm is None:
if direction == cufft.CUFFT_INVERSE:
out /= sz
else:
out /= math.sqrt(sz)
return out
|
def _exec_fftn(
a,
direction,
value_type,
norm,
axes,
overwrite_x,
plan=None,
out=None,
out_size=None,
):
fft_type = _convert_fft_type(a.dtype, value_type)
if a.flags.c_contiguous:
order = "C"
elif a.flags.f_contiguous:
order = "F"
else:
raise ValueError("a must be contiguous")
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
plan = curr_plan
# don't check repeated usage; it's done in _default_fft_func()
if plan is None:
# generate a plan
plan = _get_cufft_plan_nd(
a.shape, fft_type, axes=axes, order=order, out_size=out_size
)
else:
if not isinstance(plan, cufft.PlanNd):
raise ValueError("expected plan to have type cufft.PlanNd")
if order != plan.order:
raise ValueError(
"array orders mismatch (plan: {}, input: {})".format(plan.order, order)
)
if a.flags.c_contiguous:
expected_shape = [a.shape[ax] for ax in axes]
if value_type == "C2R":
expected_shape[-1] = out_size
else:
# plan.shape will be reversed for Fortran-ordered inputs
expected_shape = [a.shape[ax] for ax in axes[::-1]]
# TODO(leofang): modify the shape for C2R
expected_shape = tuple(expected_shape)
if expected_shape != plan.shape:
raise ValueError(
"The cuFFT plan and a.shape do not match: "
"plan.shape = {}, expected_shape={}, a.shape = {}".format(
plan.shape, expected_shape, a.shape
)
)
if fft_type != plan.fft_type:
raise ValueError("cuFFT plan dtype mismatch.")
if value_type != "C2C":
if axes[-1] != plan.last_axis:
raise ValueError("The last axis for R2C/C2R mismatch")
if out_size != plan.last_size:
raise ValueError("The size along the last R2C/C2R axis mismatch")
# TODO(leofang): support in-place transform for R2C/C2R
if overwrite_x and value_type == "C2C":
out = a
elif out is None:
out = plan.get_output_array(a, order=order)
else:
plan.check_output_array(a, out)
plan.fft(a, out, direction)
# normalize by the product of the shape along the transformed axes
arr = a if fft_type in (cufft.CUFFT_R2C, cufft.CUFFT_D2Z) else out
sz = _prod([arr.shape[ax] for ax in axes])
if norm is None:
if direction == cufft.CUFFT_INVERSE:
out /= sz
else:
out /= math.sqrt(sz)
return out
|
https://github.com/cupy/cupy/issues/3241
|
np.fft.fft(np.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in fft
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 188, in fft
output = _raw_fft(a, n, axis, False, True, inv_norm)
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 57, in _raw_fft
raise ValueError("Invalid number of FFT data points (%d) specified."
ValueError: Invalid number of FFT data points (0) specified.
cupy.fft.fft(cupy.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 175, in _fft
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 142, in _fft_c2c
a = _exec_fft(a, direction, 'C2C', norm, axis, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 91, in _exec_fft
batch = a.size // a.shape[-1]
ZeroDivisionError: integer division or modulo by zero
cupy.fft.fft(cupy.array([]), n=0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 155, in _fft
raise ValueError(
ValueError: Invalid number of FFT data points (0) specified.
|
ValueError
|
def _fftn(
a,
s,
axes,
norm,
direction,
value_type="C2C",
order="A",
plan=None,
overwrite_x=False,
out=None,
):
if norm not in (None, "ortho"):
raise ValueError('Invalid norm value %s, should be None or "ortho".' % norm)
axes, axes_sorted = _prep_fftn_axes(a.ndim, s, axes, value_type)
if not axes_sorted:
if value_type == "C2C":
return a
else:
raise IndexError("list index out of range")
a = _convert_dtype(a, value_type)
if order == "A":
if a.flags.f_contiguous:
order = "F"
elif a.flags.c_contiguous:
order = "C"
else:
a = cupy.ascontiguousarray(a)
order = "C"
elif order not in ["C", "F"]:
raise ValueError("Unsupported order: {}".format(order))
# Note: need to call _cook_shape prior to sorting the axes
a = _cook_shape(a, s, axes, value_type, order=order)
for n in a.shape:
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
if order == "C" and not a.flags.c_contiguous:
a = cupy.ascontiguousarray(a)
elif order == "F" and not a.flags.f_contiguous:
a = cupy.asfortranarray(a)
# _cook_shape tells us input shape only, and not output shape
out_size = _get_fftn_out_size(a.shape, s, axes_sorted[-1], value_type)
a = _exec_fftn(
a,
direction,
value_type,
norm=norm,
axes=axes_sorted,
overwrite_x=overwrite_x,
plan=plan,
out=out,
out_size=out_size,
)
return a
|
def _fftn(
a,
s,
axes,
norm,
direction,
value_type="C2C",
order="A",
plan=None,
overwrite_x=False,
out=None,
):
if norm not in (None, "ortho"):
raise ValueError('Invalid norm value %s, should be None or "ortho".' % norm)
axes, axes_sorted = _prep_fftn_axes(a.ndim, s, axes, value_type)
if not axes_sorted:
if value_type == "C2C":
return a
else:
raise IndexError("list index out of range")
a = _convert_dtype(a, value_type)
if order == "A":
if a.flags.f_contiguous:
order = "F"
elif a.flags.c_contiguous:
order = "C"
else:
a = cupy.ascontiguousarray(a)
order = "C"
elif order not in ["C", "F"]:
raise ValueError("Unsupported order: {}".format(order))
# Note: need to call _cook_shape prior to sorting the axes
a = _cook_shape(a, s, axes, value_type, order=order)
if order == "C" and not a.flags.c_contiguous:
a = cupy.ascontiguousarray(a)
elif order == "F" and not a.flags.f_contiguous:
a = cupy.asfortranarray(a)
# _cook_shape tells us input shape only, and not output shape
out_size = _get_fftn_out_size(a.shape, s, axes_sorted[-1], value_type)
a = _exec_fftn(
a,
direction,
value_type,
norm=norm,
axes=axes_sorted,
overwrite_x=overwrite_x,
plan=plan,
out=out,
out_size=out_size,
)
return a
|
https://github.com/cupy/cupy/issues/3241
|
np.fft.fft(np.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in fft
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 188, in fft
output = _raw_fft(a, n, axis, False, True, inv_norm)
File "/path/to/python3.8/site-packages/numpy-1.18.2-py3.8-linux-x86_64.egg/numpy/fft/_pocketfft.py", line 57, in _raw_fft
raise ValueError("Invalid number of FFT data points (%d) specified."
ValueError: Invalid number of FFT data points (0) specified.
cupy.fft.fft(cupy.array([]))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 175, in _fft
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 142, in _fft_c2c
a = _exec_fft(a, direction, 'C2C', norm, axis, overwrite_x, plan=plan)
File "/path/to/cupy/cupy/fft/fft.py", line 91, in _exec_fft
batch = a.size // a.shape[-1]
ZeroDivisionError: integer division or modulo by zero
cupy.fft.fft(cupy.array([]), n=0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/cupy/cupy/fft/fft.py", line 496, in fft
return _fft(a, (n,), (axis,), norm, cupy.cuda.cufft.CUFFT_FORWARD)
File "/path/to/cupy/cupy/fft/fft.py", line 155, in _fft
raise ValueError(
ValueError: Invalid number of FFT data points (0) specified.
|
ValueError
|
def _min_or_max(self, axis, out, min_or_max, sum_duplicates, non_zero):
if out is not None:
raise ValueError(("Sparse matrices do not support an 'out' parameter."))
util.validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("zero-size array to reduction operation")
zero = cupy.zeros((), dtype=self.dtype)
if self.nnz == 0:
return zero
if sum_duplicates:
self.sum_duplicates()
m = min_or_max(self.data)
if non_zero:
return m
if self.nnz != internal.prod(self.shape):
if min_or_max is cupy.min:
m = cupy.minimum(zero, m)
elif min_or_max is cupy.max:
m = cupy.maximum(zero, m)
else:
assert False
return m
if axis == 0 or axis == 1:
return self._min_or_max_axis(axis, min_or_max, sum_duplicates, non_zero)
else:
raise ValueError("axis out of range")
|
def _min_or_max(self, axis, out, min_or_max, sum_duplicates, non_zero):
if out is not None:
raise ValueError(("Sparse matrices do not support an 'out' parameter."))
util.validateaxis(axis)
if axis == 0 or axis == 1:
return self._min_or_max_axis(axis, min_or_max, sum_duplicates, non_zero)
else:
raise ValueError("axis out of range")
|
https://github.com/cupy/cupy/issues/3506
|
import cupy
import cupyx
m = cupy.random.rand(100).reshape(10, 10)
m[m < 0.95] = 0
m = cupyx.scipy.sparse.csr_matrix(m)
m.min()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ext-mtakagi/cupy/cupyx/scipy/sparse/data.py", line 284, in min
return self._min_or_max(axis, out, cupy.min, sum_duplicates, nonzero)
File "/home/ext-mtakagi/cupy/cupyx/scipy/sparse/data.py", line 163, in _min_or_max
axis, min_or_max, sum_duplicates, non_zero)
File "/home/ext-mtakagi/cupy/cupyx/scipy/sparse/data.py", line 139, in _min_or_max_axis
N = self.shape[axis]
TypeError: tuple indices must be integers or slices, not NoneType
|
TypeError
|
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if _scipy_available and scipy.sparse.issparse(arg1):
x = arg1.todia()
data = x.data
offsets = x.offsets
shape = x.shape
dtype = x.dtype
copy = False
elif isinstance(arg1, tuple):
data, offsets = arg1
if shape is None:
raise ValueError("expected a shape argument")
else:
raise ValueError("unrecognized form for dia_matrix constructor")
data = cupy.array(data, dtype=dtype, copy=copy)
data = cupy.atleast_2d(data)
offsets = cupy.array(offsets, dtype="i", copy=copy)
offsets = cupy.atleast_1d(offsets)
if offsets.ndim != 1:
raise ValueError("offsets array must have rank 1")
if data.ndim != 2:
raise ValueError("data array must have rank 2")
if data.shape[0] != len(offsets):
raise ValueError(
"number of diagonals (%d) does not match the number of "
"offsets (%d)" % (data.shape[0], len(offsets))
)
sorted_offsets = cupy.sort(offsets)
if (sorted_offsets[:-1] == sorted_offsets[1:]).any():
raise ValueError("offset array contains duplicate values")
self.data = data
self.offsets = offsets
if not util.isshape(shape):
raise ValueError("invalid shape (must be a 2-tuple of int)")
self._shape = int(shape[0]), int(shape[1])
|
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if isinstance(arg1, tuple):
data, offsets = arg1
if shape is None:
raise ValueError("expected a shape argument")
else:
raise ValueError("unrecognized form for dia_matrix constructor")
data = cupy.array(data, dtype=dtype, copy=copy)
data = cupy.atleast_2d(data)
offsets = cupy.array(offsets, dtype="i", copy=copy)
offsets = cupy.atleast_1d(offsets)
if offsets.ndim != 1:
raise ValueError("offsets array must have rank 1")
if data.ndim != 2:
raise ValueError("data array must have rank 2")
if data.shape[0] != len(offsets):
raise ValueError(
"number of diagonals (%d) does not match the number of "
"offsets (%d)" % (data.shape[0], len(offsets))
)
sorted_offsets = cupy.sort(offsets)
if (sorted_offsets[:-1] == sorted_offsets[1:]).any():
raise ValueError("offset array contains duplicate values")
self.data = data
self.offsets = offsets
if not util.isshape(shape):
raise ValueError("invalid shape (must be a 2-tuple of int)")
self._shape = int(shape[0]), int(shape[1])
|
https://github.com/cupy/cupy/issues/3158
|
In [1]: import numpy
In [2]: import scipy.sparse
In [3]: import cupy.sparse
In [4]: a_host = numpy.array([[0, 1, 0],
...: [2, 0, 3],
...: [0, 4, 0]], dtype=float)
In [5]: asp_host = scipy.sparse.dia_matrix(a_host)
In [6]: asp_dev = cupy.sparse.dia_matrix(asp_host)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-8498d6de7c98> in <module>
----> 1 asp_dev = cupy.sparse.dia_matrix(asp_host)
/datasets/jkirkham/miniconda/envs/rapids13dev/lib/python3.6/site-packages/cupyx/scipy/sparse/dia.py in __init__(self, arg1, shape, dtype, copy)
41 else:
42 raise ValueError(
---> 43 'unrecognized form for dia_matrix constructor')
44
45 data = cupy.array(data, dtype=dtype, copy=copy)
ValueError: unrecognized form for dia_matrix constructor
|
ValueError
|
def norm(x, ord=None, axis=None, keepdims=False):
"""Returns one of matrix norms specified by ``ord`` parameter.
See numpy.linalg.norm for more detail.
Args:
x (cupy.ndarray): Array to take norm. If ``axis`` is None,
``x`` must be 1-D or 2-D.
ord (non-zero int, inf, -inf, 'fro'): Norm type.
axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over
``axis``.
keepdims (bool): If this is set ``True``, the axes which are normed
over are left.
Returns:
cupy.ndarray
"""
if not issubclass(x.dtype.type, numpy.inexact):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if (
ord is None
or (ndim == 1 and ord == 2)
or (ndim == 2 and ord in ("f", "fro"))
):
if x.dtype.kind == "c":
s = abs(x.ravel())
s *= s
ret = cupy.sqrt(s.sum())
else:
ret = cupy.sqrt((x * x).sum())
if keepdims:
ret = ret.reshape((1,) * ndim)
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == numpy.Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -numpy.Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
# Convert to Python float in accordance with NumPy
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return abs(x).sum(axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
if x.dtype.kind == "c":
s = abs(x)
s *= s
else:
s = x * x
return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
else:
try:
float(ord)
except TypeError:
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = absx.sum(axis=axis, keepdims=keepdims)
ret **= cupy.reciprocal(ord, dtype=ret.dtype)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError(
"Invalid axis %r for an array with shape %r" % (axis, x.shape)
)
if row_axis == col_axis:
raise ValueError("Duplicate axes given.")
if ord == 2:
op_max = functools.partial(cupy.take, indices=0)
ret = _multi_svd_norm(x, row_axis, col_axis, op_max)
elif ord == -2:
op_min = functools.partial(cupy.take, indices=-1)
ret = _multi_svd_norm(x, row_axis, col_axis, op_min)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
elif ord == numpy.Inf:
if row_axis > col_axis:
row_axis -= 1
ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
elif ord == -numpy.Inf:
if row_axis > col_axis:
row_axis -= 1
ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
elif ord in [None, "fro", "f"]:
if x.dtype.kind == "c":
s = abs(x)
s *= s
ret = cupy.sqrt(s.sum(axis=axis))
else:
ret = cupy.sqrt((x * x).sum(axis=axis))
elif ord == "nuc":
ret = _multi_svd_norm(x, row_axis, col_axis, cupy.sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
|
def norm(x, ord=None, axis=None, keepdims=False):
"""Returns one of matrix norms specified by ``ord`` parameter.
See numpy.linalg.norm for more detail.
Args:
x (cupy.ndarray): Array to take norm. If ``axis`` is None,
``x`` must be 1-D or 2-D.
ord (non-zero int, inf, -inf, 'fro'): Norm type.
axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over
``axis``.
keepdims (bool): If this is set ``True``, the axes which are normed
over are left.
Returns:
cupy.ndarray
"""
if not issubclass(x.dtype.type, numpy.inexact):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if (
ord is None
or (ndim == 1 and ord == 2)
or (ndim == 2 and ord in ("f", "fro"))
):
if x.dtype.kind == "c":
s = abs(x.ravel())
s *= s
ret = cupy.sqrt(s.sum())
else:
ret = cupy.sqrt((x * x).sum())
if keepdims:
ret = ret.reshape((1,) * ndim)
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == numpy.Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -numpy.Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
# Convert to Python float in accordance with NumPy
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return abs(x).sum(axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
if x.dtype.kind == "c":
s = abs(x)
s *= s
else:
s = x * x
return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
else:
try:
float(ord)
except TypeError:
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = absx.sum(axis=axis, keepdims=keepdims)
ret **= cupy.reciprocal(ord, dtype=ret.dtype)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError(
"Invalid axis %r for an array with shape %r" % (axis, x.shape)
)
if row_axis == col_axis:
raise ValueError("Duplicate axes given.")
if ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
elif ord == numpy.Inf:
if row_axis > col_axis:
row_axis -= 1
ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
elif ord == -numpy.Inf:
if row_axis > col_axis:
row_axis -= 1
ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
elif ord in [None, "fro", "f"]:
if x.dtype.kind == "c":
s = abs(x)
s *= s
ret = cupy.sqrt(s.sum(axis=axis))
else:
ret = cupy.sqrt((x * x).sum(axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
|
https://github.com/cupy/cupy/issues/3053
|
import numpy as np
a = [[2, 0, 1], [-1, 1, 0], [-3, 3, 0]]
a = np.asarray(a, dtype=np.float64)
np.linalg.norm(a, ord=2)
4.723421263784789
import cupy as cp
b = cp.asarray(a)
cp.linalg.norm(b, ord=2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/leofang/test/cupy2/cupy/linalg/norms.py", line 124, in norm
raise ValueError('Invalid norm order for matrices.')
ValueError: Invalid norm order for matrices.
|
ValueError
|
def _check_cusolver_dev_info_if_synchronization_allowed(routine, dev_info):
# `dev_info` contains a single integer, the status code of a cuSOLVER
# routine call. It is referred to as "devInfo" in the official cuSOLVER
# documentation.
assert isinstance(dev_info, core.ndarray)
assert dev_info.size == 1
config_linalg = cupyx._ufunc_config.get_config_linalg()
# Only 'ignore' and 'raise' are currently supported.
if config_linalg == "ignore":
return
assert config_linalg == "raise"
dev_info_host = dev_info.item()
if dev_info_host != 0:
raise linalg.LinAlgError(
"Error reported by {} in cuSOLVER. devInfo = {}. Please refer"
" to the cuSOLVER documentation.".format(routine.__name__, dev_info_host)
)
|
def _check_cusolver_dev_info_if_synchronization_allowed(routine, dev_info):
# `dev_info` contains a single integer, the status code of a cuSOLVER
# routine call. It is referred to as "devInfo" in the official cuSOLVER
# documentation.
assert isinstance(dev_info, core.ndarray)
assert dev_info.size == 1
config_linalg = cupyx._ufunc_config.config.linalg
# Only 'ignore' and 'raise' are currently supported.
if config_linalg == "ignore":
return
assert config_linalg == "raise"
dev_info_host = dev_info.item()
if dev_info_host != 0:
raise linalg.LinAlgError(
"Error reported by {} in cuSOLVER. devInfo = {}. Please refer"
" to the cuSOLVER documentation.".format(routine.__name__, dev_info_host)
)
|
https://github.com/cupy/cupy/issues/2911
|
Traceback (most recent call last):
File "cpu-svd.py", line 10, in <module>
u.compute()
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 165, in compute
(result,) = compute(self, traverse=False, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 436, in compute
results = schedule(dsk, keys, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/threaded.py", line 81, in get
**kwargs
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 486, in get_async
raise_exception(exc, tb)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 316, in reraise
raise exc
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 222, in execute_task
result = _execute_task(task, data)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/optimization.py", line 982, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 149, in get
result = _execute_task(task, cache)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/array/linalg.py", line 48, in _wrapped_qr
return np.linalg.qr(a)
File "<__array_function__ internals>", line 6, in qr
File "cupy/core/core.pyx", line 1344, in cupy.core.core.ndarray.__array_function__
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/decomposition.py", line 248, in qr
geqrf, dev_info)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/util.py", line 42, in _check_cusolver_dev_info_if_synchronization_allowed
print(cupyx._ufunc_config.config.linalg)
AttributeError: '_thread._local' object has no attribute 'linalg'
|
AttributeError
|
def _check_cublas_info_array_if_synchronization_allowed(routine, info_array):
# `info_array` contains integers, the status codes of a cuBLAS routine
# call. It is referrd to as "infoArray" or "devInfoArray" in the official
# cuBLAS documentation.
assert isinstance(info_array, core.ndarray)
assert info_array.ndim == 1
config_linalg = cupyx._ufunc_config.get_config_linalg()
# Only 'ignore' and 'raise' are currently supported.
if config_linalg == "ignore":
return
assert config_linalg == "raise"
if (info_array != 0).any():
raise linalg.LinAlgError(
"Error reported by {} in cuBLAS. infoArray/devInfoArray = {}."
" Please refer to the cuBLAS documentation.".format(
routine.__name__, info_array
)
)
|
def _check_cublas_info_array_if_synchronization_allowed(routine, info_array):
# `info_array` contains integers, the status codes of a cuBLAS routine
# call. It is referrd to as "infoArray" or "devInfoArray" in the official
# cuBLAS documentation.
assert isinstance(info_array, core.ndarray)
assert info_array.ndim == 1
config_linalg = cupyx._ufunc_config.config.linalg
# Only 'ignore' and 'raise' are currently supported.
if config_linalg == "ignore":
return
assert config_linalg == "raise"
if (info_array != 0).any():
raise linalg.LinAlgError(
"Error reported by {} in cuBLAS. infoArray/devInfoArray = {}."
" Please refer to the cuBLAS documentation.".format(
routine.__name__, info_array
)
)
|
https://github.com/cupy/cupy/issues/2911
|
Traceback (most recent call last):
File "cpu-svd.py", line 10, in <module>
u.compute()
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 165, in compute
(result,) = compute(self, traverse=False, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 436, in compute
results = schedule(dsk, keys, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/threaded.py", line 81, in get
**kwargs
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 486, in get_async
raise_exception(exc, tb)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 316, in reraise
raise exc
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 222, in execute_task
result = _execute_task(task, data)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/optimization.py", line 982, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 149, in get
result = _execute_task(task, cache)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/array/linalg.py", line 48, in _wrapped_qr
return np.linalg.qr(a)
File "<__array_function__ internals>", line 6, in qr
File "cupy/core/core.pyx", line 1344, in cupy.core.core.ndarray.__array_function__
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/decomposition.py", line 248, in qr
geqrf, dev_info)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/util.py", line 42, in _check_cusolver_dev_info_if_synchronization_allowed
print(cupyx._ufunc_config.config.linalg)
AttributeError: '_thread._local' object has no attribute 'linalg'
|
AttributeError
|
def seterr(*, divide=None, over=None, under=None, invalid=None, linalg=None):
"""
TODO(hvy): Write docs.
"""
if divide is not None:
raise NotImplementedError()
if over is not None:
raise NotImplementedError()
if under is not None:
raise NotImplementedError()
if invalid is not None:
raise NotImplementedError()
if linalg is not None:
if linalg not in ("ignore", "raise"):
raise NotImplementedError()
old_state = geterr()
_config.divide = divide
_config.under = under
_config.over = over
_config.invalid = invalid
_config.linalg = linalg
return old_state
|
def seterr(*, divide=None, over=None, under=None, invalid=None, linalg=None):
"""
TODO(hvy): Write docs.
"""
if divide is not None:
raise NotImplementedError()
if over is not None:
raise NotImplementedError()
if under is not None:
raise NotImplementedError()
if invalid is not None:
raise NotImplementedError()
if linalg is not None:
if linalg not in ("ignore", "raise"):
raise NotImplementedError()
old_state = geterr()
config.divide = divide
config.under = under
config.over = over
config.invalid = invalid
config.linalg = linalg
return old_state
|
https://github.com/cupy/cupy/issues/2911
|
Traceback (most recent call last):
File "cpu-svd.py", line 10, in <module>
u.compute()
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 165, in compute
(result,) = compute(self, traverse=False, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 436, in compute
results = schedule(dsk, keys, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/threaded.py", line 81, in get
**kwargs
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 486, in get_async
raise_exception(exc, tb)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 316, in reraise
raise exc
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 222, in execute_task
result = _execute_task(task, data)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/optimization.py", line 982, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 149, in get
result = _execute_task(task, cache)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/array/linalg.py", line 48, in _wrapped_qr
return np.linalg.qr(a)
File "<__array_function__ internals>", line 6, in qr
File "cupy/core/core.pyx", line 1344, in cupy.core.core.ndarray.__array_function__
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/decomposition.py", line 248, in qr
geqrf, dev_info)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/util.py", line 42, in _check_cusolver_dev_info_if_synchronization_allowed
print(cupyx._ufunc_config.config.linalg)
AttributeError: '_thread._local' object has no attribute 'linalg'
|
AttributeError
|
def geterr():
"""
TODO(hvy): Write docs.
"""
return dict(
divide=get_config_divide(),
over=get_config_over(),
under=get_config_under(),
invalid=get_config_invalid(),
linalg=get_config_linalg(),
)
|
def geterr():
"""
TODO(hvy): Write docs.
"""
return dict(
divide=config.divide,
over=config.over,
under=config.under,
invalid=config.invalid,
linalg=config.linalg,
)
|
https://github.com/cupy/cupy/issues/2911
|
Traceback (most recent call last):
File "cpu-svd.py", line 10, in <module>
u.compute()
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 165, in compute
(result,) = compute(self, traverse=False, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/base.py", line 436, in compute
results = schedule(dsk, keys, **kwargs)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/threaded.py", line 81, in get
**kwargs
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 486, in get_async
raise_exception(exc, tb)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 316, in reraise
raise exc
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/local.py", line 222, in execute_task
result = _execute_task(task, data)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/optimization.py", line 982, in __call__
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 149, in get
result = _execute_task(task, cache)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/core.py", line 119, in _execute_task
return func(*args2)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/dask/array/linalg.py", line 48, in _wrapped_qr
return np.linalg.qr(a)
File "<__array_function__ internals>", line 6, in qr
File "cupy/core/core.pyx", line 1344, in cupy.core.core.ndarray.__array_function__
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/decomposition.py", line 248, in qr
geqrf, dev_info)
File "/datasets/bzaitlen/miniconda3/envs/rapids-12/lib/python3.7/site-packages/cupy/linalg/util.py", line 42, in _check_cusolver_dev_info_if_synchronization_allowed
print(cupyx._ufunc_config.config.linalg)
AttributeError: '_thread._local' object has no attribute 'linalg'
|
AttributeError
|
def argmax(a, axis=None, dtype=None, out=None, keepdims=False):
"""Returns the indices of the maximum along an axis.
Args:
a (cupy.ndarray): Array to take argmax.
axis (int): Along which axis to find the maximum. ``a`` is flattened by
default.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis
of length one.
Returns:
cupy.ndarray: The indices of the maximum of ``a`` along an axis.
.. note::
``dtype`` and ``keepdim`` arguments are specific to CuPy. They are
not in NumPy.
.. note::
``axis`` argument accepts a tuple of ints, but this is specific to
CuPy. NumPy does not support it.
.. seealso:: :func:`numpy.argmax`
"""
# TODO(okuta): check type
return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
|
def argmax(a, axis=None, dtype=None, out=None, keepdims=False):
"""Returns the indices of the maximum along an axis.
Args:
a (cupy.ndarray): Array to take argmax.
axis (int): Along which axis to find the maximum. ``a`` is flattened by
default.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis
of length one.
Returns:
cupy.ndarray: The indices of the maximum of ``a`` along an axis.
.. seealso:: :func:`numpy.argmax`
"""
# TODO(okuta): check type
return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
|
https://github.com/cupy/cupy/issues/2595
|
import cupy as cp
a = cp.arange(60).reshape(3,4,5)
a.argmax(axis=(0,1))
array([11, 11, 11, 11, 11], dtype=int64)
import numpy as np
a = np.arange(60).reshape(3,4,5)
a.argmax(axis=(0,1))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'tuple' object cannot be interpreted as an integer
|
TypeError
|
def argmin(a, axis=None, dtype=None, out=None, keepdims=False):
"""Returns the indices of the minimum along an axis.
Args:
a (cupy.ndarray): Array to take argmin.
axis (int): Along which axis to find the minimum. ``a`` is flattened by
default.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis
of length one.
Returns:
cupy.ndarray: The indices of the minimum of ``a`` along an axis.
.. note::
``dtype`` and ``keepdim`` arguments are specific to CuPy. They are
not in NumPy.
.. note::
``axis`` argument accepts a tuple of ints, but this is specific to
CuPy. NumPy does not support it.
.. seealso:: :func:`numpy.argmin`
"""
# TODO(okuta): check type
return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
|
def argmin(a, axis=None, dtype=None, out=None, keepdims=False):
"""Returns the indices of the minimum along an axis.
Args:
a (cupy.ndarray): Array to take argmin.
axis (int): Along which axis to find the minimum. ``a`` is flattened by
default.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis
of length one.
Returns:
cupy.ndarray: The indices of the minimum of ``a`` along an axis.
.. seealso:: :func:`numpy.argmin`
"""
# TODO(okuta): check type
return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
|
https://github.com/cupy/cupy/issues/2595
|
import cupy as cp
a = cp.arange(60).reshape(3,4,5)
a.argmax(axis=(0,1))
array([11, 11, 11, 11, 11], dtype=int64)
import numpy as np
a = np.arange(60).reshape(3,4,5)
a.argmax(axis=(0,1))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'tuple' object cannot be interpreted as an integer
|
TypeError
|
def __init__(self, msg, source, name, options):
self._msg = msg
self.source = source
self.name = name
self.options = options
super(CompileException, self).__init__()
|
def __init__(self, msg, source, name, options):
self._msg = msg
self.source = source
self.name = name
self.options = options
|
https://github.com/cupy/cupy/issues/2301
|
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.6/multiprocessing/pool.py", line 463, in _handle_results
task = get()
File "/usr/lib/python3.6/multiprocessing/connection.py", line 251, in recv
return _ForkingPickler.loads(buf.getbuffer())
File "cupy/cuda/memory.pyx", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__
TypeError: __init__() takes exactly 3 positional arguments (2 given)
|
TypeError
|
def _proc_as_batch(proc, x, axis):
if x.shape[axis] == 0:
return cupy.empty_like(x)
trans, revert = _axis_to_first(x, axis)
t = x.transpose(trans)
s = t.shape
r = t.reshape(x.shape[axis], -1)
pos = 1
size = r.size
batch = r.shape[1]
while pos < size:
proc(pos, batch, r, size=size)
pos <<= 1
return r.reshape(s).transpose(revert)
|
def _proc_as_batch(proc, x, axis):
trans, revert = _axis_to_first(x, axis)
t = x.transpose(trans)
s = t.shape
r = t.reshape(x.shape[axis], -1)
pos = 1
size = r.size
batch = r.shape[1]
while pos < size:
proc(pos, batch, r, size=size)
pos <<= 1
return r.reshape(s).transpose(revert)
|
https://github.com/cupy/cupy/issues/1455
|
cupy.cumprod(cupy.ones((0, 3)))
array([], dtype=float64)
cupy.cumprod(cupy.ones((0, 3)), axis=1)
array([], shape=(0, 3), dtype=float64)
cupy.cumprod(cupy.ones((0, 3)), axis=0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/kataoka/cupy/cupy/math/sumprod.py", line 190, in cumprod
return _cum_core(a, axis, dtype, out, _cumprod_kern, _cumprod_batch_kern)
File "/home/kataoka/cupy/cupy/math/sumprod.py", line 98, in _cum_core
return _proc_as_batch(batch_kern, out, axis=axis)
File "/home/kataoka/cupy/cupy/math/sumprod.py", line 66, in _proc_as_batch
r = t.reshape(x.shape[axis], -1)
File "cupy/core/core.pyx", line 523, in cupy.core.core.ndarray.reshape
File "cupy/core/core.pyx", line 496, in cupy.core.core.ndarray._reshape
File "cupy/core/internal.pyx", line 139, in cupy.core.internal.infer_unknown_dimension
ZeroDivisionError: integer division or modulo by zero
|
ZeroDivisionError
|
def module_extension_sources(file, use_cython, no_cuda):
pyx, others = ensure_module_file(file)
base = path.join(*pyx.split("."))
if use_cython:
pyx = base + ".pyx"
if not os.path.exists(pyx):
use_cython = False
print("NOTICE: Skipping cythonize as {} does not exist.".format(pyx))
if not use_cython:
pyx = base + ".cpp"
# If CUDA SDK is not available, remove CUDA C files from extension sources
# and use stubs defined in header files.
if no_cuda:
others1 = []
for source in others:
base, ext = os.path.splitext(source)
if ext == ".cu":
continue
others1.append(source)
others = others1
return [pyx] + others
|
def module_extension_sources(file, use_cython, no_cuda):
pyx, others = ensure_module_file(file)
ext = ".pyx" if use_cython else ".cpp"
pyx = path.join(*pyx.split(".")) + ext
# If CUDA SDK is not available, remove CUDA C files from extension sources
# and use stubs defined in header files.
if no_cuda:
others1 = []
for source in others:
base, ext = os.path.splitext(source)
if ext == ".cu":
continue
others1.append(source)
others = others1
return [pyx] + others
|
https://github.com/cupy/cupy/issues/906
|
[1/3] Cythonizing cupy/cudnn.pyx
Compiling /tmp/pip-build-z0vd49d6/cupy/cupy/cudnn.pyx
Error compiling Cython file:
------------------------------------------------------------
...
compute_type = cudnn.CUDNN_DATA_FLOAT
if ndim != 2:
c_pad = pad
c_stride = stride
c_dilation.assign(ndim, 1)
^
------------------------------------------------------------
cupy/cudnn.pyx:133:25: Compiler crash in AnalyseExpressionsTransform
ModuleNode.body = StatListNode(cudnn.pyx:1:0)
StatListNode.stats[21] = StatListNode(cudnn.pyx:115:6)
StatListNode.stats[0] = CFuncDefNode(cudnn.pyx:115:6,
args = [...]/8,
modifiers = [...]/0,
overridable = 1,
visibility = 'private')
File 'Nodes.py', line 430, in analyse_expressions: StatListNode(cudnn.pyx:118:4)
File 'Nodes.py', line 5845, in analyse_expressions: IfStatNode(cudnn.pyx:130:4)
File 'Nodes.py', line 5891, in analyse_expressions: IfClauseNode(cudnn.pyx:130:7,
is_terminator = True)
File 'Nodes.py', line 430, in analyse_expressions: StatListNode(cudnn.pyx:131:8,
is_terminator = True)
File 'Nodes.py', line 4746, in analyse_expressions: ExprStatNode(cudnn.pyx:133:25)
File 'ExprNodes.py', line 519, in analyse_expressions: SimpleCallNode(cudnn.pyx:133:25,
analysed = True,
use_managed_ref = True)
File 'ExprNodes.py', line 5132, in analyse_types: SimpleCallNode(cudnn.pyx:133:25,
analysed = True,
use_managed_ref = True)
File 'ExprNodes.py', line 5187, in analyse_c_function_call: SimpleCallNode(cudnn.pyx:133:25,
analysed = True,
use_managed_ref = True)
Compiler crash traceback from this point on:
File "/usr/local/lib/python3.5/dist-packages/Cython/Compiler/ExprNodes.py", line 5187, in analyse_c_function_call
[arg.type for arg in args], alternatives, self.pos, env, args)
File "/usr/local/lib/python3.5/dist-packages/Cython/Compiler/PyrexTypes.py", line 4083, in best_match
errors.append((func, "Unable to deduce type parameters for %s given %s" % (pattern.type, actual)))
NameError: name 'pattern' is not defined
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-build-z0vd49d6/cupy/setup.py", line 102, in <module>
'sdist': sdist},
File "/usr/lib/python3.5/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.5/distutils/dist.py", line 955, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.5/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "/usr/local/lib/python3.5/dist-packages/setuptools/command/install.py", line 61, in run
return orig.install.run(self)
File "/usr/lib/python3.5/distutils/command/install.py", line 583, in run
self.run_command('build')
File "/usr/lib/python3.5/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.5/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "/usr/lib/python3.5/distutils/command/build.py", line 135, in run
self.run_command(cmd_name)
File "/usr/lib/python3.5/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/lib/python3.5/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "/tmp/pip-build-z0vd49d6/cupy/cupy_setup_build.py", line 552, in run
cythonize(ext_modules, cupy_setup_options)
File "/tmp/pip-build-z0vd49d6/cupy/cupy_setup_build.py", line 354, in cythonize
compiler_directives=directives, **cythonize_options)
File "/usr/local/lib/python3.5/dist-packages/Cython/Build/Dependencies.py", line 934, in cythonize
cythonize_one(*args)
File "/usr/local/lib/python3.5/dist-packages/Cython/Build/Dependencies.py", line 1056, in cythonize_one
raise CompileError(None, pyx_file)
Cython.Compiler.Errors.CompileError: cupy/cudnn.pyx
----------------------------------------
Command "/usr/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-z0vd49d6/cupy/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" install --record /tmp/pip-5b_xwzzp-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-z0vd49d6/cupy/
|
NameError
|
def svd(a, full_matrices=True, compute_uv=True):
"""Singular Value Decomposition.
Factorizes the matrix ``a`` as ``u * np.diag(s) * v``, where ``u`` and
``v`` are unitary and ``s`` is an one-dimensional array of ``a``'s
singular values.
Args:
a (cupy.ndarray): The input matrix with dimension ``(M, N)``.
full_matrices (bool): If True, it returns u and v with dimensions
``(M, M)`` and ``(N, N)``. Otherwise, the dimensions of u and v
are respectively ``(M, K)`` and ``(K, N)``, where
``K = min(M, N)``.
compute_uv (bool): If True, it only returns singular values.
Returns:
tuple of :class:`cupy.ndarray`:
A tuple of ``(u, s, v)`` such that ``a = u * np.diag(s) * v``.
.. seealso:: :func:`numpy.linalg.svd`
"""
if not cuda.cusolver_enabled:
raise RuntimeError("Current cupy only supports cusolver in CUDA 8.0")
# TODO(Saito): Current implementation only accepts two-dimensional arrays
util._assert_cupy_array(a)
util._assert_rank2(a)
# Cast to float32 or float64
if a.dtype.char == "f" or a.dtype.char == "d":
dtype = a.dtype.char
else:
dtype = numpy.find_common_type((a.dtype.char, "f"), ()).char
# Remark 1: gesvd only supports m >= n (WHAT?)
# Remark 2: gesvd only supports jobu = 'A' and jobvt = 'A'
# Remark 3: gesvd returns matrix U and V^H
# Remark 4: Remark 2 is removed since cuda 8.0 (new!)
n, m = a.shape
# `a` must be copied because xgesvd destroys the matrix
if m >= n:
x = a.astype(dtype, order="C", copy=True)
trans_flag = False
else:
m, n = a.shape
x = a.transpose().astype(dtype, order="C", copy=True)
trans_flag = True
mn = min(m, n)
if compute_uv:
if full_matrices:
u = cupy.empty((m, m), dtype=dtype)
vt = cupy.empty((n, n), dtype=dtype)
else:
u = cupy.empty((mn, m), dtype=dtype)
vt = cupy.empty((mn, n), dtype=dtype)
u_ptr, vt_ptr = u.data.ptr, vt.data.ptr
else:
u_ptr, vt_ptr = 0, 0 # Use nullptr
s = cupy.empty(mn, dtype=dtype)
handle = device.get_cusolver_handle()
dev_info = cupy.empty(1, dtype=numpy.int32)
if compute_uv:
job = ord("A") if full_matrices else ord("S")
else:
job = ord("N")
if dtype == "f":
buffersize = cusolver.sgesvd_bufferSize(handle, m, n)
workspace = cupy.empty(buffersize, dtype=dtype)
cusolver.sgesvd(
handle,
job,
job,
m,
n,
x.data.ptr,
m,
s.data.ptr,
u_ptr,
m,
vt_ptr,
n,
workspace.data.ptr,
buffersize,
0,
dev_info.data.ptr,
)
else: # dtype == 'd'
buffersize = cusolver.dgesvd_bufferSize(handle, m, n)
workspace = cupy.empty(buffersize, dtype=dtype)
cusolver.dgesvd(
handle,
job,
job,
m,
n,
x.data.ptr,
m,
s.data.ptr,
u_ptr,
m,
vt_ptr,
n,
workspace.data.ptr,
buffersize,
0,
dev_info.data.ptr,
)
status = int(dev_info[0])
if status > 0:
raise linalg.LinAlgError("SVD computation does not converge")
elif status < 0:
raise linalg.LinAlgError(
"Parameter error (maybe caused by a bug in cupy.linalg?)"
)
# Note that the returned array may need to be transporsed
# depending on the structure of an input
if compute_uv:
if trans_flag:
return u.transpose(), s, vt.transpose()
else:
return vt, s, u
else:
return s
|
def svd(a, full_matrices=True, compute_uv=True):
"""Singular Value Decomposition.
Factorizes the matrix ``a`` as ``u * np.diag(s) * v``, where ``u`` and
``v`` are unitary and ``s`` is an one-dimensional array of ``a``'s
singular values.
Args:
a (cupy.ndarray): The input matrix with dimension ``(M, N)``.
full_matrices (bool): If True, it returns u and v with dimensions
``(M, M)`` and ``(N, N)``. Otherwise, the dimensions of u and v
are respectively ``(M, K)`` and ``(K, N)``, where
``K = min(M, N)``.
compute_uv (bool): If True, it only returns singular values.
Returns:
tuple of :class:`cupy.ndarray`:
A tuple of ``(u, s, v)`` such that ``a = u * np.diag(s) * v``.
.. seealso:: :func:`numpy.linalg.svd`
"""
if not cuda.cusolver_enabled:
raise RuntimeError("Current cupy only supports cusolver in CUDA 8.0")
# TODO(Saito): Current implementation only accepts two-dimensional arrays
util._assert_cupy_array(a)
util._assert_rank2(a)
# Cast to float32 or float64
if a.dtype.char == "f" or a.dtype.char == "d":
dtype = a.dtype.char
else:
dtype = numpy.find_common_type((a.dtype.char, "f"), ()).char
# Remark 1: gesvd only supports m >= n (WHAT?)
# Remark 2: gesvd only supports jobu = 'A' and jobvt = 'A'
# Remark 3: gesvd returns matrix U and V^H
# Remark 4: Remark 2 is removed since cuda 8.0 (new!)
n, m = a.shape
if m >= n:
x = a.astype(dtype, order="C", copy=False)
trans_flag = False
else:
m, n = a.shape
x = a.transpose().astype(dtype, order="C", copy=False)
trans_flag = True
mn = min(m, n)
if compute_uv:
if full_matrices:
u = cupy.empty((m, m), dtype=dtype)
vt = cupy.empty((n, n), dtype=dtype)
else:
u = cupy.empty((mn, m), dtype=dtype)
vt = cupy.empty((mn, n), dtype=dtype)
u_ptr, vt_ptr = u.data.ptr, vt.data.ptr
else:
u_ptr, vt_ptr = 0, 0 # Use nullptr
s = cupy.empty(mn, dtype=dtype)
handle = device.get_cusolver_handle()
dev_info = cupy.empty(1, dtype=numpy.int32)
if compute_uv:
job = ord("A") if full_matrices else ord("S")
else:
job = ord("N")
if dtype == "f":
buffersize = cusolver.sgesvd_bufferSize(handle, m, n)
workspace = cupy.empty(buffersize, dtype=dtype)
cusolver.sgesvd(
handle,
job,
job,
m,
n,
x.data.ptr,
m,
s.data.ptr,
u_ptr,
m,
vt_ptr,
n,
workspace.data.ptr,
buffersize,
0,
dev_info.data.ptr,
)
else: # dtype == 'd'
buffersize = cusolver.dgesvd_bufferSize(handle, m, n)
workspace = cupy.empty(buffersize, dtype=dtype)
cusolver.dgesvd(
handle,
job,
job,
m,
n,
x.data.ptr,
m,
s.data.ptr,
u_ptr,
m,
vt_ptr,
n,
workspace.data.ptr,
buffersize,
0,
dev_info.data.ptr,
)
status = int(dev_info[0])
if status > 0:
raise linalg.LinAlgError("SVD computation does not converge")
elif status < 0:
raise linalg.LinAlgError(
"Parameter error (maybe caused by a bug in cupy.linalg?)"
)
# Note that the returned array may need to be transporsed
# depending on the structure of an input
if compute_uv:
if trans_flag:
return u.transpose(), s, vt.transpose()
else:
return vt, s, u
else:
return s
|
https://github.com/cupy/cupy/issues/842
|
numpy 1.13.3, order=C
numpy 1.13.3, order=F
scipy 0.19.1, order=C
scipy 0.19.1, order=F
cupy 4.0.0b1, order=C
Traceback (most recent call last):
File "test.py", line 13, in <module>
assert (xp.array(init_x) == x).all()
AssertionError
|
AssertionError
|
def csrmm(a, b, c=None, alpha=1, beta=0, transa=False):
"""Matrix-matrix product for a CSR-matrix and a dense matrix.
.. math::
C = \\alpha o_a(A) B + \\beta C,
where :math:`o_a` is a transpose function when ``transa`` is ``True`` and
is an identity function otherwise.
Args:
a (cupy.sparse.csr): Sparse matrix A.
b (cupy.ndarray): Dense matrix B. It must be F-contiguous.
c (cupy.ndarray or None): Dense matrix C. It must be F-contiguous.
alpha (float): Coefficient for AB.
beta (float): Coefficient for C.
transa (bool): If ``True``, transpose of A is used.
Returns:
cupy.ndarray: Calculated C.
"""
assert a.ndim == b.ndim == 2
assert b.flags.f_contiguous
assert c is None or c.flags.f_contiguous
a_shape = a.shape if not transa else a.shape[::-1]
if a_shape[1] != b.shape[0]:
raise ValueError("dimension mismatch")
handle = device.get_cusparse_handle()
m, k = a_shape
n = b.shape[1]
a, b, c = _cast_common_type(a, b, c)
if c is None:
c = cupy.zeros((m, n), a.dtype, "F")
ldb = k
ldc = m
alpha = numpy.array(alpha, a.dtype).ctypes
beta = numpy.array(beta, a.dtype).ctypes
_call_cusparse(
"csrmm",
a.dtype,
handle,
_transpose_flag(transa),
a.shape[0],
n,
a.shape[1],
a.nnz,
alpha.data,
a._descr.descriptor,
a.data.data.ptr,
a.indptr.data.ptr,
a.indices.data.ptr,
b.data.ptr,
ldb,
beta.data,
c.data.ptr,
ldc,
)
return c
|
def csrmm(a, b, c=None, alpha=1, beta=0, transa=False):
"""Matrix-matrix product for a CSR-matrix and a dense matrix.
.. math::
C = \\alpha o_a(A) B + \\beta C,
where :math:`o_a` is a transpose function when ``transa`` is ``True`` and
is an identity function otherwise.
Args:
a (cupy.sparse.csr): Sparse matrix A.
b (cupy.ndarray): Dense matrix B. It must be F-contiguous.
c (cupy.ndarray or None): Dense matrix C. It must be F-contiguous.
alpha (float): Coefficient for AB.
beta (float): Coefficient for C.
transa (bool): If ``True``, transpose of A is used.
Returns:
cupy.ndarray: Calculated C.
"""
assert a.ndim == b.ndim == 2
assert b.flags.f_contiguous
assert c is None or c.flags.f_contiguous
a_shape = a.shape if not transa else a.shape[::-1]
if a_shape[1] != b.shape[0]:
raise ValueError("dimension mismatch")
handle = device.get_cusparse_handle()
m, k = a_shape
n = b.shape[1]
a, b, c = _cast_common_type(a, b, c)
if c is None:
c = cupy.zeros((m, n), a.dtype, "F")
ldb = k
ldc = m
alpha = numpy.array(alpha, a.dtype).ctypes
beta = numpy.array(beta, a.dtype).ctypes
_call_cusparse(
"csrmm",
a.dtype,
handle,
_transpose_flag(transa),
m,
n,
k,
a.nnz,
alpha.data,
a._descr.descriptor,
a.data.data.ptr,
a.indptr.data.ptr,
a.indices.data.ptr,
b.data.ptr,
ldb,
beta.data,
c.data.ptr,
ldc,
)
return c
|
https://github.com/cupy/cupy/issues/552
|
Traceback (most recent call last):
File "main.py", line 23, in <module>
main()
File "main.py", line 20, in main
gx = cupy.cusparse.csrmm2(W, y.T, transa=True).T
File "/home/tommi/.pyenv/versions/cupy-dev/lib/python3.4/site-packages/cupy-2.0.0rc1-py3.4-linux-x86_64.egg/cupy/cusparse.py", line 205, in csrmm2
b.data.ptr, ldb, beta.data, c.data.ptr, ldc)
File "/home/tommi/.pyenv/versions/cupy-dev/lib/python3.4/site-packages/cupy-2.0.0rc1-py3.4-linux-x86_64.egg/cupy/cusparse.py", line 52, in _call_cusparse
return f(*args)
File "cupy/cuda/cusparse.pyx", line 347, in cupy.cuda.cusparse.scsrmm2
File "cupy/cuda/cusparse.pyx", line 357, in cupy.cuda.cusparse.scsrmm2
File "cupy/cuda/cusparse.pyx", line 229, in cupy.cuda.cusparse.check_status
cupy.cuda.cusparse.CuSparseError: CUSPARSE_STATUS_INVALID_VALUE
|
cupy.cuda.cusparse.CuSparseError
|
def csrmm2(a, b, c=None, alpha=1.0, beta=0.0, transa=False, transb=False):
"""Matrix-matrix product for a CSR-matrix and a dense matrix.
.. math::
C = \\alpha o_a(A) o_b(B) + \\beta C,
where :math:`o_a` and :math:`o_b` are transpose functions when ``transa``
and ``tranb`` are ``True`` respectively. And they are identity functions
otherwise.
It is forbidden that both ``transa`` and ``transb`` are ``True`` in
cuSPARSE specification.
Args:
a (cupy.sparse.csr): Sparse matrix A.
b (cupy.ndarray): Dense matrix B. It must be F-contiguous.
c (cupy.ndarray or None): Dense matrix C. It must be F-contiguous.
alpha (float): Coefficient for AB.
beta (float): Coefficient for C.
transa (bool): If ``True``, transpose of A is used.
transb (bool): If ``True``, transpose of B is used.
Returns:
cupy.ndarray: Calculated C.
"""
assert a.ndim == b.ndim == 2
assert b.flags.f_contiguous
assert c is None or c.flags.f_contiguous
assert not (transa and transb)
a_shape = a.shape if not transa else a.shape[::-1]
b_shape = b.shape if not transb else b.shape[::-1]
if a_shape[1] != b_shape[0]:
raise ValueError("dimension mismatch")
handle = device.get_cusparse_handle()
m, k = a_shape
n = b_shape[1]
a, b, c = _cast_common_type(a, b, c)
if c is None:
c = cupy.zeros((m, n), a.dtype, "F")
ldb = b.shape[0]
ldc = c.shape[0]
op_a = _transpose_flag(transa)
op_b = _transpose_flag(transb)
alpha = numpy.array(alpha, a.dtype).ctypes
beta = numpy.array(beta, a.dtype).ctypes
_call_cusparse(
"csrmm2",
a.dtype,
handle,
op_a,
op_b,
a.shape[0],
n,
a.shape[1],
a.nnz,
alpha.data,
a._descr.descriptor,
a.data.data.ptr,
a.indptr.data.ptr,
a.indices.data.ptr,
b.data.ptr,
ldb,
beta.data,
c.data.ptr,
ldc,
)
return c
|
def csrmm2(a, b, c=None, alpha=1.0, beta=0.0, transa=False, transb=False):
"""Matrix-matrix product for a CSR-matrix and a dense matrix.
.. math::
C = \\alpha o_a(A) o_b(B) + \\beta C,
where :math:`o_a` and :math:`o_b` are transpose functions when ``transa``
and ``tranb`` are ``True`` respectively. And they are identity functions
otherwise.
Args:
a (cupy.sparse.csr): Sparse matrix A.
b (cupy.ndarray): Dense matrix B. It must be F-contiguous.
c (cupy.ndarray or None): Dense matrix C. It must be F-contiguous.
alpha (float): Coefficient for AB.
beta (float): Coefficient for C.
transa (bool): If ``True``, transpose of A is used.
transb (bool): If ``True``, transpose of B is used.
Returns:
cupy.ndarray: Calculated C.
"""
assert a.ndim == b.ndim == 2
assert b.flags.f_contiguous
assert c is None or c.flags.f_contiguous
a_shape = a.shape if not transa else a.shape[::-1]
b_shape = b.shape if not transb else b.shape[::-1]
if a_shape[1] != b.shape[0]:
raise ValueError("dimension mismatch")
handle = device.get_cusparse_handle()
m, k = a_shape
n = b_shape[1]
a, b, c = _cast_common_type(a, b, c)
if c is None:
c = cupy.zeros((m, n), a.dtype, "F")
ldb = b.shape[0]
ldc = c.shape[0]
op_a = _transpose_flag(transa)
op_b = _transpose_flag(transb)
alpha = numpy.array(alpha, a.dtype).ctypes
beta = numpy.array(beta, a.dtype).ctypes
_call_cusparse(
"csrmm2",
a.dtype,
handle,
op_a,
op_b,
m,
n,
k,
a.nnz,
alpha.data,
a._descr.descriptor,
a.data.data.ptr,
a.indptr.data.ptr,
a.indices.data.ptr,
b.data.ptr,
ldb,
beta.data,
c.data.ptr,
ldc,
)
return c
|
https://github.com/cupy/cupy/issues/552
|
Traceback (most recent call last):
File "main.py", line 23, in <module>
main()
File "main.py", line 20, in main
gx = cupy.cusparse.csrmm2(W, y.T, transa=True).T
File "/home/tommi/.pyenv/versions/cupy-dev/lib/python3.4/site-packages/cupy-2.0.0rc1-py3.4-linux-x86_64.egg/cupy/cusparse.py", line 205, in csrmm2
b.data.ptr, ldb, beta.data, c.data.ptr, ldc)
File "/home/tommi/.pyenv/versions/cupy-dev/lib/python3.4/site-packages/cupy-2.0.0rc1-py3.4-linux-x86_64.egg/cupy/cusparse.py", line 52, in _call_cusparse
return f(*args)
File "cupy/cuda/cusparse.pyx", line 347, in cupy.cuda.cusparse.scsrmm2
File "cupy/cuda/cusparse.pyx", line 357, in cupy.cuda.cusparse.scsrmm2
File "cupy/cuda/cusparse.pyx", line 229, in cupy.cuda.cusparse.check_status
cupy.cuda.cusparse.CuSparseError: CUSPARSE_STATUS_INVALID_VALUE
|
cupy.cuda.cusparse.CuSparseError
|
def csrmv(a, x, y=None, alpha=1, beta=0, transa=False):
"""Matrix-vector product for a CSR-matrix and a dense vector.
.. math::
y = \\alpha * o_a(A) x + \\beta y,
where :math:`o_a` is a transpose function when ``transa`` is ``True`` and
is an identity function otherwise.
Args:
a (cupy.cusparse.csr_matrix): Matrix A.
x (cupy.ndarray): Vector x.
y (cupy.ndarray or None): Vector y. It must be F-contiguous.
alpha (float): Coefficient for x.
beta (float): Coefficient for y.
transa (bool): If ``True``, transpose of ``A`` is used.
Returns:
cupy.ndarray: Calculated ``y``.
"""
assert y is None or y.flags.f_contiguous
a_shape = a.shape if not transa else a.shape[::-1]
if a_shape[1] != len(x):
raise ValueError("dimension mismatch")
handle = device.get_cusparse_handle()
m, n = a_shape
a, x, y = _cast_common_type(a, x, y)
dtype = a.dtype
if y is None:
y = cupy.zeros(m, dtype)
alpha = numpy.array(alpha, dtype).ctypes
beta = numpy.array(beta, dtype).ctypes
_call_cusparse(
"csrmv",
dtype,
handle,
_transpose_flag(transa),
a.shape[0],
a.shape[1],
a.nnz,
alpha.data,
a._descr.descriptor,
a.data.data.ptr,
a.indptr.data.ptr,
a.indices.data.ptr,
x.data.ptr,
beta.data,
y.data.ptr,
)
return y
|
def csrmv(a, x, y=None, alpha=1, beta=0, transa=False):
"""Matrix-vector product for a CSR-matrix and a dense vector.
.. math::
y = \\alpha * o_a(A) x + \\beta y,
where :math:`o_a` is a transpose function when ``transa`` is ``True`` and
is an identity function otherwise.
Args:
a (cupy.cusparse.csr_matrix): Matrix A.
x (cupy.ndarray): Vector x.
y (cupy.ndarray or None): Vector y. It must be F-contiguous.
alpha (float): Coefficient for x.
beta (float): Coefficient for y.
transa (bool): If ``True``, transpose of ``A`` is used.
Returns:
cupy.ndarray: Calculated ``y``.
"""
if a.shape[1] != len(x):
raise ValueError("dimension mismatch")
assert y is None or y.flags.f_contiguous
a_shape = a.shape if not transa else a.shape[::-1]
handle = device.get_cusparse_handle()
m, n = a_shape
a, x, y = _cast_common_type(a, x, y)
dtype = a.dtype
if y is None:
y = cupy.zeros(m, dtype)
alpha = numpy.array(alpha, dtype).ctypes
beta = numpy.array(beta, dtype).ctypes
_call_cusparse(
"csrmv",
dtype,
handle,
_transpose_flag(transa),
m,
n,
a.nnz,
alpha.data,
a._descr.descriptor,
a.data.data.ptr,
a.indptr.data.ptr,
a.indices.data.ptr,
x.data.ptr,
beta.data,
y.data.ptr,
)
return y
|
https://github.com/cupy/cupy/issues/552
|
Traceback (most recent call last):
File "main.py", line 23, in <module>
main()
File "main.py", line 20, in main
gx = cupy.cusparse.csrmm2(W, y.T, transa=True).T
File "/home/tommi/.pyenv/versions/cupy-dev/lib/python3.4/site-packages/cupy-2.0.0rc1-py3.4-linux-x86_64.egg/cupy/cusparse.py", line 205, in csrmm2
b.data.ptr, ldb, beta.data, c.data.ptr, ldc)
File "/home/tommi/.pyenv/versions/cupy-dev/lib/python3.4/site-packages/cupy-2.0.0rc1-py3.4-linux-x86_64.egg/cupy/cusparse.py", line 52, in _call_cusparse
return f(*args)
File "cupy/cuda/cusparse.pyx", line 347, in cupy.cuda.cusparse.scsrmm2
File "cupy/cuda/cusparse.pyx", line 357, in cupy.cuda.cusparse.scsrmm2
File "cupy/cuda/cusparse.pyx", line 229, in cupy.cuda.cusparse.check_status
cupy.cuda.cusparse.CuSparseError: CUSPARSE_STATUS_INVALID_VALUE
|
cupy.cuda.cusparse.CuSparseError
|
def all(a, axis=None, out=None, keepdims=False):
assert isinstance(a, cupy.ndarray)
return a.all(axis=axis, out=out, keepdims=keepdims)
|
def all(a, axis=None, out=None, keepdims=False):
# TODO(okuta): check type
return a.all(axis=axis, out=out, keepdims=keepdims)
|
https://github.com/cupy/cupy/issues/266
|
np.empty((0, 1)).argmax(axis=1) # array([], dtype=int64)
cupy.empty((0, 1)).argmax(axis=1)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-a5737d72bcba> in <module>()
----> 1 cupy.empty((0, 1)).argmax(axis=1)
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17701)()
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17556)()
cupy/core/reduction.pxi in cupy.core.core.simple_reduction_function.__call__ (cupy/core/core.cpp:52697)()
ValueError: zero-size array to reduction operation cupy_argmax which has no identity
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.