after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __init__(self, index, sort=False, processed=False):
self.index = index
if not processed:
for fkey, info in iteritems(index.copy()):
if fkey.endswith("]"):
continue
for fstr in chain(
info.get("features", "").split(),
info.get("track_features", "").split(),
track_features or (),
):
self.add_feature(fstr, group=False)
for fstr in iterkeys(info.get("with_features_depends", {})):
index["%s[%s]" % (fkey, fstr)] = info
self.add_feature(fstr, group=False)
groups = {}
trackers = {}
installed = set()
for fkey, info in iteritems(index):
groups.setdefault(info["name"], []).append(fkey)
for feat in info.get("track_features", "").split():
trackers.setdefault(feat, []).append(fkey)
if "link" in info:
installed.add(fkey)
self.groups = groups
self.installed = installed
self.trackers = trackers
self.find_matches_ = {}
self.ms_depends_ = {}
if sort:
for name, group in iteritems(groups):
groups[name] = sorted(group, key=self.version_key, reverse=True)
|
def __init__(self, index, sort=False, processed=False):
self.index = index
if not processed:
for fkey, info in iteritems(index.copy()):
for fstr in chain(
info.get("features", "").split(),
info.get("track_features", "").split(),
track_features or (),
):
self.add_feature(fstr, group=False)
for fstr in iterkeys(info.get("with_features_depends", {})):
index["%s[%s]" % (fkey, fstr)] = info
self.add_feature(fstr, group=False)
groups = {}
trackers = {}
installed = set()
for fkey, info in iteritems(index):
groups.setdefault(info["name"], []).append(fkey)
for feat in info.get("track_features", "").split():
trackers.setdefault(feat, []).append(fkey)
if "link" in info:
installed.add(fkey)
self.groups = groups
self.installed = installed
self.trackers = trackers
self.find_matches_ = {}
self.ms_depends_ = {}
if sort:
for name, group in iteritems(groups):
groups[name] = sorted(group, key=self.version_key, reverse=True)
|
https://github.com/conda/conda/issues/2559
|
$ conda update conda-build
...
Traceback (most recent call last):
File "/Users/ilan/python/bin/conda", line 6, in <module>
sys.exit(conda.cli.main())
File "/Users/ilan/conda/conda/cli/main.py", line 120, in main
args_func(args, p)
File "/Users/ilan/conda/conda/cli/main.py", line 127, in args_func
args.func(args, p)
File "/Users/ilan/conda/conda/cli/main_update.py", line 64, in execute
install.install(args, parser, 'update')
File "/Users/ilan/conda/conda/cli/install.py", line 381, in install
plan.display_actions(actions, index, show_channel_urls=args.show_channel_urls)
File "/Users/ilan/conda/conda/plan.py", line 66, in display_actions
info = index[dist + '.tar.bz2']
KeyError: u'anaconda-2.4.0-np110py27_0.tar.bz2[m.tar.bz2'
|
KeyError
|
def add_cached_package(pdir, url, overwrite=False, urlstxt=False):
"""
Adds a new package to the cache. The URL is used to determine the
package filename and channel, and the directory pdir is scanned for
both a compressed and an extracted version of that package. If
urlstxt=True, this URL will be appended to the urls.txt file in the
cache, so that subsequent runs will correctly identify the package.
"""
package_cache()
if "/" in url:
dist = url.rsplit("/", 1)[-1]
else:
dist = url
url = None
if dist.endswith(".tar.bz2"):
fname = dist
dist = dist[:-8]
else:
fname = dist + ".tar.bz2"
xpkg = join(pdir, fname)
if not overwrite and xpkg in fname_table_:
return
if not isfile(xpkg):
xpkg = None
xdir = join(pdir, dist)
if not (
isdir(xdir)
and isfile(join(xdir, "info", "files"))
and isfile(join(xdir, "info", "index.json"))
):
xdir = None
if not (xpkg or xdir):
return
if url:
url = remove_binstar_tokens(url)
_, schannel = url_channel(url)
prefix = "" if schannel == "defaults" else schannel + "::"
xkey = xpkg or (xdir + ".tar.bz2")
fname_table_[xkey] = fname_table_[url_path(xkey)] = prefix
fkey = prefix + dist
rec = package_cache_.get(fkey)
if rec is None:
rec = package_cache_[fkey] = dict(files=[], dirs=[], urls=[])
if url and url not in rec["urls"]:
rec["urls"].append(url)
if xpkg and xpkg not in rec["files"]:
rec["files"].append(xpkg)
if xdir and xdir not in rec["dirs"]:
rec["dirs"].append(xdir)
if urlstxt:
try:
with open(join(pdir, "urls.txt"), "a") as fa:
fa.write("%s\n" % url)
except IOError:
pass
|
def add_cached_package(pdir, url, overwrite=False, urlstxt=False):
"""
Adds a new package to the cache. The URL is used to determine the
package filename and channel, and the directory pdir is scanned for
both a compressed and an extracted version of that package. If
urlstxt=True, this URL will be appended to the urls.txt file in the
cache, so that subsequent runs will correctly identify the package.
"""
package_cache()
if "/" in url:
dist = url.rsplit("/", 1)[-1]
else:
dist = url
url = None
if dist.endswith(".tar.bz2"):
fname = dist
dist = dist[:-8]
else:
fname = dist + ".tar.bz2"
xpkg = join(pdir, fname)
if not overwrite and xpkg in fname_table_:
return
if not isfile(xpkg):
xpkg = None
xdir = join(pdir, dist)
if not (
isdir(xdir)
and isfile(join(xdir, "info", "files"))
and isfile(join(xdir, "info", "index.json"))
):
xdir = None
if not (xpkg or xdir):
return
url = remove_binstar_tokens(url)
_, schannel = url_channel(url)
prefix = "" if schannel == "defaults" else schannel + "::"
xkey = xpkg or (xdir + ".tar.bz2")
fname_table_[xkey] = fname_table_[url_path(xkey)] = prefix
fkey = prefix + dist
rec = package_cache_.get(fkey)
if rec is None:
rec = package_cache_[fkey] = dict(files=[], dirs=[], urls=[])
if url and url not in rec["urls"]:
rec["urls"].append(url)
if xpkg and xpkg not in rec["files"]:
rec["files"].append(xpkg)
if xdir and xdir not in rec["dirs"]:
rec["dirs"].append(xdir)
if urlstxt:
try:
with open(join(pdir, "urls.txt"), "a") as fa:
fa.write("%s\n" % url)
except IOError:
pass
|
https://github.com/conda/conda/issues/2531
|
$ conda search
...
Traceback (most recent call last):
File "/Users/ilan/python/bin/conda", line 6, in <module>
sys.exit(conda.cli.main())
File "/Users/ilan/conda/conda/cli/main.py", line 120, in main
args_func(args, p)
File "/Users/ilan/conda/conda/cli/main.py", line 127, in args_func
args.func(args, p)
File "/Users/ilan/conda/conda/cli/main_search.py", line 118, in execute
execute_search(args, parser)
File "/Users/ilan/conda/conda/cli/main_search.py", line 155, in execute_search
extracted = conda.install.extracted()
File "/Users/ilan/conda/conda/install.py", line 749, in extracted
return set(dist for dist, rec in package_cache().items() if rec['dirs'])
File "/Users/ilan/conda/conda/install.py", line 673, in package_cache
add_cached_package(pdir, fn)
File "/Users/ilan/conda/conda/install.py", line 627, in add_cached_package
url = remove_binstar_tokens(url)
File "/Users/ilan/conda/conda/config.py", line 258, in remove_binstar_tokens
return BINSTAR_TOKEN_PAT.sub(r'\1', url)
TypeError: expected string or buffer
|
TypeError
|
def get_index(
channel_urls=(),
prepend=True,
platform=None,
use_local=False,
use_cache=False,
unknown=False,
offline=False,
prefix=None,
):
"""
Return the index of packages available on the channels
If prepend=False, only the channels passed in as arguments are used.
If platform=None, then the current platform is used.
If prefix is supplied, then the packages installed in that prefix are added.
"""
if use_local:
channel_urls = ["local"] + list(channel_urls)
channel_urls = normalize_urls(channel_urls, platform, offline)
if prepend:
channel_urls.extend(get_channel_urls(platform, offline))
channel_urls = prioritize_channels(channel_urls)
index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
if prefix:
priorities = {c: p for c, p in itervalues(channel_urls)}
for dist, info in iteritems(install.linked_data(prefix)):
fn = info["fn"]
schannel = info["schannel"]
prefix = "" if schannel == "defaults" else schannel + "::"
priority = priorities.get(schannel, 0)
key = prefix + fn
if key in index:
# Copy the link information so the resolver knows this is installed
index[key]["link"] = info.get("link")
else:
# only if the package in not in the repodata, use local
# conda-meta (with 'depends' defaulting to [])
info.setdefault("depends", [])
info["priority"] = priority
index[key] = info
return index
|
def get_index(
channel_urls=(),
prepend=True,
platform=None,
use_local=False,
use_cache=False,
unknown=False,
offline=False,
prefix=None,
):
"""
Return the index of packages available on the channels
If prepend=False, only the channels passed in as arguments are used.
If platform=None, then the current platform is used.
If prefix is supplied, then the packages installed in that prefix are added.
"""
if use_local:
channel_urls = ["local"] + list(channel_urls)
channel_urls = normalize_urls(channel_urls, platform, offline)
if prepend:
pri0 = (
max(itervalues(channel_urls), key=itemgetter(1))[1] if channel_urls else 0
)
for url, rec in iteritems(get_channel_urls(platform, offline)):
channel_urls[url] = (rec[0], rec[1] + pri0)
index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
if prefix:
priorities = {c: p for c, p in itervalues(channel_urls)}
for dist, info in iteritems(install.linked_data(prefix)):
fn = info["fn"]
schannel = info["schannel"]
prefix = "" if schannel == "defaults" else schannel + "::"
priority = priorities.get(schannel, 0)
key = prefix + fn
if key in index:
# Copy the link information so the resolver knows this is installed
index[key]["link"] = info.get("link")
else:
# only if the package in not in the repodata, use local
# conda-meta (with 'depends' defaulting to [])
info.setdefault("depends", [])
info["priority"] = priority
index[key] = info
return index
|
https://github.com/conda/conda/issues/2513
|
Fetching package metadata ....Traceback (most recent call last):
File "/Users/ilan/python/bin/constructor", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/ilan/constructor/bin/constructor", line 6, in <module>
sys.exit(main())
File "/Users/ilan/constructor/constructor/main.py", line 146, in main
verbose=opts.verbose)
File "/Users/ilan/constructor/constructor/main.py", line 81, in main_build
fcp.main(info, verbose=verbose)
File "/Users/ilan/constructor/constructor/fcp.py", line 170, in main
for url in info['channels']))
File "/Users/ilan/python/lib/python2.7/site-packages/conda/fetch.py", line 276, in fetch_index
url_s, priority = channel_urls[channel]
TypeError: 'int' object is not iterable
|
TypeError
|
def execute(args, parser):
import os
from os.path import dirname
import conda
from conda.config import (
root_dir,
get_channel_urls,
subdir,
pkgs_dirs,
root_writable,
envs_dirs,
default_prefix,
rc_path,
user_rc_path,
sys_rc_path,
foreign,
hide_binstar_tokens,
platform,
)
from conda.resolve import Resolve
from conda.cli.main_init import is_initialized
from conda.api import get_index
if args.root:
if args.json:
common.stdout_json({"root_prefix": root_dir})
else:
print(root_dir)
return
if args.packages:
index = get_index()
r = Resolve(index)
if args.json:
common.stdout_json(
{
package: [
p._asdict()
for p in sorted(r.get_pkgs(common.arg2spec(package)))
]
for package in args.packages
}
)
else:
for package in args.packages:
versions = r.get_pkgs(common.arg2spec(package))
for pkg in sorted(versions):
pretty_package(pkg)
return
options = "envs", "system", "license"
try:
import requests
requests_version = requests.__version__
except ImportError:
requests_version = "could not import"
except Exception as e:
requests_version = "Error %s" % e
try:
import conda_build
except ImportError:
conda_build_version = "not installed"
except Exception as e:
conda_build_version = "Error %s" % e
else:
conda_build_version = conda_build.__version__
channels = get_channel_urls()
info_dict = dict(
platform=subdir,
conda_version=conda.__version__,
conda_build_version=conda_build_version,
root_prefix=root_dir,
root_writable=root_writable,
pkgs_dirs=pkgs_dirs,
envs_dirs=envs_dirs,
default_prefix=default_prefix,
channels=channels,
rc_path=rc_path,
user_rc_path=user_rc_path,
sys_rc_path=sys_rc_path,
is_foreign=bool(foreign),
envs=[],
python_version=".".join(map(str, sys.version_info)),
requests_version=requests_version,
)
if args.unsafe_channels:
if not args.json:
print("\n".join(info_dict["channels"]))
else:
print(json.dumps({"channels": info_dict["channels"]}))
return 0
else:
info_dict["channels"] = [hide_binstar_tokens(c) for c in info_dict["channels"]]
if args.all or args.json:
for option in options:
setattr(args, option, True)
if args.all or all(not getattr(args, opt) for opt in options):
for key in "pkgs_dirs", "envs_dirs", "channels":
info_dict["_" + key] = ("\n" + 24 * " ").join(info_dict[key])
info_dict["_rtwro"] = "writable" if info_dict["root_writable"] else "read only"
print(
"""\
Current conda install:
platform : %(platform)s
conda version : %(conda_version)s
conda-build version : %(conda_build_version)s
python version : %(python_version)s
requests version : %(requests_version)s
root environment : %(root_prefix)s (%(_rtwro)s)
default environment : %(default_prefix)s
envs directories : %(_envs_dirs)s
package cache : %(_pkgs_dirs)s
channel URLs : %(_channels)s
config file : %(rc_path)s
is foreign system : %(is_foreign)s
"""
% info_dict
)
if not is_initialized():
print(
"""\
# NOTE:
# root directory '%s' is uninitialized"""
% root_dir
)
if args.envs:
common.handle_envs_list(info_dict["envs"], not args.json)
if args.system and not args.json:
from conda.cli.find_commands import find_commands, find_executable
print("sys.version: %s..." % (sys.version[:40]))
print("sys.prefix: %s" % sys.prefix)
print("sys.executable: %s" % sys.executable)
print("conda location: %s" % dirname(conda.__file__))
for cmd in sorted(set(find_commands() + ["build"])):
print("conda-%s: %s" % (cmd, find_executable("conda-" + cmd)))
print("user site dirs: ", end="")
site_dirs = get_user_site()
if site_dirs:
print(site_dirs[0])
else:
print()
for site_dir in site_dirs[1:]:
print(" %s" % site_dir)
print()
evars = [
"PATH",
"PYTHONPATH",
"PYTHONHOME",
"CONDA_DEFAULT_ENV",
"CIO_TEST",
"CONDA_ENVS_PATH",
]
if platform == "linux":
evars.append("LD_LIBRARY_PATH")
elif platform == "osx":
evars.append("DYLD_LIBRARY_PATH")
for ev in sorted(evars):
print("%s: %s" % (ev, os.getenv(ev, "<not set>")))
print()
if args.license and not args.json:
try:
from _license import show_info
show_info()
except ImportError:
print("""\
WARNING: could not import _license.show_info
# try:
# $ conda install -n root _license""")
if args.json:
common.stdout_json(info_dict)
|
def execute(args, parser):
import os
from os.path import dirname
import conda
from conda.config import (
root_dir,
get_channel_urls,
subdir,
pkgs_dirs,
root_writable,
envs_dirs,
default_prefix,
rc_path,
user_rc_path,
sys_rc_path,
foreign,
hide_binstar_tokens,
platform,
)
from conda.resolve import Resolve
from conda.cli.main_init import is_initialized
from conda.api import get_index
if args.root:
if args.json:
common.stdout_json({"root_prefix": root_dir})
else:
print(root_dir)
return
if args.packages:
index = get_index()
r = Resolve(index)
if args.json:
common.stdout_json(
{
package: [
p._asdict()
for p in sorted(r.get_pkgs(common.arg2spec(package)))
]
for package in args.packages
}
)
else:
for package in args.packages:
versions = r.get_pkgs(common.arg2spec(package))
for pkg in sorted(versions):
pretty_package(pkg)
return
options = "envs", "system", "license"
try:
import requests
requests_version = requests.__version__
except ImportError:
requests_version = "could not import"
except Exception as e:
requests_version = "Error %s" % e
try:
import conda_build
except ImportError:
conda_build_version = "not installed"
except Exception as e:
conda_build_version = "Error %s" % e
else:
conda_build_version = conda_build.__version__
# this is a hack associated with channel weight until we get the package cache reworked
# in a future release
# for now, just ordering the channels for display in a semi-plausible way
d = defaultdict(list)
any(d[v[1]].append(k) for k, v in iteritems(get_channel_urls()))
channels = list(chain.from_iterable(d[q] for q in sorted(d, reverse=True)))
info_dict = dict(
platform=subdir,
conda_version=conda.__version__,
conda_build_version=conda_build_version,
root_prefix=root_dir,
root_writable=root_writable,
pkgs_dirs=pkgs_dirs,
envs_dirs=envs_dirs,
default_prefix=default_prefix,
channels=channels,
rc_path=rc_path,
user_rc_path=user_rc_path,
sys_rc_path=sys_rc_path,
is_foreign=bool(foreign),
envs=[],
python_version=".".join(map(str, sys.version_info)),
requests_version=requests_version,
)
if args.unsafe_channels:
if not args.json:
print("\n".join(info_dict["channels"]))
else:
print(json.dumps({"channels": info_dict["channels"]}))
return 0
else:
info_dict["channels"] = [hide_binstar_tokens(c) for c in info_dict["channels"]]
if args.all or args.json:
for option in options:
setattr(args, option, True)
if args.all or all(not getattr(args, opt) for opt in options):
for key in "pkgs_dirs", "envs_dirs", "channels":
info_dict["_" + key] = ("\n" + 24 * " ").join(info_dict[key])
info_dict["_rtwro"] = "writable" if info_dict["root_writable"] else "read only"
print(
"""\
Current conda install:
platform : %(platform)s
conda version : %(conda_version)s
conda-build version : %(conda_build_version)s
python version : %(python_version)s
requests version : %(requests_version)s
root environment : %(root_prefix)s (%(_rtwro)s)
default environment : %(default_prefix)s
envs directories : %(_envs_dirs)s
package cache : %(_pkgs_dirs)s
channel URLs : %(_channels)s
config file : %(rc_path)s
is foreign system : %(is_foreign)s
"""
% info_dict
)
if not is_initialized():
print(
"""\
# NOTE:
# root directory '%s' is uninitialized"""
% root_dir
)
if args.envs:
common.handle_envs_list(info_dict["envs"], not args.json)
if args.system and not args.json:
from conda.cli.find_commands import find_commands, find_executable
print("sys.version: %s..." % (sys.version[:40]))
print("sys.prefix: %s" % sys.prefix)
print("sys.executable: %s" % sys.executable)
print("conda location: %s" % dirname(conda.__file__))
for cmd in sorted(set(find_commands() + ["build"])):
print("conda-%s: %s" % (cmd, find_executable("conda-" + cmd)))
print("user site dirs: ", end="")
site_dirs = get_user_site()
if site_dirs:
print(site_dirs[0])
else:
print()
for site_dir in site_dirs[1:]:
print(" %s" % site_dir)
print()
evars = [
"PATH",
"PYTHONPATH",
"PYTHONHOME",
"CONDA_DEFAULT_ENV",
"CIO_TEST",
"CONDA_ENVS_PATH",
]
if platform == "linux":
evars.append("LD_LIBRARY_PATH")
elif platform == "osx":
evars.append("DYLD_LIBRARY_PATH")
for ev in sorted(evars):
print("%s: %s" % (ev, os.getenv(ev, "<not set>")))
print()
if args.license and not args.json:
try:
from _license import show_info
show_info()
except ImportError:
print("""\
WARNING: could not import _license.show_info
# try:
# $ conda install -n root _license""")
if args.json:
common.stdout_json(info_dict)
|
https://github.com/conda/conda/issues/2513
|
Fetching package metadata ....Traceback (most recent call last):
File "/Users/ilan/python/bin/constructor", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/ilan/constructor/bin/constructor", line 6, in <module>
sys.exit(main())
File "/Users/ilan/constructor/constructor/main.py", line 146, in main
verbose=opts.verbose)
File "/Users/ilan/constructor/constructor/main.py", line 81, in main_build
fcp.main(info, verbose=verbose)
File "/Users/ilan/constructor/constructor/fcp.py", line 170, in main
for url in info['channels']))
File "/Users/ilan/python/lib/python2.7/site-packages/conda/fetch.py", line 276, in fetch_index
url_s, priority = channel_urls[channel]
TypeError: 'int' object is not iterable
|
TypeError
|
def normalize_urls(urls, platform=None, offline_only=False):
defaults = tuple(x.rstrip("/") + "/" for x in get_default_urls())
alias = binstar_channel_alias(channel_alias)
newurls = []
while urls:
url = urls[0]
urls = urls[1:]
if url == "system" and rc_path:
urls = get_rc_urls() + urls
continue
elif url in ("defaults", "system"):
t_urls = defaults
elif url == "local":
t_urls = get_local_urls()
else:
t_urls = [url]
for url0 in t_urls:
url0 = url0.rstrip("/")
if not is_url(url0):
url0 = alias + url0
if offline_only and not url0.startswith("file:"):
continue
for plat in (platform or subdir, "noarch"):
newurls.append("%s/%s/" % (url0, plat))
return newurls
|
def normalize_urls(urls, platform=None, offline_only=False):
platform = platform or subdir
defaults = tuple(x.rstrip("/") + "/" for x in get_default_urls())
alias = binstar_channel_alias(channel_alias)
def normalize_(url):
url = url.rstrip("/")
if is_url(url):
url_s = canonical_channel_name(url, True)
else:
url_s = url
url = alias + url
return url_s, url
newurls = OrderedDict()
priority = 0
while urls:
url = urls[0]
urls = urls[1:]
if url == "system" and rc_path:
urls = get_rc_urls() + urls
continue
elif url in ("defaults", "system"):
t_urls = defaults
elif url == "local":
t_urls = get_local_urls()
else:
t_urls = [url]
priority += 1
for url0 in t_urls:
url_s, url0 = normalize_(url0)
if offline_only and not url0.startswith("file:"):
continue
for plat in (platform, "noarch"):
newurls.setdefault("%s/%s/" % (url0, plat), (url_s, priority))
return newurls
|
https://github.com/conda/conda/issues/2513
|
Fetching package metadata ....Traceback (most recent call last):
File "/Users/ilan/python/bin/constructor", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/ilan/constructor/bin/constructor", line 6, in <module>
sys.exit(main())
File "/Users/ilan/constructor/constructor/main.py", line 146, in main
verbose=opts.verbose)
File "/Users/ilan/constructor/constructor/main.py", line 81, in main_build
fcp.main(info, verbose=verbose)
File "/Users/ilan/constructor/constructor/fcp.py", line 170, in main
for url in info['channels']))
File "/Users/ilan/python/lib/python2.7/site-packages/conda/fetch.py", line 276, in fetch_index
url_s, priority = channel_urls[channel]
TypeError: 'int' object is not iterable
|
TypeError
|
def canonical_channel_name(channel):
if channel is None:
return "<unknown>"
channel = remove_binstar_tokens(channel).rstrip("/")
if any(channel.startswith(i) for i in get_default_urls()):
return "defaults"
elif any(channel.startswith(i) for i in get_local_urls(clear_cache=False)):
return "local"
elif channel.startswith("http://filer/"):
return "filer"
elif channel.startswith(channel_alias):
return channel.split(channel_alias, 1)[1]
elif channel.startswith("http:/"):
channel2 = "https" + channel[4:]
channel3 = canonical_channel_name(channel2)
return channel3 if channel3 != channel2 else channel
else:
return channel
|
def canonical_channel_name(channel, hide=True, no_unknown=False):
if channel is None:
return "defaults" if no_unknown else "<unknown>"
channel = remove_binstar_tokens(channel).rstrip("/")
if any(channel.startswith(i) for i in get_default_urls()):
return "defaults"
elif any(channel.startswith(i) for i in get_local_urls(clear_cache=False)):
return "local"
elif channel.startswith("http://filer/"):
return "filer"
elif channel.startswith(channel_alias):
return channel.split(channel_alias, 1)[1]
elif channel.startswith("http:/"):
channel2 = "https" + channel[4:]
channel3 = canonical_channel_name(channel2, hide, no_unknown)
return channel3 if channel3 != channel2 else channel
else:
return channel
|
https://github.com/conda/conda/issues/2513
|
Fetching package metadata ....Traceback (most recent call last):
File "/Users/ilan/python/bin/constructor", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/ilan/constructor/bin/constructor", line 6, in <module>
sys.exit(main())
File "/Users/ilan/constructor/constructor/main.py", line 146, in main
verbose=opts.verbose)
File "/Users/ilan/constructor/constructor/main.py", line 81, in main_build
fcp.main(info, verbose=verbose)
File "/Users/ilan/constructor/constructor/fcp.py", line 170, in main
for url in info['channels']))
File "/Users/ilan/python/lib/python2.7/site-packages/conda/fetch.py", line 276, in fetch_index
url_s, priority = channel_urls[channel]
TypeError: 'int' object is not iterable
|
TypeError
|
def fetch_index(channel_urls, use_cache=False, unknown=False, index=None):
log.debug("channel_urls=" + repr(channel_urls))
# pool = ThreadPool(5)
if index is None:
index = {}
stdoutlog.info("Fetching package metadata ...")
if not isinstance(channel_urls, dict):
channel_urls = prioritize_channels(channel_urls)
for url in iterkeys(channel_urls):
if allowed_channels and url not in allowed_channels:
sys.exit(
"""
Error: URL '%s' not in allowed channels.
Allowed channels are:
- %s
"""
% (url, "\n - ".join(allowed_channels))
)
try:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(10)
except (ImportError, RuntimeError):
# concurrent.futures is only available in Python >= 3.2 or if futures is installed
# RuntimeError is thrown if number of threads are limited by OS
session = CondaSession()
repodatas = [
(url, fetch_repodata(url, use_cache=use_cache, session=session))
for url in iterkeys(channel_urls)
]
else:
try:
urls = tuple(channel_urls)
futures = tuple(
executor.submit(
fetch_repodata, url, use_cache=use_cache, session=CondaSession()
)
for url in urls
)
repodatas = [(u, f.result()) for u, f in zip(urls, futures)]
finally:
executor.shutdown(wait=True)
for channel, repodata in repodatas:
if repodata is None:
continue
new_index = repodata["packages"]
url_s, priority = channel_urls[channel]
channel = channel.rstrip("/")
for fn, info in iteritems(new_index):
info["fn"] = fn
info["schannel"] = url_s
info["channel"] = channel
info["priority"] = priority
info["url"] = channel + "/" + fn
key = url_s + "::" + fn if url_s != "defaults" else fn
index[key] = info
stdoutlog.info("\n")
if unknown:
add_unknown(index, channel_urls)
if add_pip_as_python_dependency:
add_pip_dependency(index)
return index
|
def fetch_index(channel_urls, use_cache=False, unknown=False, index=None):
log.debug("channel_urls=" + repr(channel_urls))
# pool = ThreadPool(5)
if index is None:
index = {}
stdoutlog.info("Fetching package metadata ...")
if not isinstance(channel_urls, dict):
channel_urls = {url: pri + 1 for pri, url in enumerate(channel_urls)}
for url in iterkeys(channel_urls):
if allowed_channels and url not in allowed_channels:
sys.exit(
"""
Error: URL '%s' not in allowed channels.
Allowed channels are:
- %s
"""
% (url, "\n - ".join(allowed_channels))
)
try:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(10)
except (ImportError, RuntimeError):
# concurrent.futures is only available in Python >= 3.2 or if futures is installed
# RuntimeError is thrown if number of threads are limited by OS
session = CondaSession()
repodatas = [
(url, fetch_repodata(url, use_cache=use_cache, session=session))
for url in iterkeys(channel_urls)
]
else:
try:
urls = tuple(channel_urls)
futures = tuple(
executor.submit(
fetch_repodata, url, use_cache=use_cache, session=CondaSession()
)
for url in urls
)
repodatas = [(u, f.result()) for u, f in zip(urls, futures)]
finally:
executor.shutdown(wait=True)
for channel, repodata in repodatas:
if repodata is None:
continue
new_index = repodata["packages"]
url_s, priority = channel_urls[channel]
channel = channel.rstrip("/")
for fn, info in iteritems(new_index):
info["fn"] = fn
info["schannel"] = url_s
info["channel"] = channel
info["priority"] = priority
info["url"] = channel + "/" + fn
key = url_s + "::" + fn if url_s != "defaults" else fn
index[key] = info
stdoutlog.info("\n")
if unknown:
add_unknown(index, channel_urls)
if add_pip_as_python_dependency:
add_pip_dependency(index)
return index
|
https://github.com/conda/conda/issues/2513
|
Fetching package metadata ....Traceback (most recent call last):
File "/Users/ilan/python/bin/constructor", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/ilan/constructor/bin/constructor", line 6, in <module>
sys.exit(main())
File "/Users/ilan/constructor/constructor/main.py", line 146, in main
verbose=opts.verbose)
File "/Users/ilan/constructor/constructor/main.py", line 81, in main_build
fcp.main(info, verbose=verbose)
File "/Users/ilan/constructor/constructor/fcp.py", line 170, in main
for url in info['channels']))
File "/Users/ilan/python/lib/python2.7/site-packages/conda/fetch.py", line 276, in fetch_index
url_s, priority = channel_urls[channel]
TypeError: 'int' object is not iterable
|
TypeError
|
def write_head(fo):
fo.write("==> %s <==\n" % time.strftime("%Y-%m-%d %H:%M:%S"))
fo.write("# cmd: %s\n" % (" ".join(sys.argv)))
|
def write_head(fo):
fo.write("==> %s <==\n" % time.strftime(TIME_FORMAT))
fo.write("# cmd: %s\n" % (" ".join(sys.argv)))
|
https://github.com/conda/conda/issues/651
|
Fetching package metadata: ..
Solving package specifications: .
Package plan for installation in environment C:\Users\colin.dickson\Miniconda:
The following packages will be downloaded:
package | build
---------------------------|-----------------
conda-3.4.1 | py33_0 125 KB
The following packages will be UN-linked:
package | build
---------------------------|-----------------
conda-3.4.0 | py33_0
The following packages will be linked:
package | build
---------------------------|-----------------
conda-3.4.1 | py33_0 hard-link
An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Include the output of the command 'conda info' in your report.
conda : Traceback (most recent call last):
+ conda update conda --yes 2>&1 > conda_update.txt
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ CategoryInfo : NotSpecified: (Traceback (most recent call last)::String) [], RemoteException
+ FullyQualifiedErrorId : NativeCommandError
File "C:\Users\colin.dickson\Miniconda\Scripts\conda-script.py", line 4, in <module>
sys.exit(main())
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\cli\main.py", line 179, in main
args.func(args, p)
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\cli\main_update.py", line 39, in execute
install.install(args, parser, 'update')
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\cli\install.py", line 275, in install
plan.execute_actions(actions, index, verbose=not args.quiet)
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\plan.py", line 418, in execute_actions
with History(actions[PREFIX]):
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\history.py", line 55, in __enter__
self.update()
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\history.py", line 75, in update
self.write_changes(last, curr)
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\history.py", line 147, in write_changes
write_head(fo)
File "C:\Users\colin.dickson\Miniconda\lib\site-packages\conda\history.py", line 16, in write_head
fo.write("==> %s <==\n" % time.strftime(TIME_FORMAT))
File "C:\Users\colin.dickson\Miniconda\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\x92' in position 36: character maps to <undefined>
|
UnicodeEncodeError
|
def create_parser():
p = argparse.ArgumentParser()
sub_parsers = p.add_subparsers()
main_create.configure_parser(sub_parsers)
main_export.configure_parser(sub_parsers)
main_list.configure_parser(sub_parsers)
main_remove.configure_parser(sub_parsers)
return p
|
def create_parser():
p = argparse.ArgumentParser()
sub_parsers = p.add_subparsers()
main_create.configure_parser(sub_parsers)
main_export.configure_parser(sub_parsers)
main_list.configure_parser(sub_parsers)
return p
|
https://github.com/conda/conda/issues/4
|
Z:\continuum\iopro>conda install iopro
The following packages will be downloaded:
iopro-1.2.3-np16py27_1.tar.bz2 [http://repo.continuum.io/pkgs/win-64/]
python-2.7.3-5.tar.bz2 [http://repo.continuum.io/pkgs/win-64/]
The following packages will be DE-activated:
package | build
------------------------- | ---------------
iopro-1.2.2 | np16py27_0
python-2.7.3 | 4
The following packages will be activated:
package | build
------------------------- | ---------------
iopro-1.2.3 | np16py27_1
python-2.7.3 | 5
Proceed (y/n)? y
Fetching packages...
iopro-1.2.3-np16py27_1.tar.bz2 100% |###############| Time: 0:00:02 148.82 kB/s
python-2.7.3-5.tar.bz2 100% |#######################| Time: 0:01:08 297.27 kB/s
Deactivating packages...
[ COMPLETE ] |#################################################| 100%
Activating packages...
An unexpected exceptional error has occurred, please consider sending the follow
ing traceback to the conda GitHub issue tracker at https://github.com/ContinuumI
O/conda/issues
Traceback (most recent call last):
File "C:\Anaconda\Scripts\conda-script.py", line 4, in <module>
sys.exit(main())
File "C:\Anaconda\lib\site-packages\conda\cli\main.py", line 105, in main
args.func(args)
File "C:\Anaconda\lib\site-packages\conda\cli\main_install.py", line 114, in e
xecute
plan.execute(env, args.progress_bar=="yes")
File "C:\Anaconda\lib\site-packages\conda\package_plan.py", line 81, in execut
e
self._handle_activations(env, package_progress)
File "C:\Anaconda\lib\site-packages\conda\package_plan.py", line 123, in _hand
le_activations
activate(env.conda.packages_dir, pkg.canonical_name, env.prefix)
File "C:\Anaconda\lib\site-packages\conda\install.py", line 188, in activate
t.extractall(path=prefix)
File "C:\Anaconda\lib\tarfile.py", line 2048, in extractall
File "C:\Anaconda\lib\tarfile.py", line 2085, in extract
File "C:\Anaconda\lib\tarfile.py", line 2161, in _extract_member
File "C:\Anaconda\lib\tarfile.py", line 2200, in makefile
IOError: [Errno 13] Permission denied: 'C:\\Anaconda\\DLLs\\_hashlib.pyd'
Z:\continuum\iopro>conda -h
ImportError: No module named site
|
IOError
|
def __init__(self, path):
assert isdir(path)
self.path = path
self.meta_path = join(path, "meta.yaml")
self.meta = parse(open(self.meta_path).read())
|
def __init__(self, path):
assert isdir(path)
self.path = path
meta_path = join(path, "meta.yaml")
self.meta = parse(open(meta_path).read())
|
https://github.com/conda/conda/issues/139
|
BUILD START: An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/ContinuumIO/conda/issues"
Traceback (most recent call last):
File "/Users/aaronmeurer/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main.py", line 110, in main
args.func(args, p)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main_build.py", line 52, in execute
build.build(m)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/build.py", line 118, in build
print "BUILD START:", m.dist()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/metadata.py", line 137, in dist
return '%s-%s-%s' % (self.name(), self.version(), self.build_id())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/metadata.py", line 104, in name
return self.get_value('package/name').lower()
AttributeError: 'NoneType' object has no attribute 'lower'
|
AttributeError
|
def name(self):
res = self.get_value("package/name")
if not res:
sys.exit("Error: package/name missing in: %r" % self.meta_path)
res = str(res)
if res != res.lower():
sys.exit("Error: package/name must be lowercase, got: %r" % res)
return res
|
def name(self):
return self.get_value("package/name").lower()
|
https://github.com/conda/conda/issues/139
|
BUILD START: An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/ContinuumIO/conda/issues"
Traceback (most recent call last):
File "/Users/aaronmeurer/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main.py", line 110, in main
args.func(args, p)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main_build.py", line 52, in execute
build.build(m)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/build.py", line 118, in build
print "BUILD START:", m.dist()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/metadata.py", line 137, in dist
return '%s-%s-%s' % (self.name(), self.version(), self.build_id())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/metadata.py", line 104, in name
return self.get_value('package/name').lower()
AttributeError: 'NoneType' object has no attribute 'lower'
|
AttributeError
|
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
"update",
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
p.add_argument(
"-n",
"--name",
action="store",
help="name of environment (in %s)" % os.pathsep.join(config.envs_dirs),
default=None,
)
p.add_argument(
"-f",
"--file",
action="store",
help="environment definition (default: environment.yml)",
default="environment.yml",
)
p.add_argument(
"-q",
"--quiet",
default=False,
)
p.add_argument(
"remote_definition",
help="remote environment definition / IPython notebook",
action="store",
default=None,
nargs="?",
)
common.add_parser_json(p)
p.set_defaults(func=execute)
|
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
"update",
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
p.add_argument(
"-n",
"--name",
action="store",
help="name of environment (in %s)" % os.pathsep.join(config.envs_dirs),
default=None,
)
p.add_argument(
"-f",
"--file",
action="store",
help="environment definition (default: environment.yml)",
default="environment.yml",
)
p.add_argument(
"-q",
"--quiet",
default=False,
)
common.add_parser_json(p)
p.set_defaults(func=execute)
|
https://github.com/conda/conda/issues/136
|
Traceback (most recent call last):
File "/Users/aaronmeurer/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main.py", line 109, in main
args.func(args, p)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main_build.py", line 52, in execute
build.build(m)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/build.py", line 135, in build
post_process()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/post.py", line 96, in post_process
rm_egg_dirs()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/post.py", line 66, in rm_egg_dirs
os.rename(join(egg_dir, fn), join(sp_dir, fn))
OSError: [Errno 66] Directory not empty
|
OSError
|
def execute(args, parser):
name = args.remote_definition or args.name
try:
spec = install_specs.detect(
name=name, filename=args.file, directory=os.getcwd()
)
env = spec.environment
except exceptions.SpecNotFound as e:
common.error_and_exit(str(e), json=args.json)
if not args.name:
if not env.name:
# Note, this is a hack fofr get_prefix that assumes argparse results
# TODO Refactor common.get_prefix
name = os.environ.get("CONDA_DEFAULT_ENV", False)
if not name:
msg = "Unable to determine environment\n\n"
msg += textwrap.dedent("""
Please re-run this command with one of the following options:
* Provide an environment name via --name or -n
* Re-run this command inside an activated conda environment.""").lstrip()
# TODO Add json support
common.error_and_exit(msg, json=False)
# Note: stubbing out the args object as all of the
# conda.cli.common code thinks that name will always
# be specified.
args.name = env.name
prefix = common.get_prefix(args, search=False)
# CAN'T Check with this function since it assumes we will create prefix.
# cli_install.check_prefix(prefix, json=args.json)
# TODO, add capability
# common.ensure_override_channels_requires_channel(args)
# channel_urls = args.channel or ()
for installer_type, specs in env.dependencies.items():
try:
installer = get_installer(installer_type)
installer.install(prefix, specs, args, env)
except InvalidInstaller:
sys.stderr.write(
textwrap.dedent("""
Unable to install package for {0}.
Please double check and ensure you dependencies file has
the correct spelling. You might also try installing the
conda-env-{0} package to see if provides the required
installer.
""")
.lstrip()
.format(installer_type)
)
return -1
touch_nonadmin(prefix)
if not args.json:
cli_install.print_activate(args.name if args.name else prefix)
|
def execute(args, parser):
try:
env = from_file(args.file)
except exceptions.EnvironmentFileNotFound as e:
msg = "Unable to locate environment file: %s\n\n" % e.filename
msg += "\n".join(
textwrap.wrap(
textwrap.dedent("""
Please verify that the above file is present and that you have
permission read the file's contents. Note, you can specify the
file to use by explictly adding --file=/path/to/file when calling
conda env update.""").lstrip()
)
)
common.error_and_exit(msg, json=args.json)
if not args.name:
if not env.name:
# Note, this is a hack fofr get_prefix that assumes argparse results
# TODO Refactor common.get_prefix
name = os.environ.get("CONDA_DEFAULT_ENV", False)
if not name:
msg = "Unable to determine environment\n\n"
msg += textwrap.dedent("""
Please re-run this command with one of the following options:
* Provide an environment name via --name or -n
* Re-run this command inside an activated conda environment.""").lstrip()
# TODO Add json support
common.error_and_exit(msg, json=False)
# Note: stubbing out the args object as all of the
# conda.cli.common code thinks that name will always
# be specified.
args.name = env.name
prefix = common.get_prefix(args, search=False)
# CAN'T Check with this function since it assumes we will create prefix.
# cli_install.check_prefix(prefix, json=args.json)
# TODO, add capability
# common.ensure_override_channels_requires_channel(args)
# channel_urls = args.channel or ()
for installer_type, specs in env.dependencies.items():
try:
installer = get_installer(installer_type)
installer.install(prefix, specs, args, env)
except InvalidInstaller:
sys.stderr.write(
textwrap.dedent("""
Unable to install package for {0}.
Please double check and ensure you dependencies file has
the correct spelling. You might also try installing the
conda-env-{0} package to see if provides the required
installer.
""")
.lstrip()
.format(installer_type)
)
return -1
touch_nonadmin(prefix)
if not args.json:
cli_install.print_activate(args.name if args.name else prefix)
|
https://github.com/conda/conda/issues/136
|
Traceback (most recent call last):
File "/Users/aaronmeurer/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main.py", line 109, in main
args.func(args, p)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main_build.py", line 52, in execute
build.build(m)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/build.py", line 135, in build
post_process()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/post.py", line 96, in post_process
rm_egg_dirs()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/post.py", line 66, in rm_egg_dirs
os.rename(join(egg_dir, fn), join(sp_dir, fn))
OSError: [Errno 66] Directory not empty
|
OSError
|
def create_parser():
p = ArgumentParser()
sub_parsers = p.add_subparsers()
main_attach.configure_parser(sub_parsers)
main_create.configure_parser(sub_parsers)
main_export.configure_parser(sub_parsers)
main_list.configure_parser(sub_parsers)
main_remove.configure_parser(sub_parsers)
main_upload.configure_parser(sub_parsers)
main_update.configure_parser(sub_parsers)
show_help_on_empty_command()
return p
|
def create_parser():
p = argparse.ArgumentParser()
sub_parsers = p.add_subparsers()
main_attach.configure_parser(sub_parsers)
main_create.configure_parser(sub_parsers)
main_export.configure_parser(sub_parsers)
main_list.configure_parser(sub_parsers)
main_remove.configure_parser(sub_parsers)
main_upload.configure_parser(sub_parsers)
main_update.configure_parser(sub_parsers)
show_help_on_empty_command()
return p
|
https://github.com/conda/conda/issues/133
|
File "/Users/aaronmeurer/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main.py", line 109, in main
args.func(args, p)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main_build.py", line 52, in execute
build.build(m)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/build.py", line 155, in build
tarcheck.check_all(path)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/tarcheck.py", line 56, in check_all
x.index_json()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/tarcheck.py", line 49, in index_json
getattr(self, varname)))
Exception: version: 0.4 != '0.4'
|
Exception
|
def parse(self):
if not self.raw:
return
self.update({"conda": []})
for line in self.raw:
if isinstance(line, dict):
self.update(line)
else:
self["conda"].append(common.arg2spec(line))
|
def parse(self):
if not self.raw:
return
self.update({"conda": []})
for line in self.raw:
if type(line) is dict:
self.update(line)
else:
self["conda"].append(common.arg2spec(line))
|
https://github.com/conda/conda/issues/133
|
File "/Users/aaronmeurer/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main.py", line 109, in main
args.func(args, p)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/cli/main_build.py", line 52, in execute
build.build(m)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/build.py", line 155, in build
tarcheck.check_all(path)
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/tarcheck.py", line 56, in check_all
x.index_json()
File "/Users/aaronmeurer/Documents/Continuum/conda/conda/builder/tarcheck.py", line 49, in index_json
getattr(self, varname)))
Exception: version: 0.4 != '0.4'
|
Exception
|
def from_url(cls, url: str) -> Optional["YukicoderProblem"]:
# example: https://yukicoder.me/problems/no/499
# example: http://yukicoder.me/problems/1476
result = urllib.parse.urlparse(url)
dirname, basename = posixpath.split(utils.normpath(result.path))
if result.scheme in ("", "http", "https") and result.netloc == "yukicoder.me":
n = None # type: Optional[int]
try:
n = int(basename)
except ValueError:
pass
if n is not None:
if dirname == "/problems/no":
return cls(problem_no=n)
if dirname == "/problems":
return cls(problem_id=n)
return None
|
def from_url(cls, url: str) -> Optional["YukicoderProblem"]:
# example: https://yukicoder.me/problems/no/499
# example: http://yukicoder.me/problems/1476
result = urllib.parse.urlparse(url)
dirname, basename = posixpath.split(utils.normpath(result.path))
if result.scheme in ("", "http", "https") and result.netloc == "yukicoder.me":
n = None # type: Optional[int]
try:
n = int(basename)
except ValueError:
pass
if n is not None:
if dirname == "/problems/no":
return cls(problem_no=int(n))
if dirname == "/problems":
return cls(problem_id=int(n))
return cls()
return None
|
https://github.com/online-judge-tools/oj/issues/578
|
dispatch.problem_from_url("https://yukicoder.me/contests/241/") # Assert raises
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/dispatch.py", line 53, in problem_from_url
problem = cls.from_url(url)
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/service/yukicoder.py", line 359, in from_url
return cls()
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/service/yukicoder.py", line 238, in __init__
assert problem_no or problem_id
AssertionError
dispatch.problem_from_url("https://yahoo.co.jp") # return None
|
AssertionError
|
def _AtCoderProblemContentPartial_from_row(tr: bs4.Tag):
tds = tr.find_all("td")
assert 4 <= len(tds) <= 5
path = tds[1].find("a")["href"]
problem = AtCoderProblem.from_url("https://atcoder.jp" + path)
assert problem is not None
alphabet = tds[0].text
name = tds[1].text
if tds[2].text.endswith(" msec"):
time_limit_msec = int(utils.remove_suffix(tds[2].text, " msec"))
elif tds[2].text.endswith(" sec"):
time_limit_msec = int(float(utils.remove_suffix(tds[2].text, " sec")) * 1000)
else:
assert False
memory_limit_byte = int(
float(utils.remove_suffix(tds[3].text, " MB")) * 1000 * 1000
) # TODO: confirm this is MB truly, not MiB
if len(tds) == 5:
assert tds[4].text.strip() in ("", "Submit", "提出")
self = AtCoderProblemContentPartial(
alphabet, memory_limit_byte, name, problem, time_limit_msec
)
problem._cached_content = self
return self
|
def _AtCoderProblemContentPartial_from_row(tr: bs4.Tag):
tds = tr.find_all("td")
assert 4 <= len(tds) <= 5
path = tds[1].find("a")["href"]
problem = AtCoderProblem.from_url("https://atcoder.jp" + path)
assert problem is not None
alphabet = tds[0].text
name = tds[1].text
time_limit_msec = int(float(utils.remove_suffix(tds[2].text, " sec")) * 1000)
memory_limit_byte = int(
float(utils.remove_suffix(tds[3].text, " MB")) * 1000 * 1000
) # TODO: confirm this is MB truly, not MiB
if len(tds) == 5:
assert tds[4].text.strip() in ("", "Submit", "提出")
self = AtCoderProblemContentPartial(
alphabet, memory_limit_byte, name, problem, time_limit_msec
)
problem._cached_content = self
return self
|
https://github.com/online-judge-tools/oj/issues/446
|
$ python3
Python 3.6.7 (default, Oct 22 2018, 11:32:17)
[GCC 8.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import onlinejudge
onlinejudge.service.atcoder.AtCoderContest.from_url('https://atcoder.jp/contests/joi2019ho/tasks').list_problems()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 287, in list_problems
return [AtCoderProblem._from_table_row(tr) for tr in tbody.find_all('tr')]
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 287, in <listcomp>
return [AtCoderProblem._from_table_row(tr) for tr in tbody.find_all('tr')]
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 535, in _from_table_row
return _AtCoderProblemContentPartial_from_row(tr).problem
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 364, in _AtCoderProblemContentPartial_from_row
time_limit_msec = int(float(utils.remove_suffix(tds[2].text, ' sec')) * 1000)
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/_implementation/utils.py", line 225, in remove_suffix
assert s.endswith(suffix)
AssertionError
|
AssertionError
|
def _AtCoderProblemContent_parse_partial(
soup: bs4.BeautifulSoup, problem: "AtCoderProblem"
) -> AtCoderProblemContentPartial:
h2 = soup.find("span", class_="h2")
alphabet, _, name = h2.text.partition(" - ")
time_limit, memory_limit = h2.find_next_sibling("p").text.split(" / ")
for time_limit_prefix in ("実行時間制限: ", "Time Limit: "):
if time_limit.startswith(time_limit_prefix):
break
else:
assert False
if time_limit.endswith(" msec"):
time_limit_msec = int(
utils.remove_suffix(
utils.remove_prefix(time_limit, time_limit_prefix), " msec"
)
)
elif time_limit.endswith(" sec"):
time_limit_msec = int(
float(
utils.remove_suffix(
utils.remove_prefix(time_limit, time_limit_prefix), " sec"
)
)
* 1000
)
else:
assert False
for memory_limit_prefix in ("メモリ制限: ", "Memory Limit: "):
if memory_limit.startswith(memory_limit_prefix):
break
else:
assert False
memory_limit_byte = int(
float(
utils.remove_suffix(
utils.remove_prefix(memory_limit, memory_limit_prefix), " MB"
)
)
* 1000
* 1000
)
return AtCoderProblemContentPartial(
alphabet, memory_limit_byte, name, problem, time_limit_msec
)
|
def _AtCoderProblemContent_parse_partial(
soup: bs4.BeautifulSoup, problem: "AtCoderProblem"
) -> AtCoderProblemContentPartial:
h2 = soup.find("span", class_="h2")
alphabet, _, name = h2.text.partition(" - ")
time_limit, memory_limit = h2.find_next_sibling("p").text.split(" / ")
for time_limit_prefix in ("実行時間制限: ", "Time Limit: "):
if time_limit.startswith(time_limit_prefix):
break
else:
assert False
time_limit_msec = int(
float(
utils.remove_suffix(
utils.remove_prefix(time_limit, time_limit_prefix), " sec"
)
)
* 1000
)
for memory_limit_prefix in ("メモリ制限: ", "Memory Limit: "):
if memory_limit.startswith(memory_limit_prefix):
break
else:
assert False
memory_limit_byte = int(
float(
utils.remove_suffix(
utils.remove_prefix(memory_limit, memory_limit_prefix), " MB"
)
)
* 1000
* 1000
)
return AtCoderProblemContentPartial(
alphabet, memory_limit_byte, name, problem, time_limit_msec
)
|
https://github.com/online-judge-tools/oj/issues/446
|
$ python3
Python 3.6.7 (default, Oct 22 2018, 11:32:17)
[GCC 8.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import onlinejudge
onlinejudge.service.atcoder.AtCoderContest.from_url('https://atcoder.jp/contests/joi2019ho/tasks').list_problems()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 287, in list_problems
return [AtCoderProblem._from_table_row(tr) for tr in tbody.find_all('tr')]
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 287, in <listcomp>
return [AtCoderProblem._from_table_row(tr) for tr in tbody.find_all('tr')]
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 535, in _from_table_row
return _AtCoderProblemContentPartial_from_row(tr).problem
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/service/atcoder.py", line 364, in _AtCoderProblemContentPartial_from_row
time_limit_msec = int(float(utils.remove_suffix(tds[2].text, ' sec')) * 1000)
File "/home/user/.local/lib/python3.6/site-packages/onlinejudge/_implementation/utils.py", line 225, in remove_suffix
assert s.endswith(suffix)
AssertionError
|
AssertionError
|
def _load_details(self, session: Optional[requests.Session] = None, lang: str = "en"):
assert lang in ("en", "ja")
session = session or utils.get_default_session()
resp = _request("GET", self.get_url(type="beta", lang=lang), session=session)
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
contest_name, _, _ = soup.find("title").text.rpartition(" - ")
contest_duration = soup.find("small", class_="contest-duration")
self._start_time, end_time = [
self._parse_start_time(a["href"]) for a in contest_duration.find_all("a")
]
self._duration = end_time - self._start_time
if lang == "en":
self._contest_name_en = contest_name
elif lang == "ja":
self._contest_name_ja = contest_name
else:
assert False
_, _, self._can_participate = soup.find(
"span", text=re.compile(r"^(Can Participate|参加対象): ")
).text.partition(": ")
_, _, self._rated_range = soup.find(
"span", text=re.compile(r"^(Rated Range|Rated対象): ")
).text.partition(": ")
penalty_text = soup.find("span", text=re.compile(r"^(Penalty|ペナルティ): ")).text
m = re.match(r"(Penalty|ペナルティ): (\d+)( minutes?|分)", penalty_text)
assert m
self._penalty = datetime.timedelta(minutes=int(m.group(2)))
|
def _load_details(self, session: Optional[requests.Session] = None, lang: str = "en"):
assert lang in ("en", "ja")
session = session or utils.get_default_session()
resp = _request("GET", self.get_url(type="beta", lang=lang), session=session)
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
contest_name, _, _ = soup.find("title").text.rpartition(" - ")
contest_duration = soup.find("small", class_="contest-duration")
self._start_time, end_time = [
self._parse_start_time(a["href"]) for a in contest_duration.find_all("a")
]
self._duration = end_time - self._start_time
if lang == "en":
self._contest_name_en = contest_name
elif lang == "ja":
self._contest_name_ja = contest_name
else:
assert False
_, _, self._can_participate = soup.find(
"span", text=re.compile(r"^(Can Participate|参加対象): ")
).text.partition(": ")
_, _, self._rated_range = soup.find(
"span", text=re.compile(r"^(Rated Range|Rated対象): ")
).text.partition(": ")
penalty_text = soup.find("span", text=re.compile(r"^(Penalty|ペナルティ): ")).text
m = re.match(r"(Penalty|ペナルティ): (\d+)( minutes|分)", penalty_text)
assert m
self._penalty = datetime.timedelta(minutes=int(m.group(2)))
|
https://github.com/online-judge-tools/oj/issues/444
|
$ python3 -c 'from onlinejudge.service.atcoder import * ; AtCoderContest.from_url("https://atcoder.jp/contests/chokudai_S002").get_name()'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/user/GitHub/online-judge-tools/onlinejudge/service/atcoder.py", line 261, in get_name
self._load_details(lang='en', session=session)
File "/home/user/GitHub/online-judge-tools/onlinejudge/service/atcoder.py", line 255, in _load_details
assert m
AssertionError
|
AssertionError
|
def _from_table_row(cls, tr: bs4.Tag) -> "AtCoderProblem":
tds = tr.find_all("td")
assert 4 <= len(tds) <= 5
path = tds[1].find("a")["href"]
self = cls.from_url("https://atcoder.jp" + path)
assert self is not None
self._alphabet = tds[0].text
self._task_name = tds[1].text
self._time_limit_msec = int(float(utils.remove_suffix(tds[2].text, " sec")) * 1000)
self._memory_limit_byte = (
int(utils.remove_suffix(tds[3].text, " MB")) * 1000 * 1000
) # TODO: confirm this is MB truly, not MiB
if len(tds) == 5:
assert tds[4].text.strip() in ("", "Submit", "提出")
return self
|
def _from_table_row(cls, tr: bs4.Tag) -> "AtCoderProblem":
tds = tr.find_all("td")
assert len(tds) == 5
path = tds[1].find("a")["href"]
self = cls.from_url("https://atcoder.jp" + path)
assert self is not None
self._alphabet = tds[0].text
self._task_name = tds[1].text
self._time_limit_msec = int(float(utils.remove_suffix(tds[2].text, " sec")) * 1000)
self._memory_limit_byte = (
int(utils.remove_suffix(tds[3].text, " MB")) * 1000 * 1000
) # TODO: confirm this is MB truly, not MiB
assert tds[4].text.strip() in ("", "Submit")
return self
|
https://github.com/online-judge-tools/oj/issues/391
|
$ python3 setup.py test -s tests.service_atcoder
running test
running egg_info
writing online_judge_tools.egg-info/PKG-INFO
writing dependency_links to online_judge_tools.egg-info/dependency_links.txt
writing requirements to online_judge_tools.egg-info/requires.txt
writing top-level names to online_judge_tools.egg-info/top_level.txt
reading manifest file 'online_judge_tools.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
writing manifest file 'online_judge_tools.egg-info/SOURCES.txt'
running build_ext
test_from_url (tests.service_atcoder.AtCoderContestTest) ... ok
test_iterate_submissions (tests.service_atcoder.AtCoderContestTest) ... ok
test_list_problems (tests.service_atcoder.AtCoderContestTest) ... FAIL
test_load_details (tests.service_atcoder.AtCoderContestTest) ... ok
test_normal (tests.service_atcoder.AtCoderProblemGetInputFormatTest) ... ok
test_old_problem (tests.service_atcoder.AtCoderProblemGetInputFormatTest) ... ok
test_from_url (tests.service_atcoder.AtCoderProblemTest) ... ok
test_get_alphabet (tests.service_atcoder.AtCoderProblemTest) ... ok
test_get_score (tests.service_atcoder.AtCoderProblemTest) ... ok
test_iterate_submissions (tests.service_atcoder.AtCoderProblemTest) ... ok
test_load_details (tests.service_atcoder.AtCoderProblemTest) ... ok
test_from_url (tests.service_atcoder.AtCoderSerivceTest) ... ok
test_iterate_contests (tests.service_atcoder.AtCoderSerivceTest) ... ok
test_from_url (tests.service_atcoder.AtCoderSubmissionTest) ... ok
test_get_source_code (tests.service_atcoder.AtCoderSubmissionTest) ... ok
test_get_test_cases (tests.service_atcoder.AtCoderSubmissionTest) ... ok
test_get_test_sets (tests.service_atcoder.AtCoderSubmissionTest) ... ok
test_submission_info (tests.service_atcoder.AtCoderSubmissionTest) ... ok
======================================================================
FAIL: test_list_problems (tests.service_atcoder.AtCoderContestTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/user/GitHub/online-judge-tools/tests/service_atcoder.py", line 61, in test_list_problems
problems = contest.list_problems()
File "/home/user/GitHub/online-judge-tools/onlinejudge/service/atcoder.py", line 286, in list_problems
return [AtCoderProblem._from_table_row(tr) for tr in tbody.find_all('tr')]
File "/home/user/GitHub/online-judge-tools/onlinejudge/service/atcoder.py", line 286, in <listcomp>
return [AtCoderProblem._from_table_row(tr) for tr in tbody.find_all('tr')]
File "/home/user/GitHub/online-judge-tools/onlinejudge/service/atcoder.py", line 366, in _from_table_row
assert len(tds) == 5
AssertionError
----------------------------------------------------------------------
Ran 18 tests in 4.449s
FAILED (failures=1)
Test failed: <unittest.runner.TextTestResult run=18 errors=0 failures=1>
error: Test failed: <unittest.runner.TextTestResult run=18 errors=0 failures=1>
|
AssertionError
|
def getter_with_load_details(name: str, type: Union[str, type]) -> Callable:
"""
:note: confirm that the type annotation `get_foo = getter_with_load_details("_foo", type=int) # type: Callable[..., int]` is correct one
:note: this cannot be a decorator, since mypy fails to recognize the types
This functions is bad one, but I think
get_foo = getter_with_load_details("_foo", type=int) # type: Callable[..., int]
is better than
def get_foo(self, session: Optional[requests.Session] = None) -> int:
if self._foo is None:
self._load_details(session=session)
assert self._foo is not None
return self._foo
Of course the latter is better when it is used only once, but the former is better when the pattern is repeated.
"""
@functools.wraps(lambda self: getattr(self, name))
def wrapper(self, session: Optional[requests.Session] = None):
if getattr(self, name) is None:
assert session is None or isinstance(session, requests.Session)
self._load_details(session=session)
return getattr(self, name)
# add documents
assert type is not None
py_class = lambda s: ":py:class:`{}`".format(s)
if isinstance(type, str):
if type.count("[") == 0:
rst = py_class(type)
elif type.count("[") == 1:
a, b = remove_suffix(type, "]").split("[")
rst = "{} [ {} ]".format(py_class(a), py_class(b))
else:
assert False
elif type in (int, float, str, bytes, datetime.datetime, datetime.timedelta):
rst = py_class(type.__name__)
else:
assert False
wrapper.__doc__ = ":return: {}".format(rst)
return wrapper
|
def getter_with_load_details(name: str, type: Union[str, Type]) -> Callable:
"""
:note: confirm that the type annotation `get_foo = getter_with_load_details("_foo", type=int) # type: Callable[..., int]` is correct one
:note: this cannot be a decorator, since mypy fails to recognize the types
This functions is bad one, but I think
get_foo = getter_with_load_details("_foo", type=int) # type: Callable[..., int]
is better than
def get_foo(self, session: Optional[requests.Session] = None) -> int:
if self._foo is None:
self._load_details(session=session)
assert self._foo is not None
return self._foo
Of course the latter is better when it is used only once, but the former is better when the pattern is repeated.
"""
@functools.wraps(lambda self: getattr(self, name))
def wrapper(self, session: Optional[requests.Session] = None):
if getattr(self, name) is None:
assert session is None or isinstance(session, requests.Session)
self._load_details(session=session)
return getattr(self, name)
# add documents
assert type is not None
py_class = lambda s: ":py:class:`{}`".format(s)
if isinstance(type, str):
if type.count("[") == 0:
rst = py_class(type)
elif type.count("[") == 1:
a, b = remove_suffix(type, "]").split("[")
rst = "{} [ {} ]".format(py_class(a), py_class(b))
else:
assert False
elif type in (int, float, str, bytes, datetime.datetime, datetime.timedelta):
rst = py_class(type.__name__)
else:
assert False
wrapper.__doc__ = ":return: {}".format(rst)
return wrapper
|
https://github.com/online-judge-tools/oj/issues/363
|
Traceback (most recent call last):
File "/home/ryo/workspace/github/chainer-wind-env/bin/oj", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/home/ryo/workspace/github/online-judge-tools/oj", line 4, in <module>
import onlinejudge._implementation.main
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/__init__.py", line 3, in <module>
import onlinejudge.service
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/service/__init__.py", line 2, in <module>
import onlinejudge.service.anarchygolf
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/service/anarchygolf.py", line 14, in <module>
import onlinejudge._implementation.testcase_zipper
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/_implementation/testcase_zipper.py", line 9, in <module>
import onlinejudge._implementation.format_utils
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/_implementation/format_utils.py", line 11, in <module>
import onlinejudge._implementation.utils as utils
File "/home/ryo/workspace/github/online-judge-tools/onlinejudge/_implementation/utils.py", line 215, in <module>
def getter_with_load_details(name: str, type: Union[str, Type]) -> Callable:
File "/usr/lib/python3.5/typing.py", line 552, in __getitem__
dict(self.__dict__), parameters, _root=True)
File "/usr/lib/python3.5/typing.py", line 512, in __new__
for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
File "/usr/lib/python3.5/typing.py", line 512, in <genexpr>
for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
File "/usr/lib/python3.5/typing.py", line 1077, in __subclasscheck__
if super().__subclasscheck__(cls):
File "/home/ryo/workspace/github/chainer-wind-env/lib/python3.5/abc.py", line 225, in __subclasscheck__
for scls in cls.__subclasses__():
TypeError: descriptor '__subclasses__' of 'type' object needs an argument
|
TypeError
|
def download_sample_cases(
self, session: Optional[requests.Session] = None
) -> List[TestCase]:
session = session or utils.new_default_session()
# get
resp = utils.request("GET", self.get_url(), session=session)
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
for pre in soup.select(".sample pre"):
log.debug("pre: %s", str(pre))
it = self._parse_sample_tag(pre)
if it is not None:
data, name = it
samples.add(data.encode(), name)
return samples.get()
|
def download_sample_cases(
self, session: Optional[requests.Session] = None
) -> List[TestCase]:
session = session or utils.new_default_session()
# get
resp = utils.request("GET", self.get_url(), session=session)
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
for pre in soup.find_all("pre"):
log.debug("pre: %s", str(pre))
it = self._parse_sample_tag(pre)
if it is not None:
data, name = it
samples.add(data.encode(), name)
return samples.get()
|
https://github.com/online-judge-tools/oj/issues/355
|
kaito@PC:~/procon/procon/yukicoder/183/d$ oj dl https://yukicoder.me/problems/no/649
[x] problem recognized: <onlinejudge.service.yukicoder.YukicoderProblem object at 0x7f3e9ab14eb8>: https://yukicoder.me/problems/no/649
[x] load cookie from: /home/kaito/.local/share/online-judge-tools/cookie.jar
[x] GET: https://yukicoder.me/problems/no/649
[x] 200 OK
Traceback (most recent call last):
File "/home/kaito/.local/bin/oj", line 7, in <module>
onlinejudge.implementation.main.main(args=sys.argv[1:])
File "/home/kaito/.local/lib/python3.6/site-packages/onlinejudge/implementation/main.py", line 318, in main
run_program(namespace, parser=parser)
File "/home/kaito/.local/lib/python3.6/site-packages/onlinejudge/implementation/main.py", line 287, in run_program
download(args)
File "/home/kaito/.local/lib/python3.6/site-packages/onlinejudge/implementation/command/download.py", line 53, in download
samples = problem.download_sample_cases(session=sess) # type: ignore
File "/home/kaito/.local/lib/python3.6/site-packages/onlinejudge/service/yukicoder.py", line 275, in download_sample_cases
it = self._parse_sample_tag(pre)
File "/home/kaito/.local/lib/python3.6/site-packages/onlinejudge/service/yukicoder.py", line 312, in _parse_sample_tag
if prv.name == 'h6' and tag.parent.name == 'div' and tag.parent['class'] == ['paragraph'] and pprv.name == 'h5':
AttributeError: 'NoneType' object has no attribute 'name'
|
AttributeError
|
async def start_update_loop(self):
error = None
try:
time_frames = self.evaluator_task_manager_by_time_frame_by_symbol.keys()
# sort time frames to update them in order of accuracy
time_frames = TimeFrameManager.sort_time_frames(time_frames)
if time_frames and self.symbols:
self.in_backtesting = self._init_backtesting_if_necessary(time_frames)
# init refreshed_times at 0 for each time frame
self.refreshed_times = {
key: {symbol: 0 for symbol in self.symbols} for key in time_frames
}
# init last refresh times at 0 for each time frame
self.time_frame_last_update = {
key: {symbol: 0 for symbol in self.symbols} for key in time_frames
}
while self.keep_running:
try:
await self._trigger_update(time_frames)
except CancelledError:
self.logger.info("Update tasks cancelled.")
except Exception as e:
self.logger.error(f"exception when triggering update: {e}")
self.logger.exception(e)
else:
self.logger.warning("no time frames to monitor, going to sleep.")
except Exception as e:
self.logger.exception(e)
if self.watcher is not None:
error = e
finally:
if (
self.in_backtesting
and self.symbols is not None
and not self.exchange.get_exchange()
.get_backtesting()
.get_is_finished(self.symbols)
):
if error is None:
error = "backtesting did not finish properly."
if self.watcher is not None:
self.watcher.set_error(error)
self.logger.error(error)
|
async def start_update_loop(self):
error = None
try:
time_frames = self.evaluator_task_manager_by_time_frame_by_symbol.keys()
# sort time frames to update them in order of accuracy
time_frames = TimeFrameManager.sort_time_frames(time_frames)
if time_frames and self.symbols:
self.in_backtesting = self._init_backtesting_if_necessary(time_frames)
# init refreshed_times at 0 for each time frame
self.refreshed_times = {
key: {symbol: 0 for symbol in self.symbols} for key in time_frames
}
# init last refresh times at 0 for each time frame
self.time_frame_last_update = {
key: {symbol: 0 for symbol in self.symbols} for key in time_frames
}
while self.keep_running:
try:
await self._trigger_update(time_frames)
except Exception as e:
self.logger.error(f"exception when triggering update: {e}")
self.logger.exception(e)
else:
self.logger.warning("no time frames to monitor, going to sleep.")
except Exception as e:
self.logger.exception(e)
if self.watcher is not None:
error = e
finally:
if (
self.in_backtesting
and self.symbols is not None
and not self.exchange.get_exchange()
.get_backtesting()
.get_is_finished(self.symbols)
):
if error is None:
error = "backtesting did not finish properly."
if self.watcher is not None:
self.watcher.set_error(error)
self.logger.error(error)
|
https://github.com/Drakkar-Software/OctoBot/issues/578
|
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater when refreshing data for time frame TimeFrames.FOUR_HOURS:
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
|
concurrent.futures._base.CancelledError
|
async def _refresh_time_frame_data(self, time_frame, symbol, notify=True):
try:
await self._refresh_data(time_frame, symbol, notify=notify)
self.time_frame_last_update[time_frame][symbol] = time.time()
except CancelledError as e:
raise e
except Exception as e:
self.logger.error(f" when refreshing data for time frame {time_frame}: {e}")
self.logger.exception(e)
|
async def _refresh_time_frame_data(self, time_frame, symbol, notify=True):
try:
await self._refresh_data(time_frame, symbol, notify=notify)
self.time_frame_last_update[time_frame][symbol] = time.time()
except Exception as e:
self.logger.error(f" when refreshing data for time frame {time_frame}: {e}")
self.logger.exception(e)
|
https://github.com/Drakkar-Software/OctoBot/issues/578
|
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater when refreshing data for time frame TimeFrames.FOUR_HOURS:
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
|
concurrent.futures._base.CancelledError
|
def stop_threads(self):
stop_coroutines = []
# Notify stopping
if self.config[CONFIG_NOTIFICATION_INSTANCE].enabled(
CONFIG_NOTIFICATION_GLOBAL_INFO
):
# To be improved with a full async implementation
# To be done : "asyncio.run" --> replaced by a simple await
# PR discussion : https://github.com/Drakkar-Software/OctoBot/pull/563#discussion_r248088266
stop_coroutines.append(
self.config[CONFIG_NOTIFICATION_INSTANCE].notify_with_all(
NOTIFICATION_STOPPING_MESSAGE
)
)
self.logger.info("Stopping threads ...")
if self.main_task_group:
self.main_task_group.cancel()
for thread in self.dispatchers_list:
thread.stop()
# stop services
for service_instance in ServiceCreator.get_service_instances(self.config):
try:
service_instance.stop()
except Exception as e:
raise e
# stop exchanges threads
for exchange in self.exchanges_list.values():
stop_coroutines.append(exchange.stop())
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.run(get_gather_wrapper(stop_coroutines))
self.logger.info("Threads stopped.")
|
def stop_threads(self):
# Notify stopping
if self.config[CONFIG_NOTIFICATION_INSTANCE].enabled(
CONFIG_NOTIFICATION_GLOBAL_INFO
):
# To be improved with a full async implementation
# To be done : "asyncio.run" --> replaced by a simple await
# PR discussion : https://github.com/Drakkar-Software/OctoBot/pull/563#discussion_r248088266
asyncio.run(
self.config[CONFIG_NOTIFICATION_INSTANCE].notify_with_all(
NOTIFICATION_STOPPING_MESSAGE
)
)
self.logger.info("Stopping threads ...")
if self.main_task_group:
self.main_task_group.cancel()
for thread in self.dispatchers_list:
thread.stop()
# stop services
for service_instance in ServiceCreator.get_service_instances(self.config):
try:
service_instance.stop()
except Exception as e:
raise e
# stop exchanges threads
for exchange in self.exchanges_list.values():
exchange.stop()
self.logger.info("Threads stopped.")
|
https://github.com/Drakkar-Software/OctoBot/issues/578
|
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater when refreshing data for time frame TimeFrames.FOUR_HOURS:
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
|
concurrent.futures._base.CancelledError
|
def get_service_instances(config):
instances = []
for services in config[CONFIG_CATEGORY_SERVICES]:
if CONFIG_SERVICE_INSTANCE in config[CONFIG_CATEGORY_SERVICES][services]:
instance = config[CONFIG_CATEGORY_SERVICES][services][
CONFIG_SERVICE_INSTANCE
]
if isinstance(instance, list):
for i in instance:
instances.append(i)
else:
instances.append(instance)
return instances
|
def get_service_instances(config):
instances = []
for services in config[CONFIG_CATEGORY_SERVICES]:
if CONFIG_SERVICE_INSTANCE in config[CONFIG_CATEGORY_SERVICES][services]:
instances.append(
config[CONFIG_CATEGORY_SERVICES][services][CONFIG_SERVICE_INSTANCE]
)
return instances
|
https://github.com/Drakkar-Software/OctoBot/issues/578
|
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater when refreshing data for time frame TimeFrames.FOUR_HOURS:
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
|
concurrent.futures._base.CancelledError
|
async def start_bot(bot, logger, catch=False):
try:
Commands.BOT = bot
loop = asyncio.get_event_loop()
# handle CTRL+C signal
signal.signal(signal.SIGINT, Commands._signal_handler)
# init
await bot.create_services()
await bot.create_exchange_traders()
bot.create_evaluation_tasks()
# start
try:
await bot.start_tasks()
except CancelledError:
logger.info("Core engine tasks cancelled.")
# join threads in a not loop blocking executor
# TODO remove this when no thread anymore
await loop.run_in_executor(None, bot.join_threads)
except Exception as e:
logger.exception(f"OctoBot Exception : {e}")
if not catch:
raise e
Commands.stop_bot(bot)
|
async def start_bot(bot, logger, catch=False):
try:
loop = asyncio.get_event_loop()
# try to init
await bot.create_services()
await bot.create_exchange_traders()
bot.create_evaluation_tasks()
# try to start
await bot.start_tasks()
# join threads in a not loop blocking executor
# TODO remove this when no thread anymore
await loop.run_in_executor(None, bot.join_threads)
except Exception as e:
logger.exception(f"OctoBot Exception : {e}")
if not catch:
raise e
Commands.stop_bot(bot)
|
https://github.com/Drakkar-Software/OctoBot/issues/578
|
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater when refreshing data for time frame TimeFrames.FOUR_HOURS:
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
|
concurrent.futures._base.CancelledError
|
async def stop(self):
if self._web_socket_available():
self.exchange_web_socket.stop_sockets()
await self.exchange.stop()
|
def stop(self):
if self._web_socket_available():
self.exchange_web_socket.stop_sockets()
|
https://github.com/Drakkar-Software/OctoBot/issues/578
|
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater when refreshing data for time frame TimeFrames.FOUR_HOURS:
2019-01-19 19:05:51,432 ERROR GlobalPriceUpdater
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 163, in _refresh_time_frame_data
await self._refresh_data(time_frame, symbol, notify=notify)
File "/home/herklos/dev/Drakkars/OctoBot/evaluator/Updaters/global_price_updater.py", line 177, in _refresh_data
return_list=False))
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/exchange_dispatcher.py", line 89, in get_symbol_prices
await self.exchange.get_symbol_prices(symbol=symbol, time_frame=time_frame, limit=limit)
File "/home/herklos/dev/Drakkars/OctoBot/trading/exchanges/rest_exchanges/rest_exchange.py", line 126, in get_symbol_prices
candles = await self.client.fetch_ohlcv(symbol, time_frame.value)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 503, in fetch_ohlcv
response = await self.publicGetKlines(self.extend(request, params))
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/binance.py", line 1114, in request
response = await self.fetch2(path, api, method, params, headers, body)
File "/home/herklos/.local/lib/python3.7/site-packages/ccxt/async_support/base/exchange.py", line 113, in fetch2
await self.throttle()
concurrent.futures._base.CancelledError
|
concurrent.futures._base.CancelledError
|
def __init__(self, base_path, home_path=Path.home(), apps=None, input_enabled=True):
self.base_path = base_path
self.home_path = home_path
self.dot_briefcase_path = home_path / ".briefcase"
self.global_config = None
self.apps = {} if apps is None else apps
self._path_index = {}
# Some details about the host machine
self.host_arch = platform.machine()
self.host_os = platform.system()
# External service APIs.
# These are abstracted to enable testing without patching.
self.cookiecutter = cookiecutter
self.requests = requests
self.input = Console(enabled=input_enabled)
self.os = os
self.sys = sys
self.shutil = shutil
self.subprocess = Subprocess(self)
# The internal Briefcase integrations API.
self.integrations = integrations
|
def __init__(self, base_path, home_path=Path.home(), apps=None, input_enabled=True):
self.base_path = base_path
self.home_path = home_path
self.dot_briefcase_path = home_path / ".briefcase"
self.global_config = None
self.apps = {} if apps is None else apps
# Some details about the host machine
self.host_arch = platform.machine()
self.host_os = platform.system()
# External service APIs.
# These are abstracted to enable testing without patching.
self.cookiecutter = cookiecutter
self.requests = requests
self.input = Console(enabled=input_enabled)
self.os = os
self.sys = sys
self.shutil = shutil
self.subprocess = Subprocess(self)
# The internal Briefcase integrations API.
self.integrations = integrations
|
https://github.com/beeware/briefcase/issues/420
|
(beeware-dev) osboxes@osboxes:~/beeware-tutorial/helloworld$ briefcase run
[helloworld] Starting app...
Traceback (most recent call last):
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/__main__.py", line 4, in <module>
main().main_loop()
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/app.py", line 40, in main
return HelloWorld()
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/app.py", line 208, in __init__
self.factory = get_platform_factory(factory)
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/platform.py", line 39, in get_platform_factory
from toga_gtk import factory
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/factory.py", line 1, in <module>
from .app import App, DocumentApp, MainWindow
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/app.py", line 8, in <module>
import gbulb
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/__init__.py", line 1, in <module>
from .glib_events import *
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/glib_events.py", line 12, in <module>
from gi.repository import GLib, Gio
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gi/__init__.py", line 42, in <module>
from . import _gi
ImportError: libffi.so.6: cannot open shared object file: No such file or directory
Unable to start app helloworld.
|
ImportError
|
def __init__(self, *args, **options):
super().__init__(*args, **options)
self._s3 = None
|
def __init__(self, *args, **options):
super().__init__(*args, **options)
self._path_index = {}
self._s3 = None
|
https://github.com/beeware/briefcase/issues/420
|
(beeware-dev) osboxes@osboxes:~/beeware-tutorial/helloworld$ briefcase run
[helloworld] Starting app...
Traceback (most recent call last):
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/__main__.py", line 4, in <module>
main().main_loop()
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/app.py", line 40, in main
return HelloWorld()
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/app.py", line 208, in __init__
self.factory = get_platform_factory(factory)
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/platform.py", line 39, in get_platform_factory
from toga_gtk import factory
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/factory.py", line 1, in <module>
from .app import App, DocumentApp, MainWindow
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/app.py", line 8, in <module>
import gbulb
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/__init__.py", line 1, in <module>
from .glib_events import *
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/glib_events.py", line 12, in <module>
from gi.repository import GLib, Gio
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gi/__init__.py", line 42, in <module>
from . import _gi
ImportError: libffi.so.6: cannot open shared object file: No such file or directory
Unable to start app helloworld.
|
ImportError
|
def verify_tools(self):
super().verify_tools()
self.linuxdeploy_appimage_path = verify_linuxdeploy(self)
|
def verify_tools(self):
super().verify_tools()
try:
print()
print("Ensure we have the linuxdeploy AppImage...")
self.linuxdeploy_appimage = self.download_url(
url=self.linuxdeploy_download_url,
download_path=self.dot_briefcase_path / "tools",
)
self.os.chmod(str(self.linuxdeploy_appimage), 0o755)
except requests_exceptions.ConnectionError:
raise NetworkFailure("downloading linuxdeploy AppImage")
|
https://github.com/beeware/briefcase/issues/420
|
(beeware-dev) osboxes@osboxes:~/beeware-tutorial/helloworld$ briefcase run
[helloworld] Starting app...
Traceback (most recent call last):
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/__main__.py", line 4, in <module>
main().main_loop()
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/app.py", line 40, in main
return HelloWorld()
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/app.py", line 208, in __init__
self.factory = get_platform_factory(factory)
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/platform.py", line 39, in get_platform_factory
from toga_gtk import factory
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/factory.py", line 1, in <module>
from .app import App, DocumentApp, MainWindow
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/app.py", line 8, in <module>
import gbulb
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/__init__.py", line 1, in <module>
from .glib_events import *
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/glib_events.py", line 12, in <module>
from gi.repository import GLib, Gio
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gi/__init__.py", line 42, in <module>
from . import _gi
ImportError: libffi.so.6: cannot open shared object file: No such file or directory
Unable to start app helloworld.
|
ImportError
|
def build_app(self, app: BaseConfig, **kwargs):
"""
Build an application.
:param app: The application to build
"""
print()
print("[{app.app_name}] Building AppImage...".format(app=app))
try:
print()
# Build the AppImage.
# For some reason, the version has to be passed in as an
# environment variable, *not* in the configuration...
env = {"VERSION": app.version}
# Find all the .so files in app and app_packages,
# so they can be passed in to linuxdeploy to have their
# dependencies added to the AppImage. Looks for any .so file
# in the application, and make sure it is marked for deployment.
so_folders = set()
for so_file in self.appdir_path(app).glob("**/*.so"):
so_folders.add(so_file.parent)
deploy_deps_args = []
for folder in sorted(so_folders):
deploy_deps_args.extend(["--deploy-deps-only", str(folder)])
# Build the app image. We use `--appimage-extract-and-run`
# because AppImages won't run natively inside Docker.
with self.dockerize(app) as docker:
docker.run(
[
str(self.linuxdeploy_appimage_path),
"--appimage-extract-and-run",
"--appdir={appdir_path}".format(appdir_path=self.appdir_path(app)),
"-d",
str(
self.appdir_path(app)
/ "{app.bundle}.{app.app_name}.desktop".format(
app=app,
)
),
"-o",
"appimage",
]
+ deploy_deps_args,
env=env,
check=True,
cwd=str(self.platform_path),
)
# Make the binary executable.
self.os.chmod(str(self.binary_path(app)), 0o755)
except subprocess.CalledProcessError:
print()
raise BriefcaseCommandError(
"Error while building app {app.app_name}.".format(app=app)
)
|
def build_app(self, app: BaseConfig, **kwargs):
"""
Build an application.
:param app: The application to build
"""
print()
print("[{app.app_name}] Building AppImage...".format(app=app))
try:
print()
# Build the AppImage.
# For some reason, the version has to be passed in as an
# environment variable, *not* in the configuration...
env = {"VERSION": app.version}
appdir_path = self.bundle_path(app) / "{app.formal_name}.AppDir".format(app=app)
with self.dockerize(app) as docker:
docker.run(
[
str(self.linuxdeploy_appimage),
"--appimage-extract-and-run",
"--appdir={appdir_path}".format(appdir_path=appdir_path),
"-d",
str(
appdir_path
/ "{app.bundle}.{app.app_name}.desktop".format(
app=app,
)
),
"-o",
"appimage",
],
env=env,
check=True,
cwd=str(self.platform_path),
)
# Make the binary executable.
self.os.chmod(str(self.binary_path(app)), 0o755)
except subprocess.CalledProcessError:
print()
raise BriefcaseCommandError(
"Error while building app {app.app_name}.".format(app=app)
)
|
https://github.com/beeware/briefcase/issues/420
|
(beeware-dev) osboxes@osboxes:~/beeware-tutorial/helloworld$ briefcase run
[helloworld] Starting app...
Traceback (most recent call last):
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/tmp/.mount_Hello_63m7qR/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/__main__.py", line 4, in <module>
main().main_loop()
File "/tmp/.mount_Hello_63m7qR/usr/app/helloworld/app.py", line 40, in main
return HelloWorld()
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/app.py", line 208, in __init__
self.factory = get_platform_factory(factory)
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga/platform.py", line 39, in get_platform_factory
from toga_gtk import factory
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/factory.py", line 1, in <module>
from .app import App, DocumentApp, MainWindow
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/toga_gtk/app.py", line 8, in <module>
import gbulb
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/__init__.py", line 1, in <module>
from .glib_events import *
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gbulb/glib_events.py", line 12, in <module>
from gi.repository import GLib, Gio
File "/tmp/.mount_Hello_63m7qR/usr/app_packages/gi/__init__.py", line 42, in <module>
from . import _gi
ImportError: libffi.so.6: cannot open shared object file: No such file or directory
Unable to start app helloworld.
|
ImportError
|
def fuentes(
poa_global,
temp_air,
wind_speed,
noct_installed,
module_height=5,
wind_height=9.144,
emissivity=0.84,
absorption=0.83,
surface_tilt=30,
module_width=0.31579,
module_length=1.2,
):
"""
Calculate cell or module temperature using the Fuentes model.
The Fuentes model is a first-principles heat transfer energy balance
model [1]_ that is used in PVWatts for cell temperature modeling [2]_.
Parameters
----------
poa_global : pandas Series
Total incident irradiance [W/m^2]
temp_air : pandas Series
Ambient dry bulb temperature [C]
wind_speed : pandas Series
Wind speed [m/s]
noct_installed : float
The "installed" nominal operating cell temperature as defined in [1]_.
PVWatts assumes this value to be 45 C for rack-mounted arrays and
49 C for roof mount systems with restricted air flow around the
module. [C]
module_height : float, default 5.0
The height above ground of the center of the module. The PVWatts
default is 5.0 [m]
wind_height : float, default 9.144
The height above ground at which ``wind_speed`` is measured. The
PVWatts defauls is 9.144 [m]
emissivity : float, default 0.84
The effectiveness of the module at radiating thermal energy. [unitless]
absorption : float, default 0.83
The fraction of incident irradiance that is converted to thermal
energy in the module. [unitless]
surface_tilt : float, default 30
Module tilt from horizontal. If not provided, the default value
of 30 degrees from [1]_ and [2]_ is used. [degrees]
module_width : float, default 0.31579
Module width. The default value of 0.31579 meters in combination with
the default `module_length` gives a hydraulic diameter of 0.5 as
assumed in [1]_ and [2]_. [m]
module_length : float, default 1.2
Module length. The default value of 1.2 meters in combination with
the default `module_width` gives a hydraulic diameter of 0.5 as
assumed in [1]_ and [2]_. [m]
Returns
-------
temperature_cell : pandas Series
The modeled cell temperature [C]
Notes
-----
This function returns slightly different values from PVWatts at night
and just after dawn. This is because the SAM SSC assumes that module
temperature equals ambient temperature when irradiance is zero so it can
skip the heat balance calculation at night.
References
----------
.. [1] Fuentes, M. K., 1987, "A Simplifed Thermal Model for Flat-Plate
Photovoltaic Arrays", SAND85-0330, Sandia National Laboratories,
Albuquerque NM.
http://prod.sandia.gov/techlib/access-control.cgi/1985/850330.pdf
.. [2] Dobos, A. P., 2014, "PVWatts Version 5 Manual", NREL/TP-6A20-62641,
National Renewable Energy Laboratory, Golden CO.
doi:10.2172/1158421.
"""
# ported from the FORTRAN77 code provided in Appendix A of Fuentes 1987;
# nearly all variable names are kept the same for ease of comparison.
boltz = 5.669e-8
emiss = emissivity
absorp = absorption
xlen = _hydraulic_diameter(module_width, module_length)
# cap0 has units of [J / (m^2 K)], equal to mass per unit area times
# specific heat of the module.
cap0 = 11000
tinoct = noct_installed + 273.15
# convective coefficient of top surface of module at NOCT
windmod = 1.0
tave = (tinoct + 293.15) / 2
hconv = _fuentes_hconv(
tave, windmod, tinoct, tinoct - 293.15, xlen, surface_tilt, False
)
# determine the ground temperature ratio and the ratio of the total
# convection to the top side convection
hground = emiss * boltz * (tinoct**2 + 293.15**2) * (tinoct + 293.15)
backrat = (
absorp * 800.0
- emiss * boltz * (tinoct**4 - 282.21**4)
- hconv * (tinoct - 293.15)
) / ((hground + hconv) * (tinoct - 293.15))
tground = (tinoct**4 - backrat * (tinoct**4 - 293.15**4)) ** 0.25
tground = np.clip(tground, 293.15, tinoct)
tgrat = (tground - 293.15) / (tinoct - 293.15)
convrat = (
absorp * 800 - emiss * boltz * (2 * tinoct**4 - 282.21**4 - tground**4)
) / (hconv * (tinoct - 293.15))
# adjust the capacitance (thermal mass) of the module based on the INOCT.
# It is a function of INOCT because high INOCT implies thermal coupling
# with the racking (e.g. roofmount), so the thermal mass is increased.
# `cap` has units J/(m^2 C) -- see Table 3, Equations 26 & 27
cap = cap0
if tinoct > 321.15:
cap = cap * (1 + (tinoct - 321.15) / 12)
# iterate through timeseries inputs
sun0 = 0
tmod0 = 293.15
# n.b. the way Fuentes calculates the first timedelta makes it seem like
# the value doesn't matter -- rather than recreate it here, just assume
# it's the same as the second timedelta:
timedelta_seconds = poa_global.index.to_series().diff().dt.total_seconds()
timedelta_hours = timedelta_seconds / 3600
timedelta_hours.iloc[0] = timedelta_hours.iloc[1]
tamb_array = temp_air + 273.15
sun_array = poa_global * absorp
# Two of the calculations are easily vectorized, so precalculate them:
# sky temperature -- Equation 24
tsky_array = 0.68 * (0.0552 * tamb_array**1.5) + 0.32 * tamb_array
# wind speed at module height -- Equation 22
# not sure why the 1e-4 factor is included -- maybe the equations don't
# behave well if wind == 0?
windmod_array = wind_speed * (module_height / wind_height) ** 0.2 + 1e-4
tmod0 = 293.15
tmod_array = np.zeros_like(poa_global)
iterator = zip(tamb_array, sun_array, windmod_array, tsky_array, timedelta_hours)
for i, (tamb, sun, windmod, tsky, dtime) in enumerate(iterator):
# solve the heat transfer equation, iterating because the heat loss
# terms depend on tmod. NB Fuentes doesn't show that 10 iterations is
# sufficient for convergence.
tmod = tmod0
for j in range(10):
# overall convective coefficient
tave = (tmod + tamb) / 2
hconv = convrat * _fuentes_hconv(
tave, windmod, tinoct, abs(tmod - tamb), xlen, surface_tilt, True
)
# sky radiation coefficient (Equation 3)
hsky = emiss * boltz * (tmod**2 + tsky**2) * (tmod + tsky)
# ground radiation coeffieicient (Equation 4)
tground = tamb + tgrat * (tmod - tamb)
hground = emiss * boltz * (tmod**2 + tground**2) * (tmod + tground)
# thermal lag -- Equation 8
eigen = -(hconv + hsky + hground) / cap * dtime * 3600
# not sure why this check is done, maybe as a speed optimization?
if eigen > -10:
ex = np.exp(eigen)
else:
ex = 0
# Equation 7 -- note that `sun` and `sun0` already account for
# absorption (alpha)
tmod = tmod0 * ex + (
(1 - ex)
* (
hconv * tamb
+ hsky * tsky
+ hground * tground
+ sun0
+ (sun - sun0) / eigen
)
+ sun
- sun0
) / (hconv + hsky + hground)
tmod_array[i] = tmod
tmod0 = tmod
sun0 = sun
return pd.Series(tmod_array - 273.15, index=poa_global.index, name="tmod")
|
def fuentes(
poa_global,
temp_air,
wind_speed,
noct_installed,
module_height=5,
wind_height=9.144,
emissivity=0.84,
absorption=0.83,
surface_tilt=30,
module_width=0.31579,
module_length=1.2,
):
"""
Calculate cell or module temperature using the Fuentes model.
The Fuentes model is a first-principles heat transfer energy balance
model [1]_ that is used in PVWatts for cell temperature modeling [2]_.
Parameters
----------
poa_global : pandas Series
Total incident irradiance [W/m^2]
temp_air : pandas Series
Ambient dry bulb temperature [C]
wind_speed : pandas Series
Wind speed [m/s]
noct_installed : float
The "installed" nominal operating cell temperature as defined in [1]_.
PVWatts assumes this value to be 45 C for rack-mounted arrays and
49 C for roof mount systems with restricted air flow around the
module. [C]
module_height : float, default 5.0
The height above ground of the center of the module. The PVWatts
default is 5.0 [m]
wind_height : float, default 9.144
The height above ground at which ``wind_speed`` is measured. The
PVWatts defauls is 9.144 [m]
emissivity : float, default 0.84
The effectiveness of the module at radiating thermal energy. [unitless]
absorption : float, default 0.83
The fraction of incident irradiance that is converted to thermal
energy in the module. [unitless]
surface_tilt : float, default 30
Module tilt from horizontal. If not provided, the default value
of 30 degrees from [1]_ and [2]_ is used. [degrees]
module_width : float, default 0.31579
Module width. The default value of 0.31579 meters in combination with
the default `module_length` gives a hydraulic diameter of 0.5 as
assumed in [1]_ and [2]_. [m]
module_length : float, default 1.2
Module length. The default value of 1.2 meters in combination with
the default `module_width` gives a hydraulic diameter of 0.5 as
assumed in [1]_ and [2]_. [m]
Returns
-------
temperature_cell : pandas Series
The modeled cell temperature [C]
Notes
-----
This function returns slightly different values from PVWatts at night
and just after dawn. This is because the SAM SSC assumes that module
temperature equals ambient temperature when irradiance is zero so it can
skip the heat balance calculation at night.
References
----------
.. [1] Fuentes, M. K., 1987, "A Simplifed Thermal Model for Flat-Plate
Photovoltaic Arrays", SAND85-0330, Sandia National Laboratories,
Albuquerque NM.
http://prod.sandia.gov/techlib/access-control.cgi/1985/850330.pdf
.. [2] Dobos, A. P., 2014, "PVWatts Version 5 Manual", NREL/TP-6A20-62641,
National Renewable Energy Laboratory, Golden CO.
doi:10.2172/1158421.
"""
# ported from the FORTRAN77 code provided in Appendix A of Fuentes 1987;
# nearly all variable names are kept the same for ease of comparison.
boltz = 5.669e-8
emiss = emissivity
absorp = absorption
xlen = _hydraulic_diameter(module_width, module_length)
# cap0 has units of [J / (m^2 K)], equal to mass per unit area times
# specific heat of the module.
cap0 = 11000
tinoct = noct_installed + 273.15
# convective coefficient of top surface of module at NOCT
windmod = 1.0
tave = (tinoct + 293.15) / 2
hconv = _fuentes_hconv(
tave, windmod, tinoct, tinoct - 293.15, xlen, surface_tilt, False
)
# determine the ground temperature ratio and the ratio of the total
# convection to the top side convection
hground = emiss * boltz * (tinoct**2 + 293.15**2) * (tinoct + 293.15)
backrat = (
absorp * 800.0
- emiss * boltz * (tinoct**4 - 282.21**4)
- hconv * (tinoct - 293.15)
) / ((hground + hconv) * (tinoct - 293.15))
tground = (tinoct**4 - backrat * (tinoct**4 - 293.15**4)) ** 0.25
tground = np.clip(tground, 293.15, tinoct)
tgrat = (tground - 293.15) / (tinoct - 293.15)
convrat = (
absorp * 800 - emiss * boltz * (2 * tinoct**4 - 282.21**4 - tground**4)
) / (hconv * (tinoct - 293.15))
# adjust the capacitance (thermal mass) of the module based on the INOCT.
# It is a function of INOCT because high INOCT implies thermal coupling
# with the racking (e.g. roofmount), so the thermal mass is increased.
# `cap` has units J/(m^2 C) -- see Table 3, Equations 26 & 27
cap = cap0
if tinoct > 321.15:
cap = cap * (1 + (tinoct - 321.15) / 12)
# iterate through timeseries inputs
sun0 = 0
tmod0 = 293.15
# n.b. the way Fuentes calculates the first timedelta makes it seem like
# the value doesn't matter -- rather than recreate it here, just assume
# it's the same as the second timedelta:
timedelta_hours = np.diff(poa_global.index).astype(float) / 1e9 / 60 / 60
timedelta_hours = np.append([timedelta_hours[0]], timedelta_hours)
tamb_array = temp_air + 273.15
sun_array = poa_global * absorp
# Two of the calculations are easily vectorized, so precalculate them:
# sky temperature -- Equation 24
tsky_array = 0.68 * (0.0552 * tamb_array**1.5) + 0.32 * tamb_array
# wind speed at module height -- Equation 22
# not sure why the 1e-4 factor is included -- maybe the equations don't
# behave well if wind == 0?
windmod_array = wind_speed * (module_height / wind_height) ** 0.2 + 1e-4
tmod0 = 293.15
tmod_array = np.zeros_like(poa_global)
iterator = zip(tamb_array, sun_array, windmod_array, tsky_array, timedelta_hours)
for i, (tamb, sun, windmod, tsky, dtime) in enumerate(iterator):
# solve the heat transfer equation, iterating because the heat loss
# terms depend on tmod. NB Fuentes doesn't show that 10 iterations is
# sufficient for convergence.
tmod = tmod0
for j in range(10):
# overall convective coefficient
tave = (tmod + tamb) / 2
hconv = convrat * _fuentes_hconv(
tave, windmod, tinoct, abs(tmod - tamb), xlen, surface_tilt, True
)
# sky radiation coefficient (Equation 3)
hsky = emiss * boltz * (tmod**2 + tsky**2) * (tmod + tsky)
# ground radiation coeffieicient (Equation 4)
tground = tamb + tgrat * (tmod - tamb)
hground = emiss * boltz * (tmod**2 + tground**2) * (tmod + tground)
# thermal lag -- Equation 8
eigen = -(hconv + hsky + hground) / cap * dtime * 3600
# not sure why this check is done, maybe as a speed optimization?
if eigen > -10:
ex = np.exp(eigen)
else:
ex = 0
# Equation 7 -- note that `sun` and `sun0` already account for
# absorption (alpha)
tmod = tmod0 * ex + (
(1 - ex)
* (
hconv * tamb
+ hsky * tsky
+ hground * tground
+ sun0
+ (sun - sun0) / eigen
)
+ sun
- sun0
) / (hconv + hsky + hground)
tmod_array[i] = tmod
tmod0 = tmod
sun0 = sun
return pd.Series(tmod_array - 273.15, index=poa_global.index, name="tmod")
|
https://github.com/pvlib/pvlib-python/issues/1071
|
In [1]: import pvlib
...: import pandas as pd
...:
...: index_naive = pd.date_range('2019-01-01', freq='h', periods=3)
...:
...: kwargs = {
...: 'poa_global': pd.Series(1000, index_naive),
...: 'temp_air': pd.Series(20, index_naive),
...: 'wind_speed': pd.Series(1, index_naive),
...: 'noct_installed': 45
...: }
...:
In [2]: print(pvlib.temperature.fuentes(**kwargs))
2019-01-01 00:00:00 47.85
2019-01-01 01:00:00 50.85
2019-01-01 02:00:00 50.85
Freq: H, Name: tmod, dtype: float64
In [3]: kwargs['poa_global'].index = index_naive.tz_localize('UTC')
...: print(pvlib.temperature.fuentes(**kwargs))
...:
Traceback (most recent call last):
File "<ipython-input-3-ff99badadc91>", line 2, in <module>
print(pvlib.temperature.fuentes(**kwargs))
File "/home/kevin/anaconda3/lib/python3.7/site-packages/pvlib/temperature.py", line 602, in fuentes
timedelta_hours = np.diff(poa_global.index).astype(float) / 1e9 / 60 / 60
TypeError: float() argument must be a string or a number, not 'Timedelta'
|
TypeError
|
def temperature_model(self, model):
if model is None:
self._temperature_model = self.infer_temperature_model()
elif isinstance(model, str):
model = model.lower()
if model == "sapm":
self._temperature_model = self.sapm_temp
elif model == "pvsyst":
self._temperature_model = self.pvsyst_temp
elif model == "faiman":
self._temperature_model = self.faiman_temp
elif model == "fuentes":
self._temperature_model = self.fuentes_temp
else:
raise ValueError(model + " is not a valid temperature model")
# check system.temperature_model_parameters for consistency
name_from_params = self.infer_temperature_model().__name__
if self._temperature_model.__name__ != name_from_params:
raise ValueError(
"Temperature model {} is inconsistent with "
"PVsystem.temperature_model_parameters {}".format(
self._temperature_model.__name__,
self.system.temperature_model_parameters,
)
)
else:
self._temperature_model = partial(model, self)
|
def temperature_model(self, model):
if model is None:
self._temperature_model = self.infer_temperature_model()
elif isinstance(model, str):
model = model.lower()
if model == "sapm":
self._temperature_model = self.sapm_temp
elif model == "pvsyst":
self._temperature_model = self.pvsyst_temp
elif model == "faiman":
self._temperature_model = self.faiman_temp
else:
raise ValueError(model + " is not a valid temperature model")
# check system.temperature_model_parameters for consistency
name_from_params = self.infer_temperature_model().__name__
if self._temperature_model.__name__ != name_from_params:
raise ValueError(
"Temperature model {} is inconsistent with "
"PVsystem.temperature_model_parameters {}".format(
self._temperature_model.__name__,
self.system.temperature_model_parameters,
)
)
else:
self._temperature_model = partial(model, self)
|
https://github.com/pvlib/pvlib-python/issues/1071
|
In [1]: import pvlib
...: import pandas as pd
...:
...: index_naive = pd.date_range('2019-01-01', freq='h', periods=3)
...:
...: kwargs = {
...: 'poa_global': pd.Series(1000, index_naive),
...: 'temp_air': pd.Series(20, index_naive),
...: 'wind_speed': pd.Series(1, index_naive),
...: 'noct_installed': 45
...: }
...:
In [2]: print(pvlib.temperature.fuentes(**kwargs))
2019-01-01 00:00:00 47.85
2019-01-01 01:00:00 50.85
2019-01-01 02:00:00 50.85
Freq: H, Name: tmod, dtype: float64
In [3]: kwargs['poa_global'].index = index_naive.tz_localize('UTC')
...: print(pvlib.temperature.fuentes(**kwargs))
...:
Traceback (most recent call last):
File "<ipython-input-3-ff99badadc91>", line 2, in <module>
print(pvlib.temperature.fuentes(**kwargs))
File "/home/kevin/anaconda3/lib/python3.7/site-packages/pvlib/temperature.py", line 602, in fuentes
timedelta_hours = np.diff(poa_global.index).astype(float) / 1e9 / 60 / 60
TypeError: float() argument must be a string or a number, not 'Timedelta'
|
TypeError
|
def infer_temperature_model(self):
"""Infer temperature model from system attributes."""
params = set(self.system.temperature_model_parameters.keys())
# remove or statement in v0.9
if {"a", "b", "deltaT"} <= params or (
not params
and self.system.racking_model is None
and self.system.module_type is None
):
return self.sapm_temp
elif {"u_c", "u_v"} <= params:
return self.pvsyst_temp
elif {"u0", "u1"} <= params:
return self.faiman_temp
elif {"noct_installed"} <= params:
return self.fuentes_temp
else:
raise ValueError(
"could not infer temperature model from "
"system.temperature_module_parameters {}.".format(
self.system.temperature_model_parameters
)
)
|
def infer_temperature_model(self):
"""Infer temperature model from system attributes."""
params = set(self.system.temperature_model_parameters.keys())
# remove or statement in v0.9
if {"a", "b", "deltaT"} <= params or (
not params
and self.system.racking_model is None
and self.system.module_type is None
):
return self.sapm_temp
elif {"u_c", "u_v"} <= params:
return self.pvsyst_temp
elif {"u0", "u1"} <= params:
return self.faiman_temp
else:
raise ValueError(
"could not infer temperature model from "
"system.temperature_module_parameters {}.".format(
self.system.temperature_model_parameters
)
)
|
https://github.com/pvlib/pvlib-python/issues/1071
|
In [1]: import pvlib
...: import pandas as pd
...:
...: index_naive = pd.date_range('2019-01-01', freq='h', periods=3)
...:
...: kwargs = {
...: 'poa_global': pd.Series(1000, index_naive),
...: 'temp_air': pd.Series(20, index_naive),
...: 'wind_speed': pd.Series(1, index_naive),
...: 'noct_installed': 45
...: }
...:
In [2]: print(pvlib.temperature.fuentes(**kwargs))
2019-01-01 00:00:00 47.85
2019-01-01 01:00:00 50.85
2019-01-01 02:00:00 50.85
Freq: H, Name: tmod, dtype: float64
In [3]: kwargs['poa_global'].index = index_naive.tz_localize('UTC')
...: print(pvlib.temperature.fuentes(**kwargs))
...:
Traceback (most recent call last):
File "<ipython-input-3-ff99badadc91>", line 2, in <module>
print(pvlib.temperature.fuentes(**kwargs))
File "/home/kevin/anaconda3/lib/python3.7/site-packages/pvlib/temperature.py", line 602, in fuentes
timedelta_hours = np.diff(poa_global.index).astype(float) / 1e9 / 60 / 60
TypeError: float() argument must be a string or a number, not 'Timedelta'
|
TypeError
|
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
"""
Read a TMY3 file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepencies between this documentation and the TMY3 User's Manual
[1]_, the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the index will be set to `coerce_year`, except
for the last index value which will be set to the *next* year so that
the index increases monotonically.
recolumn : bool, default True
If ``True``, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitudeitude
longitude Float site longitudeitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1]_, Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2
TMYData.DNISource See [1]_, Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1]_, Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1]_, Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1]_, Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1]_, Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1]_, Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1]_ section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky
TMYData.TotCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUncertainty See [1]_, Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1]_, Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1]_, Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1]_, Table 1-6
TMYData.RHum Relatitudeive humidity at the time indicated, percent
TMYData.RHumSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1]_, Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1]_, Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1]_, Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1]_, Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1]_, Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1]_, Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1]_, Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1]_, Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1]_, Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1]_, Table 1-6
TMYData.PresWth Present weather code, see [2]_.
TMYData.PresWthSource Present weather code source, see [2]_.
TMYData.PresWthUncertainty Present weather code uncertainty, see [2]_.
============================= ======================================================================================================================================================
.. warning:: TMY3 irradiance data corresponds to the *previous* hour, so
the first index is 1AM, corresponding to the irradiance from midnight
to 1AM, and the last index is midnight of the *next* year. For example,
if the last index in the TMY3 file was 1988-12-31 24:00:00 this becomes
1989-01-01 00:00:00 after calling :func:`~pvlib.iotools.read_tmy3`.
.. warning:: When coercing the year, the last index in the dataframe will
become midnight of the *next* year. For example, if the last index in
the TMY3 was 1988-12-31 24:00:00, and year is coerced to 1990 then this
becomes 1991-01-01 00:00:00.
References
----------
.. [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
.. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
"""
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError(
"Interactive load failed. tkinter not supported "
"on this system. Try installing X-Quartz and "
"reloading"
)
head = ["USAF", "Name", "State", "TZ", "latitude", "longitude", "altitude"]
if str(filename).startswith("http"):
request = Request(
filename,
headers={
"User-Agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 "
"Safari/537.36"
)
},
)
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors="ignore"))
else:
# assume it's accessible via the file system
csvdata = open(str(filename), "r")
# read in file metadata, advance buffer to second line
firstline = csvdata.readline()
if "Request Rejected" in firstline:
raise IOError("Remote server rejected TMY file request")
meta = dict(zip(head, firstline.rstrip("\n").split(",")))
# convert metadata strings to numeric types
meta["altitude"] = float(meta["altitude"])
meta["latitude"] = float(meta["latitude"])
meta["longitude"] = float(meta["longitude"])
meta["TZ"] = float(meta["TZ"])
meta["USAF"] = int(meta["USAF"])
# use pandas to read the csv file/stringio buffer
# header is actually the second line in file, but tell pandas to look for
# header information on the 1st line (0 indexing) because we've already
# advanced past the true first line with the readline call above.
data = pd.read_csv(csvdata, header=0)
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data["Date (MM/DD/YYYY)"], format="%m/%d/%Y")
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data["Time (HH:MM)"].str[:2].astype(int) % 24
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
if coerce_year is not None:
data_ymd = data_ymd.map(lambda dt: dt.replace(year=coerce_year))
data_ymd.iloc[-1] = data_ymd.iloc[-1].replace(year=coerce_year + 1)
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = data_ymd + pd.to_timedelta(shifted_hour, unit="h")
if recolumn:
data = _recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta["TZ"] * 3600))
return data, meta
|
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
"""
Read a TMY3 file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepencies between this documentation and the TMY3 User's Manual
[1]_, the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitudeitude
longitude Float site longitudeitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1]_, Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2
TMYData.DNISource See [1]_, Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1]_, Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1]_, Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1]_, Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1]_, Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1]_, Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1]_ section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky
TMYData.TotCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUncertainty See [1]_, Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1]_, Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1]_, Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1]_, Table 1-6
TMYData.RHum Relatitudeive humidity at the time indicated, percent
TMYData.RHumSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1]_, Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1]_, Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1]_, Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1]_, Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1]_, Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1]_, Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1]_, Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1]_, Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1]_, Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1]_, Table 1-6
TMYData.PresWth Present weather code, see [2]_.
TMYData.PresWthSource Present weather code source, see [2]_.
TMYData.PresWthUncertainty Present weather code uncertainty, see [2]_.
============================= ======================================================================================================================================================
.. warning:: TMY3 irradiance data corresponds to the previous hour, so the
first hour is 1AM, corresponding to the net irradiance from midnite to
1AM, and the last hour is midnite of the *next* year, unless the year
has been coerced. EG: if TMY3 was 1988-12-31 24:00:00 this becomes
1989-01-01 00:00:00
.. warning:: When coercing the year, the last index in the dataframe will
be the first hour of the same year, EG: if TMY3 was 1988-12-31 24:00:00
and year is coerced to 1990 this becomes 1990-01-01
References
----------
.. [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
.. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
"""
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError(
"Interactive load failed. tkinter not supported "
"on this system. Try installing X-Quartz and "
"reloading"
)
head = ["USAF", "Name", "State", "TZ", "latitude", "longitude", "altitude"]
if str(filename).startswith("http"):
request = Request(
filename,
headers={
"User-Agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 "
"Safari/537.36"
)
},
)
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors="ignore"))
else:
# assume it's accessible via the file system
csvdata = open(str(filename), "r")
# read in file metadata, advance buffer to second line
firstline = csvdata.readline()
if "Request Rejected" in firstline:
raise IOError("Remote server rejected TMY file request")
meta = dict(zip(head, firstline.rstrip("\n").split(",")))
# convert metadata strings to numeric types
meta["altitude"] = float(meta["altitude"])
meta["latitude"] = float(meta["latitude"])
meta["longitude"] = float(meta["longitude"])
meta["TZ"] = float(meta["TZ"])
meta["USAF"] = int(meta["USAF"])
# use pandas to read the csv file/stringio buffer
# header is actually the second line in file, but tell pandas to look for
# header information on the 1st line (0 indexing) because we've already
# advanced past the true first line with the readline call above.
data = pd.read_csv(csvdata, header=0)
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data["Date (MM/DD/YYYY)"], format="%m/%d/%Y")
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data["Time (HH:MM)"].str[:2].astype(int) % 24
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = data_ymd + pd.to_timedelta(shifted_hour, unit="h")
if coerce_year is not None:
data.index = data.index.map(lambda dt: dt.replace(year=coerce_year))
if recolumn:
data = _recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta["TZ"] * 3600))
return data, meta
|
https://github.com/pvlib/pvlib-python/issues/889
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-95-ff36a88e4e3a> in <module>
----> 1 soiling_hsu(greensboro_rain, cleaning_threshold=6, tilt=10, pm2_5=1e-5, pm10=1e-5)
~\Projects\pvlib-python\pvlib\losses.py in soiling_hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10, depo_veloc, rain_accum_period)
67
68 # accumulate rainfall into periods for comparison with threshold
---> 69 accum_rain = rainfall.rolling(rain_accum_period, closed='right').sum()
70 # cleaning is True for intervals with rainfall greater than threshold
71 cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\generic.py in rolling(self, window, min_periods, center, win_type, on, axis, closed)
10145 min_periods=min_periods,
10146 center=center, win_type=win_type,
10147 on=on, axis=axis, closed=closed)
10148
10149 cls.rolling = rolling
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in rolling(obj, win_type, **kwds)
2624 return Window(obj, win_type=win_type, **kwds)
2625
-> 2626 return Rolling(obj, **kwds)
2627
2628
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in __init__(self, obj, window, min_periods, center, win_type, axis, on, closed, **kwargs)
63 self.win_freq = None
64 self.axis = obj._get_axis_number(axis) if axis is not None else None
---> 65 self.validate()
66
67 @property
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in validate(self)
1576 timedelta))):
1577
-> 1578 self._validate_monotonic()
1579 freq = self._validate_freq()
1580
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in _validate_monotonic(self)
1610 formatted = self.on or 'index'
1611 raise ValueError("{0} must be "
-> 1612 "monotonic".format(formatted))
1613
1614 def _validate_freq(self):
|
ValueError
|
def soiling_hsu(
rainfall,
cleaning_threshold,
tilt,
pm2_5,
pm10,
depo_veloc=None,
rain_accum_period=pd.Timedelta("1h"),
):
"""
Calculates soiling ratio given particulate and rain data using the model
from Humboldt State University (HSU).
The HSU soiling model [1]_ returns the soiling ratio, a value between zero
and one which is equivalent to (1 - transmission loss). Therefore a soiling
ratio of 1.0 is equivalent to zero transmission loss.
Parameters
----------
rainfall : Series
Rain accumulated in each time period. [mm]
cleaning_threshold : float
Amount of rain in an accumulation period needed to clean the PV
modules. [mm]
tilt : float
Tilt of the PV panels from horizontal. [degree]
pm2_5 : numeric
Concentration of airborne particulate matter (PM) with
aerodynamic diameter less than 2.5 microns. [g/m^3]
pm10 : numeric
Concentration of airborne particulate matter (PM) with
aerodynamicdiameter less than 10 microns. [g/m^3]
depo_veloc : dict, default {'2_5': 0.4, '10': 0.09}
Deposition or settling velocity of particulates. [m/s]
rain_accum_period : Timedelta, default 1 hour
Period for accumulating rainfall to check against `cleaning_threshold`
It is recommended that `rain_accum_period` be between 1 hour and
24 hours.
Returns
-------
soiling_ratio : Series
Values between 0 and 1. Equal to 1 - transmission loss.
References
-----------
.. [1] M. Coello and L. Boyle, "Simple Model For Predicting Time Series
Soiling of Photovoltaic Panels," in IEEE Journal of Photovoltaics.
doi: 10.1109/JPHOTOV.2019.2919628
.. [2] Atmospheric Chemistry and Physics: From Air Pollution to Climate
Change. J. Seinfeld and S. Pandis. Wiley and Sons 2001.
"""
try:
from scipy.special import erf
except ImportError:
raise ImportError("The soiling_hsu function requires scipy.")
# never use mutable input arguments
if depo_veloc is None:
depo_veloc = {"2_5": 0.004, "10": 0.0009}
# accumulate rainfall into periods for comparison with threshold
accum_rain = rainfall.rolling(rain_accum_period, closed="right").sum()
# cleaning is True for intervals with rainfall greater than threshold
cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
horiz_mass_rate = (
pm2_5 * depo_veloc["2_5"] + np.maximum(pm10 - pm2_5, 0.0) * depo_veloc["10"]
)
tilted_mass_rate = horiz_mass_rate * cosd(tilt) # assuming no rain
# tms -> tilt_mass_rate
tms_cumsum = np.cumsum(tilted_mass_rate * np.ones(rainfall.shape))
mass_no_cleaning = pd.Series(index=rainfall.index, data=tms_cumsum)
# specify dtype so pandas doesn't assume object
mass_removed = pd.Series(index=rainfall.index, dtype="float64")
mass_removed[0] = 0.0
mass_removed[cleaning_times] = mass_no_cleaning[cleaning_times]
accum_mass = mass_no_cleaning - mass_removed.ffill()
soiling_ratio = 1 - 0.3437 * erf(0.17 * accum_mass**0.8473)
return soiling_ratio
|
def soiling_hsu(
rainfall,
cleaning_threshold,
tilt,
pm2_5,
pm10,
depo_veloc={"2_5": 0.004, "10": 0.0009},
rain_accum_period=pd.Timedelta("1h"),
):
"""
Calculates soiling ratio given particulate and rain data using the model
from Humboldt State University [1]_.
Parameters
----------
rainfall : Series
Rain accumulated in each time period. [mm]
cleaning_threshold : float
Amount of rain in an accumulation period needed to clean the PV
modules. [mm]
tilt : float
Tilt of the PV panels from horizontal. [degree]
pm2_5 : numeric
Concentration of airborne particulate matter (PM) with
aerodynamic diameter less than 2.5 microns. [g/m^3]
pm10 : numeric
Concentration of airborne particulate matter (PM) with
aerodynamicdiameter less than 10 microns. [g/m^3]
depo_veloc : dict, default {'2_5': 0.4, '10': 0.09}
Deposition or settling velocity of particulates. [m/s]
rain_accum_period : Timedelta, default 1 hour
Period for accumulating rainfall to check against `cleaning_threshold`
It is recommended that `rain_accum_period` be between 1 hour and
24 hours.
Returns
-------
soiling_ratio : Series
Values between 0 and 1. Equal to 1 - transmission loss.
References
-----------
.. [1] M. Coello and L. Boyle, "Simple Model For Predicting Time Series
Soiling of Photovoltaic Panels," in IEEE Journal of Photovoltaics.
doi: 10.1109/JPHOTOV.2019.2919628
.. [2] Atmospheric Chemistry and Physics: From Air Pollution to Climate
Change. J. Seinfeld and S. Pandis. Wiley and Sons 2001.
"""
try:
from scipy.special import erf
except ImportError:
raise ImportError("The soiling_hsu function requires scipy.")
# accumulate rainfall into periods for comparison with threshold
accum_rain = rainfall.rolling(rain_accum_period, closed="right").sum()
# cleaning is True for intervals with rainfall greater than threshold
cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
horiz_mass_rate = (
pm2_5 * depo_veloc["2_5"] + np.maximum(pm10 - pm2_5, 0.0) * depo_veloc["10"]
)
tilted_mass_rate = horiz_mass_rate * cosd(tilt) # assuming no rain
# tms -> tilt_mass_rate
tms_cumsum = np.cumsum(tilted_mass_rate * np.ones(rainfall.shape))
mass_no_cleaning = pd.Series(index=rainfall.index, data=tms_cumsum)
# specify dtype so pandas doesn't assume object
mass_removed = pd.Series(index=rainfall.index, dtype="float64")
mass_removed[0] = 0.0
mass_removed[cleaning_times] = mass_no_cleaning[cleaning_times]
accum_mass = mass_no_cleaning - mass_removed.ffill()
soiling_ratio = 1 - 0.3437 * erf(0.17 * accum_mass**0.8473)
return soiling_ratio
|
https://github.com/pvlib/pvlib-python/issues/889
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-95-ff36a88e4e3a> in <module>
----> 1 soiling_hsu(greensboro_rain, cleaning_threshold=6, tilt=10, pm2_5=1e-5, pm10=1e-5)
~\Projects\pvlib-python\pvlib\losses.py in soiling_hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10, depo_veloc, rain_accum_period)
67
68 # accumulate rainfall into periods for comparison with threshold
---> 69 accum_rain = rainfall.rolling(rain_accum_period, closed='right').sum()
70 # cleaning is True for intervals with rainfall greater than threshold
71 cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\generic.py in rolling(self, window, min_periods, center, win_type, on, axis, closed)
10145 min_periods=min_periods,
10146 center=center, win_type=win_type,
10147 on=on, axis=axis, closed=closed)
10148
10149 cls.rolling = rolling
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in rolling(obj, win_type, **kwds)
2624 return Window(obj, win_type=win_type, **kwds)
2625
-> 2626 return Rolling(obj, **kwds)
2627
2628
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in __init__(self, obj, window, min_periods, center, win_type, axis, on, closed, **kwargs)
63 self.win_freq = None
64 self.axis = obj._get_axis_number(axis) if axis is not None else None
---> 65 self.validate()
66
67 @property
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in validate(self)
1576 timedelta))):
1577
-> 1578 self._validate_monotonic()
1579 freq = self._validate_freq()
1580
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in _validate_monotonic(self)
1610 formatted = self.on or 'index'
1611 raise ValueError("{0} must be "
-> 1612 "monotonic".format(formatted))
1613
1614 def _validate_freq(self):
|
ValueError
|
def soiling_kimber(
rainfall,
cleaning_threshold=6,
soiling_loss_rate=0.0015,
grace_period=14,
max_soiling=0.3,
manual_wash_dates=None,
initial_soiling=0,
rain_accum_period=24,
):
"""
Calculates fraction of energy lost due to soiling given rainfall data and
daily loss rate using the Kimber model.
Kimber soiling model [1]_ assumes soiling builds up at a daily rate unless
the daily rainfall is greater than a threshold. The model also assumes that
if daily rainfall has exceeded the threshold within a grace period, then
the ground is too damp to cause soiling build-up. The model also assumes
there is a maximum soiling build-up. Scheduled manual washes and rain
events are assumed to reset soiling to zero.
Parameters
----------
rainfall: pandas.Series
Accumulated rainfall at the end of each time period. [mm]
cleaning_threshold: float, default 6
Amount of daily rainfall required to clean the panels. [mm]
soiling_loss_rate: float, default 0.0015
Fraction of energy lost due to one day of soiling. [unitless]
grace_period : int, default 14
Number of days after a rainfall event when it's assumed the ground is
damp, and so it's assumed there is no soiling. [days]
max_soiling : float, default 0.3
Maximum fraction of energy lost due to soiling. Soiling will build up
until this value. [unitless]
manual_wash_dates : sequence or None, default None
List or tuple of dates as Python ``datetime.date`` when the panels were
washed manually. Note there is no grace period after a manual wash, so
soiling begins to build up immediately.
initial_soiling : float, default 0
Initial fraction of energy lost due to soiling at time zero in the
`rainfall` series input. [unitless]
rain_accum_period : int, default 24
Period for accumulating rainfall to check against `cleaning_threshold`.
The Kimber model defines this period as one day. [hours]
Returns
-------
pandas.Series
fraction of energy lost due to soiling, has same intervals as input
Notes
-----
The soiling loss rate depends on both the geographical region and the
soiling environment type. Rates measured by Kimber [1]_ are summarized in
the following table:
=================== ======= ========= ======================
Region/Environment Rural Suburban Urban/Highway/Airport
=================== ======= ========= ======================
Central Valley 0.0011 0.0019 0.0020
Northern CA 0.0011 0.0010 0.0016
Southern CA 0 0.0016 0.0019
Desert 0.0030 0.0030 0.0030
=================== ======= ========= ======================
Rainfall thresholds and grace periods may also vary by region. Please
consult [1]_ for more information.
References
----------
.. [1] "The Effect of Soiling on Large Grid-Connected Photovoltaic Systems
in California and the Southwest Region of the United States," Adrianne
Kimber, et al., IEEE 4th World Conference on Photovoltaic Energy
Conference, 2006, :doi:`10.1109/WCPEC.2006.279690`
"""
# convert rain_accum_period to timedelta
rain_accum_period = datetime.timedelta(hours=rain_accum_period)
# convert grace_period to timedelta
grace_period = datetime.timedelta(days=grace_period)
# get indices as numpy datetime64, calculate timestep as numpy timedelta64,
# and convert timestep to fraction of days
rain_index_vals = rainfall.index.values
timestep_interval = rain_index_vals[1] - rain_index_vals[0]
day_fraction = timestep_interval / np.timedelta64(24, "h")
# accumulate rainfall
accumulated_rainfall = rainfall.rolling(rain_accum_period, closed="right").sum()
# soiling rate
soiling = np.ones_like(rainfall.values) * soiling_loss_rate * day_fraction
soiling[0] = initial_soiling
soiling = np.cumsum(soiling)
soiling = pd.Series(soiling, index=rainfall.index, name="soiling")
# rainfall events that clean the panels
rain_events = accumulated_rainfall > cleaning_threshold
# grace periods windows during which ground is assumed damp, so no soiling
grace_windows = rain_events.rolling(grace_period, closed="right").sum() > 0
# clean panels by subtracting soiling for indices in grace period windows
cleaning = pd.Series(float("NaN"), index=rainfall.index)
cleaning.iloc[0] = 0.0
cleaning[grace_windows] = soiling[grace_windows]
# manual wash dates
if manual_wash_dates is not None:
rain_tz = rainfall.index.tz
# convert manual wash dates to datetime index in the timezone of rain
manual_wash_dates = pd.DatetimeIndex(manual_wash_dates, tz=rain_tz)
cleaning[manual_wash_dates] = soiling[manual_wash_dates]
# remove soiling by foward filling cleaning where NaN
soiling -= cleaning.ffill()
# check if soiling has reached the maximum
return soiling.where(soiling < max_soiling, max_soiling)
|
def soiling_kimber(
rainfall,
cleaning_threshold=6,
soiling_loss_rate=0.0015,
grace_period=14,
max_soiling=0.3,
manual_wash_dates=None,
initial_soiling=0,
rain_accum_period=24,
istmy=False,
):
"""
Calculate soiling ratio with rainfall data and a daily soiling rate using
the Kimber soiling model [1]_.
Kimber soiling model assumes soiling builds up at a daily rate unless
the daily rainfall is greater than a threshold. The model also assumes that
if daily rainfall has exceeded the threshold within a grace period, then
the ground is too damp to cause soiling build-up. The model also assumes
there is a maximum soiling build-up. Scheduled manual washes and rain
events are assumed to reset soiling to zero.
Parameters
----------
rainfall: pandas.Series
Accumulated rainfall at the end of each time period. [mm]
cleaning_threshold: float, default 6
Amount of daily rainfall required to clean the panels. [mm]
soiling_loss_rate: float, default 0.0015
Fraction of energy lost due to one day of soiling. [unitless]
grace_period : int, default 14
Number of days after a rainfall event when it's assumed the ground is
damp, and so it's assumed there is no soiling. [days]
max_soiling : float, default 0.3
Maximum fraction of energy lost due to soiling. Soiling will build up
until this value. [unitless]
manual_wash_dates : sequence or None, default None
List or tuple of dates as Python ``datetime.date`` when the panels were
washed manually. Note there is no grace period after a manual wash, so
soiling begins to build up immediately.
initial_soiling : float, default 0
Initial fraction of energy lost due to soiling at time zero in the
`rainfall` series input. [unitless]
rain_accum_period : int, default 24
Period for accumulating rainfall to check against `cleaning_threshold`.
The Kimber model defines this period as one day. [hours]
istmy : bool, default False
Fix last timestep in TMY so that it is monotonically increasing.
Returns
-------
pandas.Series
fraction of energy lost due to soiling, has same intervals as input
Notes
-----
The soiling loss rate depends on both the geographical region and the
soiling environment type. Rates measured by Kimber [1]_ are summarized in
the following table:
=================== ======= ========= ======================
Region/Environment Rural Suburban Urban/Highway/Airport
=================== ======= ========= ======================
Central Valley 0.0011 0.0019 0.0020
Northern CA 0.0011 0.0010 0.0016
Southern CA 0 0.0016 0.0019
Desert 0.0030 0.0030 0.0030
=================== ======= ========= ======================
Rainfall thresholds and grace periods may also vary by region. Please
consult [1]_ for more information.
References
----------
.. [1] "The Effect of Soiling on Large Grid-Connected Photovoltaic Systems
in California and the Southwest Region of the United States," Adrianne
Kimber, et al., IEEE 4th World Conference on Photovoltaic Energy
Conference, 2006, :doi:`10.1109/WCPEC.2006.279690`
"""
# convert rain_accum_period to timedelta
rain_accum_period = datetime.timedelta(hours=rain_accum_period)
# convert grace_period to timedelta
grace_period = datetime.timedelta(days=grace_period)
# get rainfall timezone, timestep as timedelta64, and timestep in int days
rain_tz = rainfall.index.tz
rain_index = rainfall.index.values
timestep_interval = rain_index[1] - rain_index[0]
day_fraction = timestep_interval / np.timedelta64(24, "h")
# if TMY fix to be monotonically increasing by rolling index by 1 interval
# and then adding 1 interval, while the values stay the same
if istmy:
rain_index = np.roll(rain_index, 1) + timestep_interval
# NOTE: numpy datetim64[ns] has no timezone
# convert to datetimeindex at UTC and convert to original timezone
rain_index = pd.DatetimeIndex(rain_index, tz="UTC").tz_convert(rain_tz)
# fixed rainfall timeseries with monotonically increasing index
rainfall = pd.Series(rainfall.values, index=rain_index, name=rainfall.name)
# accumulate rainfall
accumulated_rainfall = rainfall.rolling(rain_accum_period, closed="right").sum()
# soiling rate
soiling = np.ones_like(rainfall.values) * soiling_loss_rate * day_fraction
soiling[0] = initial_soiling
soiling = np.cumsum(soiling)
# rainfall events that clean the panels
rain_events = accumulated_rainfall > cleaning_threshold
# grace periods windows during which ground is assumed damp, so no soiling
grace_windows = rain_events.rolling(grace_period, closed="right").sum() > 0
# clean panels by subtracting soiling for indices in grace period windows
cleaning = pd.Series(float("NaN"), index=rainfall.index)
cleaning.iloc[0] = 0.0
cleaning[grace_windows] = soiling[grace_windows]
# manual wash dates
if manual_wash_dates is not None:
manual_wash_dates = pd.DatetimeIndex(manual_wash_dates, tz=rain_tz)
soiling = pd.Series(soiling, index=rain_index, name="soiling")
cleaning[manual_wash_dates] = soiling[manual_wash_dates]
# remove soiling by foward filling cleaning where NaN
soiling -= cleaning.ffill()
# check if soiling has reached the maximum
return soiling.where(soiling < max_soiling, max_soiling)
|
https://github.com/pvlib/pvlib-python/issues/889
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-95-ff36a88e4e3a> in <module>
----> 1 soiling_hsu(greensboro_rain, cleaning_threshold=6, tilt=10, pm2_5=1e-5, pm10=1e-5)
~\Projects\pvlib-python\pvlib\losses.py in soiling_hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10, depo_veloc, rain_accum_period)
67
68 # accumulate rainfall into periods for comparison with threshold
---> 69 accum_rain = rainfall.rolling(rain_accum_period, closed='right').sum()
70 # cleaning is True for intervals with rainfall greater than threshold
71 cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\generic.py in rolling(self, window, min_periods, center, win_type, on, axis, closed)
10145 min_periods=min_periods,
10146 center=center, win_type=win_type,
10147 on=on, axis=axis, closed=closed)
10148
10149 cls.rolling = rolling
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in rolling(obj, win_type, **kwds)
2624 return Window(obj, win_type=win_type, **kwds)
2625
-> 2626 return Rolling(obj, **kwds)
2627
2628
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in __init__(self, obj, window, min_periods, center, win_type, axis, on, closed, **kwargs)
63 self.win_freq = None
64 self.axis = obj._get_axis_number(axis) if axis is not None else None
---> 65 self.validate()
66
67 @property
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in validate(self)
1576 timedelta))):
1577
-> 1578 self._validate_monotonic()
1579 freq = self._validate_freq()
1580
~\projects\pvlib-python\venv\lib\site-packages\pandas\core\window.py in _validate_monotonic(self)
1610 formatted = self.on or 'index'
1611 raise ValueError("{0} must be "
-> 1612 "monotonic".format(formatted))
1613
1614 def _validate_freq(self):
|
ValueError
|
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
"""
Read a TMY3 file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepencies between this documentation and the TMY3 User's Manual
[1]_, the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitudeitude
longitude Float site longitudeitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1]_, Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2
TMYData.DNISource See [1]_, Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1]_, Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1]_, Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1]_, Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1]_, Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1]_, Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1]_ section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky
TMYData.TotCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUncertainty See [1]_, Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1]_, Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1]_, Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1]_, Table 1-6
TMYData.RHum Relatitudeive humidity at the time indicated, percent
TMYData.RHumSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1]_, Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1]_, Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1]_, Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1]_, Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1]_, Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1]_, Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1]_, Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1]_, Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1]_, Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1]_, Table 1-6
TMYData.PresWth Present weather code, see [2]_.
TMYData.PresWthSource Present weather code source, see [2]_.
TMYData.PresWthUncertainty Present weather code uncertainty, see [2]_.
============================= ======================================================================================================================================================
.. warning:: TMY3 irradiance data corresponds to the previous hour, so the
first hour is 1AM, corresponding to the net irradiance from midnite to
1AM, and the last hour is midnite of the *next* year, unless the year
has been coerced. EG: if TMY3 was 1988-12-31 24:00:00 this becomes
1989-01-01 00:00:00
.. warning:: When coercing the year, the last index in the dataframe will
be the first hour of the same year, EG: if TMY3 was 1988-12-31 24:00:00
and year is coerced to 1990 this becomes 1990-01-01
References
----------
.. [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
.. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
"""
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError(
"Interactive load failed. tkinter not supported "
"on this system. Try installing X-Quartz and "
"reloading"
)
head = ["USAF", "Name", "State", "TZ", "latitude", "longitude", "altitude"]
if str(filename).startswith("http"):
request = Request(
filename,
headers={
"User-Agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 "
"Safari/537.36"
)
},
)
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors="ignore"))
else:
# assume it's accessible via the file system
csvdata = open(str(filename), "r")
# read in file metadata, advance buffer to second line
firstline = csvdata.readline()
if "Request Rejected" in firstline:
raise IOError("Remote server rejected TMY file request")
meta = dict(zip(head, firstline.rstrip("\n").split(",")))
# convert metadata strings to numeric types
meta["altitude"] = float(meta["altitude"])
meta["latitude"] = float(meta["latitude"])
meta["longitude"] = float(meta["longitude"])
meta["TZ"] = float(meta["TZ"])
meta["USAF"] = int(meta["USAF"])
# use pandas to read the csv file/stringio buffer
# header is actually the second line in file, but tell pandas to look for
# header information on the 1st line (0 indexing) because we've already
# advanced past the true first line with the readline call above.
data = pd.read_csv(csvdata, header=0)
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data["Date (MM/DD/YYYY)"], format="%m/%d/%Y")
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data["Time (HH:MM)"].str[:2].astype(int) % 24
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = data_ymd + pd.to_timedelta(shifted_hour, unit="h")
if coerce_year is not None:
data.index = data.index.map(lambda dt: dt.replace(year=coerce_year))
if recolumn:
data = _recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta["TZ"] * 3600))
return data, meta
|
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
"""
Read a TMY3 file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepencies between this documentation and the TMY3 User's Manual
[1]_, the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitudeitude
longitude Float site longitudeitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1]_, Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2
TMYData.DNISource See [1]_, Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1]_, Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1]_, Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1]_, Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1]_, Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]_
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1]_, Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1]_ section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky
TMYData.TotCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUncertainty See [1]_, Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1]_, Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1]_, Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1]_, Table 1-6
TMYData.RHum Relatitudeive humidity at the time indicated, percent
TMYData.RHumSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1]_, Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1]_, Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1]_, Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1]_, Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1]_, Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1]_, Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1]_, Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1]_, Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1]_, Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1]_, Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1]_, Table 1-6
TMYData.PresWth Present weather code, see [2]_.
TMYData.PresWthSource Present weather code source, see [2]_.
TMYData.PresWthUncertainty Present weather code uncertainty, see [2]_.
============================= ======================================================================================================================================================
References
----------
.. [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
.. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
"""
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError(
"Interactive load failed. tkinter not supported "
"on this system. Try installing X-Quartz and "
"reloading"
)
head = ["USAF", "Name", "State", "TZ", "latitude", "longitude", "altitude"]
if str(filename).startswith("http"):
request = Request(
filename,
headers={
"User-Agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 "
"Safari/537.36"
)
},
)
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors="ignore"))
else:
# assume it's accessible via the file system
csvdata = open(str(filename), "r")
# read in file metadata, advance buffer to second line
firstline = csvdata.readline()
if "Request Rejected" in firstline:
raise IOError("Remote server rejected TMY file request")
meta = dict(zip(head, firstline.rstrip("\n").split(",")))
# convert metadata strings to numeric types
meta["altitude"] = float(meta["altitude"])
meta["latitude"] = float(meta["latitude"])
meta["longitude"] = float(meta["longitude"])
meta["TZ"] = float(meta["TZ"])
meta["USAF"] = int(meta["USAF"])
# use pandas to read the csv file/stringio buffer
# header is actually the second line in file, but tell pandas to look for
# header information on the 1st line (0 indexing) because we've already
# advanced past the true first line with the readline call above.
data = pd.read_csv(
csvdata,
header=0,
parse_dates={"datetime": ["Date (MM/DD/YYYY)", "Time (HH:MM)"]},
date_parser=lambda *x: _parsedate(*x, year=coerce_year),
index_col="datetime",
)
if recolumn:
data = _recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta["TZ"] * 3600))
return data, meta
|
https://github.com/pvlib/pvlib-python/issues/865
|
In [1]: import pvlib
In [2]: greensboro = pvlib.iotools.read_tmy3('723170TYA.CSV', coerce_year=1990)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in converter(*date_cols)
3280 result = tools.to_datetime(
-> 3281 date_parser(*date_cols), errors="ignore", cache=cache_dates
3282 )
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pvlib\iotools\tmy.py in <lambda>(*x)
194 parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
--> 195 date_parser=lambda *x: _parsedate(*x, year=coerce_year),
196 index_col='datetime')
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pvlib\iotools\tmy.py in _parsedate(ymd, hour, year)
215 # and dateutil's inability to handle that.
--> 216 offset_hour = int(hour[:2]) - 1
217 offset_datetime = '{} {}:00'.format(ymd, offset_hour)
TypeError: only size-1 arrays can be converted to Python scalars
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in converter(*date_cols)
3291 parser=date_parser,
-> 3292 dayfirst=dayfirst,
3293 ),
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.try_parse_dates()
pandas/_libs/tslibs/parsing.pyx in pandas._libs.tslibs.parsing.try_parse_dates()
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pvlib\iotools\tmy.py in <lambda>(*x)
194 parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
--> 195 date_parser=lambda *x: _parsedate(*x, year=coerce_year),
196 index_col='datetime')
TypeError: _parsedate() missing 1 required positional argument: 'hour'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-2-6078abbb94dd> in <module>
----> 1 greensboro = pvlib.iotools.read_tmy3('723170TYA.CSV', coerce_year=1990)
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pvlib\iotools\tmy.py in read_tmy3(filename, coerce_year, recolumn)
194 parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
195 date_parser=lambda *x: _parsedate(*x, year=coerce_year),
--> 196 index_col='datetime')
197
198 if recolumn:
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision)
683 )
684
--> 685 return _read(filepath_or_buffer, kwds)
686
687 parser_f.__name__ = name
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in _read(filepath_or_buffer, kwds)
461
462 try:
--> 463 data = parser.read(nrows)
464 finally:
465 parser.close()
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in read(self, nrows)
1152 def read(self, nrows=None):
1153 nrows = _validate_integer("nrows", nrows)
-> 1154 ret = self._engine.read(nrows)
1155
1156 # May alter columns / col_dict
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in read(self, nrows)
2132 data = {k: v for k, (i, v) in zip(names, data)}
2133
-> 2134 names, data = self._do_date_conversions(names, data)
2135 index, names = self._make_index(data, alldata, names)
2136
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in _do_date_conversions(self, names, data)
1883 self.index_names,
1884 names,
-> 1885 keep_date_col=self.keep_date_col,
1886 )
1887
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in _process_date_conversion(data_dict, converter, parse_spec, index_col, index_names, columns, keep_date_col)
3355
3356 _, col, old_names = _try_convert_dates(
-> 3357 converter, colspec, data_dict, orig_names
3358 )
3359
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in _try_convert_dates(parser, colspec, data_dict, columns)
3388 to_parse = [data_dict[c] for c in colnames if c in data_dict]
3389
-> 3390 new_col = parser(*to_parse)
3391 return new_name, new_col, colnames
3392
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\parsers.py in converter(*date_cols)
3295 )
3296 except Exception:
-> 3297 return generic_parser(date_parser, *date_cols)
3298
3299 return converter
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pandas\io\date_converters.py in generic_parser(parse_func, *cols)
36 for i in range(N):
37 args = [c[i] for c in cols]
---> 38 results[i] = parse_func(*args)
39
40 return results
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pvlib\iotools\tmy.py in <lambda>(*x)
193 csvdata, header=0,
194 parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
--> 195 date_parser=lambda *x: _parsedate(*x, year=coerce_year),
196 index_col='datetime')
197
~\AppData\Local\Continuum\miniconda3\envs\py37\lib\site-packages\pvlib\iotools\tmy.py in _parsedate(ymd, hour, year)
219 true_date = offset_date + dateutil.relativedelta.relativedelta(hours=1)
220 if year is not None:
--> 221 true_date = true_date.replace(year=year)
222 return true_date
223
ValueError: day is out of range for month
|
TypeError
|
def get_data(
self,
latitude,
longitude,
start,
end,
vert_level=None,
query_variables=None,
close_netcdf_data=True,
**kwargs,
):
"""
Submits a query to the UNIDATA servers using Siphon NCSS and
converts the netcdf data to a pandas DataFrame.
Parameters
----------
latitude: float
The latitude value.
longitude: float
The longitude value.
start: datetime or timestamp
The start time.
end: datetime or timestamp
The end time.
vert_level: None, float or integer, default None
Vertical altitude of interest.
query_variables: None or list, default None
If None, uses self.variables.
close_netcdf_data: bool, default True
Controls if the temporary netcdf data file should be closed.
Set to False to access the raw data.
**kwargs:
Additional keyword arguments are silently ignored.
Returns
-------
forecast_data : DataFrame
column names are the weather model's variable names.
"""
if not self.connected:
self.connect_to_catalog()
if vert_level is not None:
self.vert_level = vert_level
if query_variables is None:
self.query_variables = list(self.variables.values())
else:
self.query_variables = query_variables
self.latitude = latitude
self.longitude = longitude
self.set_query_latlon() # modifies self.query
self.set_location(start, latitude, longitude)
self.start = start
self.end = end
self.query.time_range(self.start, self.end)
if self.vert_level is not None:
self.query.vertical_level(self.vert_level)
self.query.variables(*self.query_variables)
self.query.accept(self.data_format)
self.netcdf_data = self.ncss.get_data(self.query)
# might be better to go to xarray here so that we can handle
# higher dimensional data for more advanced applications
self.data = self._netcdf2pandas(
self.netcdf_data, self.query_variables, self.start, self.end
)
if close_netcdf_data:
self.netcdf_data.close()
return self.data
|
def get_data(
self,
latitude,
longitude,
start,
end,
vert_level=None,
query_variables=None,
close_netcdf_data=True,
):
"""
Submits a query to the UNIDATA servers using Siphon NCSS and
converts the netcdf data to a pandas DataFrame.
Parameters
----------
latitude: float
The latitude value.
longitude: float
The longitude value.
start: datetime or timestamp
The start time.
end: datetime or timestamp
The end time.
vert_level: None, float or integer, default None
Vertical altitude of interest.
query_variables: None or list, default None
If None, uses self.variables.
close_netcdf_data: bool, default True
Controls if the temporary netcdf data file should be closed.
Set to False to access the raw data.
Returns
-------
forecast_data : DataFrame
column names are the weather model's variable names.
"""
if not self.connected:
self.connect_to_catalog()
if vert_level is not None:
self.vert_level = vert_level
if query_variables is None:
self.query_variables = list(self.variables.values())
else:
self.query_variables = query_variables
self.latitude = latitude
self.longitude = longitude
self.set_query_latlon() # modifies self.query
self.set_location(start, latitude, longitude)
self.start = start
self.end = end
self.query.time_range(self.start, self.end)
if self.vert_level is not None:
self.query.vertical_level(self.vert_level)
self.query.variables(*self.query_variables)
self.query.accept(self.data_format)
self.netcdf_data = self.ncss.get_data(self.query)
# might be better to go to xarray here so that we can handle
# higher dimensional data for more advanced applications
self.data = self._netcdf2pandas(
self.netcdf_data, self.query_variables, self.start, self.end
)
if close_netcdf_data:
self.netcdf_data.close()
return self.data
|
https://github.com/pvlib/pvlib-python/issues/745
|
Traceback (most recent call last):
File "<stdin>", line 5, in <module>
File "/anaconda3/envs/tonopah/lib/python3.7/site-packages/pvlib/forecast.py", line 306, in get_processed_data
return self.process_data(self.get_data(*args, **kwargs), **kwargs)
TypeError: get_data() got an unexpected keyword argument 'how'
|
TypeError
|
def lookup_linke_turbidity(
time, latitude, longitude, filepath=None, interp_turbidity=True
):
"""
Look up the Linke Turibidity from the ``LinkeTurbidities.mat``
data file supplied with pvlib.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
filepath : string
The path to the ``.mat`` file.
interp_turbidity : bool
If ``True``, interpolates the monthly Linke turbidity values
found in ``LinkeTurbidities.mat`` to daily values.
Returns
-------
turbidity : Series
"""
# The .mat file 'LinkeTurbidities.mat' contains a single 2160 x 4320 x 12
# matrix of type uint8 called 'LinkeTurbidity'. The rows represent global
# latitudes from 90 to -90 degrees; the columns represent global longitudes
# from -180 to 180; and the depth (third dimension) represents months of
# the year from January (1) to December (12). To determine the Linke
# turbidity for a position on the Earth's surface for a given month do the
# following: LT = LinkeTurbidity(LatitudeIndex, LongitudeIndex, month).
# Note that the numbers within the matrix are 20 * Linke Turbidity,
# so divide the number from the file by 20 to get the
# turbidity.
# The nodes of the grid are 5' (1/12=0.0833[arcdeg]) apart.
# From Section 8 of Aerosol optical depth and Linke turbidity climatology
# http://www.meteonorm.com/images/uploads/downloads/ieashc36_report_TL_AOD_climatologies.pdf
# 1st row: 89.9583 S, 2nd row: 89.875 S
# 1st column: 179.9583 W, 2nd column: 179.875 W
try:
import scipy.io
except ImportError:
raise ImportError(
"The Linke turbidity lookup table requires scipy. "
+ "You can still use clearsky.ineichen if you "
+ "supply your own turbidities."
)
if filepath is None:
pvlib_path = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(pvlib_path, "data", "LinkeTurbidities.mat")
mat = scipy.io.loadmat(filepath)
linke_turbidity_table = mat["LinkeTurbidity"]
latitude_index = np.around(_linearly_scale(latitude, 90, -90, 0, 2160)).astype(
np.int64
)
longitude_index = np.around(_linearly_scale(longitude, -180, 180, 0, 4320)).astype(
np.int64
)
g = linke_turbidity_table[latitude_index][longitude_index]
if interp_turbidity:
# Data covers 1 year. Assume that data corresponds to the value at the
# middle of each month. This means that we need to add previous Dec and
# next Jan to the array so that the interpolation will work for
# Jan 1 - Jan 15 and Dec 16 - Dec 31.
g2 = np.concatenate([[g[-1]], g, [g[0]]])
# Then we map the month value to the day of year value.
isleap = [calendar.isleap(t.year) for t in time]
if all(isleap):
days = _calendar_month_middles(2016) # all years are leap
elif not any(isleap):
days = _calendar_month_middles(2015) # none of the years are leap
else:
days = None # some of the years are leap years and some are not
if days is None:
# Loop over different years, might be slow for large timeserires
linke_turbidity = pd.Series(
[
np.interp(t.dayofyear, _calendar_month_middles(t.year), g2)
for t in time
],
index=time,
)
else:
linke_turbidity = pd.Series(np.interp(time.dayofyear, days, g2), index=time)
else:
linke_turbidity = pd.DataFrame(time.month, index=time)
# apply monthly data
linke_turbidity = linke_turbidity.apply(lambda x: g[x[0] - 1], axis=1)
linke_turbidity /= 20.0
return linke_turbidity
|
def lookup_linke_turbidity(
time, latitude, longitude, filepath=None, interp_turbidity=True
):
"""
Look up the Linke Turibidity from the ``LinkeTurbidities.mat``
data file supplied with pvlib.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
filepath : string
The path to the ``.mat`` file.
interp_turbidity : bool
If ``True``, interpolates the monthly Linke turbidity values
found in ``LinkeTurbidities.mat`` to daily values.
Returns
-------
turbidity : Series
"""
# The .mat file 'LinkeTurbidities.mat' contains a single 2160 x 4320 x 12
# matrix of type uint8 called 'LinkeTurbidity'. The rows represent global
# latitudes from 90 to -90 degrees; the columns represent global longitudes
# from -180 to 180; and the depth (third dimension) represents months of
# the year from January (1) to December (12). To determine the Linke
# turbidity for a position on the Earth's surface for a given month do the
# following: LT = LinkeTurbidity(LatitudeIndex, LongitudeIndex, month).
# Note that the numbers within the matrix are 20 * Linke Turbidity,
# so divide the number from the file by 20 to get the
# turbidity.
try:
import scipy.io
except ImportError:
raise ImportError(
"The Linke turbidity lookup table requires scipy. "
+ "You can still use clearsky.ineichen if you "
+ "supply your own turbidities."
)
if filepath is None:
pvlib_path = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(pvlib_path, "data", "LinkeTurbidities.mat")
mat = scipy.io.loadmat(filepath)
linke_turbidity_table = mat["LinkeTurbidity"]
latitude_index = np.around(_linearly_scale(latitude, 90, -90, 1, 2160)).astype(
np.int64
)
longitude_index = np.around(_linearly_scale(longitude, -180, 180, 1, 4320)).astype(
np.int64
)
g = linke_turbidity_table[latitude_index][longitude_index]
if interp_turbidity:
# Data covers 1 year.
# Assume that data corresponds to the value at
# the middle of each month.
# This means that we need to add previous Dec and next Jan
# to the array so that the interpolation will work for
# Jan 1 - Jan 15 and Dec 16 - Dec 31.
# Then we map the month value to the day of year value.
# This is approximate and could be made more accurate.
g2 = np.concatenate([[g[-1]], g, [g[0]]])
days = np.linspace(-15, 380, num=14)
linke_turbidity = pd.Series(np.interp(time.dayofyear, days, g2), index=time)
else:
linke_turbidity = pd.DataFrame(time.month, index=time)
# apply monthly data
linke_turbidity = linke_turbidity.apply(lambda x: g[x[0] - 1], axis=1)
linke_turbidity /= 20.0
return linke_turbidity
|
https://github.com/pvlib/pvlib-python/issues/262
|
from pvlib.clearsky import lookup_linke_turbidity
import pandas as pd
time = pd.DatetimeIndex(start='1/1/2016', end='1/2/2016', freq='H')
import os
lookup_linke_turbidity(time, 90, 180, os.path.join('Downloads', 'LinkeTurbidities.mat'))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\site-packages\pvlib\clearsky.py", line 210, in lookup_linke_turbidity
g = linke_turbidity_table[latitude_index][longitude_index]
IndexError: index 4320 is out of bounds for axis 0 with size 4320
|
IndexError
|
def _linearly_scale(inputmatrix, inputmin, inputmax, outputmin, outputmax):
"""linearly scale input to output, used by Linke turbidity lookup"""
inputrange = inputmax - inputmin
outputrange = outputmax - outputmin
delta = outputrange / inputrange # number of indices per input unit
inputmin = inputmin + 1.0 / delta / 2.0 # shift to center of index
outputmax = outputmax - 1 # shift index to zero indexing
outputmatrix = (inputmatrix - inputmin) * delta + outputmin
err = IndexError(
"Input, %g, is out of range (%g, %g)."
% (inputmatrix, inputmax - inputrange, inputmax)
)
# round down if input is within half an index or else raise index error
if outputmatrix > outputmax:
if np.around(outputmatrix - outputmax, 1) <= 0.5:
outputmatrix = outputmax
else:
raise err
elif outputmatrix < outputmin:
if np.around(outputmin - outputmatrix, 1) <= 0.5:
outputmatrix = outputmin
else:
raise err
return outputmatrix
|
def _linearly_scale(inputmatrix, inputmin, inputmax, outputmin, outputmax):
"""used by linke turbidity lookup function"""
inputrange = inputmax - inputmin
outputrange = outputmax - outputmin
outputmatrix = (inputmatrix - inputmin) * outputrange / inputrange + outputmin
return outputmatrix
|
https://github.com/pvlib/pvlib-python/issues/262
|
from pvlib.clearsky import lookup_linke_turbidity
import pandas as pd
time = pd.DatetimeIndex(start='1/1/2016', end='1/2/2016', freq='H')
import os
lookup_linke_turbidity(time, 90, 180, os.path.join('Downloads', 'LinkeTurbidities.mat'))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\site-packages\pvlib\clearsky.py", line 210, in lookup_linke_turbidity
g = linke_turbidity_table[latitude_index][longitude_index]
IndexError: index 4320 is out of bounds for axis 0 with size 4320
|
IndexError
|
def slugify(value):
"""Remove special characters from a string and slugify it.
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
_value = str(value)
# This differs from the Lutris website implementation which uses the Django
# version of `slugify` and uses the "NFKD" normalization method instead of
# "NFD". This creates some inconsistencies in titles containing a trademark
# symbols or some other special characters. The website version of slugify
# will likely get updated to use the same normalization method.
_value = unicodedata.normalize("NFD", _value).encode("ascii", "ignore")
_value = _value.decode("utf-8")
_value = str(re.sub(r"[^\w\s-]", "", _value)).strip().lower()
slug = re.sub(r"[-\s]+", "-", _value)
if not slug:
# The slug is empty, likely because the string contains only non-latin
# characters
slug = str(uuid.uuid5(uuid.NAMESPACE_URL, str(value)))
return slug
|
def slugify(value):
"""Remove special characters from a string and slugify it.
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = str(value)
# This differs from the Lutris website implementation which uses the Django
# version of `slugify` and uses the "NFKD" normalization method instead of
# "NFD". This creates some inconsistencies in titles containing a trademark
# symbols or some other special characters. The website version of slugify
# will likely get updated to use the same normalization method.
value = unicodedata.normalize("NFD", value).encode("ascii", "ignore")
value = value.decode("utf-8")
value = str(re.sub(r"[^\w\s-]", "", value)).strip().lower()
return re.sub(r"[-\s]+", "-", value)
|
https://github.com/lutris/lutris/issues/2932
|
Saving メイド☆ぱらだいす ~目指せ!メイドナンバーワン!~ (ppsspp)
DEBUG 2020-06-10 11:02:58,692 [config.save:211]:Saving LutrisConfig(level=game, game_config_id=-1591779765, runner=ppsspp) config to ~/.config/lutris/games/-1591779765.yml
Traceback (most recent call last):
File "~/src/lutris/lutris/gui/config/common.py", line 535, in on_save
self.game.save()
File "~/src/lutris/lutris/game.py", line 252, in save
playtime=self.playtime,
File "~/src/lutris/lutris/pga.py", line 382, in add_or_update
game_id = get_matching_game(params)
File "~/src/lutris/lutris/pga.py", line 400, in get_matching_game
raise ValueError("Can't add or update without an identifier")
ValueError: Can't add or update without an identifier
|
ValueError
|
def is_valid(self):
if not self.runner_name:
ErrorDialog(_("Runner not provided"))
return False
if not self.name_entry.get_text():
ErrorDialog(_("Please fill in the name"))
return False
if (
self.runner_name in ("steam", "winesteam")
and self.lutris_config.game_config.get("appid") is None
):
ErrorDialog(_("Steam AppId not provided"))
return False
invalid_fields = []
runner_class = import_runner(self.runner_name)
runner_instance = runner_class()
for config in ["game", "runner"]:
for k, v in getattr(self.lutris_config, config + "_config").items():
option = runner_instance.find_option(config + "_options", k)
if option is None:
continue
validator = option.get("validator")
if validator is not None:
try:
res = validator(v)
logger.debug("%s validated successfully: %s", k, res)
except Exception:
invalid_fields.append(option.get("label"))
if invalid_fields:
ErrorDialog(
_("The following fields have invalid values: ") + ", ".join(invalid_fields)
)
return False
return True
|
def is_valid(self):
if not self.runner_name:
ErrorDialog(_("Runner not provided"))
return False
if not self.name_entry.get_text():
ErrorDialog(_("Please fill in the name"))
return False
if (
self.runner_name in ("steam", "winesteam")
and self.lutris_config.game_config.get("appid") is None
):
ErrorDialog(_("Steam AppId not provided"))
return False
invalid_fields = []
runner_module = importlib.import_module("lutris.runners." + self.runner_name)
runner_class = getattr(runner_module, self.runner_name)
runner_instance = runner_class()
for config in ["game", "runner"]:
for k, v in getattr(self.lutris_config, config + "_config").items():
option = runner_instance.find_option(config + "_options", k)
if option is None:
continue
validator = option.get("validator")
if validator is not None:
try:
res = validator(v)
logger.debug("%s validated successfully: %s", k, res)
except Exception:
invalid_fields.append(option.get("label"))
if invalid_fields:
ErrorDialog(
_("The following fields have invalid values: ") + ", ".join(invalid_fields)
)
return False
return True
|
https://github.com/lutris/lutris/issues/3009
|
Traceback (most recent call last):
File "~/src/lutris/lutris/gui/config/common.py", line 503, in on_save
if not self.is_valid():
File "~/src/lutris/lutris/gui/config/common.py", line 481, in is_valid
runner_module = importlib.import_module("lutris.runners." + self.runner_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'lutris.runners.citra'
|
ModuleNotFoundError
|
def toggle_dxvk(self, enable, version=None, dxvk_manager: dxvk.DXVKManager = None):
# manual version only sets the dlls to native
if version.lower() != "manual":
if enable:
if not dxvk_manager.is_available():
logger.info("DXVK %s is not available yet, downloading...")
dxvk_manager.download()
dxvk_manager.enable()
else:
dxvk_manager.disable()
if enable:
for dll in dxvk_manager.dxvk_dlls:
# We have to make sure that the dll exists before setting it to native
if dxvk_manager.dxvk_dll_exists(dll):
self.dll_overrides[dll] = "n"
|
def toggle_dxvk(self, enable, version=None, dxvk_manager: dxvk.DXVKManager = None):
# manual version only sets the dlls to native
if version.lower() != "manual":
if enable:
if not dxvk_manager.is_available():
dxvk_manager.download()
dxvk_manager.enable()
else:
dxvk_manager.disable()
if enable:
for dll in dxvk_manager.dxvk_dlls:
# We have to make sure that the dll exists before setting it to native
if dxvk_manager.dxvk_dll_exists(dll):
self.dll_overrides[dll] = "n"
|
https://github.com/lutris/lutris/issues/2488
|
DEBUG 2019-11-26 02:20:55,510 [application.launch:339]:Launching Overwatch (wine) (139814221039744)
DEBUG 2019-11-26 02:20:55,543 [game.play:295]:Prelaunching <lutris.runners.wine.wine object at 0x7f290bc16c10>
DEBUG 2019-11-26 02:20:55,765 [downloader.start:55]:Starting download of:
https://github.com/doitsujin/dxvk/releases/download/vdxvk-time2/dxvk-dxvk-time2.tar.gz
DEBUG 2019-11-26 02:20:56,518 [downloader.on_done:97]:Finished downloading https://github.com/doitsujin/dxvk/releases/download/vdxvk-time2/dxvk-dxvk-time2.tar.gz
DEBUG 2019-11-26 02:20:56,668 [extract.extract_archive:69]:Extracting /home/alex/.local/share/lutris/runtime/dxvk/dxvk-dxvk-time2.tar.gz to /home/alex/.local/share/lutris/runtime/dxvk/dxvk-time2
ERROR 2019-11-26 02:20:56,668 [extract.extract_archive:116]:Extraction failed: not a gzip file
Traceback (most recent call last):
File "/usr/lib/python3.8/tarfile.py", line 1671, in gzopen
t = cls.taropen(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1647, in taropen
return cls(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1510, in __init__
self.firstmember = self.next()
File "/usr/lib/python3.8/tarfile.py", line 2313, in next
tarinfo = self.tarinfo.fromtarfile(self)
File "/usr/lib/python3.8/tarfile.py", line 1102, in fromtarfile
buf = tarfile.fileobj.read(BLOCKSIZE)
File "/usr/lib/python3.8/gzip.py", line 286, in read
return self._buffer.read(size)
File "/usr/lib/python3.8/_compression.py", line 68, in readinto
data = self.read(len(byte_view))
File "/usr/lib/python3.8/gzip.py", line 473, in read
if not self._read_gzip_header():
File "/usr/lib/python3.8/gzip.py", line 421, in _read_gzip_header
raise BadGzipFile('Not a gzipped file (%r)' % magic)
gzip.BadGzipFile: Not a gzipped file (b'No')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 114, in extract_archive
_do_extract(path, temp_path, opener, mode, extractor)
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 166, in _do_extract
handler = opener(archive, mode)
File "/usr/lib/python3.8/tarfile.py", line 1617, in open
return func(name, filemode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1675, in gzopen
raise ReadError("not a gzip file")
tarfile.ReadError: not a gzip file
ERROR 2019-11-26 02:20:56,669 [jobs.target:32]:Error while completing task <bound method wine.prelaunch of <lutris.runners.wine.wine object at 0x7f290bc16c10>>: not a gzip file
<class 'lutris.util.extract.ExtractFailure'> not a gzip file
File "/usr/lib/python3.8/site-packages/lutris/util/jobs.py", line 30, in target
result = self.function(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 794, in prelaunch
self.setup_dxvk(
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 775, in setup_dxvk
self.toggle_dxvk(
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 760, in toggle_dxvk
dxvk_manager.download()
File "/usr/lib/python3.8/site-packages/lutris/util/wine/dxvk.py", line 142, in download
extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 117, in extract_archive
raise ExtractFailure(str(ex))
ERROR 2019-11-26 02:20:56,669 [game.configure_game:312]:not a gzip file
ERROR 2019-11-26 02:20:58,157 [game.configure_game:315]:Game prelaunch unsuccessful
TypeError: refresh() takes 1 positional argument but 3 were given
|
tarfile.ReadError
|
def async_download(self, stop_request=None):
headers = requests.utils.default_headers()
headers["User-Agent"] = "Lutris/%s" % __version__
if self.referer:
headers["Referer"] = self.referer
response = requests.get(self.url, headers=headers, stream=True)
if response.status_code != 200:
logger.info("%s returned a %s error" % (self.url, response.status_code))
response.raise_for_status()
self.full_size = int(response.headers.get("Content-Length", "").strip() or 0)
for chunk in response.iter_content(chunk_size=1024 * 1024):
if not self.file_pointer:
break
if chunk:
self.downloaded_size += len(chunk)
self.file_pointer.write(chunk)
|
def async_download(self, stop_request=None):
headers = requests.utils.default_headers()
headers["User-Agent"] = "Lutris/%s" % __version__
if self.referer:
headers["Referer"] = self.referer
response = requests.get(self.url, headers=headers, stream=True)
self.full_size = int(response.headers.get("Content-Length", "").strip() or 0)
for chunk in response.iter_content(chunk_size=1024 * 1024):
if not self.file_pointer:
break
if chunk:
self.downloaded_size += len(chunk)
self.file_pointer.write(chunk)
|
https://github.com/lutris/lutris/issues/2488
|
DEBUG 2019-11-26 02:20:55,510 [application.launch:339]:Launching Overwatch (wine) (139814221039744)
DEBUG 2019-11-26 02:20:55,543 [game.play:295]:Prelaunching <lutris.runners.wine.wine object at 0x7f290bc16c10>
DEBUG 2019-11-26 02:20:55,765 [downloader.start:55]:Starting download of:
https://github.com/doitsujin/dxvk/releases/download/vdxvk-time2/dxvk-dxvk-time2.tar.gz
DEBUG 2019-11-26 02:20:56,518 [downloader.on_done:97]:Finished downloading https://github.com/doitsujin/dxvk/releases/download/vdxvk-time2/dxvk-dxvk-time2.tar.gz
DEBUG 2019-11-26 02:20:56,668 [extract.extract_archive:69]:Extracting /home/alex/.local/share/lutris/runtime/dxvk/dxvk-dxvk-time2.tar.gz to /home/alex/.local/share/lutris/runtime/dxvk/dxvk-time2
ERROR 2019-11-26 02:20:56,668 [extract.extract_archive:116]:Extraction failed: not a gzip file
Traceback (most recent call last):
File "/usr/lib/python3.8/tarfile.py", line 1671, in gzopen
t = cls.taropen(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1647, in taropen
return cls(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1510, in __init__
self.firstmember = self.next()
File "/usr/lib/python3.8/tarfile.py", line 2313, in next
tarinfo = self.tarinfo.fromtarfile(self)
File "/usr/lib/python3.8/tarfile.py", line 1102, in fromtarfile
buf = tarfile.fileobj.read(BLOCKSIZE)
File "/usr/lib/python3.8/gzip.py", line 286, in read
return self._buffer.read(size)
File "/usr/lib/python3.8/_compression.py", line 68, in readinto
data = self.read(len(byte_view))
File "/usr/lib/python3.8/gzip.py", line 473, in read
if not self._read_gzip_header():
File "/usr/lib/python3.8/gzip.py", line 421, in _read_gzip_header
raise BadGzipFile('Not a gzipped file (%r)' % magic)
gzip.BadGzipFile: Not a gzipped file (b'No')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 114, in extract_archive
_do_extract(path, temp_path, opener, mode, extractor)
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 166, in _do_extract
handler = opener(archive, mode)
File "/usr/lib/python3.8/tarfile.py", line 1617, in open
return func(name, filemode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1675, in gzopen
raise ReadError("not a gzip file")
tarfile.ReadError: not a gzip file
ERROR 2019-11-26 02:20:56,669 [jobs.target:32]:Error while completing task <bound method wine.prelaunch of <lutris.runners.wine.wine object at 0x7f290bc16c10>>: not a gzip file
<class 'lutris.util.extract.ExtractFailure'> not a gzip file
File "/usr/lib/python3.8/site-packages/lutris/util/jobs.py", line 30, in target
result = self.function(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 794, in prelaunch
self.setup_dxvk(
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 775, in setup_dxvk
self.toggle_dxvk(
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 760, in toggle_dxvk
dxvk_manager.download()
File "/usr/lib/python3.8/site-packages/lutris/util/wine/dxvk.py", line 142, in download
extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 117, in extract_archive
raise ExtractFailure(str(ex))
ERROR 2019-11-26 02:20:56,669 [game.configure_game:312]:not a gzip file
ERROR 2019-11-26 02:20:58,157 [game.configure_game:315]:Game prelaunch unsuccessful
TypeError: refresh() takes 1 positional argument but 3 were given
|
tarfile.ReadError
|
def extract_archive(path, to_directory=".", merge_single=True, extractor=None):
path = os.path.abspath(path)
mode = None
logger.debug("Extracting %s to %s", path, to_directory)
if extractor is None:
if path.endswith(".tar.gz") or path.endswith(".tgz"):
extractor = "tgz"
elif path.endswith(".tar.xz") or path.endswith(".txz"):
extractor = "txz"
elif path.endswith(".tar"):
extractor = "tar"
elif path.endswith(".tar.bz2") or path.endswith(".tbz"):
extractor = "bz2"
elif path.endswith(".gz"):
extractor = "gzip"
elif path.endswith(".exe"):
extractor = "exe"
elif is_7zip_supported(path, None):
extractor = None
else:
raise RuntimeError(
"Could not extract `%s` - no appropriate extractor found" % path
)
if extractor == "tgz":
opener, mode = tarfile.open, "r:gz"
elif extractor == "txz":
opener, mode = tarfile.open, "r:xz"
elif extractor == "tar":
opener, mode = tarfile.open, "r:"
elif extractor == "bz2":
opener, mode = tarfile.open, "r:bz2"
elif extractor == "gzip":
decompress_gz(path, to_directory)
return
elif extractor == "gog":
opener = "innoextract"
elif extractor == "exe":
opener = "exe"
elif extractor is None or is_7zip_supported(path, extractor):
opener = "7zip"
else:
raise RuntimeError("Could not extract `%s` - unknown format specified" % path)
temp_name = ".extract-" + str(uuid.uuid4())[:8]
temp_path = temp_dir = os.path.join(to_directory, temp_name)
try:
_do_extract(path, temp_path, opener, mode, extractor)
except (OSError, zlib.error, tarfile.ReadError, EOFError) as ex:
logger.error("Extraction failed: %s", ex)
raise ExtractFailure(str(ex))
if merge_single:
extracted = os.listdir(temp_path)
if len(extracted) == 1:
temp_path = os.path.join(temp_path, extracted[0])
if os.path.isfile(temp_path):
destination_path = os.path.join(to_directory, extracted[0])
if os.path.isfile(destination_path):
logger.warning("Overwrite existing file %s", destination_path)
os.remove(destination_path)
shutil.move(temp_path, to_directory)
os.removedirs(temp_dir)
else:
for archive_file in os.listdir(temp_path):
source_path = os.path.join(temp_path, archive_file)
destination_path = os.path.join(to_directory, archive_file)
# logger.debug("Moving extracted files from %s to %s", source_path, destination_path)
if system.path_exists(destination_path):
logger.warning("Overwrite existing path %s", destination_path)
if os.path.isfile(destination_path):
os.remove(destination_path)
shutil.move(source_path, destination_path)
elif os.path.isdir(destination_path):
try:
system.merge_folders(source_path, destination_path)
except OSError as ex:
logger.error(
"Failed to merge to destination %s: %s",
destination_path,
ex,
)
raise ExtractFailure(str(ex))
else:
shutil.move(source_path, destination_path)
system.remove_folder(temp_dir)
logger.debug("Finished extracting %s to %s", path, to_directory)
return path, to_directory
|
def extract_archive(path, to_directory=".", merge_single=True, extractor=None):
path = os.path.abspath(path)
mode = None
logger.debug("Extracting %s to %s", path, to_directory)
if extractor is None:
if path.endswith(".tar.gz") or path.endswith(".tgz"):
extractor = "tgz"
elif path.endswith(".tar.xz") or path.endswith(".txz"):
extractor = "txz"
elif path.endswith(".tar"):
extractor = "tar"
elif path.endswith(".tar.bz2") or path.endswith(".tbz"):
extractor = "bz2"
elif path.endswith(".gz"):
extractor = "gzip"
elif path.endswith(".exe"):
extractor = "exe"
elif is_7zip_supported(path, None):
extractor = None
else:
raise RuntimeError(
"Could not extract `%s` - no appropriate extractor found" % path
)
if extractor == "tgz":
opener, mode = tarfile.open, "r:gz"
elif extractor == "txz":
opener, mode = tarfile.open, "r:xz"
elif extractor == "tar":
opener, mode = tarfile.open, "r:"
elif extractor == "bz2":
opener, mode = tarfile.open, "r:bz2"
elif extractor == "gzip":
decompress_gz(path, to_directory)
return
elif extractor == "gog":
opener = "innoextract"
elif extractor == "exe":
opener = "exe"
elif extractor is None or is_7zip_supported(path, extractor):
opener = "7zip"
else:
raise RuntimeError("Could not extract `%s` - unknown format specified" % path)
temp_name = ".extract-" + str(uuid.uuid4())[:8]
temp_path = temp_dir = os.path.join(to_directory, temp_name)
try:
_do_extract(path, temp_path, opener, mode, extractor)
except (OSError, zlib.error, tarfile.ReadError, EOFError) as ex:
logger.exception("Extraction failed: %s", ex)
raise ExtractFailure(str(ex))
if merge_single:
extracted = os.listdir(temp_path)
if len(extracted) == 1:
temp_path = os.path.join(temp_path, extracted[0])
if os.path.isfile(temp_path):
destination_path = os.path.join(to_directory, extracted[0])
if os.path.isfile(destination_path):
logger.warning("Overwrite existing file %s", destination_path)
os.remove(destination_path)
shutil.move(temp_path, to_directory)
os.removedirs(temp_dir)
else:
for archive_file in os.listdir(temp_path):
source_path = os.path.join(temp_path, archive_file)
destination_path = os.path.join(to_directory, archive_file)
# logger.debug("Moving extracted files from %s to %s", source_path, destination_path)
if system.path_exists(destination_path):
logger.warning("Overwrite existing path %s", destination_path)
if os.path.isfile(destination_path):
os.remove(destination_path)
shutil.move(source_path, destination_path)
elif os.path.isdir(destination_path):
try:
system.merge_folders(source_path, destination_path)
except OSError as ex:
logger.error(
"Failed to merge to destination %s: %s",
destination_path,
ex,
)
raise ExtractFailure(str(ex))
else:
shutil.move(source_path, destination_path)
system.remove_folder(temp_dir)
logger.debug("Finished extracting %s to %s", path, to_directory)
return path, to_directory
|
https://github.com/lutris/lutris/issues/2488
|
DEBUG 2019-11-26 02:20:55,510 [application.launch:339]:Launching Overwatch (wine) (139814221039744)
DEBUG 2019-11-26 02:20:55,543 [game.play:295]:Prelaunching <lutris.runners.wine.wine object at 0x7f290bc16c10>
DEBUG 2019-11-26 02:20:55,765 [downloader.start:55]:Starting download of:
https://github.com/doitsujin/dxvk/releases/download/vdxvk-time2/dxvk-dxvk-time2.tar.gz
DEBUG 2019-11-26 02:20:56,518 [downloader.on_done:97]:Finished downloading https://github.com/doitsujin/dxvk/releases/download/vdxvk-time2/dxvk-dxvk-time2.tar.gz
DEBUG 2019-11-26 02:20:56,668 [extract.extract_archive:69]:Extracting /home/alex/.local/share/lutris/runtime/dxvk/dxvk-dxvk-time2.tar.gz to /home/alex/.local/share/lutris/runtime/dxvk/dxvk-time2
ERROR 2019-11-26 02:20:56,668 [extract.extract_archive:116]:Extraction failed: not a gzip file
Traceback (most recent call last):
File "/usr/lib/python3.8/tarfile.py", line 1671, in gzopen
t = cls.taropen(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1647, in taropen
return cls(name, mode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1510, in __init__
self.firstmember = self.next()
File "/usr/lib/python3.8/tarfile.py", line 2313, in next
tarinfo = self.tarinfo.fromtarfile(self)
File "/usr/lib/python3.8/tarfile.py", line 1102, in fromtarfile
buf = tarfile.fileobj.read(BLOCKSIZE)
File "/usr/lib/python3.8/gzip.py", line 286, in read
return self._buffer.read(size)
File "/usr/lib/python3.8/_compression.py", line 68, in readinto
data = self.read(len(byte_view))
File "/usr/lib/python3.8/gzip.py", line 473, in read
if not self._read_gzip_header():
File "/usr/lib/python3.8/gzip.py", line 421, in _read_gzip_header
raise BadGzipFile('Not a gzipped file (%r)' % magic)
gzip.BadGzipFile: Not a gzipped file (b'No')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 114, in extract_archive
_do_extract(path, temp_path, opener, mode, extractor)
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 166, in _do_extract
handler = opener(archive, mode)
File "/usr/lib/python3.8/tarfile.py", line 1617, in open
return func(name, filemode, fileobj, **kwargs)
File "/usr/lib/python3.8/tarfile.py", line 1675, in gzopen
raise ReadError("not a gzip file")
tarfile.ReadError: not a gzip file
ERROR 2019-11-26 02:20:56,669 [jobs.target:32]:Error while completing task <bound method wine.prelaunch of <lutris.runners.wine.wine object at 0x7f290bc16c10>>: not a gzip file
<class 'lutris.util.extract.ExtractFailure'> not a gzip file
File "/usr/lib/python3.8/site-packages/lutris/util/jobs.py", line 30, in target
result = self.function(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 794, in prelaunch
self.setup_dxvk(
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 775, in setup_dxvk
self.toggle_dxvk(
File "/usr/lib/python3.8/site-packages/lutris/runners/wine.py", line 760, in toggle_dxvk
dxvk_manager.download()
File "/usr/lib/python3.8/site-packages/lutris/util/wine/dxvk.py", line 142, in download
extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)
File "/usr/lib/python3.8/site-packages/lutris/util/extract.py", line 117, in extract_archive
raise ExtractFailure(str(ex))
ERROR 2019-11-26 02:20:56,669 [game.configure_game:312]:not a gzip file
ERROR 2019-11-26 02:20:58,157 [game.configure_game:315]:Game prelaunch unsuccessful
TypeError: refresh() takes 1 positional argument but 3 were given
|
tarfile.ReadError
|
def sort_view(self, key="name", ascending=True):
"""Sort the model on a given column name"""
try:
sort_column = self.sort_columns[key]
except KeyError:
logger.error("Invalid column name '%s'", key)
sort_column = COL_NAME
self.modelsort.set_sort_column_id(
sort_column,
Gtk.SortType.ASCENDING if ascending else Gtk.SortType.DESCENDING,
)
|
def sort_view(self, key="name", ascending=True):
self.modelsort.set_sort_column_id(
self.sort_columns[key],
Gtk.SortType.ASCENDING if ascending else Gtk.SortType.DESCENDING,
)
|
https://github.com/lutris/lutris/issues/2078
|
Traceback (most recent call last):
File "/home/batapete/src/lutris/lutris/gui/views/store.py", line 237, in on_sort_column_changed
raise ValueError("Invalid sort key for col %s" % col)
ValueError: Invalid sort key for col 9
|
ValueError
|
def game_exe(self):
"""Return the game's executable's path."""
exe = self.game_config.get("exe")
if not exe:
return
if os.path.isabs(exe):
return exe
if self.game_path:
return os.path.join(self.game_path, exe)
return system.find_executable(exe)
|
def game_exe(self):
"""Return the game's executable's path."""
exe = self.game_config.get("exe")
if exe:
if os.path.isabs(exe):
exe_path = exe
else:
exe_path = os.path.join(self.game_path, exe)
return exe_path
|
https://github.com/lutris/lutris/issues/2019
|
DEBUG 2019-04-10 16:55:14,595 [game.save:212]:Saving S (linux)
DEBUG 2019-04-10 16:55:14,595 [config.save:208]:Saving LutrisConfig(level=game, game_config_id=s-1554908114, runner=linux) config to /home/user/.config/lutris/games/s-1554908114.yml
DEBUG 2019-04-10 16:55:14,667 [lutriswindow.on_game_updated:697]:Updating game S (linux)
DEBUG 2019-04-10 16:55:14,675 [store.get_missing_media:182]:Requesting missing icons from API for s
DEBUG 2019-04-10 16:55:14,677 [http.get:68]:GET https://lutris.net/api/games
DEBUG 2019-04-10 16:55:14,829 [api.get_game_api_page:134]:No game found for s
DEBUG 2019-04-10 16:55:23,219 [game.save:212]:Saving S (linux)
DEBUG 2019-04-10 16:55:23,220 [config.save:208]:Saving LutrisConfig(level=game, game_config_id=s-1554908114, runner=linux) config to /home/user/.config/lutris/games/s-1554908114.yml
DEBUG 2019-04-10 16:55:23,280 [lutriswindow.on_game_updated:697]:Updating game S (linux)
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/lutris/game_actions.py", line 201, in on_edit_game_configuration
EditGameConfigDialog(self.window, self.game)
File "/usr/lib/python3/dist-packages/lutris/gui/config/edit_game.py", line 19, in __init__
self.build_tabs("game")
File "/usr/lib/python3/dist-packages/lutris/gui/config/common.py", line 68, in build_tabs
self._build_game_tab()
File "/usr/lib/python3/dist-packages/lutris/gui/config/common.py", line 307, in _build_game_tab
self.game_box = GameBox(self.lutris_config, self.game)
File "/usr/lib/python3/dist-packages/lutris/gui/config/boxes.py", line 615, in __init__
self.generate_widgets("game")
File "/usr/lib/python3/dist-packages/lutris/gui/config/boxes.py", line 90, in generate_widgets
self.call_widget_generator(option, option_key, value, default)
File "/usr/lib/python3/dist-packages/lutris/gui/config/boxes.py", line 185, in call_widget_generator
self.generate_file_chooser(option, value)
File "/usr/lib/python3/dist-packages/lutris/gui/config/boxes.py", line 390, in generate_file_chooser
path = os.path.join(self.game.directory, path)
File "/usr/lib/python3.6/posixpath.py", line 80, in join
a = os.fspath(a)
TypeError: expected str, bytes or os.PathLike object, not NoneType
|
TypeError
|
def get_system_paths():
"""Return paths of system libraries"""
paths = []
# This prioritizes system libraries over
# the Lutris and Steam runtimes.
for lib_paths in LINUX_SYSTEM.iter_lib_folders():
for path in lib_paths:
paths.append(path)
return paths
|
def get_system_paths():
"""Return paths of system libraries"""
paths = []
# This prioritizes system libraries over
# the Lutris and Steam runtimes.
for lib_paths in LINUX_SYSTEM.iter_lib_folders():
for index, _arch in enumerate(LINUX_SYSTEM.runtime_architectures):
paths.append(lib_paths[index])
return paths
|
https://github.com/lutris/lutris/issues/1969
|
DEBUG 2019-03-30 14:02:57,737 [application.launch:341]:Adding game EFMB (linux) (140618847460016) to running games
DEBUG 2019-03-30 14:02:57,742 [display._get_vidmodes:15]:Retrieving video modes from XrandR
DEBUG 2019-03-30 14:02:57,761 [game.configure_game:305]:Launching EFMB: {'command': ['/home/cxf/Downloads/EFMB_linux/EFMB/runner']}
DEBUG 2019-03-30 14:02:57,761 [game.configure_game:306]:Game info: {
"command": [
"/home/cxf/Downloads/EFMB_linux/EFMB/runner"
]
}
Traceback (most recent call last):
File "/home/cxf/.lutris/lutris/game_actions.py", line 156, in on_game_run
self.application.launch(self.game)
File "/home/cxf/.lutris/lutris/gui/application.py", line 344, in launch
game.play()
File "/home/cxf/.lutris/lutris/game.py", line 277, in play
self.configure_game(True)
File "/home/cxf/.lutris/lutris/exceptions.py", line 26, in wrapper
return function(*args, **kwargs)
File "/home/cxf/.lutris/lutris/game.py", line 447, in configure_game
game_env = gameplay_info.get("env") or self.runner.get_env()
File "/home/cxf/.lutris/lutris/runners/runner.py", line 149, in get_env
runtime_env = self.get_runtime_env()
File "/home/cxf/.lutris/lutris/runners/runner.py", line 176, in get_runtime_env
prefer_system_libs=self.system_config.get("prefer_system_libs", True)
File "/home/cxf/.lutris/lutris/runtime.py", line 214, in get_env
wine_path=wine_path
File "/home/cxf/.lutris/lutris/runtime.py", line 291, in get_paths
wine_path=wine_path
File "/home/cxf/.lutris/lutris/runtime.py", line 279, in get_runtime_paths
paths += get_system_paths()
File "/home/cxf/.lutris/lutris/runtime.py", line 240, in get_system_paths
paths.append(lib_paths[index])
IndexError: list index out of range
|
IndexError
|
def __init__(self, game_id=None):
super().__init__()
self.id = game_id # pylint: disable=invalid-name
self.runner = None
self.config = None
# Load attributes from database
game_data = pga.get_game_by_field(game_id, "id")
self.slug = game_data.get("slug") or ""
self.runner_name = game_data.get("runner") or ""
self.directory = game_data.get("directory") or ""
self.name = game_data.get("name") or ""
self.game_config_id = game_data.get("configpath") or ""
self.is_installed = bool(game_data.get("installed")) and self.game_config_id
self.platform = game_data.get("platform") or ""
self.year = game_data.get("year") or ""
self.lastplayed = game_data.get("lastplayed") or 0
self.steamid = game_data.get("steamid") or ""
self.has_custom_banner = bool(game_data.get("has_custom_banner"))
self.has_custom_icon = bool(game_data.get("has_custom_icon"))
try:
self.playtime = float(game_data.get("playtime") or 0.0)
except ValueError:
logger.error("Invalid playtime value %s", game_data.get("playtime"))
self.playtime = 0.0
if self.game_config_id:
self.load_config()
self.game_thread = None
self.prelaunch_executor = None
self.heartbeat = None
self.killswitch = None
self.state = self.STATE_IDLE
self.exit_main_loop = False
self.xboxdrv_thread = None
self.game_runtime_config = {}
self.resolution_changed = False
self.compositor_disabled = False
self.stop_compositor = self.start_compositor = ""
self.original_outputs = None
self.log_buffer = Gtk.TextBuffer()
self.log_buffer.create_tag("warning", foreground="red")
self.timer = Timer()
|
def __init__(self, game_id=None):
super().__init__()
self.id = game_id # pylint: disable=invalid-name
self.runner = None
self.config = None
# Load attributes from database
game_data = pga.get_game_by_field(game_id, "id")
self.slug = game_data.get("slug") or ""
self.runner_name = game_data.get("runner") or ""
self.directory = game_data.get("directory") or ""
self.name = game_data.get("name") or ""
self.game_config_id = game_data.get("configpath") or ""
self.is_installed = bool(game_data.get("installed")) and self.game_config_id
self.platform = game_data.get("platform") or ""
self.year = game_data.get("year") or ""
self.lastplayed = game_data.get("lastplayed") or 0
self.steamid = game_data.get("steamid") or ""
self.has_custom_banner = bool(game_data.get("has_custom_banner"))
self.has_custom_icon = bool(game_data.get("has_custom_icon"))
try:
self.playtime = float(game_data.get("playtime") or 0.0)
except ValueError:
logger.error("Invalid playtime value %s", game_data.get("playtime"))
if self.game_config_id:
self.load_config()
self.game_thread = None
self.prelaunch_executor = None
self.heartbeat = None
self.killswitch = None
self.state = self.STATE_IDLE
self.exit_main_loop = False
self.xboxdrv_thread = None
self.game_runtime_config = {}
self.resolution_changed = False
self.compositor_disabled = False
self.stop_compositor = self.start_compositor = ""
self.original_outputs = None
self.log_buffer = Gtk.TextBuffer()
self.log_buffer.create_tag("warning", foreground="red")
self.timer = Timer()
|
https://github.com/lutris/lutris/issues/1698
|
INFO 2019-02-02 10:25:28,606 [application.do_command_line:222]:Running Lutris 0.5.0
INFO 2019-02-02 10:25:28,607 [startup.check_driver:43]:Using NVIDIA drivers 396.54 for x86_64
INFO 2019-02-02 10:25:28,607 [startup.check_driver:47]:GPU: GeForce GTX 1070 Ti
INFO 2019-02-02 10:25:28,607 [startup.check_driver:63]:GPU: 10DE:1B82 1043:861E using nvidia drivers
INFO 2019-02-02 10:25:28,653 [startup.check_vulkan:84]:Vulkan is supported
ERROR 2019-02-02 10:25:28,654 [game.__init__:69]:Invalid playtime value 0.0 hrs
ERROR 2019-02-02 10:25:28,656 [startup.fill_missing_platforms:98]:Providing missing platorm for game super-smash-bros-melee
WARNING 2019-02-02 10:25:28,656 [game.set_platform_from_runner:193]:Can't get platform for runner Dolphin
DEBUG 2019-02-02 10:25:28,656 [game.save:201]:Saving Super Smash Bros. Melee (dolphin)
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/lutris/gui/application.py", line 223, in do_command_line
run_all_checks()
File "/usr/lib/python3/dist-packages/lutris/startup.py", line 109, in run_all_checks
fill_missing_platforms()
File "/usr/lib/python3/dist-packages/lutris/startup.py", line 100, in fill_missing_platforms
game.save(metadata_only=True)
File "/usr/lib/python3/dist-packages/lutris/game.py", line 216, in save
playtime=self.playtime,
AttributeError: 'Game' object has no attribute 'playtime'
INFO 2019-02-02 10:25:28,688 [application.do_shutdown:439]:Shutting down Lutris
|
AttributeError
|
def check_driver():
"""Report on the currently running driver"""
if drivers.is_nvidia():
driver_info = drivers.get_nvidia_driver_info()
# pylint: disable=logging-format-interpolation
logger.info(
"Using {vendor} drivers {version} for {arch}".format(**driver_info["nvrm"])
)
gpus = drivers.get_nvidia_gpu_ids()
for gpu_id in gpus:
gpu_info = drivers.get_nvidia_gpu_info(gpu_id)
logger.info("GPU: %s", gpu_info.get("Model"))
elif hasattr(LINUX_SYSTEM, "glxinfo"):
logger.info("Using %s", LINUX_SYSTEM.glxinfo.opengl_vendor)
if hasattr(LINUX_SYSTEM.glxinfo, "GLX_MESA_query_renderer"):
logger.info(
"Running Mesa driver %s on %s",
LINUX_SYSTEM.glxinfo.GLX_MESA_query_renderer.version,
LINUX_SYSTEM.glxinfo.GLX_MESA_query_renderer.device,
)
else:
logger.warning(
"glxinfo is not available on your system, unable to detect driver version"
)
for card in drivers.get_gpus():
# pylint: disable=logging-format-interpolation
try:
logger.info(
"GPU: {PCI_ID} {PCI_SUBSYS_ID} using {DRIVER} drivers".format(
**drivers.get_gpu_info(card)
)
)
except KeyError:
logger.error("Unable to get GPU information from '%s'", card)
|
def check_driver():
"""Report on the currently running driver"""
if drivers.is_nvidia():
driver_info = drivers.get_nvidia_driver_info()
# pylint: disable=logging-format-interpolation
logger.info(
"Using {vendor} drivers {version} for {arch}".format(**driver_info["nvrm"])
)
gpus = drivers.get_nvidia_gpu_ids()
for gpu_id in gpus:
gpu_info = drivers.get_nvidia_gpu_info(gpu_id)
logger.info("GPU: %s", gpu_info.get("Model"))
elif hasattr(LINUX_SYSTEM, "glxinfo"):
logger.info("Using %s", LINUX_SYSTEM.glxinfo.opengl_vendor)
if hasattr(LINUX_SYSTEM.glxinfo, "GLX_MESA_query_renderer"):
logger.info(
"Running Mesa driver %s on %s",
LINUX_SYSTEM.glxinfo.GLX_MESA_query_renderer.version,
LINUX_SYSTEM.glxinfo.GLX_MESA_query_renderer.device,
)
else:
logger.warning(
"glxinfo is not available on your system, unable to detect driver version"
)
for card in drivers.get_gpus():
# pylint: disable=logging-format-interpolation
logger.info(
"GPU: {PCI_ID} {PCI_SUBSYS_ID} using {DRIVER} drivers".format(
**drivers.get_gpu_info(card)
)
)
|
https://github.com/lutris/lutris/issues/1706
|
$ lutris
2019-02-02 21:46:38,710: Running Lutris 0.5.0
2019-02-02 21:46:38,711: glxinfo is not available on your system, unable to detect driver version
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/lutris/gui/application.py", line 223, in do_command_line
run_all_checks()
File "/usr/lib/python3/dist-packages/lutris/startup.py", line 106, in run_all_checks
check_driver()
File "/usr/lib/python3/dist-packages/lutris/startup.py", line 63, in check_driver
**drivers.get_gpu_info(card)
KeyError: 'PCI_ID'
2019-02-02 21:46:38,753: Shutting down Lutris
|
KeyError
|
def sync_with_lutris():
apps = get_games()
desktop_games_in_lutris = pga.get_desktop_games()
slugs_in_lutris = set([str(game["slug"]) for game in desktop_games_in_lutris])
seen_slugs = set()
for app in apps:
game_info = None
name = app[0]
appid = app[1]
slug = slugify(name)
# if it fails to get slug from the name
if not slug:
slug = slugify(appid)
if not name or not slug or not appid:
logger.error(
"Failed to load desktop game "
'"' + str(name) + '" '
"(app: " + str(appid) + ", slug: " + slug + ")"
)
continue
else:
logger.debug(
"Found desktop game "
'"' + str(name) + '" '
"(app: " + str(appid) + ", slug: " + slug + ")"
)
seen_slugs.add(slug)
if slug not in slugs_in_lutris:
game_info = {
"name": name,
"slug": slug,
"config_path": slug + "-desktopapp",
"installer_slug": "desktopapp",
"exe": app[2],
"args": app[3],
}
mark_as_installed(appid, "linux", game_info)
unavailable_slugs = slugs_in_lutris.difference(seen_slugs)
for slug in unavailable_slugs:
for game in desktop_games_in_lutris:
if game["slug"] == slug:
mark_as_uninstalled(game)
|
def sync_with_lutris():
apps = get_games()
desktop_games_in_lutris = pga.get_desktop_games()
slugs_in_lutris = set([str(game["slug"]) for game in desktop_games_in_lutris])
seen_slugs = set()
for app in apps:
game_info = None
name = app[0]
slug = slugify(name)
appid = app[1]
if not name or not slug or not appid:
logger.error(
"Failed to load desktop game "
'"' + str(name) + '" (' + str(appid) + ".desktop)"
)
continue
else:
logger.debug(
'Found desktop game "' + str(name) + '" (' + str(appid) + ".desktop)"
)
seen_slugs.add(slug)
if slug not in slugs_in_lutris:
game_info = {
"name": name,
"slug": slug,
"config_path": slug + "-desktopapp",
"installer_slug": "desktopapp",
"exe": app[2],
"args": app[3],
}
mark_as_installed(appid, "linux", game_info)
unavailable_slugs = slugs_in_lutris.difference(seen_slugs)
for slug in unavailable_slugs:
for game in desktop_games_in_lutris:
if game["slug"] == slug:
mark_as_uninstalled(game)
|
https://github.com/lutris/lutris/issues/525
|
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/lutris/gui/application.py", line 152, in do_activate
self.window = LutrisWindow(application=self)
File "/usr/lib/python3/dist-packages/lutris/gui/lutriswindow.py", line 84, in init
desktopapps.sync_with_lutris()
File "/usr/lib/python3/dist-packages/lutris/util/desktopapps.py", line 87, in sync_with_lutris
mark_as_installed(appid, 'linux', game_info)
File "/usr/lib/python3/dist-packages/lutris/util/desktopapps.py", line 32, in mark_as_installed
assert game_info[key]
AssertionError
|
AssertionError
|
def get_outputs():
"""Return list of tuples containing output name and geometry"""
outputs = list()
for line in get_vidmodes():
parts = line.split()
if len(parts) < 2:
continue
if parts[1] == "connected":
if len(parts) == 2:
continue
geom = parts[2] if parts[2] != "primary" else parts[3]
if geom.startswith("("): # Screen turned off, no geometry
continue
outputs.append((parts[0], geom))
return outputs
|
def get_outputs():
"""Return list of tuples containing output name and geometry"""
outputs = list()
for line in get_vidmodes():
parts = line.split()
if len(parts) < 2:
continue
if parts[1] == "connected":
geom = parts[2] if parts[2] != "primary" else parts[3]
if geom.startswith("("): # Screen turned off, no geometry
continue
outputs.append((parts[0], geom))
return outputs
|
https://github.com/lutris/lutris/issues/165
|
ubuntu-gnome@ubuntu-gnome:~$ lutris
Traceback (most recent call last):
File "/usr/bin/lutris", line 48, in <module>
from lutris.installer import InstallerDialog
File "/usr/lib/pymodules/python2.7/lutris/installer.py", line 24, in <module>
from lutris.gui.config_dialogs import AddGameDialog
File "/usr/lib/pymodules/python2.7/lutris/gui/config_dialogs.py", line 11, in <module>
from lutris.gui.config_boxes import GameBox, RunnerBox, SystemBox
File "/usr/lib/pymodules/python2.7/lutris/gui/config_boxes.py", line 7, in <module>
from lutris import sysoptions
File "/usr/lib/pymodules/python2.7/lutris/sysoptions.py", line 16, in <module>
outputs = display.get_output_names()
File "/usr/lib/pymodules/python2.7/lutris/util/display.py", line 29, in get_output_names
return [output[0] for output in get_outputs()]
File "/usr/lib/pymodules/python2.7/lutris/util/display.py", line 21, in get_outputs
geom = parts[2] if parts[2] != 'primary' else parts[3]
IndexError: list index out of range
|
IndexError
|
def launch_install(self, script_index):
script = self.scripts[script_index]
self.interpreter = installer.ScriptInterpreter(script, self)
game_name = self.interpreter.game_name.replace("&", "&")
self.title_label.set_markup("<b>Installing {}</b>".format(game_name))
self.continue_install()
|
def launch_install(self, script_index):
script = self.scripts[script_index]
self.interpreter = installer.ScriptInterpreter(script, self)
game_name = self.interpreter.game_name.replace("&", "&")
self.title_label.set_markup("<b>Installing {}</b>".format(game_name))
self.continue_install()
|
https://github.com/lutris/lutris/issues/178
|
$ lutris -d
DEBUG 2015-04-09 14:16:41,545 [lutris]:Welcome to Lutris
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
DEBUG 2015-04-09 14:16:41,678 [lutriswindow]:Getting game list
DEBUG 2015-04-09 14:16:41,886 [lutriswindow]:Switching view
DEBUG 2015-04-09 14:16:42,207 [lutriswindow]:Connecting signals
INFO 2015-04-09 14:16:42,334 [lutriswindow]:Connected as orbea
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: sync_all
DEBUG 2015-04-09 14:16:42,336 [sync]:Syncing game library
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: update_runtime
DEBUG 2015-04-09 14:16:42,336 [sync]:97 games in local library
DEBUG 2015-04-09 14:16:42,338 [api]:Fetching game library
DEBUG 2015-04-09 14:16:43,747 [sync]:97 games in remote library (inc. unpublished)
DEBUG 2015-04-09 14:16:43,770 [sync]:0 games updated
DEBUG 2015-04-09 14:16:43,770 [sync]:Syncing local steam games
DEBUG 2015-04-09 14:16:43,992 [jobs]:Async call: sync_icons
DEBUG 2015-04-09 14:16:43,993 [resources]:Fetching icons
Traceback (most recent call last):
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 833, in on_installer_selected
self.launch_install(self.installer_choice)
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 781, in launch_install
self.title_label.set_markup("<b>Installing {}</b>".format(game_name))
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 12: ordinal not in range(128)
|
UnicodeEncodeError
|
def db_insert(db_path, table, fields):
field_names = ", ".join(fields.keys())
placeholders = ("?, " * len(fields))[:-2]
field_values = _decode_utf8_values(fields.values())
with db_cursor(db_path) as cursor:
cursor.execute(
"insert into {0}({1}) values ({2})".format(
table, field_names, placeholders
),
field_values,
)
|
def db_insert(db_path, table, fields):
field_names = ", ".join(fields.keys())
placeholders = ("?, " * len(fields))[:-2]
field_values = tuple(fields.values())
with db_cursor(db_path) as cursor:
cursor.execute(
"insert into {0}({1}) values ({2})".format(
table, field_names, placeholders
),
field_values,
)
|
https://github.com/lutris/lutris/issues/178
|
$ lutris -d
DEBUG 2015-04-09 14:16:41,545 [lutris]:Welcome to Lutris
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
DEBUG 2015-04-09 14:16:41,678 [lutriswindow]:Getting game list
DEBUG 2015-04-09 14:16:41,886 [lutriswindow]:Switching view
DEBUG 2015-04-09 14:16:42,207 [lutriswindow]:Connecting signals
INFO 2015-04-09 14:16:42,334 [lutriswindow]:Connected as orbea
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: sync_all
DEBUG 2015-04-09 14:16:42,336 [sync]:Syncing game library
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: update_runtime
DEBUG 2015-04-09 14:16:42,336 [sync]:97 games in local library
DEBUG 2015-04-09 14:16:42,338 [api]:Fetching game library
DEBUG 2015-04-09 14:16:43,747 [sync]:97 games in remote library (inc. unpublished)
DEBUG 2015-04-09 14:16:43,770 [sync]:0 games updated
DEBUG 2015-04-09 14:16:43,770 [sync]:Syncing local steam games
DEBUG 2015-04-09 14:16:43,992 [jobs]:Async call: sync_icons
DEBUG 2015-04-09 14:16:43,993 [resources]:Fetching icons
Traceback (most recent call last):
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 833, in on_installer_selected
self.launch_install(self.installer_choice)
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 781, in launch_install
self.title_label.set_markup("<b>Installing {}</b>".format(game_name))
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 12: ordinal not in range(128)
|
UnicodeEncodeError
|
def db_update(db_path, table, updated_fields, row):
"""update `table` with the values given in the dict `values` on the
condition given with the tuple `row`
"""
field_names = "=?, ".join(updated_fields.keys()) + "=?"
field_values = _decode_utf8_values(updated_fields.values())
condition_field = "{0}=?".format(row[0])
condition_value = (row[1],)
with db_cursor(db_path) as cursor:
query = "UPDATE {0} SET {1} WHERE {2}".format(
table, field_names, condition_field
)
cursor.execute(query, field_values + condition_value)
|
def db_update(db_path, table, updated_fields, row):
"""update `table` with the values given in the dict `values` on the
condition given with the tuple `row`
"""
field_names = "=?, ".join(updated_fields.keys()) + "=?"
field_values = tuple(updated_fields.values())
condition_field = "{0}=?".format(row[0])
condition_value = (row[1],)
with db_cursor(db_path) as cursor:
query = "UPDATE {0} SET {1} WHERE {2}".format(
table, field_names, condition_field
)
cursor.execute(query, field_values + condition_value)
|
https://github.com/lutris/lutris/issues/178
|
$ lutris -d
DEBUG 2015-04-09 14:16:41,545 [lutris]:Welcome to Lutris
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
DEBUG 2015-04-09 14:16:41,678 [lutriswindow]:Getting game list
DEBUG 2015-04-09 14:16:41,886 [lutriswindow]:Switching view
DEBUG 2015-04-09 14:16:42,207 [lutriswindow]:Connecting signals
INFO 2015-04-09 14:16:42,334 [lutriswindow]:Connected as orbea
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: sync_all
DEBUG 2015-04-09 14:16:42,336 [sync]:Syncing game library
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: update_runtime
DEBUG 2015-04-09 14:16:42,336 [sync]:97 games in local library
DEBUG 2015-04-09 14:16:42,338 [api]:Fetching game library
DEBUG 2015-04-09 14:16:43,747 [sync]:97 games in remote library (inc. unpublished)
DEBUG 2015-04-09 14:16:43,770 [sync]:0 games updated
DEBUG 2015-04-09 14:16:43,770 [sync]:Syncing local steam games
DEBUG 2015-04-09 14:16:43,992 [jobs]:Async call: sync_icons
DEBUG 2015-04-09 14:16:43,993 [resources]:Fetching icons
Traceback (most recent call last):
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 833, in on_installer_selected
self.launch_install(self.installer_choice)
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 781, in launch_install
self.title_label.set_markup("<b>Installing {}</b>".format(game_name))
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 12: ordinal not in range(128)
|
UnicodeEncodeError
|
def slugify(value):
"""Remove special characters from a string and slugify it.
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = value.decode("UTF-8")
value = unicodedata.normalize("NFKD", value).encode("ascii", "ignore")
value = unicode(re.sub("[^\w\s-]", "", value).strip().lower())
return re.sub("[-\s]+", "-", value)
|
def slugify(value):
"""Remove special characters from a string and slugify it.
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = unicodedata.normalize("NFKD", unicode(value)).encode("ascii", "ignore")
value = unicode(re.sub("[^\w\s-]", "", value).strip().lower())
return re.sub("[-\s]+", "-", value)
|
https://github.com/lutris/lutris/issues/178
|
$ lutris -d
DEBUG 2015-04-09 14:16:41,545 [lutris]:Welcome to Lutris
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
(lutris:11906): Gtk-WARNING **: Unknown property: GtkLabel.lines
DEBUG 2015-04-09 14:16:41,678 [lutriswindow]:Getting game list
DEBUG 2015-04-09 14:16:41,886 [lutriswindow]:Switching view
DEBUG 2015-04-09 14:16:42,207 [lutriswindow]:Connecting signals
INFO 2015-04-09 14:16:42,334 [lutriswindow]:Connected as orbea
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: sync_all
DEBUG 2015-04-09 14:16:42,336 [sync]:Syncing game library
DEBUG 2015-04-09 14:16:42,336 [jobs]:Async call: update_runtime
DEBUG 2015-04-09 14:16:42,336 [sync]:97 games in local library
DEBUG 2015-04-09 14:16:42,338 [api]:Fetching game library
DEBUG 2015-04-09 14:16:43,747 [sync]:97 games in remote library (inc. unpublished)
DEBUG 2015-04-09 14:16:43,770 [sync]:0 games updated
DEBUG 2015-04-09 14:16:43,770 [sync]:Syncing local steam games
DEBUG 2015-04-09 14:16:43,992 [jobs]:Async call: sync_icons
DEBUG 2015-04-09 14:16:43,993 [resources]:Fetching icons
Traceback (most recent call last):
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 833, in on_installer_selected
self.launch_install(self.installer_choice)
File "/usr/lib64/python2.7/site-packages/lutris/installer.py", line 781, in launch_install
self.title_label.set_markup("<b>Installing {}</b>".format(game_name))
UnicodeEncodeError: 'ascii' codec can't encode character u'\u014d' in position 12: ordinal not in range(128)
|
UnicodeEncodeError
|
def get_forecast_component_plotly_props(
m, fcst, name, uncertainty=True, plot_cap=False
):
"""Prepares a dictionary for plotting the selected forecast component with Plotly
Parameters
----------
m: Prophet model.
fcst: pd.DataFrame output of m.predict.
name: Name of the component to plot.
uncertainty: Optional boolean to plot uncertainty intervals, which will
only be done if m.uncertainty_samples > 0.
plot_cap: Optional boolean indicating if the capacity should be shown
in the figure, if available.
Returns
-------
A dictionary with Plotly traces, xaxis and yaxis
"""
prediction_color = "#0072B2"
error_color = "rgba(0, 114, 178, 0.2)" # '#0072B2' with 0.2 opacity
cap_color = "black"
zeroline_color = "#AAA"
line_width = 2
range_margin = (fcst["ds"].max() - fcst["ds"].min()) * 0.05
range_x = [fcst["ds"].min() - range_margin, fcst["ds"].max() + range_margin]
text = None
mode = "lines"
if name == "holidays":
# Combine holidays into one hover text
holidays = m.construct_holiday_dataframe(fcst["ds"])
holiday_features, _, _ = m.make_holiday_features(fcst["ds"], holidays)
holiday_features.columns = holiday_features.columns.str.replace(
"_delim_", "", regex=False
)
holiday_features.columns = holiday_features.columns.str.replace(
"+0", "", regex=False
)
text = pd.Series(data="", index=holiday_features.index)
for holiday_feature, idxs in holiday_features.iteritems():
text[idxs.astype(bool) & (text != "")] += (
"<br>" # Add newline if additional holiday
)
text[idxs.astype(bool)] += holiday_feature
traces = []
traces.append(
go.Scatter(
name=name,
x=fcst["ds"],
y=fcst[name],
mode=mode,
line=go.scatter.Line(color=prediction_color, width=line_width),
text=text,
)
)
if (
uncertainty
and m.uncertainty_samples
and (fcst[name + "_upper"] != fcst[name + "_lower"]).any()
):
if mode == "markers":
traces[0].update(
error_y=dict(
type="data",
symmetric=False,
array=fcst[name + "_upper"],
arrayminus=fcst[name + "_lower"],
width=0,
color=error_color,
)
)
else:
traces.append(
go.Scatter(
name=name + "_upper",
x=fcst["ds"],
y=fcst[name + "_upper"],
mode=mode,
line=go.scatter.Line(width=0, color=error_color),
)
)
traces.append(
go.Scatter(
name=name + "_lower",
x=fcst["ds"],
y=fcst[name + "_lower"],
mode=mode,
line=go.scatter.Line(width=0, color=error_color),
fillcolor=error_color,
fill="tonexty",
)
)
if "cap" in fcst and plot_cap:
traces.append(
go.Scatter(
name="Cap",
x=fcst["ds"],
y=fcst["cap"],
mode="lines",
line=go.scatter.Line(color=cap_color, dash="dash", width=line_width),
)
)
if m.logistic_floor and "floor" in fcst and plot_cap:
traces.append(
go.Scatter(
name="Floor",
x=fcst["ds"],
y=fcst["floor"],
mode="lines",
line=go.scatter.Line(color=cap_color, dash="dash", width=line_width),
)
)
xaxis = go.layout.XAxis(type="date", range=range_x)
yaxis = go.layout.YAxis(
rangemode="normal" if name == "trend" else "tozero",
title=go.layout.yaxis.Title(text=name),
zerolinecolor=zeroline_color,
)
if name in m.component_modes["multiplicative"]:
yaxis.update(tickformat="%", hoverformat=".2%")
return {"traces": traces, "xaxis": xaxis, "yaxis": yaxis}
|
def get_forecast_component_plotly_props(
m, fcst, name, uncertainty=True, plot_cap=False
):
"""Prepares a dictionary for plotting the selected forecast component with Plotly
Parameters
----------
m: Prophet model.
fcst: pd.DataFrame output of m.predict.
name: Name of the component to plot.
uncertainty: Optional boolean to plot uncertainty intervals, which will
only be done if m.uncertainty_samples > 0.
plot_cap: Optional boolean indicating if the capacity should be shown
in the figure, if available.
Returns
-------
A dictionary with Plotly traces, xaxis and yaxis
"""
prediction_color = "#0072B2"
error_color = "rgba(0, 114, 178, 0.2)" # '#0072B2' with 0.2 opacity
cap_color = "black"
zeroline_color = "#AAA"
line_width = 2
range_margin = (fcst["ds"].max() - fcst["ds"].min()) * 0.05
range_x = [fcst["ds"].min() - range_margin, fcst["ds"].max() + range_margin]
text = None
mode = "lines"
if name == "holidays":
fcst = fcst[fcst[name] != 0].copy()
mode = "markers"
# Combine holidays into one hover text
holiday_features, _, _ = m.make_holiday_features(fcst["ds"], m.holidays)
holiday_features.columns = holiday_features.columns.str.replace(
"_delim_", "", regex=False
)
holiday_features.columns = holiday_features.columns.str.replace(
"+0", "", regex=False
)
text = pd.Series(data="", index=holiday_features.index)
for holiday_feature, idxs in holiday_features.iteritems():
text[idxs.astype(bool) & (text != "")] += (
"<br>" # Add newline if additional holiday
)
text[idxs.astype(bool)] += holiday_feature
traces = []
traces.append(
go.Scatter(
name=name,
x=fcst["ds"],
y=fcst[name],
mode=mode,
line=go.scatter.Line(color=prediction_color, width=line_width),
text=text,
)
)
if (
uncertainty
and m.uncertainty_samples
and (fcst[name + "_upper"] != fcst[name + "_lower"]).any()
):
if mode == "markers":
traces[0].update(
error_y=dict(
type="data",
symmetric=False,
array=fcst[name + "_upper"],
arrayminus=fcst[name + "_lower"],
width=0,
color=error_color,
)
)
else:
traces.append(
go.Scatter(
name=name + "_upper",
x=fcst["ds"],
y=fcst[name + "_upper"],
mode=mode,
line=go.scatter.Line(width=0, color=error_color),
)
)
traces.append(
go.Scatter(
name=name + "_lower",
x=fcst["ds"],
y=fcst[name + "_lower"],
mode=mode,
line=go.scatter.Line(width=0, color=error_color),
fillcolor=error_color,
fill="tonexty",
)
)
if "cap" in fcst and plot_cap:
traces.append(
go.Scatter(
name="Cap",
x=fcst["ds"],
y=fcst["cap"],
mode="lines",
line=go.scatter.Line(color=cap_color, dash="dash", width=line_width),
)
)
if m.logistic_floor and "floor" in fcst and plot_cap:
traces.append(
go.Scatter(
name="Floor",
x=fcst["ds"],
y=fcst["floor"],
mode="lines",
line=go.scatter.Line(color=cap_color, dash="dash", width=line_width),
)
)
xaxis = go.layout.XAxis(type="date", range=range_x)
yaxis = go.layout.YAxis(
rangemode="normal" if name == "trend" else "tozero",
title=go.layout.yaxis.Title(text=name),
zerolinecolor=zeroline_color,
)
if name in m.component_modes["multiplicative"]:
yaxis.update(tickformat="%", hoverformat=".2%")
return {"traces": traces, "xaxis": xaxis, "yaxis": yaxis}
|
https://github.com/facebook/prophet/issues/1525
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-50-fce739a52069> in <module>
----> 1 fig = fbprophet.plot.plot_components_plotly(m, forecast)
2 fig.update_layout(layout)
3 fig.show()
~\AppData\Local\Continuum\anaconda3\envs\forecast\lib\site-packages\fbprophet\plot.py in plot_components_plotly(m, fcst, uncertainty, plot_cap, figsize)
713 if m.train_holiday_names is not None and 'holidays' in fcst:
714 components['holidays'] = get_forecast_component_plotly_props(
--> 715 m, fcst, 'holidays', uncertainty)
716
717 regressors = {'additive': False, 'multiplicative': False}
~\AppData\Local\Continuum\anaconda3\envs\forecast\lib\site-packages\fbprophet\plot.py in get_forecast_component_plotly_props(m, fcst, name, uncertainty, plot_cap)
837 mode = 'markers'
838 # Combine holidays into one hover text
--> 839 holiday_features, _, _ = m.make_holiday_features(fcst['ds'], m.holidays)
840 holiday_features.columns = holiday_features.columns.str.replace('_delim_', '', regex=False)
841 holiday_features.columns = holiday_features.columns.str.replace('+0', '', regex=False)
~\AppData\Local\Continuum\anaconda3\envs\forecast\lib\site-packages\fbprophet\forecaster.py in make_holiday_features(self, dates, holidays)
532 row_index = pd.DatetimeIndex(dates.apply(lambda x: x.date()))
533
--> 534 for _ix, row in holidays.iterrows():
535 dt = row.ds.date()
536 try:
AttributeError: 'NoneType' object has no attribute 'iterrows'
|
AttributeError
|
def setup_dataframe(self, df, initialize_scales=False):
"""Prepare dataframe for fitting or predicting.
Adds a time index and scales y. Creates auxiliary columns 't', 't_ix',
'y_scaled', and 'cap_scaled'. These columns are used during both
fitting and predicting.
Parameters
----------
df: pd.DataFrame with columns ds, y, and cap if logistic growth. Any
specified additional regressors must also be present.
initialize_scales: Boolean set scaling factors in self from df.
Returns
-------
pd.DataFrame prepared for fitting or predicting.
"""
if "y" in df:
df["y"] = pd.to_numeric(df["y"])
if np.isinf(df["y"].values).any():
raise ValueError("Found infinity in column y.")
if df["ds"].dtype == np.int64:
df["ds"] = df["ds"].astype(str)
df["ds"] = pd.to_datetime(df["ds"])
if df["ds"].isnull().any():
raise ValueError("Found NaN in column ds.")
for name in self.extra_regressors:
if name not in df:
raise ValueError('Regressor "{}" missing from dataframe'.format(name))
df[name] = pd.to_numeric(df[name])
if df[name].isnull().any():
raise ValueError("Found NaN in column " + name)
for props in self.seasonalities.values():
condition_name = props["condition_name"]
if condition_name is not None:
if condition_name not in df:
raise ValueError(
'Condition "{}" missing from dataframe'.format(condition_name)
)
if not df[condition_name].isin([True, False]).all():
raise ValueError("Found non-boolean in column " + condition_name)
df[condition_name] = df[condition_name].astype("bool")
df = df.sort_values("ds")
df.reset_index(inplace=True, drop=True)
self.initialize_scales(initialize_scales, df)
if self.logistic_floor:
if "floor" not in df:
raise ValueError("Expected column 'floor'.")
else:
df["floor"] = 0
if self.growth == "logistic":
if "cap" not in df:
raise ValueError(
"Capacities must be supplied for logistic growth in column 'cap'"
)
if (df["cap"] <= df["floor"]).any():
raise ValueError("cap must be greater than floor (which defaults to 0).")
df["cap_scaled"] = (df["cap"] - df["floor"]) / self.y_scale
df["t"] = (df["ds"] - self.start) / self.t_scale
if "y" in df:
df["y_scaled"] = (df["y"] - df["floor"]) / self.y_scale
for name, props in self.extra_regressors.items():
df[name] = (df[name] - props["mu"]) / props["std"]
return df
|
def setup_dataframe(self, df, initialize_scales=False):
"""Prepare dataframe for fitting or predicting.
Adds a time index and scales y. Creates auxiliary columns 't', 't_ix',
'y_scaled', and 'cap_scaled'. These columns are used during both
fitting and predicting.
Parameters
----------
df: pd.DataFrame with columns ds, y, and cap if logistic growth. Any
specified additional regressors must also be present.
initialize_scales: Boolean set scaling factors in self from df.
Returns
-------
pd.DataFrame prepared for fitting or predicting.
"""
if "y" in df:
df["y"] = pd.to_numeric(df["y"])
if np.isinf(df["y"].values).any():
raise ValueError("Found infinity in column y.")
if df["ds"].dtype == np.int64:
df["ds"] = df["ds"].astype(str)
df["ds"] = pd.to_datetime(df["ds"])
if df["ds"].isnull().any():
raise ValueError("Found NaN in column ds.")
for name in self.extra_regressors:
if name not in df:
raise ValueError('Regressor "{}" missing from dataframe'.format(name))
df[name] = pd.to_numeric(df[name])
if df[name].isnull().any():
raise ValueError("Found NaN in column " + name)
for props in self.seasonalities.values():
condition_name = props["condition_name"]
if condition_name is not None:
if condition_name not in df:
raise ValueError(
'Condition "{}" missing from dataframe'.format(condition_name)
)
if not df[condition_name].isin([True, False]).all():
raise ValueError("Found non-boolean in column " + condition_name)
df[condition_name] = df[condition_name].astype("bool")
df = df.sort_values("ds")
df.reset_index(inplace=True, drop=True)
self.initialize_scales(initialize_scales, df)
if self.logistic_floor:
if "floor" not in df:
raise ValueError("Expected column 'floor'.")
else:
df["floor"] = 0
if self.growth == "logistic":
if "cap" not in df:
raise ValueError(
"Capacities must be supplied for logistic growth in column 'cap'"
)
df["cap_scaled"] = (df["cap"] - df["floor"]) / self.y_scale
df["t"] = (df["ds"] - self.start) / self.t_scale
if "y" in df:
df["y_scaled"] = (df["y"] - df["floor"]) / self.y_scale
for name, props in self.extra_regressors.items():
df[name] = (df[name] - props["mu"]) / props["std"]
return df
|
https://github.com/facebook/prophet/issues/807
|
Rejecting initial value:
Error evaluating the log probability at the initial value.
Exception: normal_lpdf: Random variable is -nan(ind), but must not be nan! (in 'unknown file name' at line 149)
Rejecting initial value:
Error evaluating the log probability at the initial value.
Exception: normal_lpdf: Random variable is -nan(ind), but must not be nan! (in 'unknown file name' at line 149)
Rejecting initial value:
Error evaluating the log probability at the initial value.
Exception: normal_lpdf: Random variable is -nan(ind), but must not be nan! (in 'unknown file name' at line 149)
Rejecting initial value:
Error evaluating the log probability at the initial value.
Exception: normal_lpdf: Random variable is -nan(ind), but must not be nan! (in 'unknown file name' at line 149)
(repeats many times)
Traceback (most recent call last):
File "C:\Users\pier_lim\AppData\Local\Continuum\anaconda3\envs\ana\lib\site-packages\fbprophet\forecaster.py", line 1003, in fit
dat, init=stan_init, iter=1e4, **kwargs)
File "C:\Users\pier_lim\AppData\Local\Continuum\anaconda3\envs\ana\lib\site-packages\pystan\model.py", line 510, in optimizing
ret, sample = fit._call_sampler(stan_args)
File "stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330.pyx", line 876, in stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330.StanFit4Model._call_sampler
File "stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330.pyx", line 401, in stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330._call_sampler
RuntimeError: Initialization failed.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<input>", line 24, in <module>
File "C:\Users\pier_lim\AppData\Local\Continuum\anaconda3\envs\ana\lib\site-packages\fbprophet\forecaster.py", line 1007, in fit
**kwargs
File "C:\Users\pier_lim\AppData\Local\Continuum\anaconda3\envs\ana\lib\site-packages\pystan\model.py", line 510, in optimizing
ret, sample = fit._call_sampler(stan_args)
File "stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330.pyx", line 876, in stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330.StanFit4Model._call_sampler
File "stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330.pyx", line 401, in stanfit4anon_model_db8febf10f23d7fa27a62fbcfd7f17b2_8347890744695566330._call_sampler
RuntimeError: Initialization failed.
|
RuntimeError
|
def _check_for_async(self):
if self.config.check_for_async:
in_async = False
if sys.version_info >= (3, 7, 0):
try:
asyncio.get_running_loop()
in_async = True
except RuntimeError:
pass
else: # pragma: no cover # not able to be covered in > Python 3.6.12
in_async = asyncio.get_event_loop().is_running()
if in_async:
logger.warning(
"It appears that you are using PRAW in an asynchronous"
" environment.\nIt is strongly recommended to use Async PRAW:"
" https://asyncpraw.readthedocs.io.\nSee"
" https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
" for more info.\n",
)
|
def _check_for_async(self):
if self.config.check_for_async:
try:
asyncio.get_running_loop()
except RuntimeError:
pass
else:
logger.warning(
"It appears that you are using PRAW in an asynchronous"
" environment.\nIt is strongly recommended to use Async PRAW:"
" https://asyncpraw.readthedocs.io.\nSee"
" https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
" for more info.\n",
)
|
https://github.com/praw-dev/praw/issues/1636
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/../venv/lib64/python3.6/site-packages/praw/models/reddit/base.py", line 34, in __getattr__
self._fetch()
File "/../venv/lib64/python3.6/site-packages/praw/models/reddit/redditor.py", line 170, in _fetch
data = self._fetch_data()
File "/../venv/lib64/python3.6/site-packages/praw/models/reddit/redditor.py", line 167, in _fetch_data
return self._reddit.request("GET", path, params)
File "/../venv/lib64/python3.6/site-packages/praw/reddit.py", line 793, in request
self._check_for_async()
File "/../venv/lib64/python3.6/site-packages/praw/reddit.py", line 371, in _check_for_async
asyncio.get_running_loop()
AttributeError: module 'asyncio' has no attribute 'get_running_loop'
|
AttributeError
|
def __init__(
self,
reddit: "Reddit",
_data: Optional[Dict[str, Any]],
_extra_attribute_to_check: Optional[str] = None,
_fetched: bool = False,
_str_field: bool = True,
):
"""Initialize a RedditBase instance (or a subclass).
:param reddit: An instance of :class:`~.Reddit`.
"""
super().__init__(reddit, _data=_data)
self._fetched = _fetched
if _str_field and self.STR_FIELD not in self.__dict__:
if (
_extra_attribute_to_check is not None
and _extra_attribute_to_check in self.__dict__
):
return
raise ValueError(
f"An invalid value was specified for {self.STR_FIELD}. Check that the "
f"argument for the {self.STR_FIELD} parameter is not empty."
)
|
def __init__(self, reddit: "Reddit", _data: Optional[Dict[str, Any]]):
"""Initialize a RedditBase instance (or a subclass).
:param reddit: An instance of :class:`~.Reddit`.
"""
super().__init__(reddit, _data=_data)
self._fetched = False
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
_data: Dict[str, Any] = None,
collection_id: Optional[str] = None,
permalink: Optional[str] = None,
):
"""Initialize this collection.
:param reddit: An instance of :class:`.Reddit`.
:param _data: Any data associated with the Collection (optional).
:param collection_id: The ID of the Collection (optional).
:param permalink: The permalink of the Collection (optional).
"""
if (_data, collection_id, permalink).count(None) != 2:
raise TypeError(
"Exactly one of _data, collection_id, or permalink must be provided."
)
if permalink:
collection_id = self._url_parts(permalink)[4]
if collection_id:
self.collection_id = collection_id # set from _data otherwise
super().__init__(reddit, _data)
self._info_params = {
"collection_id": self.collection_id,
"include_links": True,
}
|
def __init__(
self,
reddit: "Reddit",
_data: Dict[str, Any] = None,
collection_id: Optional[str] = None,
permalink: Optional[str] = None,
):
"""Initialize this collection.
:param reddit: An instance of :class:`.Reddit`.
:param _data: Any data associated with the Collection (optional).
:param collection_id: The ID of the Collection (optional).
:param permalink: The permalink of the Collection (optional).
"""
super().__init__(reddit, _data)
if (_data, collection_id, permalink).count(None) != 2:
raise TypeError(
"Exactly one of _data, collection_id, or permalink must be provided."
)
if permalink is not None:
collection_id = self._url_parts(permalink)[4]
if collection_id is not None:
self.collection_id = collection_id # set from _data otherwise
self._info_params = {
"collection_id": self.collection_id,
"include_links": True,
}
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
url: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the Comment object."""
if (id, url, _data).count(None) != 2:
raise TypeError("Exactly one of `id`, `url`, or `_data` must be provided.")
fetched = False
self._replies = []
self._submission = None
if id:
self.id = id
elif url:
self.id = self.id_from_url(url)
else:
fetched = True
super().__init__(reddit, _data=_data, _fetched=fetched)
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
url: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the Comment object."""
if (id, url, _data).count(None) != 2:
raise TypeError("Exactly one of `id`, `url`, or `_data` must be provided.")
self._replies = []
self._submission = None
super().__init__(reddit, _data=_data)
if id:
self.id = id
elif url:
self.id = self.id_from_url(url)
else:
self._fetched = True
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None, # pylint: disable=redefined-builtin
):
"""Initialize a lazy :class:`.LiveThread` instance.
:param reddit: An instance of :class:`.Reddit`.
:param id: A live thread ID, e.g., ``"ukaeu1ik4sw5"``
"""
if (id, _data).count(None) != 1:
raise TypeError("Either `id` or `_data` must be provided.")
if id:
self.id = id
super().__init__(reddit, _data=_data)
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None, # pylint: disable=redefined-builtin
):
"""Initialize a lazy :class:`.LiveThread` instance.
:param reddit: An instance of :class:`.Reddit`.
:param id: A live thread ID, e.g., ``"ukaeu1ik4sw5"``
"""
if bool(id) == bool(_data):
raise TypeError("Either `id` or `_data` must be provided.")
super().__init__(reddit, _data=_data)
if id:
self.id = id
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
thread_id: Optional[str] = None,
update_id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a lazy :class:`.LiveUpdate` instance.
Either ``thread_id`` and ``update_id``, or ``_data`` must be provided.
:param reddit: An instance of :class:`.Reddit`.
:param thread_id: A live thread ID, e.g., ``"ukaeu1ik4sw5"``.
:param update_id: A live update ID, e.g.,
``"7827987a-c998-11e4-a0b9-22000b6a88d2"``.
Usage:
.. code-block:: python
update = LiveUpdate(reddit, "ukaeu1ik4sw5", "7827987a-c998-11e4-a0b9-22000b6a88d2")
update.thread # LiveThread(id="ukaeu1ik4sw5")
update.id # "7827987a-c998-11e4-a0b9-22000b6a88d2"
update.author # "umbrae"
"""
if _data is not None:
# Since _data (part of JSON returned from reddit) have no thread ID,
# self._thread must be set by the caller of LiveUpdate(). See the code of
# LiveThread.updates() for example.
super().__init__(reddit, _data=_data, _fetched=True)
elif thread_id and update_id:
self.id = update_id
super().__init__(reddit, _data=None)
self._thread = LiveThread(self._reddit, thread_id)
else:
raise TypeError(
"Either `thread_id` and `update_id`, or `_data` must be provided."
)
|
def __init__(
self,
reddit: "Reddit",
thread_id: Optional[str] = None,
update_id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a lazy :class:`.LiveUpdate` instance.
Either ``thread_id`` and ``update_id``, or ``_data`` must be provided.
:param reddit: An instance of :class:`.Reddit`.
:param thread_id: A live thread ID, e.g., ``"ukaeu1ik4sw5"``.
:param update_id: A live update ID, e.g.,
``"7827987a-c998-11e4-a0b9-22000b6a88d2"``.
Usage:
.. code-block:: python
update = LiveUpdate(reddit, "ukaeu1ik4sw5", "7827987a-c998-11e4-a0b9-22000b6a88d2")
update.thread # LiveThread(id="ukaeu1ik4sw5")
update.id # "7827987a-c998-11e4-a0b9-22000b6a88d2"
update.author # "umbrae"
"""
if _data is not None:
# Since _data (part of JSON returned from reddit) have no thread ID,
# self._thread must be set by the caller of LiveUpdate(). See the code of
# LiveThread.updates() for example.
super().__init__(reddit, _data=_data)
self._fetched = True
elif thread_id and update_id:
super().__init__(reddit, _data=None)
self._thread = LiveThread(self._reddit, thread_id)
self.id = update_id
else:
raise TypeError(
"Either `thread_id` and `update_id`, or `_data` must be provided."
)
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(self, reddit: "Reddit", _data: Dict[str, Any]):
"""Construct an instance of the Message object."""
super().__init__(reddit, _data=_data, _fetched=True)
|
def __init__(self, reddit: "Reddit", _data: Dict[str, Any]):
"""Construct an instance of the Message object."""
super().__init__(reddit, _data=_data)
self._fetched = True
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
mark_read: bool = False,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the ModmailConversation object.
:param mark_read: If True, conversation is marked as read (default: False).
"""
if bool(id) == bool(_data):
raise TypeError("Either `id` or `_data` must be provided.")
if id:
self.id = id
super().__init__(reddit, _data=_data)
self._info_params = {"markRead": True} if mark_read else None
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
mark_read: bool = False,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the ModmailConversation object.
:param mark_read: If True, conversation is marked as read (default: False).
"""
super().__init__(reddit, _data=_data)
if bool(id) == bool(_data):
raise TypeError("Either `id` or `_data` must be provided.")
if id:
self.id = id
self._info_params = {"markRead": True} if mark_read else None
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
name: Optional[str] = None,
fullname: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a Redditor instance.
:param reddit: An instance of :class:`~.Reddit`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Exactly one of ``name``, ``fullname`` or ``_data`` must be provided.
"""
if (name, fullname, _data).count(None) != 2:
raise TypeError(
"Exactly one of `name`, `fullname`, or `_data` must be provided."
)
if _data:
assert isinstance(_data, dict) and "name" in _data, (
"Please file a bug with PRAW"
)
self._listing_use_sort = True
if name:
self.name = name
elif fullname:
self._fullname = fullname
super().__init__(reddit, _data=_data, _extra_attribute_to_check="_fullname")
|
def __init__(
self,
reddit: "Reddit",
name: Optional[str] = None,
fullname: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a Redditor instance.
:param reddit: An instance of :class:`~.Reddit`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Exactly one of ``name``, ``fullname`` or ``_data`` must be provided.
"""
if (name, fullname, _data).count(None) != 2:
raise TypeError(
"Exactly one of `name`, `fullname`, or `_data` must be provided."
)
if _data:
assert isinstance(_data, dict) and "name" in _data, (
"Please file a bug with PRAW"
)
super().__init__(reddit, _data=_data)
self._listing_use_sort = True
if name:
self.name = name
elif fullname:
self._fullname = fullname
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
subreddit: "Subreddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
reason_id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the Removal Reason object.
:param reddit: An instance of :class:`.Reddit`.
:param subreddit: An instance of :class:`.Subreddit`.
:param id: The id of the removal reason.
:param reason_id: (Deprecated) The original name of the ``id`` parameter. Used
for backwards compatibility. This parameter should not be used.
"""
id = self._warn_reason_id(reason_id, id)
if (id, _data).count(None) != 1:
raise ValueError("Either id or _data needs to be given.")
if id:
self.id = id
self.subreddit = subreddit
super().__init__(reddit, _data=_data)
|
def __init__(
self,
reddit: "Reddit",
subreddit: "Subreddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
reason_id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the Removal Reason object.
:param reddit: An instance of :class:`.Reddit`.
:param subreddit: An instance of :class:`.Subreddit`.
:param id: The id of the removal reason.
:param reason_id: (Deprecated) The original name of the ``id`` parameter. Used
for backwards compatibility. This parameter should not be used.
"""
id = self._warn_reason_id(reason_id, id)
if (id, _data).count(None) != 1:
raise ValueError("Either id or _data needs to be given.")
self.id = id
self.subreddit = subreddit
super().__init__(reddit, _data=_data)
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
subreddit: Optional["Subreddit"] = None,
short_name: Optional[str] = None,
_data: Optional[Dict[str, str]] = None,
):
"""Construct an instance of the Rule object."""
if (short_name, _data).count(None) != 1:
raise ValueError("Either short_name or _data needs to be given.")
if short_name:
self.short_name = short_name
# Note: The subreddit parameter can be None, because the objector does not know
# this info. In that case, it is the responsibility of the caller to set the
# `subreddit` property on the returned value.
self.subreddit = subreddit
super().__init__(reddit, _data=_data)
|
def __init__(
self,
reddit: "Reddit",
subreddit: Optional["Subreddit"] = None,
short_name: Optional[str] = None,
_data: Optional[Dict[str, str]] = None,
):
"""Construct an instance of the Rule object."""
if (short_name, _data).count(None) != 1:
raise ValueError("Either short_name or _data needs to be given.")
self.short_name = short_name
# Note: The subreddit parameter can be None, because the objector does not know
# this info. In that case, it is the responsibility of the caller to set the
# `subreddit` property on the returned value.
self.subreddit = subreddit
super().__init__(reddit, _data=_data)
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
url: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a Submission instance.
:param reddit: An instance of :class:`~.Reddit`.
:param id: A reddit base36 submission ID, e.g., ``2gmzqe``.
:param url: A URL supported by :meth:`~praw.models.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
if (id, url, _data).count(None) != 2:
raise TypeError("Exactly one of `id`, `url`, or `_data` must be provided.")
self.comment_limit = 2048
# Specify the sort order for ``comments``
self.comment_sort = "confidence"
if id:
self.id = id
elif url:
self.id = self.id_from_url(url)
super().__init__(reddit, _data=_data)
self._comments_by_id = {}
|
def __init__(
self,
reddit: "Reddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
url: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Initialize a Submission instance.
:param reddit: An instance of :class:`~.Reddit`.
:param id: A reddit base36 submission ID, e.g., ``2gmzqe``.
:param url: A URL supported by :meth:`~praw.models.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
if (id, url, _data).count(None) != 2:
raise TypeError("Exactly one of `id`, `url`, or `_data` must be provided.")
super().__init__(reddit, _data=_data)
self.comment_limit = 2048
# Specify the sort order for ``comments``
self.comment_sort = "confidence"
if id is not None:
self.id = id
elif url is not None:
self.id = self.id_from_url(url)
self._comments_by_id = {}
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(self, reddit, display_name=None, _data=None):
"""Initialize a Subreddit instance.
:param reddit: An instance of :class:`~.Reddit`.
:param display_name: The name of the subreddit.
.. note::
This class should not be initialized directly. Instead obtain an instance
via: ``reddit.subreddit("subreddit_name")``
"""
if (display_name, _data).count(None) != 1:
raise TypeError("Either `display_name` or `_data` must be provided.")
if display_name:
self.display_name = display_name
super().__init__(reddit, _data=_data)
self._path = API_PATH["subreddit"].format(subreddit=self)
|
def __init__(self, reddit, display_name=None, _data=None):
"""Initialize a Subreddit instance.
:param reddit: An instance of :class:`~.Reddit`.
:param display_name: The name of the subreddit.
.. note::
This class should not be initialized directly. Instead obtain an instance
via: ``reddit.subreddit("subreddit_name")``
"""
if bool(display_name) == bool(_data):
raise TypeError("Either `display_name` or `_data` must be provided.")
super().__init__(reddit, _data=_data)
if display_name:
self.display_name = display_name
self._path = API_PATH["subreddit"].format(subreddit=self)
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def __init__(
self,
reddit: "Reddit",
subreddit: "Subreddit",
name: str,
revision: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the WikiPage object.
:param revision: A specific revision ID to fetch. By default, fetches the most
recent revision.
"""
self.name = name
self._revision = revision
self.subreddit = subreddit
super().__init__(reddit, _data=_data, _str_field=False)
|
def __init__(
self,
reddit: "Reddit",
subreddit: "Subreddit",
name: str,
revision: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the WikiPage object.
:param revision: A specific revision ID to fetch. By default, fetches the most
recent revision.
"""
self.name = name
self._revision = revision
self.subreddit = subreddit
super().__init__(reddit, _data=_data)
|
https://github.com/praw-dev/praw/issues/1552
|
Traceback (most recent call last):
File "C:\00. Datos\90. Descargas\claimdoneremover-master (1)\claimdoneremover-master\cdremover\main.py", line 49, in <module>
for comment in reddit.redditor(user).comments.new(limit=limit):
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\util\cache.py", line 39, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\listing\mixins\redditor.py", line 47, in comments
return SubListing(self._reddit, self._path, "comments")
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 122, in _path
return API_PATH["user"].format(user=self)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 61, in __str__
return getattr(self, self.STR_FIELD)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
...
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 163, in _fetch_info
return ("user_about", {"user": self.name}, None)
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\base.py", line 34, in __getattr__
self._fetch()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 171, in _fetch
data = self._fetch_data()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 166, in _fetch_data
name, fields, params = self._fetch_info()
File "C:\Users\Andres\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\redditor.py", line 161, in _fetch_info
if hasattr(self, "_fullname"):
RecursionError: maximum recursion depth exceeded while calling a Python object
|
RecursionError
|
def id_from_url(url: str) -> str:
"""Return the ID contained within a submission URL.
:param url: A url to a submission in one of the following formats (http urls
will also work):
* https://redd.it/2gmzqe
* https://reddit.com/comments/2gmzqe/
* https://www.reddit.com/r/redditdev/comments/2gmzqe/praw_https/
* https://www.reddit.com/gallery/2gmzqe
:raises: :class:`.InvalidURL` if URL is not a valid submission URL.
"""
parts = RedditBase._url_parts(url)
if "comments" not in parts and "gallery" not in parts:
submission_id = parts[-1]
if "r" in parts:
raise InvalidURL(url, message="Invalid URL (subreddit, not submission): {}")
elif "gallery" in parts:
submission_id = parts[parts.index("gallery") + 1]
elif parts[-1] == "comments":
raise InvalidURL(url, message="Invalid URL (submission id not present): {}")
else:
submission_id = parts[parts.index("comments") + 1]
if not submission_id.isalnum():
raise InvalidURL(url)
return submission_id
|
def id_from_url(url: str) -> str:
"""Return the ID contained within a submission URL.
:param url: A url to a submission in one of the following formats (http urls
will also work):
* https://redd.it/2gmzqe
* https://reddit.com/comments/2gmzqe/
* https://www.reddit.com/r/redditdev/comments/2gmzqe/praw_https/
* https://www.reddit.com/gallery/2gmzqe
:raises: :class:`.InvalidURL` if URL is not a valid submission URL.
"""
parts = RedditBase._url_parts(url)
if "comments" not in parts and "gallery" not in parts:
submission_id = parts[-1]
if "r" in parts:
raise InvalidURL(url, message="Invalid URL (subreddit, not submission): {}")
elif "gallery" in parts:
submission_id = parts[parts.index("gallery") + 1]
else:
submission_id = parts[parts.index("comments") + 1]
if not submission_id.isalnum():
raise InvalidURL(url)
return submission_id
|
https://github.com/praw-dev/praw/issues/1553
|
IndexError Traceback (most recent call last)
<ipython-input-14-87a53f614e22> in <module>
----> 1 reddit.submission(url="https://reddit.com/comments/")
~/.local/share/virtualenvs/tmp-agwWamBd/lib/python3.8/site-packages/praw/reddit.py in submission(self, id, url)
798
799 """
--> 800 return models.Submission(self, id=id, url=url)
~/.local/share/virtualenvs/tmp-agwWamBd/lib/python3.8/site-packages/praw/models/reddit/submission.py in __init__(self, reddit, id, url, _data)
538 self.id = id
539 elif url is not None:
--> 540 self.id = self.id_from_url(url)
541
542 self._comments_by_id = {}
~/.local/share/virtualenvs/tmp-agwWamBd/lib/python3.8/site-packages/praw/models/reddit/submission.py in id_from_url(url)
422 )
423 else:
--> 424 submission_id = parts[parts.index("comments") + 1]
425
426 if not submission_id.isalnum():
IndexError: list index out of range
|
IndexError
|
def request(
self,
method: str,
path: str,
params: Optional[Union[str, Dict[str, str]]] = None,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json=None,
) -> Any:
"""Return the parsed JSON data returned from a request to URL.
:param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
:param path: The path to fetch.
:param params: The query parameters to add to the request (default:
None).
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
:param files: Dictionary, filename to file (like) object mapping
(default: None).
:param json: JSON-serializable object to send in the body
of the request with a Content-Type header of application/json
(default: None). If ``json`` is provided, ``data`` should not be.
"""
if data and json:
raise ClientException("At most one of `data` and `json` is supported.")
try:
return self._core.request(
method,
path,
data=data,
files=files,
params=params,
timeout=self.config.timeout,
json=json,
)
except BadRequest as exception:
try:
data = exception.response.json()
except ValueError:
# TODO: Remove this exception after 2020-12-31 if no one has
# filed a bug against it.
raise Exception(
"Unexpected BadRequest without json body. Please file a "
"bug at https://github.com/praw-dev/praw/issues"
) from exception
if set(data) == {"error", "message"}:
raise
if "fields" in data:
assert len(data["fields"]) == 1
field = data["fields"][0]
else:
field = None
raise RedditAPIException(
[data["reason"], data["explanation"], field]
) from exception
|
def request(
self,
method: str,
path: str,
params: Optional[Union[str, Dict[str, str]]] = None,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json=None,
) -> Any:
"""Return the parsed JSON data returned from a request to URL.
:param method: The HTTP method (e.g., GET, POST, PUT, DELETE).
:param path: The path to fetch.
:param params: The query parameters to add to the request (default:
None).
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
:param files: Dictionary, filename to file (like) object mapping
(default: None).
:param json: JSON-serializable object to send in the body
of the request with a Content-Type header of application/json
(default: None). If ``json`` is provided, ``data`` should not be.
"""
if data and json:
raise ClientException("At most one of `data` and `json` is supported.")
try:
return self._core.request(
method,
path,
data=data,
files=files,
params=params,
timeout=self.config.timeout,
json=json,
)
except BadRequest as exception:
try:
data = exception.response.json()
except ValueError:
# TODO: Remove this exception after 2020-12-31 if no one has
# filed a bug against it.
raise Exception(
"Unexpected BadRequest without json body. Please file a "
"bug at https://github.com/praw-dev/praw/issues"
) from exception
if "fields" in data:
assert len(data["fields"]) == 1
field = data["fields"][0]
else:
field = None
raise RedditAPIException(
[data["reason"], data["explanation"], field]
) from exception
|
https://github.com/praw-dev/praw/issues/1429
|
---------------------------------------------------------------------------
BadRequest Traceback (most recent call last)
~/PycharmProjects/praw/praw/reddit.py in request(self, method, path, params, data, files)
731 params=params,
--> 732 timeout=self.config.timeout,
733 )
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in request(self, method, path, data, files, json, params, timeout)
335 timeout=timeout,
--> 336 url=url,
337 )
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _request_with_retries(self, data, files, json, method, params, timeout, url, retry_strategy_state)
264 elif response.status_code in self.STATUS_EXCEPTIONS:
--> 265 raise self.STATUS_EXCEPTIONS[response.status_code](response)
266 elif response.status_code == codes["no_content"]:
BadRequest: received 400 HTTP response
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-5-e7b204c10e9b> in <module>
----> 1 r = reddit.post("/api/validate_submission_field", data={"sr": "AskReddit", "field": "link", "kind": "link", "title": "l", "text": "lol", "show_error_list": True, })
~/PycharmProjects/praw/praw/reddit.py in post(self, path, data, files, params)
635 method="POST",
636 params=params,
--> 637 path=path,
638 )
639 except RedditAPIException as exception:
~/PycharmProjects/praw/praw/reddit.py in _objectify_request(self, data, files, method, params, path)
572 return self._objector.objectify(
573 self.request(
--> 574 data=data, files=files, method=method, params=params, path=path
575 )
576 )
~/PycharmProjects/praw/praw/reddit.py in request(self, method, path, params, data, files)
748 field = None
749 raise RedditAPIException(
--> 750 [data["reason"], data["explanation"], field]
751 ) from exception
752
KeyError: 'reason'
|
KeyError
|
def _initialize_attributes(self):
self._short_url = self._fetch_default("short_url") or self.CONFIG_NOT_SET
self.check_for_updates = self._config_boolean(
self._fetch_or_not_set("check_for_updates")
)
self.kinds = {
x: self._fetch("{}_kind".format(x))
for x in [
"comment",
"message",
"redditor",
"submission",
"subreddit",
"trophy",
]
}
for attribute in (
"client_id",
"client_secret",
"redirect_uri",
"refresh_token",
"password",
"user_agent",
"username",
):
setattr(self, attribute, self._fetch_or_not_set(attribute))
for required_attribute in (
"oauth_url",
"ratelimit_seconds",
"reddit_url",
"timeout",
):
setattr(self, required_attribute, self._fetch(required_attribute))
for attribute, conversion in {
"ratelimit_seconds": int,
"timeout": int,
}.items():
try:
setattr(self, attribute, conversion(getattr(self, attribute)))
except ValueError:
raise ValueError(
"An incorrect config type was given for option {}. The "
"expected type is {}, but the given value is {}.".format(
attribute,
conversion.__name__,
getattr(self, attribute),
)
)
|
def _initialize_attributes(self):
self._short_url = self._fetch_default("short_url") or self.CONFIG_NOT_SET
self.check_for_updates = self._config_boolean(
self._fetch_or_not_set("check_for_updates")
)
self.kinds = {
x: self._fetch("{}_kind".format(x))
for x in [
"comment",
"message",
"redditor",
"submission",
"subreddit",
"trophy",
]
}
for attribute in (
"client_id",
"client_secret",
"redirect_uri",
"refresh_token",
"password",
"user_agent",
"username",
):
setattr(self, attribute, self._fetch_or_not_set(attribute))
for required_attribute in ("oauth_url", "reddit_url", "timeout"):
setattr(self, required_attribute, self._fetch(required_attribute))
for attribute, conversion in {"timeout": int}.items():
try:
setattr(self, attribute, conversion(getattr(self, attribute)))
except ValueError:
raise ValueError(
"An incorrect config type was given for option {}. The "
"expected type is {}, but the given value is {}.".format(
attribute,
conversion.__name__,
getattr(self, attribute),
)
)
|
https://github.com/praw-dev/praw/issues/1395
|
---------------------------------------------------------------------------
APIException Traceback (most recent call last)
<ipython-input-14-5b88e4f8d60d> in <module>
1 while True:
----> 2 print(repr(s[1].reply("F")))
3
/usr/local/lib/python3.7/site-packages/praw/models/reddit/mixins/replyable.py in reply(self, body)
31 """
32 data = {"text": body, "thing_id": self.fullname}
---> 33 comments = self._reddit.post(API_PATH["comment"], data=data)
34 try:
35 return comments[0]
/usr/local/lib/python3.7/site-packages/praw/reddit.py in post(self, path, data, files, params)
553 "POST", path, data=data or {}, files=files, params=params
554 )
--> 555 return self._objector.objectify(data)
556
557 def put(
/usr/local/lib/python3.7/site-packages/praw/objector.py in objectify(self, data)
168 errors = data["json"]["errors"]
169 if len(errors) == 1:
--> 170 raise APIException(*errors[0])
171 assert not errors
172 elif isinstance(data, dict):
APIException: RATELIMIT: 'you are doing that too much. try again in 5 seconds.' on field 'ratelimit'
|
APIException
|
def get(self, path: str, params: Optional[Union[str, Dict[str, str]]] = None):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default:
None).
"""
return self._objectify_request(method="GET", params=params, path=path)
|
def get(self, path: str, params: Optional[Union[str, Dict[str, str]]] = None):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default:
None).
"""
data = self.request("GET", path, params=params)
return self._objector.objectify(data)
|
https://github.com/praw-dev/praw/issues/1395
|
---------------------------------------------------------------------------
APIException Traceback (most recent call last)
<ipython-input-14-5b88e4f8d60d> in <module>
1 while True:
----> 2 print(repr(s[1].reply("F")))
3
/usr/local/lib/python3.7/site-packages/praw/models/reddit/mixins/replyable.py in reply(self, body)
31 """
32 data = {"text": body, "thing_id": self.fullname}
---> 33 comments = self._reddit.post(API_PATH["comment"], data=data)
34 try:
35 return comments[0]
/usr/local/lib/python3.7/site-packages/praw/reddit.py in post(self, path, data, files, params)
553 "POST", path, data=data or {}, files=files, params=params
554 )
--> 555 return self._objector.objectify(data)
556
557 def put(
/usr/local/lib/python3.7/site-packages/praw/objector.py in objectify(self, data)
168 errors = data["json"]["errors"]
169 if len(errors) == 1:
--> 170 raise APIException(*errors[0])
171 assert not errors
172 elif isinstance(data, dict):
APIException: RATELIMIT: 'you are doing that too much. try again in 5 seconds.' on field 'ratelimit'
|
APIException
|
def patch(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
) -> Any:
"""Return parsed objects returned from a PATCH request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
"""
return self._objectify_request(data=data, method="PATCH", path=path)
|
def patch(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
) -> Any:
"""Return parsed objects returned from a PATCH request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
"""
data = self.request("PATCH", path, data=data)
return self._objector.objectify(data)
|
https://github.com/praw-dev/praw/issues/1395
|
---------------------------------------------------------------------------
APIException Traceback (most recent call last)
<ipython-input-14-5b88e4f8d60d> in <module>
1 while True:
----> 2 print(repr(s[1].reply("F")))
3
/usr/local/lib/python3.7/site-packages/praw/models/reddit/mixins/replyable.py in reply(self, body)
31 """
32 data = {"text": body, "thing_id": self.fullname}
---> 33 comments = self._reddit.post(API_PATH["comment"], data=data)
34 try:
35 return comments[0]
/usr/local/lib/python3.7/site-packages/praw/reddit.py in post(self, path, data, files, params)
553 "POST", path, data=data or {}, files=files, params=params
554 )
--> 555 return self._objector.objectify(data)
556
557 def put(
/usr/local/lib/python3.7/site-packages/praw/objector.py in objectify(self, data)
168 errors = data["json"]["errors"]
169 if len(errors) == 1:
--> 170 raise APIException(*errors[0])
171 assert not errors
172 elif isinstance(data, dict):
APIException: RATELIMIT: 'you are doing that too much. try again in 5 seconds.' on field 'ratelimit'
|
APIException
|
def post(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
"""Return parsed objects returned from a POST request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
:param files: Dictionary, filename to file (like) object mapping
(default: None).
:param params: The query parameters to add to the request (default:
None).
"""
data = data or {}
try:
return self._objectify_request(
data=data,
files=files,
method="POST",
params=params,
path=path,
)
except RedditAPIException as exception:
seconds = self._handle_rate_limit(exception=exception)
if seconds is not None:
logger.debug("Rate limit hit, sleeping for {:.2f} seconds".format(seconds))
time.sleep(seconds)
return self._objectify_request(
data=data,
files=files,
method="POST",
params=params,
path=path,
)
raise
|
def post(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
"""Return parsed objects returned from a POST request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
:param files: Dictionary, filename to file (like) object mapping
(default: None).
:param params: The query parameters to add to the request (default:
None).
"""
data = self.request("POST", path, data=data or {}, files=files, params=params)
return self._objector.objectify(data)
|
https://github.com/praw-dev/praw/issues/1395
|
---------------------------------------------------------------------------
APIException Traceback (most recent call last)
<ipython-input-14-5b88e4f8d60d> in <module>
1 while True:
----> 2 print(repr(s[1].reply("F")))
3
/usr/local/lib/python3.7/site-packages/praw/models/reddit/mixins/replyable.py in reply(self, body)
31 """
32 data = {"text": body, "thing_id": self.fullname}
---> 33 comments = self._reddit.post(API_PATH["comment"], data=data)
34 try:
35 return comments[0]
/usr/local/lib/python3.7/site-packages/praw/reddit.py in post(self, path, data, files, params)
553 "POST", path, data=data or {}, files=files, params=params
554 )
--> 555 return self._objector.objectify(data)
556
557 def put(
/usr/local/lib/python3.7/site-packages/praw/objector.py in objectify(self, data)
168 errors = data["json"]["errors"]
169 if len(errors) == 1:
--> 170 raise APIException(*errors[0])
171 assert not errors
172 elif isinstance(data, dict):
APIException: RATELIMIT: 'you are doing that too much. try again in 5 seconds.' on field 'ratelimit'
|
APIException
|
def put(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
):
"""Return parsed objects returned from a PUT request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
"""
return self._objectify_request(
data=data,
method="PUT",
path=path,
)
|
def put(
self,
path: str,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
):
"""Return parsed objects returned from a PUT request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body
of the request (default: None).
"""
data = self.request("PUT", path, data=data)
return self._objector.objectify(data)
|
https://github.com/praw-dev/praw/issues/1395
|
---------------------------------------------------------------------------
APIException Traceback (most recent call last)
<ipython-input-14-5b88e4f8d60d> in <module>
1 while True:
----> 2 print(repr(s[1].reply("F")))
3
/usr/local/lib/python3.7/site-packages/praw/models/reddit/mixins/replyable.py in reply(self, body)
31 """
32 data = {"text": body, "thing_id": self.fullname}
---> 33 comments = self._reddit.post(API_PATH["comment"], data=data)
34 try:
35 return comments[0]
/usr/local/lib/python3.7/site-packages/praw/reddit.py in post(self, path, data, files, params)
553 "POST", path, data=data or {}, files=files, params=params
554 )
--> 555 return self._objector.objectify(data)
556
557 def put(
/usr/local/lib/python3.7/site-packages/praw/objector.py in objectify(self, data)
168 errors = data["json"]["errors"]
169 if len(errors) == 1:
--> 170 raise APIException(*errors[0])
171 assert not errors
172 elif isinstance(data, dict):
APIException: RATELIMIT: 'you are doing that too much. try again in 5 seconds.' on field 'ratelimit'
|
APIException
|
def _initialize_attributes(self):
self._short_url = self._fetch_default("short_url") or self.CONFIG_NOT_SET
self.check_for_updates = self._config_boolean(
self._fetch_or_not_set("check_for_updates")
)
self.kinds = {
x: self._fetch("{}_kind".format(x))
for x in [
"comment",
"message",
"redditor",
"submission",
"subreddit",
"trophy",
]
}
for attribute in (
"client_id",
"client_secret",
"redirect_uri",
"refresh_token",
"password",
"user_agent",
"username",
):
setattr(self, attribute, self._fetch_or_not_set(attribute))
for required_attribute in ("oauth_url", "reddit_url", "timeout"):
setattr(self, required_attribute, self._fetch(required_attribute))
for attribute, conversion in {"timeout": int}.items():
try:
setattr(self, attribute, conversion(getattr(self, attribute)))
except ValueError:
raise ValueError(
"An incorrect config type was given for option {}. The "
"expected type is {}, but the given value is {}.".format(
attribute,
conversion.__name__,
getattr(self, attribute),
)
)
|
def _initialize_attributes(self):
self._short_url = self._fetch_default("short_url") or self.CONFIG_NOT_SET
self.check_for_updates = self._config_boolean(
self._fetch_or_not_set("check_for_updates")
)
self.kinds = {
x: self._fetch("{}_kind".format(x))
for x in [
"comment",
"message",
"redditor",
"submission",
"subreddit",
"trophy",
]
}
for attribute in (
"client_id",
"client_secret",
"redirect_uri",
"refresh_token",
"password",
"user_agent",
"username",
):
setattr(self, attribute, self._fetch_or_not_set(attribute))
for required_attribute in ("oauth_url", "reddit_url", "timeout"):
setattr(self, required_attribute, self._fetch(required_attribute))
|
https://github.com/praw-dev/praw/issues/1421
|
In [4]: reddit.subreddit("The_Donald").subscribers
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in _validate_timeout(cls, value, name)
139 try:
--> 140 if value <= 0:
141 raise ValueError(
TypeError: '<=' not supported between instances of 'str' and 'int'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/requestor.py in request(self, timeout, *args, **kwargs)
52 try:
---> 53 return self._http.request(*args, timeout=timeout, **kwargs)
54 except Exception as exc:
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
529 send_kwargs.update(settings)
--> 530 resp = self.send(prep, **send_kwargs)
531
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/sessions.py in send(self, request, **kwargs)
642 # Send the request
--> 643 r = adapter.send(request, **kwargs)
644
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
434 else:
--> 435 timeout = TimeoutSauce(connect=timeout, read=timeout)
436
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in __init__(self, total, connect, read)
95 def __init__(self, total=None, connect=_Default, read=_Default):
---> 96 self._connect = self._validate_timeout(connect, "connect")
97 self._read = self._validate_timeout(read, "read")
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in _validate_timeout(cls, value, name)
149 "Timeout value %s was %s, but it must be an "
--> 150 "int, float or None." % (name, value)
151 )
ValueError: Timeout value connect was 16, but it must be an int, float or None.
During handling of the above exception, another exception occurred:
RequestException Traceback (most recent call last)
<ipython-input-4-a5aa025940f1> in <module>
----> 1 reddit.subreddit("The_Donald").subscribers
~/PycharmProjects/praw/praw/models/reddit/base.py in __getattr__(self, attribute)
31 """Return the value of `attribute`."""
32 if not attribute.startswith("_") and not self._fetched:
---> 33 self._fetch()
34 return getattr(self, attribute)
35 raise AttributeError(
~/PycharmProjects/praw/praw/models/reddit/subreddit.py in _fetch(self)
514
515 def _fetch(self):
--> 516 data = self._fetch_data()
517 data = data["data"]
518 other = type(self)(self._reddit, _data=data)
~/PycharmProjects/praw/praw/models/reddit/subreddit.py in _fetch_data(self)
511 name, fields, params = self._fetch_info()
512 path = API_PATH[name].format(**fields)
--> 513 return self._reddit.request("GET", path, params)
514
515 def _fetch(self):
~/PycharmProjects/praw/praw/reddit.py in request(self, method, path, params, data, files)
661 files=files,
662 params=params,
--> 663 timeout=self.config.timeout,
664 )
665 except BadRequest as exception:
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in request(self, method, path, data, files, json, params, timeout)
334 params=params,
335 timeout=timeout,
--> 336 url=url,
337 )
338
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _request_with_retries(self, data, files, json, method, params, timeout, url, retry_strategy_state)
233 retry_strategy_state,
234 timeout,
--> 235 url,
236 )
237
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _make_request(self, data, files, json, method, params, retry_strategy_state, timeout, url)
193 json=json,
194 params=params,
--> 195 timeout=timeout,
196 )
197 log.debug(
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/rate_limit.py in call(self, request_function, set_header_callback, *args, **kwargs)
34 self.delay()
35 kwargs["headers"] = set_header_callback()
---> 36 response = request_function(*args, **kwargs)
37 self.update(response.headers)
38 return response
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/requestor.py in request(self, timeout, *args, **kwargs)
53 return self._http.request(*args, timeout=timeout, **kwargs)
54 except Exception as exc:
---> 55 raise RequestException(exc, args, kwargs)
RequestException: error with request Timeout value connect was 16, but it must be an int, float or None.
|
TypeError
|
def refresh(self):
"""Refresh the comment's attributes.
If using :meth:`.Reddit.comment` this method must be called in order to
obtain the comment's replies.
"""
if "context" in self.__dict__: # Using hasattr triggers a fetch
comment_path = self.context.split("?", 1)[0]
else:
comment_path = "{}_/{}".format(
self.submission._info_path(), # pylint: disable=no-member
self.id,
)
# The context limit appears to be 8, but let's ask for more anyway.
comment_list = self._reddit.get(comment_path, params={"context": 100})[1].children
if not comment_list:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
# With context, the comment may be nested so we have to find it
comment = None
queue = comment_list[:]
while queue and (comment is None or comment.id != self.id):
comment = queue.pop()
if isinstance(comment, Comment):
queue.extend(comment._replies)
if comment.id != self.id:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
if self._submission is not None:
del comment.__dict__["_submission"] # Don't replace if set
self.__dict__.update(comment.__dict__)
for reply in comment_list:
reply.submission = self.submission
return self
|
def refresh(self):
"""Refresh the comment's attributes.
If using :meth:`.Reddit.comment` this method must be called in order to
obtain the comment's replies.
"""
if "context" in self.__dict__: # Using hasattr triggers a fetch
comment_path = self.context.split("?", 1)[0]
else:
comment_path = "{}_/{}".format(
self.submission._info_path(), # pylint: disable=no-member
self.id,
)
# The context limit appears to be 8, but let's ask for more anyway.
comment_list = self._reddit.get(comment_path, params={"context": 100})[1].children
if not comment_list:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
# With context, the comment may be nested so we have to find it
comment = None
queue = comment_list[:]
while queue and (comment is None or comment.id != self.id):
comment = queue.pop()
queue.extend(comment._replies)
if comment.id != self.id:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
if self._submission is not None:
del comment.__dict__["_submission"] # Don't replace if set
self.__dict__.update(comment.__dict__)
for reply in comment_list:
reply.submission = self.submission
return self
|
https://github.com/praw-dev/praw/issues/841
|
Traceback (most recent call last):
File "./test.py", line 12, in <module>
comment.refresh()
File "/usr/local/lib/python3.4/dist-packages/praw/models/reddit/comment.py", line 196, in refresh
queue.extend(comment._replies)
AttributeError: 'MoreComments' object has no attribute '_replies'
|
AttributeError
|
def valve_flowreorder(input_ofmsgs, use_barriers=True):
"""Reorder flows for better OFA performance."""
# Move all deletes to be first, and add one barrier,
# while optionally randomizing order. Platforms that do
# parallel delete will perform better and platforms that
# don't will have at most only one barrier to deal with.
output_ofmsgs = []
by_kind = _partition_ofmsgs(input_ofmsgs)
# Suppress all other relevant deletes if a global delete is present.
delete_global_ofmsgs = by_kind.get("deleteglobal", [])
if delete_global_ofmsgs:
global_types = []
for ofmsg in delete_global_ofmsgs:
global_types.append(type(ofmsg))
new_delete = []
for ofmsg in by_kind.get("delete", []):
if type(ofmsg) not in global_types:
new_delete.append(ofmsg)
by_kind["delete"] = new_delete
for kind, random_order, suggest_barrier in _OFMSG_ORDER:
ofmsgs = dedupe_ofmsgs(by_kind.get(kind, []), random_order)
if ofmsgs:
output_ofmsgs.extend(ofmsgs)
if use_barriers and suggest_barrier:
output_ofmsgs.append(barrier())
return output_ofmsgs
|
def valve_flowreorder(input_ofmsgs, use_barriers=True):
"""Reorder flows for better OFA performance."""
# Move all deletes to be first, and add one barrier,
# while optionally randomizing order. Platforms that do
# parallel delete will perform better and platforms that
# don't will have at most only one barrier to deal with.
output_ofmsgs = []
by_kind = _partition_ofmsgs(input_ofmsgs)
# Suppress all other deletes if a global delete is present.
delete_global_ofmsgs = by_kind.get("deleteglobal", [])
if delete_global_ofmsgs:
by_kind["delete"] = []
for kind, random_order, suggest_barrier in _OFMSG_ORDER:
ofmsgs = dedupe_ofmsgs(by_kind.get(kind, []), random_order)
if ofmsgs:
output_ofmsgs.extend(ofmsgs)
if use_barriers and suggest_barrier:
output_ofmsgs.append(barrier())
return output_ofmsgs
|
https://github.com/faucetsdn/faucet/issues/3495
|
port order: -o 2,3,0,1
Using config from /etc/faucet/hw_switch_config.yaml
Testing hardware, forcing test serialization
Logging test results in /var/tmp/faucet-tests-sakye6bn
test ports server started
adding test FaucetNailedFailoverForwardingOrderedTest
adding test FaucetNailedFailoverForwardingTest
----------------------------------------------------------------------
Ran 0 tests in 0.000s
OK
running 0 tests in parallel and 2 tests serial
test_untagged (mininet_tests.FaucetNailedFailoverForwardingOrderedTest) ... *** Error setting resource limits. Mininet's performance may be affected.
ok
test_untagged (mininet_tests.FaucetNailedFailoverForwardingTest) ... FAIL
======================================================================
FAIL: test_untagged (mininet_tests.FaucetNailedFailoverForwardingTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/faucet-src/clib/mininet_test_base.py", line 481, in tearDown
self.assertFalse(oferrors, msg=oferrors)
AssertionError: "Mar 17 22:12:31 faucet.valve ERROR DPID 222613276036429 (0xca77313d8d4d) faucet-1 OFError type: OFPET_GROUP_MOD_FAILED code: OFPGMFC_GROUP_EXISTS version=0x4,msg_type=0x1,msg_len=0x5c,xid=0xc369eeb2,OFPErrorMsg(code=0,data=bytearray(b'\\x04\\x0f\\x00P\\xc3i\\xee\\xb2\\x00\\x00\\x03\\x00\\x00\\x00\\x03\\xe9\\x00 \\x00\\x00\\x00\\x00\\x00\\x05\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x02\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'),type=6) caused by version=0x4,msg_type=0xf,msg_len=0x50,xid=0xc369eeb2,OFPGroupMod(buckets=[OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=5,type=0)],len=32,watch_group=4294967295,watch_port=5,weight=0), OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=2,type=0)],len=32,watch_group=4294967295,watch_port=2,weight=0)],command=0,group_id=1001,type=3)\n\n\nMar 17 22:12:31 faucet.valve ERROR DPID 222613276036429 (0xca77313d8d4d) faucet-1 OFError type: OFPET_GROUP_MOD_FAILED code: OFPGMFC_GROUP_EXISTS version=0x4,msg_type=0x1,msg_len=0x5c,xid=0xc369eeb3,OFPErrorMsg(code=0,data=bytearray(b'\\x04\\x0f\\x00P\\xc3i\\xee\\xb3\\x00\\x00\\x03\\x00\\x00\\x00\\x03\\xea\\x00 \\x00\\x00\\x00\\x00\\x00\\x05\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x02\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'),type=6) caused by version=0x4,msg_type=0xf,msg_len=0x50,xid=0xc369eeb3,OFPGroupMod(buckets=[OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=5,type=0)],len=32,watch_group=4294967295,watch_port=5,weight=0), OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=2,type=0)],len=32,watch_group=4294967295,watch_port=2,weight=0)],command=0,group_id=1002,type=3)\n" is not false : Mar 17 22:12:31 faucet.valve ERROR DPID 222613276036429 (0xca77313d8d4d) faucet-1 OFError type: OFPET_GROUP_MOD_FAILED code: OFPGMFC_GROUP_EXISTS version=0x4,msg_type=0x1,msg_len=0x5c,xid=0xc369eeb2,OFPErrorMsg(code=0,data=bytearray(b'\x04\x0f\x00P\xc3i\xee\xb2\x00\x00\x03\x00\x00\x00\x03\xe9\x00 \x00\x00\x00\x00\x00\x05\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x02\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00'),type=6) caused by version=0x4,msg_type=0xf,msg_len=0x50,xid=0xc369eeb2,OFPGroupMod(buckets=[OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=5,type=0)],len=32,watch_group=4294967295,watch_port=5,weight=0), OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=2,type=0)],len=32,watch_group=4294967295,watch_port=2,weight=0)],command=0,group_id=1001,type=3)
Mar 17 22:12:31 faucet.valve ERROR DPID 222613276036429 (0xca77313d8d4d) faucet-1 OFError type: OFPET_GROUP_MOD_FAILED code: OFPGMFC_GROUP_EXISTS version=0x4,msg_type=0x1,msg_len=0x5c,xid=0xc369eeb3,OFPErrorMsg(code=0,data=bytearray(b'\x04\x0f\x00P\xc3i\xee\xb3\x00\x00\x03\x00\x00\x00\x03\xea\x00 \x00\x00\x00\x00\x00\x05\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x02\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00'),type=6) caused by version=0x4,msg_type=0xf,msg_len=0x50,xid=0xc369eeb3,OFPGroupMod(buckets=[OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=5,type=0)],len=32,watch_group=4294967295,watch_port=5,weight=0), OFPBucket(actions=[OFPActionOutput(len=16,max_len=0,port=2,type=0)],len=32,watch_group=4294967295,watch_port=2,weight=0)],command=0,group_id=1002,type=3)
----------------------------------------------------------------------
Ran 2 tests in 80.138s
|
AssertionError
|
def scrape_prometheus(endpoints, retries=3, err_output_file=sys.stdout):
"""Scrape a list of Prometheus/FAUCET/Gauge endpoints and aggregate results."""
metrics = []
for endpoint in endpoints:
content = None
err = None
for _ in range(retries):
try:
if endpoint.startswith("http"):
response = requests.get(endpoint)
if response.status_code == requests.status_codes.codes.ok: # pylint: disable=no-member
content = response.content.decode("utf-8", "strict")
break
else:
response = urllib.request.urlopen(
endpoint
) # pytype: disable=module-attr
content = response.read().decode("utf-8", "strict")
break
except (requests.exceptions.ConnectionError, ValueError) as exception:
err = exception
time.sleep(1)
if err is not None:
err_output_file.write(str(err))
return None
try:
endpoint_metrics = parser.text_string_to_metric_families(content)
metrics.extend(endpoint_metrics)
except ValueError as err:
err_output_file.write(str(err))
return None
return metrics
|
def scrape_prometheus(endpoints, retries=3, err_output_file=sys.stdout):
"""Scrape a list of Prometheus/FAUCET/Gauge endpoints and aggregate results."""
metrics = []
for endpoint in endpoints:
content = None
err = None
for _ in range(retries):
try:
if endpoint.startswith("http"):
response = requests.get(endpoint)
if response.status_code == requests.status_codes.codes.ok: # pylint: disable=no-member
content = response.content.decode("utf-8", "strict")
break
else:
response = urllib.request.urlopen(
endpoint
) # pytype: disable=module-attr
content = response.read().decode("utf-8", "strict")
break
except requests.exceptions.ConnectionError as exception:
err = exception
time.sleep(1)
if err is not None:
err_output_file.write(str(err))
return None
endpoint_metrics = parser.text_string_to_metric_families(content)
metrics.extend(endpoint_metrics)
return metrics
|
https://github.com/faucetsdn/faucet/issues/2710
|
$ python3 fctl.py -n --endpoints=http://0.0.0.0:9090 --metrics=learned_macs --labels=dp_id:0x1
Traceback (most recent call last):
File "/home/foo/.local/lib/python3.5/site-packages/prometheus_client/parser.py", line 122, in _parse_sample
label_start, label_end = text.index("{"), text.rindex("}")
ValueError: substring not found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "fctl.py", line 176, in <module>
main()
File "fctl.py", line 162, in main
metrics = scrape_prometheus(endpoints)
File "fctl.py", line 73, in scrape_prometheus
metrics.extend(endpoint_metrics)
File "/home/foo/.local/lib/python3.5/site-packages/prometheus_client/parser.py", line 22, in text_string_to_metric_families
for metric_family in text_fd_to_metric_families(StringIO.StringIO(text)):
File "/home/foo/.local/lib/python3.5/site-packages/prometheus_client/parser.py", line 217, in text_fd_to_metric_families
sample = _parse_sample(line)
File "/home/foo/.local/lib/python3.5/site-packages/prometheus_client/parser.py", line 140, in _parse_sample
value = float(_parse_value(text[name_end:]))
ValueError: could not convert string to float: 'html>'
|
ValueError
|
def __init__(self, _id, dp_id, conf, prom_client):
self.db = None # pylint: disable=invalid-name
self.dbs = None
self.dp = None # pylint: disable=invalid-name
self.all_dps = None
self.type = None
self.interval = None
self.db_type = None
self.dps = None
self.compress = None
self.file = None
self.influx_db = None
self.influx_host = None
self.influx_port = None
self.influx_user = None
self.influx_pwd = None
self.influx_timeout = None
self.influx_retries = None
self.name = None
self.prometheus_port = None
self.prometheus_addr = None
self.prometheus_test_thread = None
self.defaults.update(self.db_defaults)
self.defaults_types.update(self.db_defaults_types)
super(WatcherConf, self).__init__(_id, dp_id, conf)
self.name = str(self._id)
self.prom_client = prom_client
|
def __init__(self, _id, dp_id, conf, prom_client):
self.db = None # pylint: disable=invalid-name
self.dbs = None
self.dp = None # pylint: disable=invalid-name
self.all_dps = None
self.type = None
self.interval = None
self.db_type = None
self.dps = None
self.compress = None
self.file = None
self.influx_db = None
self.influx_host = None
self.influx_port = None
self.influx_user = None
self.influx_pwd = None
self.influx_timeout = None
self.influx_retries = None
self.name = None
self.prometheus_port = None
self.prometheus_addr = None
self.prometheus_test_thread = None
super(WatcherConf, self).__init__(_id, dp_id, conf)
self.name = str(self._id)
self.prom_client = prom_client
|
https://github.com/faucetsdn/faucet/issues/2598
|
Oct 26 13:31:16 gauge.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3/dist-packages/faucet/gauge.py", line 101, in _update_watcher
watcher.update(ryu_event.timestamp, ryu_dp.id, msg)
File "/usr/lib/python3/dist-packages/faucet/watcher.py", line 148, in update
with gzip.open(filename, 'at') as outfile:
File "/usr/lib/python3.5/gzip.py", line 53, in open
binary_file = GzipFile(filename, gz_mode, compresslevel)
File "/usr/lib/python3.5/gzip.py", line 163, in __init__
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
FileNotFoundError: [Errno 2] No such file or directory: '/var/log/faucet/flowtable/ft.yml.gz'
|
FileNotFoundError
|
def add_db(self, db_conf):
"""Add database config to this watcher."""
self._check_conf_types(db_conf, self.db_defaults_types)
db_conf = deepcopy(db_conf)
db_type = db_conf.pop("type")
db_conf["db_type"] = db_type
self.update(db_conf)
test_config_condition(
self.file is not None
and not (
os.path.dirname(self.file)
and os.access(os.path.dirname(self.file), os.W_OK)
),
"%s is not writable" % self.file,
)
|
def add_db(self, db_conf):
"""Add database config to this watcher."""
db_conf = deepcopy(db_conf)
db_type = db_conf.pop("type")
db_conf["db_type"] = db_type
self.update(db_conf)
|
https://github.com/faucetsdn/faucet/issues/2598
|
Oct 26 13:31:16 gauge.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3/dist-packages/faucet/gauge.py", line 101, in _update_watcher
watcher.update(ryu_event.timestamp, ryu_dp.id, msg)
File "/usr/lib/python3/dist-packages/faucet/watcher.py", line 148, in update
with gzip.open(filename, 'at') as outfile:
File "/usr/lib/python3.5/gzip.py", line 53, in open
binary_file = GzipFile(filename, gz_mode, compresslevel)
File "/usr/lib/python3.5/gzip.py", line 163, in __init__
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
FileNotFoundError: [Errno 2] No such file or directory: '/var/log/faucet/flowtable/ft.yml.gz'
|
FileNotFoundError
|
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.meter = False
self.matches = {}
self.set_fields = set()
for match_fields in (MATCH_FIELDS, OLD_MATCH_FIELDS):
self.rule_types.update({match: (str, int) for match in match_fields.keys()})
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get("rules", [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError("ACL conf is an invalid type %s" % _id)
conf["rules"] = []
for rule in rules:
normalized_rule = rule
if isinstance(rule, dict):
normalized_rule = rule.get("rule", rule)
if normalized_rule is None:
normalized_rule = {k: v for k, v in rule.items() if v is not None}
test_config_condition(
not isinstance(normalized_rule, dict),
("ACL rule is %s not %s (%s)" % (type(normalized_rule), dict, rules)),
)
conf["rules"].append(normalized_rule)
super(ACL, self).__init__(_id, dp_id, conf)
|
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.meter = False
self.matches = {}
self.set_fields = set()
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get("rules", [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError("ACL conf is an invalid type %s" % _id)
conf["rules"] = []
for rule in rules:
test_config_condition(
not isinstance(rule, dict), ("ACL rule is %s not %s" % (type(rule), dict))
)
conf["rules"].append(rule.get("rule", rule))
super(ACL, self).__init__(_id, dp_id, conf)
|
https://github.com/faucetsdn/faucet/issues/2595
|
Oct 25 16:13:59 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 156, in reload_config
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 139, in request_reload_configs
self.load_configs(now, new_config_file, delete_dp=delete_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 108, in load_configs
new_dps = self.parse_configs(new_config_file)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 88, in parse_configs
new_config_hashes, new_dps = dp_parser(new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 51, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 213, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in _dp_parser_v2
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in <listcomp>
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 161, in _parse_dp
acl = ACL(acl_key, dp_id, acl_conf)
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 118, in __init__
super(ACL, self).__init__(_id, dp_id, conf)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 54, in __init__
self.check_config()
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 127, in check_config
self._check_conf_types(rule, self.rule_types)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 76, in _check_conf_types
for conf_key, conf_value in conf.items():
AttributeError: 'NoneType' object has no attribute 'items'
|
AttributeError
|
def check_config(self):
test_config_condition(not self.rules, "no rules found for ACL %s" % self._id)
for rule in self.rules:
self._check_conf_types(rule, self.rule_types)
for rule_field, rule_conf in rule.items():
if rule_field == "cookie":
test_config_condition(
rule_conf < 0 or rule_conf > 2**16,
"rule cookie value must be 0-2**16",
)
elif rule_field == "actions":
test_config_condition(
not rule_conf, "Missing rule actions in ACL %s" % self._id
)
self._check_conf_types(rule_conf, self.actions_types)
for action_name, action_conf in rule_conf.items():
if action_name == "output":
self._check_conf_types(action_conf, self.output_actions_types)
|
def check_config(self):
test_config_condition(not self.rules, "no rules found for ACL %s" % self._id)
for match_fields in (MATCH_FIELDS, OLD_MATCH_FIELDS):
for match in match_fields.keys():
self.rule_types[match] = (str, int)
for rule in self.rules:
self._check_conf_types(rule, self.rule_types)
for rule_field, rule_conf in rule.items():
if rule_field == "cookie":
test_config_condition(
rule_conf < 0 or rule_conf > 2**16,
("rule cookie value must be 0-2**16"),
)
elif rule_field == "actions":
test_config_condition(
not rule_conf, "Missing rule actions in ACL %s" % self._id
)
self._check_conf_types(rule_conf, self.actions_types)
for action_name, action_conf in rule_conf.items():
if action_name == "output":
self._check_conf_types(action_conf, self.output_actions_types)
|
https://github.com/faucetsdn/faucet/issues/2595
|
Oct 25 16:13:59 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 156, in reload_config
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 139, in request_reload_configs
self.load_configs(now, new_config_file, delete_dp=delete_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 108, in load_configs
new_dps = self.parse_configs(new_config_file)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 88, in parse_configs
new_config_hashes, new_dps = dp_parser(new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 51, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 213, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in _dp_parser_v2
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in <listcomp>
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 161, in _parse_dp
acl = ACL(acl_key, dp_id, acl_conf)
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 118, in __init__
super(ACL, self).__init__(_id, dp_id, conf)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 54, in __init__
self.check_config()
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 127, in check_config
self._check_conf_types(rule, self.rule_types)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 76, in _check_conf_types
for conf_key, conf_value in conf.items():
AttributeError: 'NoneType' object has no attribute 'items'
|
AttributeError
|
def reset_ports(self, ports):
"""Reset tagged and untagged port lists."""
sorted_ports = sorted(ports, key=lambda i: i.number)
self.tagged = tuple([port for port in sorted_ports if self in port.tagged_vlans])
self.untagged = tuple([port for port in sorted_ports if self == port.native_vlan])
|
def reset_ports(self, ports):
"""Reset tagged and untagged port lists."""
self.tagged = tuple([port for port in ports if self in port.tagged_vlans])
self.untagged = tuple([port for port in ports if self == port.native_vlan])
|
https://github.com/faucetsdn/faucet/issues/2589
|
test_tagged (mininet_tests.FaucetTaggedIPv4RouteTest) ... FaucetTaggedIPv4RouteTest-test_tagged: 260 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 262 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 266 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 255 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 262 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 263 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 259 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 260 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 256 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 260 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 257 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 265 mbps to 10.0.1.1
FAIL
======================================================================
FAIL: test_tagged (mininet_tests.FaucetTaggedIPv4RouteTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/faucet-src/tests/integration/mininet_tests.py", line 5183, in test_tagged
restart=True, cold_start=False)
File "/faucet-src/clib/mininet_test_base.py", line 1191, in change_port_config
restart, cold_start, hup=hup)
File "/faucet-src/clib/mininet_test_base.py", line 1151, in reload_conf
verify_faucet_reconf_func()
File "/faucet-src/clib/mininet_test_base.py", line 1656, in verify_faucet_reconf
msg='%s did not increment: %u' % (var, new_count))
AssertionError: 1 != 0 : faucet_config_reload_warm did not increment: 0
----------------------------------------------------------------------
Ran 1 test in 129.772s
FAILED (failures=1)
|
AssertionError
|
def __str__(self):
str_ports = []
if self.tagged:
str_ports.append("tagged: %s" % ",".join([str(p) for p in self.tagged]))
if self.untagged:
str_ports.append("untagged: %s" % ",".join([str(p) for p in self.untagged]))
return "VLAN %s vid:%s %s" % (self.name, self.vid, " ".join(str_ports))
|
def __str__(self):
port_list = tuple([str(x) for x in self.get_ports()])
ports = ",".join(port_list)
return "VLAN %s vid:%s ports:%s" % (self.name, self.vid, ports)
|
https://github.com/faucetsdn/faucet/issues/2589
|
test_tagged (mininet_tests.FaucetTaggedIPv4RouteTest) ... FaucetTaggedIPv4RouteTest-test_tagged: 260 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 262 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 266 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 255 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 262 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 263 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 259 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 260 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 256 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 260 mbps to 10.0.1.1
FaucetTaggedIPv4RouteTest-test_tagged: 257 mbps to 10.0.2.1
FaucetTaggedIPv4RouteTest-test_tagged: 265 mbps to 10.0.1.1
FAIL
======================================================================
FAIL: test_tagged (mininet_tests.FaucetTaggedIPv4RouteTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/faucet-src/tests/integration/mininet_tests.py", line 5183, in test_tagged
restart=True, cold_start=False)
File "/faucet-src/clib/mininet_test_base.py", line 1191, in change_port_config
restart, cold_start, hup=hup)
File "/faucet-src/clib/mininet_test_base.py", line 1151, in reload_conf
verify_faucet_reconf_func()
File "/faucet-src/clib/mininet_test_base.py", line 1656, in verify_faucet_reconf
msg='%s did not increment: %u' % (var, new_count))
AssertionError: 1 != 0 : faucet_config_reload_warm did not increment: 0
----------------------------------------------------------------------
Ran 1 test in 129.772s
FAILED (failures=1)
|
AssertionError
|
def resolve_stack_topology(self, dps):
"""Resolve inter-DP config for stacking."""
root_dp = None
stack_dps = []
for dp in dps:
if dp.stack is not None:
stack_dps.append(dp)
if "priority" in dp.stack:
test_config_condition(
not isinstance(dp.stack["priority"], int),
(
"stack priority must be type %s not %s"
% (int, type(dp.stack["priority"]))
),
)
test_config_condition(
dp.stack["priority"] <= 0, ("stack priority must be > 0")
)
test_config_condition(
root_dp is not None, "cannot have multiple stack roots"
)
root_dp = dp
for vlan in dp.vlans.values():
test_config_condition(
vlan.faucet_vips, ("routing + stacking not supported")
)
if root_dp is None:
test_config_condition(stack_dps, "stacking enabled but no root_dp")
return
edge_count = Counter()
graph = networkx.MultiGraph()
for dp in dps:
if dp.stack_ports:
graph.add_node(dp.name)
for port in dp.stack_ports:
edge_name = self.add_stack_link(graph, dp, port)
edge_count[edge_name] += 1
if graph.size():
for edge_name, count in edge_count.items():
test_config_condition(
count != 2, "%s defined only in one direction" % edge_name
)
if self.name in graph:
if self.stack is None:
self.stack = {}
self.stack["root_dp"] = root_dp
self.stack["graph"] = graph
longest_path_to_root_len = 0
for dp in graph.nodes():
path_to_root_len = len(self.shortest_path(root_dp.name, src_dp=dp))
test_config_condition(
path_to_root_len == 0, "%s not connected to stack" % dp
)
longest_path_to_root_len = max(
path_to_root_len, longest_path_to_root_len
)
self.stack["longest_path_to_root_len"] = longest_path_to_root_len
|
def resolve_stack_topology(self, dps):
"""Resolve inter-DP config for stacking."""
root_dp = None
stack_dps = []
for dp in dps:
if dp.stack is not None:
stack_dps.append(dp)
if "priority" in dp.stack:
test_config_condition(
dp.stack["priority"] <= 0, ("stack priority must be > 0")
)
test_config_condition(
root_dp is not None, "cannot have multiple stack roots"
)
root_dp = dp
for vlan in dp.vlans.values():
test_config_condition(
vlan.faucet_vips, ("routing + stacking not supported")
)
if root_dp is None:
test_config_condition(stack_dps, "stacking enabled but no root_dp")
return
edge_count = Counter()
graph = networkx.MultiGraph()
for dp in dps:
if dp.stack_ports:
graph.add_node(dp.name)
for port in dp.stack_ports:
edge_name = self.add_stack_link(graph, dp, port)
edge_count[edge_name] += 1
if graph.size():
for edge_name, count in edge_count.items():
test_config_condition(
count != 2, "%s defined only in one direction" % edge_name
)
if self.name in graph:
if self.stack is None:
self.stack = {}
self.stack["root_dp"] = root_dp
self.stack["graph"] = graph
longest_path_to_root_len = 0
for dp in graph.nodes():
path_to_root_len = len(self.shortest_path(root_dp.name, src_dp=dp))
test_config_condition(
path_to_root_len == 0, "%s not connected to stack" % dp
)
longest_path_to_root_len = max(
path_to_root_len, longest_path_to_root_len
)
self.stack["longest_path_to_root_len"] = longest_path_to_root_len
|
https://github.com/faucetsdn/faucet/issues/2606
|
Oct 30 03:01:40 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 156, in reload_config
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 139, in request_reload_configs
self.load_configs(now, new_config_file, delete_dp=delete_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 108, in load_configs
new_dps = self.parse_configs(new_config_file)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 88, in parse_configs
new_config_hashes, new_dps = dp_parser(new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 51, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 213, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 181, in _dp_parser_v2
dp.resolve_stack_topology(dps)
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 614, in resolve_stack_topology
test_config_condition(dp.stack['priority'] <= 0, (
TypeError: '<=' not supported between instances of 'NoneType' and 'int'
|
TypeError
|
def _check_conf_types(self, conf, conf_types):
"""Check that conf value is of the correct type."""
test_config_condition(
not isinstance(conf, dict),
("Conf object must be type %s not %s" % (dict, type(conf))),
)
for conf_key, conf_value in conf.items():
test_config_condition(
conf_key not in conf_types,
"%s field unknown in %s (known types %s)"
% (conf_key, self._id, conf_types),
)
if conf_value is not None:
conf_type = conf_types[conf_key]
test_config_condition(
not isinstance(conf_value, conf_type),
"%s value %s must be %s not %s"
% (conf_key, conf_value, conf_type, type(conf_value)),
) # pytype: disable=invalid-typevar
|
def _check_conf_types(self, conf, conf_types):
"""Check that conf value is of the correct type."""
for conf_key, conf_value in conf.items():
test_config_condition(
conf_key not in conf_types,
"%s field unknown in %s (known types %s)"
% (conf_key, self._id, conf_types),
)
if conf_value is not None:
conf_type = conf_types[conf_key]
test_config_condition(
not isinstance(conf_value, conf_type),
"%s value %s must be %s not %s"
% (conf_key, conf_value, conf_type, type(conf_value)),
) # pytype: disable=invalid-typevar
|
https://github.com/faucetsdn/faucet/issues/2607
|
Oct 30 02:49:35 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 156, in reload_config
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 139, in request_reload_configs
self.load_configs(now, new_config_file, delete_dp=delete_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 108, in load_configs
new_dps = self.parse_configs(new_config_file)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 88, in parse_configs
new_config_hashes, new_dps = dp_parser(new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 51, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 213, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in _dp_parser_v2
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in <listcomp>
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 161, in _parse_dp
acl = ACL(acl_key, dp_id, acl_conf)
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 118, in __init__
super(ACL, self).__init__(_id, dp_id, conf)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 54, in __init__
self.check_config()
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 127, in check_config
self._check_conf_types(rule, self.rule_types)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 76, in _check_conf_types
for conf_key, conf_value in conf.items():
AttributeError: 'NoneType' object has no attribute 'items'
|
AttributeError
|
def __str__(self):
return str(self._id)
|
def __str__(self):
return self._id
|
https://github.com/faucetsdn/faucet/issues/2608
|
Oct 30 03:20:12 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 156, in reload_config
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 139, in request_reload_configs
self.load_configs(now, new_config_file, delete_dp=delete_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 108, in load_configs
new_dps = self.parse_configs(new_config_file)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 88, in parse_configs
new_config_hashes, new_dps = dp_parser(new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 51, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 213, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in _dp_parser_v2
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in <listcomp>
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 164, in _parse_dp
router = Router(router_key, dp_id, router_conf)
File "/usr/lib/python3.6/site-packages/faucet/router.py", line 38, in __init__
super(Router, self).__init__(_id, dp_id, conf)
File "/usr/lib/python3.6/site-packages/faucet/conf.py", line 54, in __init__
self.check_config()
File "/usr/lib/python3.6/site-packages/faucet/router.py", line 46, in check_config
'router %s must have at least 2 VLANs configured' % self))
TypeError: __str__ returned non-string (type int)
|
TypeError
|
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.meter = False
self.matches = {}
self.set_fields = set()
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get("rules", [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError("ACL conf is an invalid type %s" % _id)
conf["rules"] = []
for rule in rules:
test_config_condition(
not isinstance(rule, dict), ("ACL rule is %s not %s" % (type(rule), dict))
)
conf["rules"].append(rule.get("rule", rule))
super(ACL, self).__init__(_id, dp_id, conf)
|
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.meter = False
self.matches = {}
self.set_fields = set()
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get("rules", [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError("ACL conf is an invalid type %s" % self._id)
conf["rules"] = []
for rule in rules:
test_config_condition(
not isinstance(rule, dict), ("ACL rule is %s not %s" % (type(rule), dict))
)
conf["rules"].append(rule.get("rule", rule))
super(ACL, self).__init__(_id, dp_id, conf)
|
https://github.com/faucetsdn/faucet/issues/2609
|
Oct 30 03:15:23 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 156, in reload_config
time.time(), self.config_file, delete_dp=self._delete_deconfigured_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 139, in request_reload_configs
self.load_configs(now, new_config_file, delete_dp=delete_dp)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 108, in load_configs
new_dps = self.parse_configs(new_config_file)
File "/usr/lib/python3.6/site-packages/faucet/valves_manager.py", line 88, in parse_configs
new_config_hashes, new_dps = dp_parser(new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 51, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 213, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in _dp_parser_v2
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 177, in <listcomp>
for dp_key, dp_conf in dps_conf.items()]
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 161, in _parse_dp
acl = ACL(acl_key, dp_id, acl_conf)
File "/usr/lib/python3.6/site-packages/faucet/acl.py", line 112, in __init__
'ACL conf is an invalid type %s' % self._id)
AttributeError: 'ACL' object has no attribute '_id'
|
AttributeError
|
def _watcher_parser_v2(conf, logname, prom_client):
logger = config_parser_util.get_logger(logname)
result = []
dps = {}
for faucet_file in conf["faucet_configs"]:
_, dp_list = dp_parser(faucet_file, logname)
if dp_list:
for dp in dp_list:
dps[dp.name] = dp
dbs = conf.pop("dbs")
for watcher_name, watcher_conf in list(conf["watchers"].items()):
if watcher_conf.get("all_dps", False):
watcher_dps = list(dps.keys())
else:
watcher_dps = watcher_conf["dps"]
# Watcher config has a list of DPs, but actually a WatcherConf is
# created for each DP.
# TODO: refactor watcher_conf as a container.
for dp_name in watcher_dps:
if dp_name not in dps:
logger.error("DP %s in Gauge but not configured in FAUCET", dp_name)
continue
dp = dps[dp_name]
watcher = WatcherConf(watcher_name, dp.dp_id, watcher_conf, prom_client)
watcher.add_db(dbs[watcher.db])
watcher.add_dp(dp)
result.append(watcher)
return result
|
def _watcher_parser_v2(conf, logname, prom_client):
logger = config_parser_util.get_logger(logname)
result = []
dps = {}
for faucet_file in conf["faucet_configs"]:
_, dp_list = dp_parser(faucet_file, logname)
if dp_list:
for dp in dp_list:
dps[dp.name] = dp
dbs = conf.pop("dbs")
for watcher_name, watcher_conf in list(conf["watchers"].items()):
watcher_dps = watcher_conf["dps"]
if watcher_conf.get("all_dps", False):
watcher_dps = list(dps.keys())
# Watcher config has a list of DPs, but actually a WatcherConf is
# created for each DP.
# TODO: refactor watcher_conf as a container.
for dp_name in watcher_dps:
if dp_name not in dps:
logger.error("DP %s in Gauge but not configured in FAUCET", dp_name)
continue
dp = dps[dp_name]
watcher = WatcherConf(watcher_name, dp.dp_id, watcher_conf, prom_client)
watcher.add_db(dbs[watcher.db])
watcher.add_dp(dp)
result.append(watcher)
return result
|
https://github.com/faucetsdn/faucet/issues/1441
|
Dec 20 14:06:38 gauge.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/faucet/gauge.py", line 99, in _load_config
new_confs = watcher_parser(self.config_file, self.logname, self.prom_client)
File "/usr/local/lib/python3.5/dist-packages/faucet/config_parser.py", line 212, in watcher_parser
return _watcher_parser_v2(conf, logname, prom_client)
File "/usr/local/lib/python3.5/dist-packages/faucet/config_parser.py", line 229, in _watcher_parser_v2
watcher_dps = watcher_conf['dps']
KeyError: 'dps'
|
KeyError
|
def finalize_config(self, dps):
"""Perform consistency checks after initial config parsing."""
def resolve_port_no(port_name):
"""Resolve port by name or number."""
if port_name in port_by_name:
return port_by_name[port_name].number
elif port_name in self.ports:
return port_name
return None
def resolve_vlan(vlan_name):
"""Resolve VLAN by name or VID."""
if vlan_name in vlan_by_name:
return vlan_by_name[vlan_name]
elif vlan_name in self.vlans:
return self.vlans[vlan_name]
return None
def resolve_stack_dps():
"""Resolve DP references in stacking config."""
port_stack_dp = {}
for port in self.stack_ports:
stack_dp = port.stack["dp"]
port_stack_dp[port] = dp_by_name[stack_dp]
for port, dp in list(port_stack_dp.items()):
port.stack["dp"] = dp
stack_port_name = port.stack["port"]
port.stack["port"] = dp.ports[stack_port_name]
def resolve_mirror_destinations():
"""Resolve mirror port references and destinations."""
mirror_from_port = {}
for port in list(self.ports.values()):
if port.mirror is not None:
if port.mirror in port_by_name:
mirror_from_port[port] = port_by_name[port.mirror]
else:
mirror_from_port[self.ports[port.mirror]] = port
for port, mirror_destination_port in list(mirror_from_port.items()):
port.mirror = mirror_destination_port.number
mirror_destination_port.mirror_destination = True
def resolve_names_in_acls():
"""Resolve config references in ACLs."""
for acl in list(self.acls.values()):
for rule_conf in acl.rules:
for attrib, attrib_value in list(rule_conf.items()):
if attrib == "actions":
if "meter" in attrib_value:
meter_name = attrib_value["meter"]
assert meter_name in self.meters
if "mirror" in attrib_value:
port_name = attrib_value["mirror"]
port_no = resolve_port_no(port_name)
# in V2 config, we might have an ACL that does
# not apply to a DP.
if port_no is not None:
attrib_value["mirror"] = port_no
port = self.ports[port_no]
port.mirror_destination = True
if "output" in attrib_value:
output_values = attrib_value["output"]
if "port" in output_values:
port_name = output_values["port"]
port_no = resolve_port_no(port_name)
if port_no is not None:
output_values["port"] = port_no
if "failover" in output_values:
failover = output_values["failover"]
resolved_ports = []
for port_name in failover["ports"]:
port_no = resolve_port_no(port_name)
if port_no is not None:
resolved_ports.append(port_no)
failover["ports"] = resolved_ports
def resolve_acls():
"""Resolve ACL references in config."""
def build_acl(acl, vid=None):
"""Check that ACL can be built from config."""
if acl.rules:
assert valve_acl.build_acl_ofmsgs(
[acl],
self.wildcard_table,
valve_of.goto_table(self.wildcard_table),
2**16,
self.meters,
acl.exact_match,
vlan_vid=vid,
)
for vlan in list(self.vlans.values()):
if vlan.acl_in:
if vlan.acl_in in self.acls:
vlan.acl_in = self.acls[vlan.acl_in]
build_acl(vlan.acl_in, vid=1)
else:
assert False, "Unconfigured vlan for %s" % self.name
for port in list(self.ports.values()):
if port.acl_in:
if port.acl_in in self.acls:
port.acl_in = self.acls[port.acl_in]
build_acl(port.acl_in)
else:
assert False, "Unconfigured acl for %s" % self.name
def resolve_vlan_names_in_routers():
"""Resolve VLAN references in routers."""
dp_routers = {}
for router_name, router in list(self.routers.items()):
vlans = []
for vlan_name in router.vlans:
vlan = resolve_vlan(vlan_name)
if vlan is not None:
vlans.append(vlan)
if len(vlans) > 1:
dp_router = copy.copy(router)
dp_router.vlans = vlans
dp_routers[router_name] = dp_router
self.routers = dp_routers
assert self.ports, "no interfaces defined for %s" % self.name
assert self.vlans, "no VLANs referenced by interfaces in %s" % self.name
port_by_name = {}
for port in list(self.ports.values()):
port_by_name[port.name] = port
dp_by_name = {}
for dp in dps:
dp_by_name[dp.name] = dp
vlan_by_name = {}
for vlan in list(self.vlans.values()):
vlan_by_name[vlan.name] = vlan
resolve_stack_dps()
resolve_mirror_destinations()
resolve_vlan_names_in_routers()
resolve_names_in_acls()
resolve_acls()
for port in list(self.ports.values()):
port.finalize()
for vlan in list(self.vlans.values()):
vlan.finalize()
for acl in list(self.acls.values()):
acl.finalize()
for router in list(self.routers.values()):
router.finalize()
self.finalize()
|
def finalize_config(self, dps):
"""Perform consistency checks after initial config parsing."""
def resolve_port_no(port_name):
"""Resolve port by name or number."""
if port_name in port_by_name:
return port_by_name[port_name].number
elif port_name in self.ports:
return port_name
return None
def resolve_vlan(vlan_name):
"""Resolve VLAN by name or VID."""
if vlan_name in vlan_by_name:
return vlan_by_name[vlan_name]
elif vlan_name in self.vlans:
return self.vlans[vlan_name]
return None
def resolve_stack_dps():
"""Resolve DP references in stacking config."""
port_stack_dp = {}
for port in self.stack_ports:
stack_dp = port.stack["dp"]
port_stack_dp[port] = dp_by_name[stack_dp]
for port, dp in list(port_stack_dp.items()):
port.stack["dp"] = dp
stack_port_name = port.stack["port"]
port.stack["port"] = dp.ports[stack_port_name]
def resolve_mirror_destinations():
"""Resolve mirror port references and destinations."""
mirror_from_port = {}
for port in list(self.ports.values()):
if port.mirror is not None:
if port.mirror in port_by_name:
mirror_from_port[port] = port_by_name[port.mirror]
else:
mirror_from_port[self.ports[port.mirror]] = port
for port, mirror_destination_port in list(mirror_from_port.items()):
port.mirror = mirror_destination_port.number
mirror_destination_port.mirror_destination = True
def resolve_names_in_acls():
"""Resolve config references in ACLs."""
for acl in list(self.acls.values()):
for rule_conf in acl.rules:
for attrib, attrib_value in list(rule_conf.items()):
if attrib == "actions":
if "meter" in attrib_value:
meter_name = attrib_value["meter"]
assert meter_name in self.meters
if "mirror" in attrib_value:
port_name = attrib_value["mirror"]
port_no = resolve_port_no(port_name)
# in V2 config, we might have an ACL that does
# not apply to a DP.
if port_no is not None:
attrib_value["mirror"] = port_no
port = self.ports[port_no]
port.mirror_destination = True
if "output" in attrib_value:
output_values = attrib_value["output"]
if "port" in output_values:
port_name = output_values["port"]
port_no = resolve_port_no(port_name)
if port_no is not None:
output_values["port"] = port_no
if "failover" in output_values:
failover = output_values["failover"]
resolved_ports = []
for port_name in failover["ports"]:
port_no = resolve_port_no(port_name)
if port_no is not None:
resolved_ports.append(port_no)
failover["ports"] = resolved_ports
def resolve_acls():
"""Resolve ACL references in config."""
def build_acl(acl, vid=None):
"""Check that ACL can be built from config."""
if acl.rules:
assert valve_acl.build_acl_ofmsgs(
[acl],
self.wildcard_table,
valve_of.goto_table(self.wildcard_table),
2**16,
self.meters,
acl.exact_match,
vlan_vid=vid,
)
for vlan in list(self.vlans.values()):
if vlan.acl_in:
vlan.acl_in = self.acls[vlan.acl_in]
build_acl(vlan.acl_in, vid=1)
for port in list(self.ports.values()):
if port.acl_in:
if port.acl_in in self.acls:
port.acl_in = self.acls[port.acl_in]
build_acl(port.acl_in)
else:
assert False, "Unconfigured acl for %s" % self.name
def resolve_vlan_names_in_routers():
"""Resolve VLAN references in routers."""
dp_routers = {}
for router_name, router in list(self.routers.items()):
vlans = []
for vlan_name in router.vlans:
vlan = resolve_vlan(vlan_name)
if vlan is not None:
vlans.append(vlan)
if len(vlans) > 1:
dp_router = copy.copy(router)
dp_router.vlans = vlans
dp_routers[router_name] = dp_router
self.routers = dp_routers
assert self.ports, "no interfaces defined for %s" % self.name
assert self.vlans, "no VLANs referenced by interfaces in %s" % self.name
port_by_name = {}
for port in list(self.ports.values()):
port_by_name[port.name] = port
dp_by_name = {}
for dp in dps:
dp_by_name[dp.name] = dp
vlan_by_name = {}
for vlan in list(self.vlans.values()):
vlan_by_name[vlan.name] = vlan
resolve_stack_dps()
resolve_mirror_destinations()
resolve_vlan_names_in_routers()
resolve_names_in_acls()
resolve_acls()
for port in list(self.ports.values()):
port.finalize()
for vlan in list(self.vlans.values()):
vlan.finalize()
for acl in list(self.acls.values()):
acl.finalize()
for router in list(self.routers.values()):
router.finalize()
self.finalize()
|
https://github.com/faucetsdn/faucet/issues/1209
|
Nov 16 02:50:28 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 146, in _load_configs
new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 50, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 165, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 133, in _dp_parser_v2
dp.finalize_config(dps)
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 509, in finalize_config
resolve_acls()
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 472, in resolve_acls
vlan.acl_in = self.acls[vlan.acl_in]
KeyError: 'office-vlan-protect'
|
KeyError
|
def resolve_acls():
"""Resolve ACL references in config."""
def build_acl(acl, vid=None):
"""Check that ACL can be built from config."""
if acl.rules:
assert valve_acl.build_acl_ofmsgs(
[acl],
self.wildcard_table,
valve_of.goto_table(self.wildcard_table),
2**16,
self.meters,
acl.exact_match,
vlan_vid=vid,
)
for vlan in list(self.vlans.values()):
if vlan.acl_in:
if vlan.acl_in in self.acls:
vlan.acl_in = self.acls[vlan.acl_in]
build_acl(vlan.acl_in, vid=1)
else:
assert False, "Unconfigured vlan for %s" % self.name
for port in list(self.ports.values()):
if port.acl_in:
if port.acl_in in self.acls:
port.acl_in = self.acls[port.acl_in]
build_acl(port.acl_in)
else:
assert False, "Unconfigured acl for %s" % self.name
|
def resolve_acls():
"""Resolve ACL references in config."""
def build_acl(acl, vid=None):
"""Check that ACL can be built from config."""
if acl.rules:
assert valve_acl.build_acl_ofmsgs(
[acl],
self.wildcard_table,
valve_of.goto_table(self.wildcard_table),
2**16,
self.meters,
acl.exact_match,
vlan_vid=vid,
)
for vlan in list(self.vlans.values()):
if vlan.acl_in:
vlan.acl_in = self.acls[vlan.acl_in]
build_acl(vlan.acl_in, vid=1)
for port in list(self.ports.values()):
if port.acl_in:
if port.acl_in in self.acls:
port.acl_in = self.acls[port.acl_in]
build_acl(port.acl_in)
else:
assert False, "Unconfigured acl for %s" % self.name
|
https://github.com/faucetsdn/faucet/issues/1209
|
Nov 16 02:50:28 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 146, in _load_configs
new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 50, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 165, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 133, in _dp_parser_v2
dp.finalize_config(dps)
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 509, in finalize_config
resolve_acls()
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 472, in resolve_acls
vlan.acl_in = self.acls[vlan.acl_in]
KeyError: 'office-vlan-protect'
|
KeyError
|
def dp_parser(config_file, logname):
logger = config_parser_util.get_logger(logname)
conf = config_parser_util.read_config(config_file, logname)
config_hashes = None
dps = None
if conf is not None:
if type(conf) is dict:
version = conf.pop("version", 2)
if version != 2:
logger.fatal("Only config version 2 is supported")
else:
logger.fatal("Config file does not form a dictionary")
return None, None
config_hashes, dps = _config_parser_v2(config_file, logname)
return config_hashes, dps
|
def dp_parser(config_file, logname):
logger = config_parser_util.get_logger(logname)
conf = config_parser_util.read_config(config_file, logname)
config_hashes = None
dps = None
if conf is not None:
version = conf.pop("version", 2)
if version != 2:
logger.fatal("Only config version 2 is supported")
config_hashes, dps = _config_parser_v2(config_file, logname)
return config_hashes, dps
|
https://github.com/faucetsdn/faucet/issues/1208
|
Nov 15 20:44:01 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 146, in _load_configs
new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 46, in dp_parser
version = conf.pop('version', 2)
AttributeError: 'str' object has no attribute 'pop'
|
AttributeError
|
def finalize_config(self, dps):
"""Perform consistency checks after initial config parsing."""
def resolve_port_no(port_name):
"""Resolve port by name or number."""
if port_name in port_by_name:
return port_by_name[port_name].number
elif port_name in self.ports:
return port_name
return None
def resolve_vlan(vlan_name):
"""Resolve VLAN by name or VID."""
if vlan_name in vlan_by_name:
return vlan_by_name[vlan_name]
elif vlan_name in self.vlans:
return self.vlans[vlan_name]
return None
def resolve_stack_dps():
"""Resolve DP references in stacking config."""
port_stack_dp = {}
for port in self.stack_ports:
stack_dp = port.stack["dp"]
port_stack_dp[port] = dp_by_name[stack_dp]
for port, dp in list(port_stack_dp.items()):
port.stack["dp"] = dp
stack_port_name = port.stack["port"]
port.stack["port"] = dp.ports[stack_port_name]
def resolve_mirror_destinations():
"""Resolve mirror port references and destinations."""
mirror_from_port = {}
for port in list(self.ports.values()):
if port.mirror is not None:
if port.mirror in port_by_name:
mirror_from_port[port] = port_by_name[port.mirror]
else:
mirror_from_port[self.ports[port.mirror]] = port
for port, mirror_destination_port in list(mirror_from_port.items()):
port.mirror = mirror_destination_port.number
mirror_destination_port.mirror_destination = True
def resolve_names_in_acls():
"""Resolve config references in ACLs."""
for acl in list(self.acls.values()):
for rule_conf in acl.rules:
for attrib, attrib_value in list(rule_conf.items()):
if attrib == "actions":
if "meter" in attrib_value:
meter_name = attrib_value["meter"]
assert meter_name in self.meters
if "mirror" in attrib_value:
port_name = attrib_value["mirror"]
port_no = resolve_port_no(port_name)
# in V2 config, we might have an ACL that does
# not apply to a DP.
if port_no is not None:
attrib_value["mirror"] = port_no
port = self.ports[port_no]
port.mirror_destination = True
if "output" in attrib_value:
output_values = attrib_value["output"]
if "port" in output_values:
port_name = output_values["port"]
port_no = resolve_port_no(port_name)
if port_no is not None:
output_values["port"] = port_no
if "failover" in output_values:
failover = output_values["failover"]
resolved_ports = []
for port_name in failover["ports"]:
port_no = resolve_port_no(port_name)
if port_no is not None:
resolved_ports.append(port_no)
failover["ports"] = resolved_ports
def resolve_acls():
"""Resolve ACL references in config."""
def build_acl(acl, vid=None):
"""Check that ACL can be built from config."""
if acl.rules:
assert valve_acl.build_acl_ofmsgs(
[acl],
self.wildcard_table,
valve_of.goto_table(self.wildcard_table),
2**16,
self.meters,
acl.exact_match,
vlan_vid=vid,
)
for vlan in list(self.vlans.values()):
if vlan.acl_in:
vlan.acl_in = self.acls[vlan.acl_in]
build_acl(vlan.acl_in, vid=1)
for port in list(self.ports.values()):
if port.acl_in:
if port.acl_in in self.acls:
port.acl_in = self.acls[port.acl_in]
build_acl(port.acl_in)
else:
assert False, "Unconfigured acl for %s" % self.name
def resolve_vlan_names_in_routers():
"""Resolve VLAN references in routers."""
dp_routers = {}
for router_name, router in list(self.routers.items()):
vlans = []
for vlan_name in router.vlans:
vlan = resolve_vlan(vlan_name)
if vlan is not None:
vlans.append(vlan)
if len(vlans) > 1:
dp_router = copy.copy(router)
dp_router.vlans = vlans
dp_routers[router_name] = dp_router
self.routers = dp_routers
assert self.ports, "no interfaces defined for %s" % self.name
assert self.vlans, "no VLANs referenced by interfaces in %s" % self.name
port_by_name = {}
for port in list(self.ports.values()):
port_by_name[port.name] = port
dp_by_name = {}
for dp in dps:
dp_by_name[dp.name] = dp
vlan_by_name = {}
for vlan in list(self.vlans.values()):
vlan_by_name[vlan.name] = vlan
resolve_stack_dps()
resolve_mirror_destinations()
resolve_vlan_names_in_routers()
resolve_names_in_acls()
resolve_acls()
for port in list(self.ports.values()):
port.finalize()
for vlan in list(self.vlans.values()):
vlan.finalize()
for acl in list(self.acls.values()):
acl.finalize()
for router in list(self.routers.values()):
router.finalize()
self.finalize()
|
def finalize_config(self, dps):
"""Perform consistency checks after initial config parsing."""
def resolve_port_no(port_name):
"""Resolve port by name or number."""
if port_name in port_by_name:
return port_by_name[port_name].number
elif port_name in self.ports:
return port_name
return None
def resolve_vlan(vlan_name):
"""Resolve VLAN by name or VID."""
if vlan_name in vlan_by_name:
return vlan_by_name[vlan_name]
elif vlan_name in self.vlans:
return self.vlans[vlan_name]
return None
def resolve_stack_dps():
"""Resolve DP references in stacking config."""
port_stack_dp = {}
for port in self.stack_ports:
stack_dp = port.stack["dp"]
port_stack_dp[port] = dp_by_name[stack_dp]
for port, dp in list(port_stack_dp.items()):
port.stack["dp"] = dp
stack_port_name = port.stack["port"]
port.stack["port"] = dp.ports[stack_port_name]
def resolve_mirror_destinations():
"""Resolve mirror port references and destinations."""
mirror_from_port = {}
for port in list(self.ports.values()):
if port.mirror is not None:
if port.mirror in port_by_name:
mirror_from_port[port] = port_by_name[port.mirror]
else:
mirror_from_port[self.ports[port.mirror]] = port
for port, mirror_destination_port in list(mirror_from_port.items()):
port.mirror = mirror_destination_port.number
mirror_destination_port.mirror_destination = True
def resolve_names_in_acls():
"""Resolve config references in ACLs."""
for acl in list(self.acls.values()):
for rule_conf in acl.rules:
for attrib, attrib_value in list(rule_conf.items()):
if attrib == "actions":
if "meter" in attrib_value:
meter_name = attrib_value["meter"]
assert meter_name in self.meters
if "mirror" in attrib_value:
port_name = attrib_value["mirror"]
port_no = resolve_port_no(port_name)
# in V2 config, we might have an ACL that does
# not apply to a DP.
if port_no is not None:
attrib_value["mirror"] = port_no
port = self.ports[port_no]
port.mirror_destination = True
if "output" in attrib_value:
output_values = attrib_value["output"]
if "port" in output_values:
port_name = output_values["port"]
port_no = resolve_port_no(port_name)
if port_no is not None:
output_values["port"] = port_no
if "failover" in output_values:
failover = output_values["failover"]
resolved_ports = []
for port_name in failover["ports"]:
port_no = resolve_port_no(port_name)
if port_no is not None:
resolved_ports.append(port_no)
failover["ports"] = resolved_ports
def resolve_acls():
"""Resolve ACL references in config."""
def build_acl(acl, vid=None):
"""Check that ACL can be built from config."""
if acl.rules:
assert valve_acl.build_acl_ofmsgs(
[acl],
self.wildcard_table,
valve_of.goto_table(self.wildcard_table),
2**16,
self.meters,
acl.exact_match,
vlan_vid=vid,
)
for vlan in list(self.vlans.values()):
if vlan.acl_in:
vlan.acl_in = self.acls[vlan.acl_in]
build_acl(vlan.acl_in, vid=1)
for port in list(self.ports.values()):
if port.acl_in:
port.acl_in = self.acls[port.acl_in]
build_acl(port.acl_in)
def resolve_vlan_names_in_routers():
"""Resolve VLAN references in routers."""
dp_routers = {}
for router_name, router in list(self.routers.items()):
vlans = []
for vlan_name in router.vlans:
vlan = resolve_vlan(vlan_name)
if vlan is not None:
vlans.append(vlan)
if len(vlans) > 1:
dp_router = copy.copy(router)
dp_router.vlans = vlans
dp_routers[router_name] = dp_router
self.routers = dp_routers
assert self.ports, "no interfaces defined for %s" % self.name
assert self.vlans, "no VLANs referenced by interfaces in %s" % self.name
port_by_name = {}
for port in list(self.ports.values()):
port_by_name[port.name] = port
dp_by_name = {}
for dp in dps:
dp_by_name[dp.name] = dp
vlan_by_name = {}
for vlan in list(self.vlans.values()):
vlan_by_name[vlan.name] = vlan
resolve_stack_dps()
resolve_mirror_destinations()
resolve_vlan_names_in_routers()
resolve_names_in_acls()
resolve_acls()
for port in list(self.ports.values()):
port.finalize()
for vlan in list(self.vlans.values()):
vlan.finalize()
for acl in list(self.acls.values()):
acl.finalize()
for router in list(self.routers.values()):
router.finalize()
self.finalize()
|
https://github.com/faucetsdn/faucet/issues/1201
|
Nov 15 01:59:34 faucet.exception ERROR Unhandled exception, killing RYU
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/faucet/valve_util.py", line 34, in __koe
func(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/faucet/faucet.py", line 146, in _load_configs
new_config_file, self.logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 50, in dp_parser
config_hashes, dps = _config_parser_v2(config_file, logname)
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 165, in _config_parser_v2
top_confs['vlans'])
File "/usr/lib/python3.6/site-packages/faucet/config_parser.py", line 133, in _dp_parser_v2
dp.finalize_config(dps)
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 508, in finalize_config
resolve_acls()
File "/usr/lib/python3.6/site-packages/faucet/dp.py", line 476, in resolve_acls
port.acl_in = self.acls[port.acl_in]
KeyError: 'access-port-protect'
|
KeyError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.